metadata (dict) | text (string, lengths 60 to 3.49M) |
---|---|
{
"source": "jnispen/PPSDA",
"score": 3
} |
#### File: code/modules/utils.py
```python
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import arviz as az
def get_color(key):
""" return color for class key"""
color_dict = {0: "blue", 1: "red", 2: "green", 3: "orange", 4: "black"}
return color_dict[key]
def get_color_mean(key):
""" return mean color for class key"""
color_dict = {0: "yellow", 1: "black", 2: "green", 3: "orange", 4: "white"}
return color_dict[key]
def plot_data(cdata, nrows=50):
""" plot the dataframe """
# header data = x-values
x_val = cdata.get_x_val()
# plot rows (== observations) in a single figure
plt.figure(figsize=(12, 6))
ax = plt.axes()
ax.xaxis.set_major_locator(plt.MaxNLocator(10))
ax.set(xlabel='Wavenumber ($cm^{-1}$)')
# used to map label codes to colors
label_codes = pd.Categorical(cdata.data[cdata.label_column]).codes
# list of class labels
clabels = cdata.get_class_labels()
for i in range(len(clabels)):
print(str(clabels[i]) + ": " + get_color(i))
for i in range(nrows):
y_val = cdata.data.values[i]
plt.plot(x_val, y_val[:cdata.non_data_columns], '-', color=get_color(label_codes[i]))
plt.savefig('data_plot.png')
def plot_mean_vs_ppc(cdata, ppc_class_lst):
""" plot data mean vs. posterior samples """
# header data = x-values
x_val = cdata.get_x_val()
plt.figure(figsize=(12, 8))
ax = plt.axes()
ax.set(xlabel='Wavenumber ($cm^{-1}$)')
# plot a sample from the posterior (for each class)
for i in range(1):
for z in range(len(ppc_class_lst)):
plt.plot(x_val, ppc_class_lst[z][i, 0, :], '-', color=get_color(z), alpha=.6)
# list of class labels
class_labels = cdata.get_class_labels()
# plot the posterior mean
for z in range(len(ppc_class_lst)):
cls_label = str(class_labels[z]) + " ppc mean"
plt.plot(x_val, ppc_class_lst[z][:, 0].mean(axis=0), '-', color=get_color(z), alpha=.6, label=cls_label)
# plot mean data for classes (raw data)
df = [ cdata.data.loc[cdata.data[cdata.label_column] == class_labels[k]]
for k in range(len(class_labels)) ]
for z in range(len(df)):
cls_label = str(class_labels[z]) + " real mean"
plt.plot(x_val, df[z].iloc[:,:cdata.non_data_columns].mean(), '--', color=get_color_mean(z),
label=cls_label, linewidth=1)
# plot 94% HPD interval
for z in range(len(ppc_class_lst)):
col = "C" + str(z+1)
az.plot_hpd(x_val, ppc_class_lst[z], smooth=False, color=col)
plt.legend(loc='best')
def plot_real_vs_ppc(cdata, ppc_class_lst, nrows=10):
""" plot real data vs. posterior samples """
# header data = x-values
x_val = cdata.get_x_val()
plt.figure(figsize=(12, 8))
ax = plt.axes()
ax.set(xlabel='Wavenumber ($cm^{-1}$)')
# plot some samples from the posterior
for i in range(5):
for z in range(len(ppc_class_lst)):
plt.plot(x_val, ppc_class_lst[z][i, 0, :], 'o-', color="gray", alpha=.3)
# list of class labels
class_labels = cdata.get_class_labels()
# plot raw data for classes
df = [ cdata.data.loc[cdata.data[cdata.label_column] == class_labels[i]].sample(frac=1)
for i in range(len(class_labels)) ]
for i in range(nrows):
for z in range(len(df)):
plt.plot(x_val, df[z].values[i,:cdata.non_data_columns], '--', color=get_color(z), linewidth=1)
def append_predictions(cdata, trace, test_data, display=True):
""" appends predicted labels to the test dataframe """
# check model predictions on test dataset
a = trace['alpha'].mean()
b = trace['beta'].mean(axis=0)
xt_n = test_data.columns[:cdata.non_data_columns]
xt_s = test_data[xt_n].values
xt_s = (xt_s - xt_s.mean(axis=0)) / xt_s.std(axis=0)
mu_t = a + (b * xt_s).sum(axis=1)
yt_p = 1 / (1 + np.exp(-mu_t))
pt_y = np.zeros(len(xt_s))
lp_t = []
class_labels = cdata.get_class_labels()
for i in range(len(xt_s)):
if yt_p[i] < 0.5:
pt_y[i] = 0
lp_t.append(class_labels[0])
else:
pt_y[i] = 1
lp_t.append(class_labels[1])
#test_data = test_data.assign(pred=pd.Series(pt_y))
test_data = test_data.assign(p_label=pd.Series(lp_t))
if display:
print(test_data.iloc[:, (cdata.non_data_columns-1):])
return test_data
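# Usage sketch (illustrative only): `trace` is assumed to be a PyMC3 trace exposing
# 'alpha' and 'beta' variables and `test_df` a hold-out DataFrame laid out like
# cdata.data; both names are placeholders, not defined in this module.
# >>> labelled = append_predictions(cdata, trace, test_df, display=False)
# >>> labelled["p_label"].value_counts()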
def append_predictions_ppc(cdata, trace, display=True):
""" appends predicted labels to the dataframe """
# check model predictions on test dataset
a = trace['alpha'].mean()
b = trace['beta'].mean(axis=0)
xt_n = cdata.data.columns[:cdata.non_data_columns]
xt_s = cdata.data[xt_n].values
xt_s = (xt_s - xt_s.mean(axis=0)) / xt_s.std(axis=0)
mu_t = a + (b * xt_s).sum(axis=1)
yt_p = 1 / (1 + np.exp(-mu_t))
pt_y = np.zeros(len(xt_s))
lp_t = []
class_labels = cdata.get_class_labels()
for i in range(len(xt_s)):
if yt_p[i] < 0.5:
pt_y[i] = 0
lp_t.append(class_labels[0])
else:
pt_y[i] = 1
lp_t.append(class_labels[1])
#cdata.data = cdata.data.assign(pred=pd.Series(pt_y))
cdata.data = cdata.data.assign(p_label=pd.Series(lp_t))
if display:
print (cdata.data.iloc[:,(cdata.non_data_columns-1):])
def get_score(data, label_column, predicted_column):
""" calculates the logreg score for a single column """
yt = pd.Categorical(data[label_column]).codes
cor = 0; err = 0
for i in range(len(yt)):
if data[label_column][i] == data[predicted_column][i]:
cor += 1
else:
err += 1
tot = len(yt)
score = f'{cor / len(yt) * 100:.1f}'
return tot, cor, err, score
def logistic_score(data, label_column, predicted_column, kfold=False):
""" calculates and prints the logistic score """
if kfold:
print('n tot cor err score (%)')
print('----------------------------')
ttot = 0; tcor = 0; terr = 0
for i in range(len(data)):
tot, cor, err, score = get_score(data[i], label_column, predicted_column)
print(str(i+1) + " " + str(tot) + " " + str(cor) + " " + str(err) + " " + score)
ttot += tot; tcor += cor; terr += err
print('----------------------------')
print(" " + str(ttot) + " " + str(tcor) + " " + str(terr) + " " + f'{tcor / ttot * 100:.1f}')
else:
tot, cor, err, score = get_score(data, label_column, predicted_column)
print("total : " + str(tot))
print("correct: " + str(cor))
print("error : " + str(err))
print("score : " + score + "%")
def logistic_score_ppc(cdata, predicted_column):
""" calculates and prints the logistic regression score """
yt = pd.Categorical(cdata.data[cdata.label_column]).codes
cor = 0; err = 0
for i in range(len(yt)):
if cdata.data[cdata.label_column][i] == cdata.data[predicted_column][i]:
cor += 1
else:
err += 1
print("total : " + str(len(yt)))
print("correct: " + str(cor))
print("error : " + str(err))
print("score : " + f'{cor / len(yt) * 100:.1f}' + "%")
def softmax_score(cdata, data, trace):
""" calculates and prints the softmax score """
yt = pd.Categorical(data[cdata.label_column]).codes
data_pred = trace['mu'].mean(0)
y_pred = [np.exp(point) / np.sum(np.exp(point), axis=0)
for point in data_pred]
cor = 0; err = 0
for i in range(len(y_pred)):
if np.argmax(y_pred[i]) == yt[i]:
cor += 1
else:
err +=1
print("total : " + str(len(y_pred)))
print("correct: " + str(cor))
print("error : " + str(err))
print("score : " + f'{cor / len(y_pred) * 100:.1f}' + "%")
def save_traces(cdata, filename, samples_per_class, ppc_class_lst):
""" saves the trace to a .csv file """
import csv
# create header row
file_header = cdata.get_x_val()
header = np.array(np.around(file_header, 3), dtype='str')
header = header.tolist()
header.append("label")
# list of class labels
class_labels = cdata.get_class_labels()
with open(filename, mode='w') as fp:
ppc_writer = csv.writer(fp, delimiter=',')
ppc_writer.writerow(header)
for i in range(samples_per_class):
for z in range(len(ppc_class_lst)):
row = np.array(ppc_class_lst[z][i, 0, :], dtype='str').tolist()
row.append(class_labels[z])
ppc_writer.writerow(row)
def standardize(x):
""" standardizes the data X, substracts the mean and normalizes the variance """
return (x - x.mean(axis=0)) / x.std(axis=0)
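# Quick check (illustrative, not part of the original module): each column of the
# standardized array has zero mean.
# >>> x = np.array([[1.0, 10.0], [3.0, 20.0], [5.0, 30.0]])
# >>> standardize(x).mean(axis=0)
# array([0., 0.])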
def train_test_split_kfold(data, nfolds):
""" splits the dataframe into n-fold sets of training and test data """
from sklearn.model_selection import KFold
# shuffle must be enabled for random_state to have an effect (recent scikit-learn
# versions raise an error otherwise)
kf = KFold(n_splits=nfolds, shuffle=True, random_state=42)
kf.get_n_splits(data)
# list of training and test dataframes
train_lst, test_lst = [], []
for train_index, test_index in kf.split(data):
# create empty dataframe
df_train = pd.DataFrame(columns=data.columns.to_list())
df_test = pd.DataFrame(columns=data.columns.to_list())
# add rows to training and test dataframes, re-index and append to list
for i, val in enumerate(train_index):
df_train.loc[data.index[val]] = data.iloc[val]
df_train.index = range(len(df_train))
train_lst.append(df_train)
for i, val in enumerate(test_index):
df_test.loc[data.index[val]] = data.iloc[val]
df_test.index = range(len(df_test))
test_lst.append(df_test)
return train_lst, test_lst
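# Usage sketch (illustrative): `df` stands for any DataFrame shaped like cdata.data.
# >>> train_folds, test_folds = train_test_split_kfold(df, nfolds=5)
# >>> len(train_folds), len(test_folds)
# (5, 5)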
``` |
{
"source": "jni/storm-cluster",
"score": 2
} |
#### File: jni/storm-cluster/stormcluster.py
```python
import os
import sys
from collections import namedtuple
import numpy as np
import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
from matplotlib.widgets import RectangleSelector
from matplotlib.backends.backend_qt5agg import (FigureCanvasQTAgg
as FigureCanvas)
from matplotlib.figure import Figure
from PyQt5 import QtCore, QtGui, QtWidgets
import pandas as pd
import numba
from sklearn.cluster import DBSCAN
from skimage import io, color
TableParams = namedtuple('TableParams',
['image_shape', 'pixel_scale', 'invert_y'])
DEFAULTPARAMS = TableParams(image_shape=(2048, 2048),
pixel_scale=10,
invert_y=True)
def find_header_line(filename):
with open(filename) as fin:
for i, line in enumerate(fin):
if line.rstrip() == '##':
return (i - 1)
return None
def read_locations_table(filename):
header_line = find_header_line(filename)
skiprows = list(range(header_line)) + [header_line + 1]
table = pd.read_csv(filename, skiprows=skiprows, delimiter='\t')
return table
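# Assumed input layout (inferred from the two functions above): a few preamble lines,
# then the column-name line, then a line containing only '##', then tab-separated data
# rows with at least X_COORD and Y_COORD columns. The preamble and the '##' line are
# skipped by read_locations_table.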
def image_coords(location_table, params=DEFAULTPARAMS):
image_shape = params.image_shape
scale = params.pixel_scale
xs, ys = location_table[['X_COORD', 'Y_COORD']].values.T
rows, cols = ys / scale, xs / scale
if params.invert_y:
rows = image_shape[0] - rows
return rows, cols
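# Usage sketch (illustrative; coordinates are divided by params.pixel_scale):
# >>> tbl = pd.DataFrame({'X_COORD': [100.0, 250.0], 'Y_COORD': [50.0, 380.0]})
# >>> image_coords(tbl)  # rows are flipped because DEFAULTPARAMS.invert_y is True
# (array([2043., 2010.]), array([10., 25.]))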
def image_coords_indices(location_table, params=DEFAULTPARAMS):
rows, cols = image_coords(location_table, params)
rrows, rcols = np.round(rows).astype(int), np.round(cols).astype(int)
filter_rows = (0 <= rrows) & (rrows < params.image_shape[0])
filter_cols = (0 <= rcols) & (rcols < params.image_shape[1])
filter_all = filter_cols & filter_rows
return rrows[filter_all], rcols[filter_all], filter_all
@numba.njit
def _fill_image(image, rows, cols):
for i, j in zip(rows, cols):
image[i, j] += 1
def _stretchlim(image, bottom=0.001, top=None, in_place=True):
"""Stretch the image so new image range corresponds to given quantiles.
Parameters
----------
image : array, shape (M, N, [...,] P)
The input image.
bottom : float, optional
The lower quantile.
top : float, optional
The upper quantile. If not provided, it is set to 1 - `bottom`.
in_place : bool, optional
If True, modify the input image in-place (only possible if
it is a float image).
Returns
-------
out : np.ndarray of float
The stretched image.
"""
if in_place and np.issubdtype(image.dtype, np.floating):
out = image
else:
out = np.empty(image.shape, np.float32)
out[:] = image
if top is None:
top = 1 - bottom
q0, q1 = np.percentile(image, [100*bottom, 100*top])
out -= q0
if q1 > q0:
out /= q1 - q0
out = np.clip(out, 0, 1, out=out)
return out
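# Usage sketch (illustrative): stretch a small random count image so it spans [0, 1].
# >>> rng = np.random.default_rng(0)
# >>> img = rng.poisson(5, size=(64, 64)).astype(np.float32)
# >>> out = _stretchlim(img, bottom=0.01)
# >>> 0.0 <= out.min() <= out.max() <= 1.0
# True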
def image_from_table(location_table, params=DEFAULTPARAMS, stretch=0.001):
rows, cols, _ = image_coords_indices(location_table)
image = np.zeros(params.image_shape, dtype=float)
_fill_image(image, rows, cols)
image = _stretchlim(image, stretch)
return image
def select_roi(image, rois=None, ax=None, axim=None, qtapp=None):
"""Return a label image based on polygon selections made with the mouse.
Parameters
----------
image : (M, N[, 3]) array
Grayscale or RGB image.
rois : list, optional
If given, append ROIs to this existing list. Otherwise a new list
object will be created.
ax : matplotlib Axes, optional
The Axes on which to do the plotting.
axim : matplotlib AxesImage, optional
An existing AxesImage on which to show the image.
qtapp : QtApplication
The main Qt application for ROI selection. If given, the ROIs will
be inserted at the right location for the image index.
Returns
-------
rois : list of tuple of ints
The selected regions, in the form
[[(row_start, row_end), (col_start, col_end)]].
Notes
-----
Click and drag with the left mouse button to select a rectangular region.
If the selector becomes inactive, press 'a' to re-activate it.
Examples
--------
>>> rois = select_roi(image)  # doctest: +SKIP
>>> print(rois)  # doctest: +SKIP
"""
if image.ndim not in (2, 3):
raise ValueError('Only 2D grayscale or RGB images are supported.')
if ax is None and axim is None:
fig, ax = plt.subplots()
if axim is None:
ax.clear()
axim = ax.imshow(image, cmap="magma")
ax.set_axis_off()
else:
axim.set_array(image)
rois = rois or []
def toggle_selector(event):
if event.key in ['A', 'a'] and not toggle_selector.RS.active:
toggle_selector.RS.set_active(True)
def onselect(eclick, erelease):
starts = round(eclick.ydata), round(eclick.xdata)
ends = round(erelease.ydata), round(erelease.xdata)
slices = tuple((int(s), int(e)) for s, e in zip(starts, ends))
if qtapp is None:
rois.append(slices)
else:
index = qtapp.image_index
rois[index] = slices
qtapp.rectangle_selector.set_active(False)
qtapp.select_next_image()
qtapp.rectangle_selector.set_active(True)
selector = RectangleSelector(ax, onselect, useblit=True)
if qtapp is None:
# Ensure that the widget remains active by creating a reference to it.
# There's probably a better place to put that reference but this will do
# for now. (From the matplotlib RectangleSelector gallery example.)
toggle_selector.RS = selector
else:
qtapp.rectangle_selector = selector
ax.figure.canvas.mpl_connect('key_press_event', toggle_selector)
selector.set_active(True)
return rois
def _in_range(arr, tup):
low, high = tup
return (low <= arr) & (arr < high)
def cluster(coordinates, radius, core_size):
scan = DBSCAN(eps=radius, min_samples=core_size).fit(coordinates)
return scan
def _centroids(labels, sizes, coords):
clustered = labels > -1
labels, coords = labels[clustered], coords[clustered]
grouping = np.argsort(labels)
sums = np.add.reduceat(coords[grouping], np.cumsum(sizes)[:-1])
means = sums / sizes[1:, np.newaxis]
return means
def analyse_clustering(scan):
labels = scan.labels_
unclustered = labels == -1
num_unclustered = np.sum(unclustered)
cluster_sizes = np.bincount(labels[~unclustered])
counts, bin_edges = np.histogram(cluster_sizes, bins='auto')
histogram = np.convolve(bin_edges, [0.5, 0.5], 'valid'), counts
print(f'There are {len(cluster_sizes)} clusters, and {num_unclustered} '
f'outlier points, out of {labels.size}. The largest cluster size is '
f'{np.max(cluster_sizes)} and the median is '
f'{np.median(cluster_sizes)}')
return labels, cluster_sizes, histogram
def bbox_diameter(coords):
return np.hypot(*np.max(coords, axis=0) - np.min(coords, axis=0))
def summarise_clustering(scan, coords):
labels, sizes, hist = analyse_clustering(scan)
diameters = labeled_comprehension(coords, scan.labels_,
bbox_diameter)[1:, np.newaxis]
cluster_centroids = _centroids(labels, sizes, coords)
cluster_sq_centroids = _centroids(labels, sizes, coords ** 2)
centroid_vars = np.sqrt(cluster_sq_centroids - cluster_centroids ** 2)
column_names = ['centroid row', 'centroid column',
'std dev row', 'std dev column',
'detections', 'cluster id', 'diameter']
idxs = np.arange(1, np.max(labels) + 1)
columns = (cluster_centroids, centroid_vars,
sizes[1:, np.newaxis], idxs[:, np.newaxis], diameters)
print([c.shape for c in columns])
data = np.concatenate(columns, axis=1)
df = pd.DataFrame(data=data, columns=column_names)
return df
def labeled_comprehension(features, labels, function, *args,
extra_args=(), extra_kwargs={}, **kwargs):
"""Like ndi.labeled_comprehension, but features can be higher D."""
nonneg = labels >= 0
features = features[nonneg]
labels = labels[nonneg]
sorter = np.argsort(labels)
labels = labels[sorter]
features = features[sorter]
boundaries = np.concatenate([[0], np.flatnonzero(np.diff(labels)) + 1])
args = args + extra_args
kwargs = {**kwargs, **extra_kwargs}
if hasattr(function, 'reduceat'):
result = function.reduceat(features, boundaries, *args, **kwargs)
else:
boundaries = np.concatenate((boundaries, [labels.size]))
test_index = min(3, features.shape[0])
test_value = function(features[:test_index], *args, **kwargs)
result_shape = (boundaries.size - 1,) + np.shape(test_value)
result = np.empty(result_shape)
for i, (start, stop) in enumerate(zip(boundaries, boundaries[1:])):
result[i] = function(features[start:stop], *args, **kwargs)
return result
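# Worked example (illustrative): per-cluster bounding-box diameters, mirroring the
# call in summarise_clustering above; the noise label -1 is dropped.
# >>> coords = np.array([[0., 0.], [1., 1.], [5., 5.], [6., 7.], [9., 9.]])
# >>> labels = np.array([0, 0, 1, 1, -1])
# >>> labeled_comprehension(coords, labels, bbox_diameter)
# array([1.41421356, 2.23606798])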
def cluster_circle_plot(image, coordinates, data, roi, params=DEFAULTPARAMS,
stretch=0.001, size_threshold=None, max_size=None,
ax=None):
colors = ['gray', 'C9', 'C1', 'C2']
ax = ax or plt.subplots()[1]
image = _stretchlim(image, stretch)
ax.imshow(image, cmap='gray')
ax.set_xlim(*roi[1])
ax.set_ylim(*roi[0])
if size_threshold is None:
size_threshold = 0
if max_size is None:
max_size = np.inf
for y, x, *_, d in data.values:
color_index = int(np.digitize(d, [0, size_threshold, max_size]))
color = colors[color_index]
alpha = 0.9 - (color_index > 2) * 0.5
ax.add_artist(plt.Circle((x, y), d/2, fill=False, color=color,
linewidth=0.5, alpha=alpha))
return ax
def image_from_clustering(scan, coordinates, roi, params=DEFAULTPARAMS,
stretch=0.001, size_threshold=None, max_size=None,
use_diameter=True):
unclustered = scan.labels_ == -1
if size_threshold is not None:
temp_labels = np.copy(scan.labels_)
temp_labels[unclustered] = 0
if use_diameter:
cluster_sizes = labeled_comprehension(coordinates, scan.labels_,
bbox_diameter)
else:
cluster_sizes = np.bincount(scan.labels_[~unclustered])
large = ((cluster_sizes > size_threshold)[temp_labels] &
(~unclustered))
if max_size is not None:
too_large = (((cluster_sizes > max_size)[temp_labels] &
(~unclustered)))
else:
too_large = np.zeros_like(large)
large &= ~too_large
small = (~large) & (~too_large) & (~unclustered)
else:
large = ~unclustered
small = np.zeros_like(large)
too_large = small
rows, cols = np.round(coordinates).astype(int).T
green = np.zeros(params.image_shape, dtype=float)
if np.any(large):
_fill_image(green, rows[large], cols[large])
green = _stretchlim(green, stretch)
green[np.isnan(green)] = 0
red = np.zeros_like(green, dtype=float)
if np.any(small):
_fill_image(red, rows[small], cols[small])
red = _stretchlim(red, stretch)
red[np.isnan(red)] = 0
blue = np.zeros_like(red)
if np.any(unclustered):
_fill_image(blue, rows[unclustered], cols[unclustered])
blue = _stretchlim(blue, stretch)
blue[np.isnan(blue)] = 0
# green += blue # make cyan
gray = np.zeros_like(red)
if np.any(too_large):
_fill_image(gray, rows[too_large], cols[too_large])
gray = _stretchlim(gray, stretch)
gray[np.isnan(gray)] = 0
image = np.stack((red, green, blue), axis=-1)
image[gray > 0] = (gray[gray > 0] * 0.5)[..., np.newaxis]
return image[slice(*roi[0]), slice(*roi[1])]
def parameter_scan_image(coordinates,
radii=(0.1, 0.2, 0.4, 0.8, 1.6, 3.2, 6.4),
core_sizes=(3, 6, 12, 24)):
num_clustered = np.zeros((len(radii), len(core_sizes)))
largest_cluster = np.zeros_like(num_clustered)
for i, r in enumerate(radii):
for j, c in enumerate(core_sizes):
scan = cluster(coordinates, r, c)
clustered = scan.labels_ != -1
num_clustered[i, j] = np.sum(clustered)
if np.any(clustered):
largest_cluster[i, j] = \
np.max(np.bincount(scan.labels_[clustered]))
return num_clustered, largest_cluster
class ImageCanvas(FigureCanvas):
def __init__(self, parent=None, width=5, height=5, dpi=100,
image_size=(2048, 2048), image_cmap='magma'):
fig = Figure(figsize=(width, height), dpi=dpi)
self.axes = fig.add_axes([0, 0, 1, 1])
self.image_size = image_size
self.axim = self.axes.imshow(np.broadcast_to(0., image_size),
cmap=image_cmap, vmin=0, vmax=1)
self.axes.set_axis_off()
fig.set_facecolor('black')
FigureCanvas.__init__(self, fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self, QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
def set_image(self, image):
self.axim.set_array(image)
self.draw_idle()
def new_image(self, image):
if image.ndim == 3:
self.axim = self.axes.imshow(image)
else:
self.axim = self.axes.imshow(image, cmap='magma')
self.axes.set_axis_off()
self.draw_idle()
class ApplicationWindow(QtWidgets.QMainWindow):
def __init__(self):
super().__init__()
self.image_index = 0
self.files = []
QtWidgets.QMainWindow.__init__(self)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.setWindowTitle('Select ROIs')
self.main_widget = QtWidgets.QWidget(self)
layout = QtWidgets.QVBoxLayout(self.main_widget)
self.image_canvas = ImageCanvas(self.main_widget)
layout.addWidget(self.image_canvas)
select_files = QtWidgets.QPushButton(text='Select Files')
select_files.clicked.connect(self.open_files)
previous_image = QtWidgets.QPushButton(text='◀')
previous_image.clicked.connect(self.select_previous_image)
next_image = QtWidgets.QPushButton(text='▶')
next_image.clicked.connect(self.select_next_image)
nav_layout = QtWidgets.QHBoxLayout(self.main_widget)
nav = QtWidgets.QGroupBox(self.main_widget)
nav_layout.addWidget(previous_image)
nav_layout.addWidget(next_image)
nav.setLayout(nav_layout)
buttons = QtWidgets.QHBoxLayout(self.main_widget)
buttons.addWidget(select_files, alignment=QtCore.Qt.AlignLeft)
buttons.addWidget(nav, alignment=QtCore.Qt.AlignRight)
layout.addLayout(buttons)
self.image_canvas.draw()
self.rois = []
self._mode = 'roi'
self.main_widget.setFocus()
self.setCentralWidget(self.main_widget)
@QtCore.pyqtSlot()
def open_files(self):
files, resp = QtWidgets.QFileDialog.getOpenFileNames(
self.main_widget, filter='*LDCTracked.txt')
self.open_file_list(files)
def open_file_list(self, files):
self.files = files
print(self.files)
self.set_image_index(0)
self.rois = [(slice(None), slice(None))] * len(self.files)
image = self.image_canvas.axim.get_array()
axes = self.image_canvas.axes
axim = self.image_canvas.axim
select_roi(image, self.rois, ax=axes, axim=axim, qtapp=self)
def set_image_index(self, i=0):
if i == len(self.files):
if self._mode == 'roi':
print(f'rois: {self.rois}')
self.clustering_mode()
i = 0
else: # _mode == 'clustering'
self.save_clustering_results()
return
if len(self.files) > 0:
i = np.clip(i, 0, len(self.files) - 1)
self.image_index = i
if self._mode == 'roi':
file = self.files[i]
table = read_locations_table(file)
image = image_from_table(table)
self.image_canvas.set_image(image)
else:
if i == len(self.images):
scan, coords = self.make_cluster_image(i)
df = summarise_clustering(scan, coords)
df['filename'] = os.path.basename(self.files[i])
self.cluster_data.append(df)
image = self.images[i]
self.image_canvas.new_image(image)
@QtCore.pyqtSlot()
def select_previous_image(self):
self.set_image_index(self.image_index - 1)
@QtCore.pyqtSlot()
def select_next_image(self):
self.set_image_index(self.image_index + 1)
def clustering_mode(self):
self._mode = 'clustering'
self.images = []
self.cluster_data = []
self.set_image_index(0)
def make_cluster_image(self, i, radius=1.6, core_size=6):
cluster_size_threshold = 10
cluster_max_size = 30
file = self.files[i]
roi = self.rois[i]
table = read_locations_table(file)
coords = np.stack(image_coords(table), axis=1)
coords_roi = coords[_in_range(coords[:, 0], roi[0]) &
_in_range(coords[:, 1], roi[1])]
scan = cluster(coords_roi, radius=radius, core_size=core_size)
image = image_from_clustering(scan, coords_roi, roi,
size_threshold=cluster_size_threshold,
max_size=cluster_max_size)
if i >= len(self.images):
self.images.append(image)
else:
self.images[i] = image
return scan, coords_roi
def save_clustering_results(self):
input_dir = os.path.dirname(self.files[0])
name = QtWidgets.QFileDialog.getSaveFileName(self.main_widget,
'Save clusters to file',
directory=input_dir,
filter='*.xlsx')[0]
print(name)
cluster_table = pd.concat(self.cluster_data)
print(name)
cluster_table.to_excel(name)
if __name__ == '__main__':
qApp = QtWidgets.QApplication(sys.argv[:1])
aw = ApplicationWindow()
aw.show()
if len(sys.argv) > 1:
aw.open_file_list(sys.argv[1:])
sys.exit(qApp.exec_())
``` |
{
"source": "jni/useful-histories",
"score": 3
} |
#### File: jni/useful-histories/climate-change-model-test.py
```python
# imports needed by this history snippet
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import optimize

T = pd.read_csv('bundoora-temp.csv')
T.head()
T.rename(columns={'Mean maximum temperature (°C)':'Temperature'},
inplace=True)
T['Date'] = T['Year'] + (T['Month'] - 0.5) / 12
dates = T['Date']
temps = T['Temperature']
def predicted_temperature(parameters, time):
t0, w, A, omega, phi = parameters
return t0 + w*time + A * np.sin(omega*time + phi)
def prediction_error(parameters, time, true_temperature):
return true_temperature - predicted_temperature(parameters, time)
def predicted_temperature_null(parameters, time):
t0, w, A, omega, phi = parameters
return t0 + A * np.sin(omega*time + phi)
t0 = np.mean(temps)
w = 0
A = np.max(temps) - np.min(temps)
omega = np.pi * 2
phi = np.pi / 2
params0 = [t0, w, A, omega, phi]
params, success = optimize.leastsq(prediction_error, params0,
args=(dates, temps))
from scipy import optimize
params, success = optimize.leastsq(prediction_error, params0,
args=(dates, temps))
success
def prediction_error_null(parameters, time, true_temperature):
return true_temperature - predicted_temperature_null(parameters, time)
paramsnull, successnull = optimize.leastsq(prediction_error_null,
params0,
args=(dates, temps))
successnull
from scipy import stats
predicted = predicted_temperature(params, dates)
predicted_null = predicted_temperature_null(params, dates)
chisq1 = (temps - predicted)**2 / predicted
chisq0 = (temps - predicted_null)**2 / predicted_null
chisqdiff = chisq1 - chisq0
chisqdiff
chisq1 = np.sum((temps - predicted)**2 / predicted)
chisq0 = np.sum((temps - predicted_null)**2 / predicted_null)
chisqdiff = chisq1 - chisq0
chisqdiff
chisq_dof = len(temps)
chisq_dof
chisq1
chisq0
plt.plot(dates, predicted_null)
import statsmodels
from statsmodels import stats
np.mean((temps - predicted)**2)
plt.plot(dates, predicted)
params
plt.plot(dates, temps)
def predicted_temperature_null(parameters, time):
t0, A, omega, phi = parameters
return t0 + A * np.sin(omega*time + phi)
def prediction_error_null(parameters, time, true_temperature):
return true_temperature - predicted_temperature_null(parameters, time)
paramsnull, successnull = optimize.leastsq(prediction_error_null,
[params0[0]] + params0[2:],
args=(dates, temps))
successnull
predicted_null = predicted_temperature_null(paramsnull, dates)
plt.plot(dates, temps)
plt.plot(dates, predicted_null)
np.mean((temps - predicted_null)**2)
np.mean((temps - predicted)**2)
ssdiff = 401 * (_48 - _49)
ssdiff
from scipy import stats
stats.gamma
stats.chi2
get_ipython().magic('pinfo stats.chisquare')
get_ipython().set_next_input('c2 = stats.chi2');get_ipython().magic('pinfo stats.chi2')
c2 = stats.chi2.sf(ssdiff, 401)
c2
c2 = stats.chi2.sf(ssdiff, 4)
c2
```
#### File: jni/useful-histories/db.py
```python
datadir = '/Users/nuneziglesiasj/Data/myo/'
import os
import scandir
def all_images(path):
for p, _, files in scandir.walk(path):
for f in files:
if f.endswith('.tif'):
yield os.path.join(p, f)
image_fns = list(all_images(datadir))
import subprocess as sp
sp.Popen(["mongod", "--dbpath", "/Users/nuneziglesiasj/mongodb"])
from husc.screens import myofusion as myo
filenames = [l.rstrip() for l in open('db/filenames.txt', 'r').readlines()]
myo.populate_db('db/all_annots2.csv', filenames)
from pymongo import MongoClient
collection = MongoClient()["myofusion"]["wells"]
for image_fn in [image_fns[450]]:
key = myo.filename2coord(image_fn)
mongo_id = myo.key2mongo(key)
doc = collection.update({'_id': mongo_id},
{'$set': {u'local_filename': image_fn}})
```
#### File: jni/useful-histories/husc-imgrid.py
```python
from skimage import io
import os
fns = os.listdir
fns = os.listdir('.')
get_ipython().set_next_input(u'fns = filter');get_ipython().magic(u'pinfo filter')
fns = filter(lambda x: x.endswith('illum-95-51.tif'), fns)
len(fns)
fns = sorted(fns)
images = []
for r, g, b in [fns[i:i+3] for i in range(48)]:
images.append(np.dstack([io.imread(j) for j in [r, g, b]]))
imshow(images[0])
images = []
for g, b, r in [fns[i:i+3] for i in range(48)]:
images.append(np.dstack([io.imread(j) for j in [r, g, b]]))
len(images)
imshow(images[0])
sample_names = list(it.product('CDE', ['%02i'%j for j in range(3, 23)]))
import itertools as it
sample_names = list(it.product('CDE', ['%02i'%j for j in range(3, 23)]))
sample_names = [i+j for i, j in sample_names]
sample_names = sample_names[1:-11]
def imgrid(names, grid_shape):
figure()
idxs = [sample_names.find(name) for name in names]
sh = grid_shape
for i, idx in enumerate(idxs):
subplot(sh + (i+1,))
imshow(images[idx], interpolation='nearest')
imgrid(['D14', 'D16', 'D17', 'D15', 'D18'], (2, 3))
def imgrid(names, grid_shape):
figure()
idxs = [sample_names.index(name) for name in names]
sh = grid_shape
for i, idx in enumerate(idxs):
subplot(sh + (i+1,))
imshow(images[idx], interpolation='nearest')
imgrid(['D14', 'D16', 'D17', 'D15', 'D18'], (2, 3))
get_ipython().magic(u'pinfo subplot')
def imgrid(names, grid_shape):
figure()
idxs = [sample_names.index(name) for name in names]
sh = grid_shape
for i, idx in enumerate(idxs):
subplot(*(sh + (i+1,)))
imshow(images[idx], interpolation='nearest')
imgrid(['D14', 'D16', 'D17', 'D15', 'D18'], (2, 3))
fns[:6]
imshow(images[1])
images = []
for g, b, r in [fns[3*i:3*i+3] for i in range(48)]:
images.append(np.dstack([io.imread(j) for j in [r, g, b]]))
len(images)
imgrid(['D14', 'D16', 'D17', 'D15', 'D18'], (2, 3))
images = []
for r, b, g in [fns[3*i:3*i+3] for i in range(48)]:
images.append(np.dstack([io.imread(j) for j in [r, g, b]]))
imgrid(['D14', 'D16', 'D17', 'D15', 'D18'], (2, 3))
for im, sn in zip(images, sample_names):
io.imsave(sn+'.illum-95-51.rbg.tif', im)
imgrid(['C16', 'C17', 'D13'], (1, 3))
imgrid(['C09', 'C18', 'C22'], (1, 3))
imgrid(['C11', 'C12', 'C21', 'E07'], (2, 2))
imgrid(['C04', 'C08'], (1, 2))
imgrid(['D06', 'D22'], (1, 2))
imgrid(['D08', 'D20'], (1, 2))
imgrid(['E08', 'C14', 'E03'], (1, 3))
imgrid(['E08', 'C14', 'E03', 'E09', 'C05'], (2, 3))
imgrid(['C16', 'C17', 'D13', 'C09', 'C18', 'C22'], (2, 3))
imgrid(['C11', 'C12', 'C21', 'E07'], (2, 2))
```
#### File: jni/useful-histories/mask-white-artifacts.py
```python
from matplotlib import pyplot as plt, cm
imshow = plt.imshow
clf = plt.clf
figure = plt.figure
colorbar = plt.colorbar
hist = plt.hist
import numpy as np
from IPython import get_ipython
import mahotas as mh
import glob
import os
fns = os.listdir('.')
import functools as ft
fnw = ft.partial(glob.fnmatch.fnmatch, pat='*s1*crop*.tif')
fns_nw = filter(fnw, fns)
len(fns_nw)
len(fns_nw) / 3
def intensity(im): return im.sum(axis=-1)
def whiteness(im): return 1 / (np.var(im, axis=-1) + 1)
def artifact(im): return whiteness(im) * intensity(im)
ims_nw = map(mh.imread, fns_nw)
rgbs_nw = [np.dstack(ims_nw[3*i:3*i+3]) for i in range(48)]
imshow(rgbs_nw[0])
imshow(rgbs_nw[1])
imshow(rgbs_nw[5])
rgbs_nw[0].dtype
ims_nw[0].dtype
ims_nw[0].min()
ims_nw[0].max()
ims_nw[1].max()
ims_nw[1].min()
ims_nw[1].dtype
ims_nw[2].dtype
ims_nw[2].min()
ims_nw[2].max()
def cshow(im):
# assume image data is in the final two dimensions
if im.ndim > 2:
mid = im.shape[0] // 2
cshow(im[mid])
else:
plt.imshow(im, cmap=cm.cubehelix, interpolation='nearest')
cshow(ims_nw[0])
cshow(ims_nw[1])
cshow(ims_nw[2])
imshow(rgbs_nw[0])
ims_nw = map(lambda x: (x.astype(float) / x.max() * 255).astype(np.uint8), ims_nw)
ims_nw = map(mh.imread, fns_nw)
rbs_nw = map(lambda x: (x.astype(float) / x.max() * 255).astype(np.uint8), rgbs_nw)
imshow(rgbs_nw[0])
rgbs_nw[0].dtype
imshow(rbs_nw[0])
rbs_nw = map(lambda x: (x.astype(float) / x.max(axis=0).max(axis=0)[None, None, :] * 255).astype(np.uint8), rgbs_nw)
imshow(rbs_nw[0])
cshow(ims_nw[0])
cshow(ims_nw[1])
cshow(ims_nw[2])
rbs_nw = map(lambda x: (x.astype(float) / x.max(axis=0).max(axis=0)[None, None, :] * 255).astype(np.uint8), rgbs_nw)
clf()
imshow(rbs_nw[4])
imshow(rbs_nw[5])
arts_nw = map(artifact, rbs_nw)
cshow(arts_nw[0])
cshow(arts_nw[1])
cshow(arts_nw[2])
cshow(arts_nw[3])
cshow(arts_nw[4])
cshow(arts_nw[5])
max_arts = np.array(map(np.max, arts_nw)).reshape((6, 8))
cshow(max_arts)
figure()
cshow(arts_nw[24])
cshow(arts_nw[25])
cshow(arts_nw[26])
cshow(arts_nw[27])
arts_nw = map(artifact, rbs_nw)
max_arts = np.array(map(np.max, arts_nw)).reshape((6, 8))
cshow(max_arts)
figure()
figure(1)
colorbar()
figure(2)
cshow(arts_nw[5])
cshow(arts_nw[37])
fse = ft.partial(glob.fnmatch.fnmatch, pat='*s4*crop*.tif')
fns_se = filter(fse, fns)
ims_se = map(mh.imread, fns_se)
rgbs_se = [np.dstack([ims_se[j] for j in [3*i+2, 3*i, 3*i+1]]) for i in range(48)]
rbs_se = map(lambda x: (x.astype(float) / x.max(axis=0).max(axis=0)[None, None, :] * 255).astype(np.uint8), rgbs_nw)
arts_se = map(artifact, rbs_se)
max_arts_se = np.array(map(np.max, arts_se)).reshape((6, 8))
figure(1); cshow(max_arts_se)
clf()
cshow(max_arts_se)
rbs_se = map(lambda x: (x.astype(float) / x.max(axis=0).max(axis=0)[None, None, :] * 255).astype(np.uint8), rgbs_se)
arts_se = map(artifact, rbs_se)
max_arts_se = np.array(map(np.max, arts_se)).reshape((6, 8))
cshow(max_arts_se)
figure(); cshow(arts_se[1])
figure(); imshow(rbs_se[1])
figure(); cshow(arts_se[1])
colorbar()
from husc.preprocess import stretchlim
figure(); cshow(stretchlim(arts_se[1]))
def artifact2(im): return np.prod(im, axis=-1)
arts_se2 = map(artifact2, rbs_se)
figure(); cshow(arts_se[1])
figure(); cshow(stretchlim(arts_se[1]))
figure(); cshow(arts_se2[1])
colorbar()
figure(); cshow(stretchlim(arts_se2[1]))
max_arts_se2 = np.array(map(np.max, arts_se2)).reshape((6, 8))
figure(); cshow(max_arts_se2)
colorbar()
figure(); hist(max_arts_se2.ravel())
```
#### File: jni/useful-histories/mz-data.py
```python
import numpy as np
import napari
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvas
from matplotlib.figure import Figure
# load the data
cube = np.load('datacube.npy')
peaks = np.load('peaklist.npy')
mz = peaks[0]
thresh = np.load('hsr_thresholds.npy')
cubet = np.transpose(cube, (2, 0, 1))
cubet_norm = cubet / thresh[:, np.newaxis, np.newaxis]
# create qt application context
with napari.gui_qt():
# create the viewer
viewer = napari.view_image(cubet_norm)
# create the intensity plot
with plt.style.context('dark_background'):
mz_canvas = FigureCanvas(Figure(figsize=(5, 3)))
mz_axes = mz_canvas.figure.subplots()
intensities = cubet[:, 0, 0]
intensity_line = mz_axes.plot(mz, intensities)[0] # returns line list
position_line = mz_axes.axvline(x=mz[0], c='C1')
position_line.set_zorder(-1) # keep the spectra in front
minval, maxval = np.min(cube), np.max(cube)
range_ = maxval - minval
centre = (maxval + minval) / 2
min_y = centre - 1.05 * range_ / 2
max_y = centre + 1.05 * range_ / 2
mz_axes.set_ylim(min_y, max_y)
mz_axes.set_xlabel('m/z')
mz_axes.set_ylabel('intensity')
title = mz_axes.set_title(f'coord=(0, 0); m/z={mz[0]:.3f}')
mz_canvas.figure.tight_layout()
# add the plot to the viewer
viewer.window.add_dock_widget(mz_canvas)
# create a function to update the plot
def update_plot(axis_event):
axis = axis_event.axis
if axis != 0:
return
slice_num = axis_event.value
x = mz[slice_num]
position_line.set_data([x, x], [0, 1])
coord_str, mz_str = title.get_text().split(';')
title.set_text(coord_str + '; ' + f'm/z={x:.3f}')
mz_canvas.draw_idle()
# connect the function to the dims axis
viewer.dims.events.axis.connect(update_plot)
# grab the image layer
layer = viewer.layers[0]
# add a click callback to the layer to update the spectrum being viewed
@layer.mouse_drag_callbacks.append
def update_intensity(layer, event):
xs, ys = intensity_line.get_data()
coords_full = tuple(np.round(layer.coordinates).astype(int))
if all(coords_full[i] in range(cubet.shape[i])
for i in range(cubet.ndim)):
coords = coords_full[1:] # rows, columns
new_ys = cube[coords]
intensity_line.set_data(xs, new_ys)
coord_str, mz_str = title.get_text().split(';')
title.set_text(str(coords) + '; ' + mz_str)
mz_canvas.draw_idle()
```
#### File: jni/useful-histories/skeleton_demo.py
```python
import os
os.chdir('/Users/jni/Dropbox/data1/malaria/adam-oli-schizont-30sec')
import imageio as iio
im = iio.imread('Shchizont4_UninfRBC10_02.tif', format='fei')
scale = im.meta['Scan']['PixelHeight'] * 1e9 # convert m to nm
sigma = 5/scale # nm / (nm/pixel) ==> pixel
import matplotlib.pyplot as plt
plt.imshow(im, cmap='gray')
from skan import pre
from skimage import morphology
binary = pre.threshold(im, sigma=sigma, radius=round(10*sigma))
skeleton_image = morphology.skeletonize(binary)
plt.imshow(skeleton_image)
import numpy as np
from skimage import feature
shape = feature.shape_index(im, sigma=5)
plt.imshow(shape, cmap='RdBu')
shape_skeleton = skeleton_image * shape
from skan import summarise
data = summarise(shape_skeleton)
data.head()
from skan import draw
draw.overlay_skeleton_2d(im, shape_skeleton, dilate=1)
def filtered(values):
return np.digitize(values, [0.125, 0.625])
data['filtered'] = filtered(data['mean pixel value'])
draw.overlay_euclidean_skeleton_2d(im, data,
skeleton_color_source='filtered')
from skan import Skeleton
skeleton = Skeleton(shape_skeleton, source_image=im)
# second time is much faster thanks to Numba JIT
skeleton = Skeleton(shape_skeleton, source_image=im)
def filtered2(skeleton):
values = skeleton.path_means()
return filtered(values)
ax, cvals = draw.overlay_skeleton_2d_class(skeleton,
skeleton_color_source=filtered2)
``` |
{
"source": "jnjaby/DISCNet",
"score": 2
} |
#### File: datasets/data_scripts/post_process.py
```python
import os
from pdb import Pdb, set_trace
import pdb
import sys
import argparse
import numpy as np
import cv2
import matplotlib.pyplot as plt
from tqdm import tqdm
sys.path.append(os.path.abspath(os.getcwd()+'/../..'))
from utils.process_utils import (imread, imwrite, ccm_info_get, cc_img,
simple_to_linear, linear_to_gamma, WB, contrast)
def post_processing(data_path, ref_path, save_path, color_correct=True,
rgb_scaling=True, contrast_enhance=True):
"""
Post-process network output images: color correction, gamma correction,
white balance against the camera JPEG, and optional contrast enhancement.
Args:
data_path (str) : Path to data to be processed
ref_path (str) : Path to root directory that contains reference files
save_path (str) : Path to save post-processing images
color_correct (bool): Whether to correct by Color Correction Matrix from camera
rgb_scaling (bool): Whether to copy color from camera output (JPEG)
contrast_enhance (bool):Whether to apply contrast enhancement algorithm
"""
jpg_dir = os.path.join(ref_path, 'jpg_ZTE/')
CCM_dir = os.path.join(ref_path, 'CCM_txt/')
os.makedirs(save_path, exist_ok=True)
img_list = sorted([x.replace('.npy', '') for x in os.listdir(data_path) \
if x.endswith('.npy')])
# import pdb; pdb.set_trace()
assert len(img_list) != 0, (f'No npy files found in {data_path}.')
for img_path in tqdm(img_list):
# Load data
jpg_img = imread(os.path.join(jpg_dir, img_path + '.jpg')) / 255.
ccm = ccm_info_get(os.path.join(CCM_dir, img_path + '_ccm.txt'))
# The output of the network is assumed to be in the simple tone-mapped domain.
simple_img = np.load(os.path.join(data_path, img_path + '.npy'))
# Convert to linear domain before post processing.
post_img = simple_to_linear(simple_img)
if color_correct:
post_img = cc_img(post_img, ccm)
# Gamma correction
post_img = linear_to_gamma(post_img, linear_max=6) # gamma domain [0,1]
if rgb_scaling:
# The reference jpg images are gamma-corrected, requiring post images
# in the same domain.
post_img = WB(post_img, jpg_img)
if contrast_enhance:
post_img = contrast(post_img, limit=1.0)
save_img = np.clip(post_img, a_min=0, a_max=1)
imwrite(os.path.join(save_path, img_path + '.png'), save_img)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', help='Specify path to dataset to be processed',
type=str, default='.')
parser.add_argument('--ref_path', help='Specify path to root directory that contains reference \
files, mainly including camera output and ccm files.',
type=str, default='datasets/real_data')
parser.add_argument('--save_path', help='Specify path to save post-processing images',
type=str, default='results/post_images')
parser.add_argument('--color_correct', help='Whether to correct by Color Correction Matrix \
from camera',
type=bool, default=True)
parser.add_argument('--rgb_scaling', help='Whether to copy color from camera output (JPEG)',
type=bool, default=True)
parser.add_argument('--contrast_enhance', help='Whether to apply contrast enhancement',
type=bool, default=True)
args = parser.parse_args()
post_processing(data_path=args.data_path, ref_path=args.ref_path, \
save_path=args.save_path, color_correct=args.color_correct, \
rgb_scaling=args.rgb_scaling, contrast_enhance=args.contrast_enhance)
```
#### File: data_scripts/utils/process_utils.py
```python
import numpy as np
import cv2
from os import path as osp
from tqdm import tqdm
import shutil
import sys
def imwrite(path, img):
img = (img[:, :, [2,1,0]] * 255.0).round().astype(np.uint8)
cv2.imwrite(path, img)
def imread(path):
img = cv2.imread(path)
return img[:, :, [2, 1, 0]]
def tmap(x):
'''
Tone mapping algorithm. Referred to as the simple tone-mapped domain.
'''
return x / (x + 0.25)
def ccm_info_get(ccm_txt):
with open(ccm_txt) as fi:
for line in fi:
(key, val) = line.split(':')
val_list = val.split()
ccm = [np.float32(v) for v in val_list]
ccm = np.array(ccm)
ccm = ccm.reshape((3, 3))
return ccm
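# Assumed layout of a *_ccm.txt file (inferred from the parser above): a single
# "key: v11 v12 ... v33" line whose nine values are reshaped into a 3x3 matrix, e.g.
#   ccm: 1.49 -0.32 -0.17 -0.21 1.38 -0.17 -0.02 -0.36 1.38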
def ccmProcess_rgb(img, ccm):
'''
Input images are in RGB domain.
'''
new_img = img.copy()
new_img[:,:,0] = ccm[0,0] * img[:,:,0] + ccm[0,1]* img[:,:,1] + \
ccm[0,2] * img[:,:,2]
new_img[:,:,1] = ccm[1,0] * img[:,:,0] + ccm[1,1]* img[:,:,1] + \
ccm[1,2] * img[:,:,2]
new_img[:,:,2] = ccm[2,0] * img[:,:,0] + ccm[2,1]* img[:,:,1] + \
ccm[2,2] * img[:,:,2]
return new_img
def cc_img(img, ccm):
'''
Color correct an image given corresponding matrix.
Assume images in linear domain.
'''
# clip to fit ZTE sensor
img = np.clip(img, 0, 16.0)
img_cc = ccmProcess_rgb(img, ccm)
img_cc = np.clip(img_cc, 0, 16)
return img_cc
def WB(img, ref):
'''
Simple white balance algorithm to copy color from reference image.
Assume both images range [0, 1].
'''
balanced_img = np.zeros_like(img, dtype=np.float32)
for c in range(3):
balanced_img[:, :, c] = img[:, :, c] / img[:, :, c].sum() * ref[:, :, c].sum()
balanced_img = np.clip(balanced_img, 0, 1)
return balanced_img
def simple_to_linear(img, linear_max=500):
'''
From simple tone-mapped domain to linear domain.
'''
img = np.clip(img, 0, tmap(linear_max))
img = img / (4 * (1-img))
return img
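# Consistency check (illustrative): simple_to_linear() inverts tmap() for values
# inside the default clipping range [0, 500].
# >>> x = np.array([0.0, 1.0, 16.0, 499.0])
# >>> np.allclose(simple_to_linear(tmap(x)), x)
# True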
def linear_to_gamma(img, linear_max=12):
A = 1 / linear_max**(1/2.8)
img = np.clip(img, 0, linear_max)
img = A*(img**(1/2.8))
return img
def contrast(img, limit=1.0):
'''
Apply contrast enhancement. Tune argument "limit" to adjust.
'''
img = (img[:, :, [2,1,0]] * 255.0).round().astype(np.uint8)
clahe = cv2.createCLAHE(clipLimit=limit, tileGridSize=(8,8))
lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB) # convert from BGR to LAB color space
l, a, b = cv2.split(lab) # split on 3 different channels
l2 = clahe.apply(l) # apply CLAHE to the L-channel
lab = cv2.merge((l2,a,b)) # merge channels
img2 = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR) # convert from LAB to BGR
return img2[:, :, [2,1,0]] / 255.0
``` |
{
"source": "jnjcc/pymatch2",
"score": 2
} |
#### File: pymatch2/test/benchmark.py
```python
import timeit
import numpy as np
import pandas as pd
from pymatch2.MatcherPlus import MatcherPlus
class MatcherCmp:
def __init__(self, col_identity = "identity", col_treatment = "treatment", col_matchid = "match_id",
col_scores = "scores"):
self.col_identity_ = col_identity
self.col_treatment_ = col_treatment
self.col_matchid_ = col_matchid
self.col_scores_ = col_scores
def _merged(self, matched):
reduced = matched[[self.col_matchid_, self.col_identity_, self.col_treatment_, self.col_scores_]]
mask = (reduced[self.col_treatment_] == True)
treated = reduced[mask]
control = reduced[~mask]
merged = pd.merge(treated, control, how = "inner", on = self.col_matchid_, suffixes = ("_x", "_y"))
col_diff = "diff"
merged[col_diff] = abs(merged[self.col_scores_ + "_x"] - merged[self.col_scores_ + "_y"])
merged[self.col_identity_] = merged[self.col_identity_ + "_x"]
return merged[[self.col_identity_, col_diff]], col_diff
def _compare(self, lhs, rhs):
assert lhs.shape == rhs.shape
assert lhs.shape[0] > 0
lmerged, col_diff = self._merged(lhs)
rmerged, _ = self._merged(rhs)
merged = pd.merge(lmerged, rmerged, how = "inner", on = self.col_identity_, suffixes = ("_x", "_y"))
col_did = "did"
merged[col_did] = merged[col_diff + "_x"] - merged[col_diff + "_y"]
return merged, col_did
def less_than(self, lhs, rhs):
merged, col_did = self._compare(lhs, rhs)
return np.all(merged[col_did] < 0)
def lt_pct(self, lhs, rhs):
merged, col_did = self._compare(lhs, rhs)
return np.mean(merged[col_did] < 0), merged.shape
def less_eql(self, lhs, rhs):
merged, col_did = self._compare(lhs, rhs)
return np.all(merged[col_did] <= 0)
def equal(self, lhs, rhs):
merged, col_did = self._compare(lhs, rhs)
return np.all(merged[col_did] == 0)
class MatcherTest:
def __init__(self, ntest = 10000, nctrl = 100000):
self.ntest_ = ntest
self.nctrl_ = nctrl
self.npopu_ = ntest + nctrl
self.col_treatment_ = "treatment"
col_scores = "scores"
self.df_ = pd.DataFrame({
self.col_treatment_: [0] * self.npopu_,
col_scores: np.random.random(self.npopu_)
})
itest = np.random.choice(self.npopu_, ntest, replace = False)
self.df_.loc[itest, self.col_treatment_] = 1
self.col_identity_ = "identity"
self.df_[self.col_identity_] = self.df_.index.values
test_mask = (self.df_[self.col_treatment_] == 1)
self.matcher_ = MatcherPlus(self.df_[test_mask], self.df_[~test_mask], self.col_treatment_)
col_matchid = "match_id"
self.compare_ = MatcherCmp(self.col_identity_, self.col_treatment_, col_matchid, col_scores)
def timed_match(self, method = "nnm", nmatches = 1, threshold = 0.001):
self.matcher_.data = self.df_.copy()
start = timeit.default_timer()
if method == "nnm":
self.matcher_.match_nnm(nmatches = nmatches)
elif method == "bs":
self.matcher_.match_bs(nmatches = nmatches)
else:
self.matcher_.match(method = method, nmatches = nmatches, threshold = threshold)
end = timeit.default_timer()
return end - start, self.matcher_.matched_data
def assert_lt(self, nmatches = 1, threshold = 0.001):
""" nnm match less than random match """
rnd_time, rnd_matched = self.timed_match("random", nmatches = nmatches, threshold = threshold)
nnm_time, nnm_matched = self.timed_match("nnm", nmatches = nmatches, threshold = threshold)
assert self.compare_.less_than(nnm_matched, rnd_matched)
## assert nnm_time <= rnd_time
print(self.ntest_, self.nctrl_, rnd_time, "-", nnm_time, "-")
def assert_le(self, nmatches = 1, threshold = 0.001):
""" nnm match less than or equal to random match """
rnd_time, rnd_matched = self.timed_match("random", nmatches = nmatches, threshold = threshold)
nnm_time, nnm_matched = self.timed_match("nnm", nmatches = nmatches, threshold = threshold)
assert self.compare_.less_eql(nnm_matched, rnd_matched)
## assert nnm_time <= rnd_time
bs_time, bs_matched = self.timed_match("bs", nmatches = nmatches, threshold = threshold)
assert self.compare_.less_eql(bs_matched, rnd_matched)
print(self.ntest_, self.nctrl_, rnd_time, "-", nnm_time, bs_time)
def assert_eq(self, nmatches = 1, threshold = 0.001):
""" nnm match equal to min match """
min_time, min_matched = self.timed_match("min", nmatches = nmatches, threshold = threshold)
nnm_time, nnm_matched = self.timed_match("nnm", nmatches = nmatches, threshold = threshold)
assert self.compare_.equal(nnm_matched, min_matched)
# assert nnm_time <= min_time
print(self.ntest_, self.nctrl_, "-", min_time, nnm_time, "-")
def assert_match(self, nmatches = 1, threshold = 0.001):
rnd_time, rnd_matched = self.timed_match("random", nmatches = nmatches, threshold = threshold)
nnm_time, nnm_matched = self.timed_match("nnm", nmatches = nmatches, threshold = threshold)
assert self.compare_.less_eql(nnm_matched, rnd_matched)
min_time, min_matched = self.timed_match("min", nmatches = nmatches, threshold = threshold)
assert self.compare_.equal(nnm_matched, min_matched)
bs_time, bs_matched = self.timed_match("bs", nmatches = nmatches, threshold = threshold)
assert self.compare_.equal(nnm_matched, bs_matched)
print(self.ntest_, self.nctrl_, rnd_time, min_time, nnm_time, bs_time)
if __name__ == "__main__":
print("# test", "# control", "random", "min", "nnm", "bs", flush = True)
for ntest, nctrl in [(10000, 200000), (15000, 200000), (20000, 200000)]:
test = MatcherTest(ntest, nctrl)
test.assert_match()
for ntest, nctrl in [(50000, 900000), (100000, 1000000)]:
test = MatcherTest(ntest, nctrl)
test.assert_le()
``` |
{
"source": "JNjinnan/firstblog",
"score": 2
} |
#### File: firstblog/firstblog/views.py
```python
from django.shortcuts import render
from gallery.models import Gallery
def home(request):
gallerys = Gallery.objects
return render(request,'home.html', {'gallerys': gallerys})
``` |
{
"source": "JnkDog/i-rheo-web",
"score": 3
} |
#### File: algorithm/mot/pai.py
```python
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
def pai_test():
'''
The only input is the file;
the output is at the gamma position.
'''
df_news = pd.read_table('data.txt',sep=' ',header = None)
pai = []
pai = 1 - df_news[1]
df_news[1] = df_news[1].apply(lambda x: 1-x)
print(df_news[1])
print(pai)
plt.plot(df_news[0], df_news[1], '-o', lw=3, color='royalblue',label='$G^{II}$')
plt.xscale('log')
plt.xlabel('t (sec)')
plt.ylabel('Π(t)')
plt.show()
# pai_test()
def pai_processing(df):
df["pai"] = df[1].apply(lambda x: 1-x)
return df
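# Illustrative check: pai_processing() adds a "pai" column equal to 1 - A(t), where
# column 1 holds the raw A(t) values (the two-column frame below is hypothetical).
# >>> df = pd.DataFrame({0: [0.1, 1.0, 10.0], 1: [0.75, 0.5, 0.25]})
# >>> pai_processing(df)["pai"].tolist()
# [0.25, 0.5, 0.75]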
```
#### File: i-rheo-web/algorithm/read_data.py
```python
import pandas as pd
import base64
import io
# generate dataframe
def generate_df(content):
decoded_content = parse_contents(content)
# read seprate includes space and tab (delim_whitespace=True)
df = pd.read_table(io.StringIO(decoded_content.decode("utf-8")), header=None, delim_whitespace=True)
return df
def parse_contents(content):
content = content.split(",")[-1]
decoded_content = base64.b64decode(content)
return decoded_content
def generate_df_from_local(path):
df = pd.read_table(path, header=None, delim_whitespace=True)
return df
def convert_lists_to_df(data):
x = data.get("x")
y = data.get("y")
if data.get("z"):
z = data.get("z")
return pd.DataFrame(list(zip(x, y, z)))
df = pd.DataFrame(list(zip(x, y)))
return df
# replace the dict's value
def replace_dict_value(dict, replacement, replacement_keys):
idx = 0
for key in dict:
dict[key] = replacement[idx] if key in replacement_keys else dict[key]
idx = idx + 1 if key in replacement_keys else idx
return dict
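# Usage sketch (illustrative): only the listed keys are overwritten, in dict order.
# >>> d = {"a": 1, "b": 2, "c": 3}
# >>> replace_dict_value(d, [20, 30], ["b", "c"])
# {'a': 1, 'b': 20, 'c': 30}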
```
#### File: i-rheo-web/algorithm/saving_process.py
```python
import pandas as pd
import numpy as np
i = complex(0, 1)
"""
Combine the real_list and imag_list to complex_ndarray
"""
def combine_as_complex(real, imaginary):
real_nd = np.asarray(real)
imaginary_nd = np.asarray(imaginary)
# combine as complex with the real and imag
complex = np.array(real_nd, dtype=np.complex128)
complex.imag = imaginary_nd
return complex.tolist()
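# Usage sketch (illustrative):
# >>> combine_as_complex([1.0, 2.0], [0.5, -0.5])
# [(1+0.5j), (2-0.5j)]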
"""
Formating saving data as six decimal place
"""
def six_decimal_saving(data):
for key, item in data.items():
data[key] = np.around(item, decimals=6)
return data
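# Usage sketch (illustrative): each value is rounded to six decimal places in place.
# >>> out = six_decimal_saving({"x": [0.123456789, 1.0]})
# >>> out["x"]  # rounded to [0.123457, 1.0]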
```
#### File: apps/mot/mot_app.py
```python
import dash
from dash.dependencies import Input, Output, State, ClientsideFunction
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.exceptions import PreventUpdate
import plotly.express as px
import pandas as pd
from enum import Enum, unique
from app import app
# import components and its generation
from components.upload.upload import upload_component_generate
from components.download.download import download_component_generate
from components.oversampling.oversampling import mot_oversampling_generate
from components.input.parameter import stiffness_radius_generate
from components.tab.tabs import mot_tabs_generate
from components.radio.radio import MOTRadioitems
from components.loglinearswitch.axisSwitch import vertical_axis_swith
# import algorithm
from algorithm.mot.mot_At_oversampling import mot_oversampling
from algorithm.read_data import generate_df, generate_df_from_local, convert_lists_to_df
from algorithm.mot.mot import fast_mot_procressing
from algorithm.mot.mot_two_function import mot_integrated_processing, chenged_G_start_to_g
from algorithm.saving_process import combine_as_complex, six_decimal_saving
# Using your own app name. Can't be same.
prefix_app_name = "MOT"
# Function type: A(t) or ∏(t)
@unique
class FUNTION_TYPE(Enum):
AT = 0
PAI = 1
# Selection options
@unique
class DOWNLOAD_OPTIONS(Enum):
OVERSAMPLED_RAW_DATA = 0
FT_RAW_DATA = 1
FT_OVERSAMPLED_DATA = 2
BOUNDARY_COMPONENTS_ID = [
"MOT-g_0",
"MOT-g_inf",
]
STIFFNESS_RADIUS_COMPONENTS_ID = [
"MOT-kt",
"MOT-at"
]
DEFAULT_G_0_AT = 1
DEFAULT_G_0_PAIT = 0
Layout = dbc.Row([
dbc.Col([
html.H5("Support .txt"),
html.Div([
upload_component_generate("MOT-upload"),
dcc.Store(id="MOT-raw-data-store", storage_type="session"),
dcc.Store(id="MOT-oversampling-data-store", storage_type="session"),
dcc.Store(id="MOT-ft-data-store", storage_type="session"),
dcc.Loading(dcc.Store(id="MOT-oversampled-ft-data-store",
storage_type="session"),
id="MOT-full-screen-mask",
fullscreen=True),
], className="btn-group me-2"),
html.Div([dbc.Button("Load Example data", id="MOT-load-example",
color="primary", style={"margin": "5px"})],
className="btn-group me-2"),
MOTRadioitems,
html.Div(id="MOT-upload-message"),
# This is just for show the loading message
html.Div(id="MOT-loading-message"),
html.Hr(),
mot_oversampling_generate(prefix_app_name),
# html.Hr(),
# stiffness_radius_generate(prefix_app_name),
html.Hr(),
download_component_generate(prefix_app_name)
], width=3),
dbc.Col([
mot_tabs_generate(prefix_app_name),
vertical_axis_swith(prefix_app_name),
],
width=True),
# Loading
])
# ================ Upload callback ========================
"""
Trigger when the experiental data(raw data) uploaded
"""
@app.callback(
Output("MOT-raw-data-store", "data"),
Output("MOT-ft-data-store", "data"),
# Output("upload-message", "children"),
Output("MOT-loading-message", "children"),
Input("MOT-upload", "contents"),
Input("MOT-load-example", "n_clicks"),
Input("MOT-refresh-btn", "n_clicks"),
# TODO need change later
# Boundary conditions
State("MOT-g_0", "value"),
State("MOT-g_inf", "value"),
# Trap stiffness and radius
State("MOT-kt", "value"),
State("MOT-at", "value"),
State("MOT-oversampling-Nf", "value"),
# Decide the A(t) or ∏(t)
State("MOT-radioitems-input", "value"),
State("MOT-upload", "filename"),
# Get the modified time to avoid operating at initial load
# State("MOT-ft-data-store", "modified_timestamp"),
State("MOT-raw-data-store", "data"),
State("MOT-ft-data-store", "data"),
prevent_initial_call=True
)
def store_raw_data(content, example_click, refresh_click,
g_0, g_inf, kt, at,
N_f, func_flag, file_name,
prev_raw_data, prev_ft_data):
raw_data = {}
ft_raw_data = {}
is_disable_FT = disable_FT(g_0, g_inf, kt, at, N_f, prev_ft_data)
"""
default kt: 1e-6, at: 1e-6
kt is the value of trap stiffness
at is the value of radius
"""
kt = 1e-6 if kt is None else kt
at = 1e-6 if at is None else at
"""
Boundary conditions (aka. Oversampling parameters)
default g_0 : 1 as A(t), 0 as ∏(t)
g_inf : 0
"""
if g_0 is None:
is_double_FT = True
# Whatever the value is, the important value is the is_double_FT
g_0 = 1
else:
is_double_FT = False
g_0 = float(g_0)
g_inf = 0 if g_inf is None else float(g_inf)
N_f = 100 if N_f is None else int(N_f)
trigger_id = get_trigger_id()
# mot_integrated_processing could take a lot of time depending on your PC
df = pd.DataFrame()
# get the example data from local
if trigger_id == "MOT-load-example":
path = "example_data/mot/data.txt"
df = generate_df_from_local(path)
raw_data, ft_raw_data = \
upload_local_data_workflow(df, g_0, g_inf, kt, at,
N_f, file_name, is_double_FT)
# upload the data from users
elif trigger_id == "MOT-upload":
df = generate_df(content)
raw_data, ft_raw_data = \
upload_local_data_workflow(df, g_0, g_inf, kt, at,
N_f, file_name, is_double_FT)
else:
data_null_prevents_updated(prev_ft_data)
if is_disable_FT:
raw_data = prev_raw_data
ft_raw_data = update_ft_data(prev_ft_data, kt, at, func_flag)
else:
df = convert_lists_to_df(prev_raw_data)
file_name = prev_raw_data["filename"]
raw_data, ft_raw_data = \
upload_local_data_workflow(df, g_0, g_inf, kt, at,
N_f, file_name,is_double_FT)
# return data, upload_messge, ft_data
return raw_data, ft_raw_data, ""
"""
Triggered when the experimental data (raw data) has already been uploaded
and the oversampling button is clicked with the oversampling ntimes.
"""
@app.callback(
Output("MOT-oversampling-data-store", "data"),
Output("MOT-oversampled-ft-data-store", "data"),
Input("MOT-oversampling-btn", "n_clicks"),
Input("MOT-refresh-btn", "n_clicks"),
# TODO need change later
# Boundary conditions
State("MOT-g_0", "value"),
State("MOT-g_inf", "value"),
# Trap stiffness and radius
State("MOT-kt", "value"),
State("MOT-at", "value"),
# Decide the A(t) or ∏(t)
State("MOT-radioitems-input", "value"),
State("MOT-raw-data-store", "data"),
# Get ntimes
State("MOT-oversampling-input", "value"),
State("MOT-oversampling-Nf", "value"),
    # Get the modified time to avoid operating at initial load
# State("MOT-ft-data-store", "modified_timestamp"),
State("MOT-oversampling-data-store", "data"),
State("MOT-oversampled-ft-data-store", "data"),
prevent_initial_call=True
)
def store_oversampling_data(oversampling_click, refresh_click,
g_0, g_inf, kt, at, func_flag, raw_data,
ntimes, N_f, prev_oversampled_data,
prev_ft_oversampled_data):
if raw_data is None and ntimes is None:
raise PreventUpdate
is_disable_FT = disable_FT(g_0, g_inf, kt, at, N_f, prev_ft_oversampled_data)
trigger_id = get_trigger_id()
    # coerce to int to guard against float or string values
ntimes = 10 if ntimes is None else int(ntimes)
N_f = 100 if N_f is None else int(N_f)
func_flag = int(func_flag)
"""
Boundary conditions (aka. Oversampling parameters)
default g_0 : 1 as A(t), 0 as ∏(t)
g_inf : 0
"""
if g_0 is None:
is_double_FT = True
        # The actual value does not matter; only is_double_FT is used
g_0 = 1
else:
is_double_FT = False
g_0 = float(g_0)
g_inf = 0 if g_inf is None else float(g_inf)
"""
default kt: 1e-6, at: 1e-6
kt is the value of trap stiffness
at is the value of radius
"""
kt = 1e-6 if kt is None else kt
at = 1e-6 if at is None else at
oversampled_data = {}
ft_oversampled_data = {}
if trigger_id == "MOT-oversampling-btn":
df = convert_lists_to_df(raw_data)
oversampled_data, ft_oversampled_data = \
oversampling_button_workflow(df, kt, at, g_0, g_inf, N_f, ntimes, is_double_FT)
else:
if is_disable_FT:
oversampled_data = prev_oversampled_data
ft_oversampled_data = update_ft_data(prev_ft_oversampled_data, kt, at, func_flag)
else:
df = convert_lists_to_df(raw_data)
            oversampled_data, ft_oversampled_data = \
oversampling_button_workflow(df, kt, at, g_0, g_inf, N_f, ntimes, is_double_FT)
return oversampled_data, ft_oversampled_data
# ================ Download callback ========================
@app.callback(
Output("MOT-download-text", "data"),
Output("MOT-download-message", "children"),
Input("MOT-download-btn", "n_clicks"),
# State("MOT-downlaod-selection", "value"),
State("MOT-raw-data-store","data"),
# State("MOT-oversampling-data-store", "data"),
# State("MOT-ft-data-store", "data"),
State("MOT-oversampled-ft-data-store", "data"),
State("MOT-radioitems-input", "value"),
prevent_initial_call=True,
)
def download(n_clicks, raw_data, ft_oversampled_data, func_flag):
if ft_oversampled_data is None:
return None, "No data available!"
file_suffix_name = raw_data.get("filename")
saved_file_name = "FT_oversampled_" + file_suffix_name
# TODO At or Pai t's download data convert...
    # Convert the option from string to int
if int(func_flag) == FUNTION_TYPE.AT.value:
saved_omega = ft_oversampled_data["at_x"]
complex_list = combine_as_complex(
ft_oversampled_data["at_y1"],
ft_oversampled_data["at_y2"]
)
else:
saved_omega = ft_oversampled_data["pai_x"]
complex_list = combine_as_complex(
ft_oversampled_data["pai_y1"],
ft_oversampled_data["pai_y2"]
)
saved_data_df = pd.DataFrame(six_decimal_saving({
"x": saved_omega,
"y": complex_list
}))
return (dcc.send_data_frame(saved_data_df.to_csv, saved_file_name,
header=False, index=False,
sep='\t', encoding='utf-8'),
"Download OK !")
# =================== Clientside callback ===================
"""
Triggered when the experimental data (raw data) or the oversampling data changes.
"""
app.clientside_callback(
ClientsideFunction(
namespace="clientsideMot",
function_name="tabChangeFigRender"
),
Output("MOT-A(t)-display", "figure"),
Input("MOT-raw-data-store", "data"),
Input("MOT-oversampling-data-store", "data"),
Input("MOT-oversampling-render-switch", "value"),
Input("MOT-vertical-axis-switch", "value"),
Input("MOT-radioitems-input", "value"),
    # Because the dcc.Store's storage_type is session,
    # the figure cannot be rendered if prevent_initial_call=True
# prevent_initial_call=True
)
"""
Triggered when the FT data has been generated.
"""
app.clientside_callback(
ClientsideFunction(
namespace="clientsideMot",
function_name="tabChangeMotRender"
),
Output("MOT-Mot-display", "figure"),
Input("MOT-ft-data-store", "data"),
Input("MOT-oversampled-ft-data-store", "data"),
Input("MOT-oversampling-render-switch", "value"),
Input("MOT-radioitems-input", "value"),
Input("MOT-vertical-axis-switch", "value"),
# prevent_initial_call=True
)
app.clientside_callback(
ClientsideFunction(
namespace="clientsideMessageRec",
function_name="uploadMessage"
),
Output("MOT-upload-message", "children"),
Input("MOT-raw-data-store", "data"),
# prevent_initial_call=True
)
# =================== Normal function ===================
# Get the triggered component id
def get_trigger_id():
ctx = dash.callback_context
trigger_id = ctx.triggered[0]['prop_id'].split('.')[0]
return trigger_id
# Prevent updates when the data store is None
def data_null_prevents_updated(data):
if data is None:
raise PreventUpdate
# Returns True when g_0, g_inf and N_f are all unset and previous FT data
# exists, i.e. the Fourier transform does not need to be recomputed
def disable_FT(g_0, g_inf, kt, at, N_f, prev_data):
    return not any([g_0, g_inf, N_f]) and prev_data is not None
# Replace the values of the given keys in the dict with the corresponding replacement entries
def replace_dict_value(dict, replacement, replacement_keys):
idx = 0
for key in dict:
dict[key] = replacement[idx] if key in replacement_keys else dict[key]
idx = idx + 1 if key in replacement_keys else idx
return dict
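# For example (illustrative, not part of the original app):
#   replace_dict_value({"a": 1, "b": 2, "c": 3}, [10, 30], ["a", "c"])
#   returns {"a": 10, "b": 2, "c": 30}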
# Extract data
def extract_from_prev_data(original_data, func_flag):
if func_flag == FUNTION_TYPE.AT.value:
data = {
"x": original_data["at_x"],
"ft_real": original_data["at_ft_real"],
"ft_imag": original_data["at_ft_imag"],
}
else:
data = {
"x": original_data["pai_x"],
"ft_real": original_data["pai_ft_real"],
"ft_imag": original_data["pai_ft_imag"],
}
return data
# Update the FT data according to func_flag
def update_ft_data(ft_data, kt, at, func_flag):
data = extract_from_prev_data(ft_data, func_flag)
if func_flag == FUNTION_TYPE.AT.value:
_, _, replace_g_p, replace_g_pp = chenged_G_start_to_g(kt, at, data)
replacement_keys = ["at_y1", "at_y2"]
replacement_elements = [replace_g_p, replace_g_pp]
else:
replace_g_p, replace_g_pp, _, _ = chenged_G_start_to_g(kt, at, data)
replacement_keys = ["pai_y1", "pai_y2"]
replacement_elements = [replace_g_p, replace_g_pp]
updated_data = replace_dict_value(ft_data, replacement_elements, replacement_keys)
return updated_data
# only the upload button and example button trigger this
def upload_local_data_workflow(df, g_0, g_inf, kt, at,
N_f, file_name, is_double_FT):
    # save file_name and the number of lines so the upload message can be restored when switching apps
raw_data = {
"x": df[0],
"y": df[1],
"filename": file_name,
"lines": len(df)
}
if is_double_FT:
# At
omega_at, _, _, a_t_g_p, a_t_g_pp, at_ft_real, at_ft_imag = \
mot_integrated_processing(df, kt, at, DEFAULT_G_0_AT, g_inf, N_f, False)
# Pait
omega_pait, pai_g_p, pai_g_pp, _, _, pait_ft_real, pait_ft_imag = \
mot_integrated_processing(df, kt, at, DEFAULT_G_0_PAIT, g_inf, N_f, False)
ft_raw_data = {
"at_x": omega_at,
"pai_x": omega_pait,
"pai_y1": pai_g_p,
"pai_y2": pai_g_pp,
"pai_ft_real": pait_ft_real,
"pai_ft_imag": pait_ft_imag,
"at_y1" : a_t_g_p,
"at_y2" : a_t_g_pp,
"at_ft_real": at_ft_real,
"at_ft_imag": at_ft_imag
}
else:
omega, pai_g_p, pai_g_pp, a_t_g_p, a_t_g_pp, ft_real, ft_imag = \
mot_integrated_processing(df, kt, at, g_0, g_inf, N_f, False)
ft_raw_data = {
"at_x": omega,
"pai_x": omega,
"pai_y1": pai_g_p,
"pai_y2": pai_g_pp,
"pai_ft_real": ft_real,
"pai_ft_imag": ft_imag,
"at_y1" : a_t_g_p,
"at_y2" : a_t_g_pp,
"at_ft_real": ft_real,
"at_ft_imag": ft_imag
}
return raw_data, ft_raw_data
def oversampling_button_workflow(df, kt, at, g_0, g_inf, N_f, ntimes, is_double_FT):
x, y = mot_oversampling(df, ntimes)
oversampled_data = {
"x": x,
"y": y,
}
if is_double_FT:
# At
omega_at, _, _, a_t_g_p, a_t_g_pp, at_ft_real, at_ft_imag = \
mot_integrated_processing(df, kt, at, DEFAULT_G_0_AT, g_inf, N_f, True, ntimes)
# Pait
omega_pait, pai_g_p, pai_g_pp, _, _, pait_ft_real, pait_ft_imag = \
mot_integrated_processing(df, kt, at, DEFAULT_G_0_PAIT, g_inf, N_f, True, ntimes)
ft_oversampled_data = {
"at_x": omega_at,
"pai_x": omega_pait,
"pai_y1": pai_g_p,
"pai_y2": pai_g_pp,
"pai_ft_real": pait_ft_real,
"pai_ft_imag": pait_ft_imag,
"at_y1" : a_t_g_p,
"at_y2" : a_t_g_pp,
"at_ft_real": at_ft_real,
"at_ft_imag": at_ft_imag
}
else:
omega, pai_g_p, pai_g_pp, a_t_g_p, a_t_g_pp, ft_real, ft_imag = \
mot_integrated_processing(df, kt, at, g_0, g_inf, N_f, True, ntimes)
ft_oversampled_data = {
"at_x": omega,
"pai_x": omega,
"pai_y1": pai_g_p,
"pai_y2": pai_g_pp,
"pai_ft_real": ft_real,
"pai_ft_imag": ft_imag,
"at_y1" : a_t_g_p,
"at_y2" : a_t_g_pp,
"at_ft_real": ft_real,
"at_ft_imag": ft_imag
}
return oversampled_data, ft_oversampled_data
```
#### File: components/oversampling/switch.py
```python
import dash_html_components as html
import dash_core_components as dcc
import dash_bootstrap_components as dbc
SWITCH_ID_SUFFIX = "-oversampling-render-switch"
Switch = dbc.FormGroup([
dbc.Checklist(options=[
{"label" : "oversampling render", "value" : True}
],
# value=[False],
id="oversampling-render-switch",
switch=True,
style={"fontSize" : "20px"}
)])
def switch_component_generate(prefix_app_name):
switch_id = prefix_app_name + SWITCH_ID_SUFFIX
return dbc.FormGroup([
dbc.Checklist(
options=[
{"label": "oversampling render", "value": True}
],
# value=[False],
id=switch_id,
switch=True,
style={"fontSize": "20px"}
)])
def FT_rendering_switch_generate(prefix_app_name):
switch_id = prefix_app_name + "-time-derivative"
Switch = dbc.FormGroup(
dbc.Checklist(
options=[
{"label": "time derivative", "value": True}
],
id=switch_id,
switch=True,
style={"fontSize": "20px"}),
id="time-derivate-form",
className="mt-1")
return Switch
``` |
{
"source": "JNKielmann/cotect",
"score": 2
} |
#### File: cotect-endpoints/cotect_endpoints/db_handler.py
```python
import logging
from neo4j import GraphDatabase, Session
from neo4j.exceptions import ServiceUnavailable
from cotect_endpoints.utils import id_utils
from cotect_endpoints.schema import (
CaseContact,
CasePlace,
CaseReport,
CaseSymptom,
User,
)
def create_unique_constraint(tx, entity_type: str, property_name: str):
constraint_name = (entity_type + "_" + property_name).strip().lower()
constraint_stmt = (
"CREATE CONSTRAINT "
+ constraint_name
+ " ON (n:"
+ entity_type
+ ") ASSERT n."
+ property_name
+ " IS UNIQUE"
)
tx.run(constraint_stmt)
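# For example (illustrative), create_unique_constraint(tx, "Place", "place_id")
# issues the Cypher statement:
#   CREATE CONSTRAINT place_place_id ON (n:Place) ASSERT n.place_id IS UNIQUE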
def init_graph(tx):
    # TODO: Check that no constraints are set yet
create_unique_constraint(tx, "Place", "place_id")
create_unique_constraint(tx, "User", "user_id")
create_unique_constraint(tx, "Symptom", "symptom_id")
def delete_user(tx, user_id: str):
delete_user_stmt = "MATCH (user:User { user_id: $user_id}) DETACH DELETE user;"
tx.run(delete_user_stmt, user_id=user_id)
def add_user(tx, user: User, report: CaseReport):
# Create or update a user
user_stmt = (
"MERGE (user:User { user_id: $user_id})\n"
"ON CREATE SET user.created = timestamp()\n"
)
set_user_props = ""
if report.age:
set_user_props += " user.age = $age,"
if report.gender:
set_user_props += " user.gender = $gender,"
if report.covid_test:
set_user_props += " user.covid_test = $covid_test,"
if report.covid_contact:
set_user_props += " user.covid_contact = $covid_contact,"
set_user_props = set_user_props.rstrip(",")
if set_user_props:
# TODO only set on ON CREATE?
user_stmt += " SET " + set_user_props + "\n"
tx.run(
user_stmt,
user_id=user.user_id,
age=report.age,
gender=report.gender,
covid_test=report.covid_test,
covid_contact=report.covid_contact,
)
def add_place(tx, place: CasePlace):
# Create or update place
place_stmt = "MERGE (place:Place { place_id: $place_id})\n"
place_stmt += "ON CREATE SET place.created = timestamp()\n"
set_place_props = ""
if place.latitude:
set_place_props += " place.latitude = $latitude,"
if place.longitude:
set_place_props += " place.longitude = $longitude,"
if place.place_name:
set_place_props += " place.name = $place_name,"
if place.place_types:
set_place_props += " place.types = $place_types,"
set_place_props = set_place_props.rstrip(",")
if set_place_props:
# TODO only set on ON CREATE
place_stmt += " SET " + set_place_props + "\n"
tx.run(
place_stmt,
place_id=place.place_id,
place_name=place.place_name,
place_types=place.place_types,
latitude=place.latitude,
longitude=place.longitude,
)
def add_place_visit(tx, user: User, place: CasePlace):
# Create or update place
add_place(tx, place)
# initialize with single element to prevent code duplication below
visit_dates = [None]
if place.visit_dates:
visit_dates = place.visit_dates
for visit_date in visit_dates:
relation_date_check = ""
if visit_date:
relation_date_check = "{visit_date: $visit_date}"
# Create has visited relation
visit_stmt = (
"MATCH (user:User {user_id: $user_id }), (place:Place {place_id: $place_id })\n"
"MERGE (user)-[r:HAS_VISITED " + relation_date_check + "]->(place)\n"
"ON CREATE SET r.created = timestamp()\n"
)
tx.run(
visit_stmt,
user_id=user.user_id,
place_id=place.place_id,
visit_date=visit_date,
)
def add_residence(tx, user: User, place: CasePlace):
# Create or update place
add_place(tx, place)
# Create has visited relation
residence_stmt = (
"MATCH (user:User {user_id: $user_id }), (place:Place {place_id: $place_id })\n"
"MERGE (user)-[r:LOCATED_IN]->(place)\n"
"ON CREATE SET r.created = timestamp()\n"
)
tx.run(residence_stmt, user_id=user.user_id, place_id=place.place_id)
def add_contact(tx, user: User, contact: CaseContact):
contact_id = id_utils.generate_user_id(
contact.phone_number, id_utils.get_id_generation_secret()
)
# Create contact user
user_stmt = (
"MERGE (user:User {user_id: $user_id})\n"
"ON CREATE SET user.created = timestamp()\n"
)
tx.run(user_stmt, user_id=contact_id)
# Create contact relation
# initialize with single element to prevent code duplicates below
contact_dates = [None]
if contact.contact_dates:
contact_dates = contact.contact_dates
for contact_date in contact_dates:
relation_date_check = ""
if contact_date:
relation_date_check = "{contact_date: $contact_date}"
contact_stmt = (
"MATCH (user:User {user_id: $user_id}), (contact:User {user_id: $contact_id})\n"
"MERGE (user)-[r:IN_CONTACT " + relation_date_check + "]->(contact)\n"
"ON CREATE SET r.created = timestamp()\n"
)
tx.run(
contact_stmt,
user_id=user.user_id,
contact_id=contact_id,
contact_date=contact_date,
)
def add_symptom(tx, user: User, symptom: CaseSymptom):
symptom_id = id_utils.generate_symptom_id(symptom.symptom_name)
# Create or update symptom
symptom_node_stmt = (
"MERGE (symptom:Symptom {symptom_id: $symptom_id})\n"
"ON CREATE SET symptom.symptom_name = $symptom_name, symptom.created = timestamp()\n"
)
tx.run(symptom_node_stmt, symptom_id=symptom_id, symptom_name=symptom.symptom_name)
# Create symptom relation
relation_date_check = ""
if symptom.report_date:
relation_date_check = "{report_date: $report_date}"
symptom_relation_stmt = (
"MATCH (user:User {user_id: $user_id }), (symptom:Symptom {symptom_id: $symptom_id })\n"
"MERGE (user)-[r:HAS_SYMPTOM " + relation_date_check + "]->(symptom)\n"
"ON CREATE SET r.created = timestamp()\n"
)
set_symptom_props = ""
if symptom.severity:
set_symptom_props += " r.severity = $severity,"
set_symptom_props = set_symptom_props.rstrip(",")
if set_symptom_props:
symptom_relation_stmt += " SET " + set_symptom_props + "\n"
tx.run(
symptom_relation_stmt,
user_id=user.user_id,
report_date=symptom.report_date,
symptom_id=symptom_id,
severity=symptom.severity,
)
def add_report(tx, user: User, report: CaseReport):
add_user(tx, user, report)
if report.residence:
add_residence(tx, user, report.residence)
if report.places:
for place in report.places:
add_place_visit(tx, user, place)
if report.symptoms:
for symptom in report.symptoms:
add_symptom(tx, user, symptom)
if report.contacts:
for contact in report.contacts:
add_contact(tx, user, contact)
class GraphHandler:
def __init__(self, neo_uri: str, user: str, password: str):
# Initialize logger
self.log = logging.getLogger(__name__)
# Initialized default variables
self._neo_uri = neo_uri
self._neo_auth = (user, password)
# Lazy load graph driver
self._driver = None
def get_driver(self):
if self._driver is None or self._driver.closed():
# TODO improve driver configuration
# encrypted = TLS encryption
# Set connection timeouts?
# max_connection_lifetime=30 * 60, max_connection_pool_size=50,
# connection_acquisition_timeout=2 * 60
# connection_timeout=15
# max_retry_time=15
self._driver = GraphDatabase.driver(
self._neo_uri, auth=self._neo_auth, encrypted=False, keep_alive=True
)
return self._driver
def check_connection(self):
try:
self.get_driver().session().begin_transaction().close()
except ServiceUnavailable:
# Failed to write to defunct connection Address -> reconnect graph driver
self.close()
self.get_driver()
def close(self):
if self._driver and not self._driver.closed():
self._driver.close()
def get_session(self) -> Session:
self.check_connection()
return self.get_driver().session()
def init_graph(self):
try:
with self.get_session() as session:
tx = session.begin_transaction()
init_graph(tx)
tx.commit()
except Exception:
self.log.info(
"Could not initialize the graph constraints. Graph is probably already initialized." # , ex,
)
def delete_user(self, user_id: str):
with self.get_session() as session:
tx = session.begin_transaction()
delete_user(tx, user_id)
tx.commit()
def add_report(self, user: User, report: CaseReport):
with self.get_session() as session:
tx = session.begin_transaction()
add_report(tx, user, report)
tx.commit()
def run_get_query(self, query: str):
def fetch(tx, query):
return tx.run(query)
session = self.get_session()
result = session.read_transaction(fetch, query)
session.close()
return result
def run_post_query(self, query: str, data):
def put(tx, query, data):
return tx.run(query, **data).single().value()
session = self.get_session()
result = session.write_transaction(put, query, data)
session.close()
return result
```
#### File: cotect-endpoints/cotect_endpoints/security.py
```python
import logging
import os
import firebase_admin
from fastapi import HTTPException, Security, status
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
from fastapi.security.api_key import APIKeyCookie, APIKeyHeader, APIKeyQuery
from firebase_admin import auth
from cotect_endpoints.utils import id_utils
from cotect_endpoints.schema import User
# Initialize logger
log = logging.getLogger(__name__)
firebase_app = None
firebase_credentials = os.getenv("GOOGLE_APPLICATION_CREDENTIALS")
if firebase_credentials and os.path.isfile(firebase_credentials):
    # Initialize firebase
firebase_app = firebase_admin.initialize_app()
else:
log.warning(
"GOOGLE_APPLICATION_CREDENTIALS was not set with a valid path. Firebase will not be initalized."
)
API_KEY_NAME = "api_token"
api_key_bearer = HTTPBearer(auto_error=False)
api_key_query = APIKeyQuery(name=API_KEY_NAME, auto_error=False)
api_key_header = APIKeyHeader(name=API_KEY_NAME, auto_error=False)
# Cookie security specification is not supported by swagger 2.0 specs
# api_key_cookie = APIKeyCookie(name=API_KEY_NAME, auto_error=False)
def get_active_user(
api_key_bearer: HTTPAuthorizationCredentials = Security(api_key_bearer),
api_key_query: str = Security(api_key_query),
api_key_header: str = Security(api_key_header),
# api_key_cookie: str = Security(api_key_cookie),
) -> User:
# https://medium.com/data-rebels/fastapi-authentication-revisited-enabling-api-key-authentication-122dc5975680
secret = id_utils.get_id_generation_secret()
api_key = None
if api_key_bearer:
api_key = api_key_bearer.credentials
elif api_key_query:
api_key = api_key_query
elif api_key_header:
api_key = api_key_header
else:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN, detail="No API Key was provided.",
)
#elif api_key_cookie:
# api_key = api_key_header
if api_key == "demo":
# Remove
return User(
user_id=id_utils.generate_user_id("+4917691377102", secret), verified=False,
)
if not firebase_app:
        # firebase app was not initialized
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Failed to verify user.",
headers={"WWW-Authenticate": "Bearer"},
)
try:
decoded_token = auth.verify_id_token(
api_key, app=firebase_app, check_revoked=False
)
if "phone_number" in decoded_token and decoded_token["phone_number"]:
return User(
user_id=id_utils.generate_user_id(decoded_token["phone_number"], secret),
verified=True,
)
else:
# use uid as fallback or for anonymous users
return User(
user_id=id_utils.generate_user_id(decoded_token["uid"], secret),
verified=False,
)
except Exception as ex:
log.info("Failed to validate firebase token: " + str(ex.msg))
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Failed to validate the firebase token.",
headers={"WWW-Authenticate": "Bearer"},
)
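# Illustrative client call (the URL path is a placeholder; the header name
# comes from API_KEY_NAME above):
#   requests.get("https://<host>/reports", headers={"api_token": "<firebase-id-token>"})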
```
#### File: cotect_endpoints/tests/test_graph_handler.py
```python
from cotect_endpoints.schema import User, CaseReport, CasePlace, CaseSymptom, CaseContact
from datetime import date
from cotect_endpoints.db_handler import GraphHandler
# add neo4j logging
from logging import getLogger, StreamHandler, DEBUG
# ```python
# from firebase_admin import auth
# user = auth.get_user("", app=default_app)
# user.phone_number
# auth.delete_user(uid, app=None)
# id_token = ""
# ```
# ```python
# # https://fastapi.tiangolo.com/tutorial/sql-databases/#note
# def get_db():
# try:
# db = SessionLocal()
# yield db
# finally:
# db.close()
#
# def get_db(db_state=Depends(reset_db_state)):
# try:
# database.db.connect()
# yield
# finally:
# if not database.db.is_closed():
# database.db.close()
# ```
handler = StreamHandler()
handler.setLevel(DEBUG)
getLogger("neo4j").addHandler(handler)
user = User(user_id="1", verified=True)
report = CaseReport()
report.age = 20
report.gender = "male"
report.residence = CasePlace(
place_id="berlin",
latitude=1.5,
longitude=19
)
report.covid_test = "tested-negative"
report.symptoms = [
CaseSymptom(symptom_name="Caugh", report_date=date.today(), severity="mild"),
CaseSymptom(symptom_name="Headache", report_date=date.today()),
CaseSymptom(symptom_name="Fever", report_date=date.today(), severity="37.5"),
]
report.places = [
CasePlace(
place_id="my-place-1",
visit_date=date.today(),
latitude=1.1,
longitude=1.2,
place_name="test",
place_types=["testtype", "type-2"],
),
CasePlace(
place_id="my-place-3",
latitude=1.5,
longitude=19
),
CasePlace(
place_id="my-place-4",
latitude=1.9,
longitude=19
)
]
report.contacts = [
CaseContact(phone_number="+4917691377102", contact_date = date.today()),
CaseContact(phone_number="+49176947102", contact_date = date.today()),
CaseContact(phone_number="+491769 1377102", contact_date = date.today()),
CaseContact(phone_number="+49176934432", contact_date = date.today()),
]
graph_handler = GraphHandler("bolt://docker.for.mac.localhost:7687", "", "")
graph_handler.init_graph()
graph_handler.add_report(user, report)
graph_handler.get_driver()
graph_handler.get_session().begin_transaction().close()
graph_handler.get_driver().close()
# +
from neo4j.exceptions import ServiceUnavailable
try:
graph_handler.get_session().begin_transaction().close()
except ServiceUnavailable:
graph_handler.close()
# -
graph_handler.add_report(user, report)
``` |
{
"source": "JNKielmann/Master-Thesis",
"score": 3
} |
#### File: expert_voting/author_score_modifiers/last_kit_paper_modifier.py
```python
import time
from typing import List
import numpy as np
from expert_voting.author_score_modifiers.author_score_modifier import AuthorScoreModifier
from expert_voting.scored_expert import ScoredExpert
class LastKitPaperModifier(AuthorScoreModifier):
def __init__(self, author_info, alpha, beta):
self.author_info = author_info
self.alpha = alpha
self.beta = beta
def modify(self, input_list: List[ScoredExpert]) -> List[ScoredExpert]:
result = []
for author in input_list:
last_publication_timestamp = self.author_info[str(author.id)][
"last_publication_date"]
last_publication_timestamp /= (1000 * 60 * 60 * 24 * 365)
current_timestamp = time.time() / (60 * 60 * 24 * 365)
age = current_timestamp - last_publication_timestamp
new_score = author.score * ((1 - self.beta)
* (1 / np.power(self.alpha, age))
+ self.beta)
result.append(author.change_score(new_score))
return result
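# Worked example (illustrative): with alpha=2 and beta=0.1, an author whose
# last KIT publication is one year old keeps a factor of
# 0.9 * (1 / 2**1) + 0.1 = 0.55 of the score, while an author who published
# very recently (age close to 0) keeps a factor close to 1.0.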
```
#### File: expert_voting/document_score_aggregations/author_order_aggregation.py
```python
from typing import List
import numpy as np
from expert_voting.document_score_aggregations.document_score_aggregation import \
DocumentScoreAggregation
from expert_voting.scored_document import ScoredDocument
from expert_voting.scored_expert import ScoredExpert
class AuthorOrderAggregation(DocumentScoreAggregation):
def __init__(self, doc_info, alpha, num_docs=None):
super().__init__(doc_info, num_docs)
self.alpha = alpha
def _calc_doc_score(self, author, num_authors, doc):
normalization = np.power(self.alpha, np.arange(num_authors)).sum()
factor = np.power(self.alpha, author["order_in_paper"]-1) / normalization
return doc.score * factor
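# Worked example (illustrative): with alpha=0.5 and a paper with three authors,
# the normalization is 0.5**0 + 0.5**1 + 0.5**2 = 1.75, so the first, second
# and third author receive roughly 57%, 29% and 14% of the document score.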
```
#### File: expert_voting/document_score_aggregations/document_score_aggregation.py
```python
import abc
from typing import List
from expert_voting.scored_document import ScoredDocument
from expert_voting.scored_expert import ScoredExpert
class DocumentScoreAggregation(abc.ABC):
def __init__(self, doc_info, num_docs=None):
self.doc_info = doc_info
self.num_docs = num_docs
def modify(self, input_list: List[ScoredDocument]) -> List[ScoredExpert]:
experts = {}
for doc in input_list:
authors = self.doc_info[str(doc.id)]["authors"]
seen_author_ids = set()
for author in authors:
if author["id"] in seen_author_ids:
continue
seen_author_ids.add(author["id"])
if not author["id"] in experts:
experts[author["id"]] = {
"scores": [],
"relevant_docs": [],
}
expert = experts[author["id"]]
expert["relevant_docs"].append((doc.id, round(doc.score, 3)))
expert["scores"].append(self._calc_doc_score(author, len(authors), doc))
return [ScoredExpert(expert_id,
# sum(sorted(expert["scores"], reverse=True)[:self.num_docs]),
sum(sorted(expert["scores"], reverse=True)[:self.num_docs]) if len(expert["scores"])>1 else 0,
expert["relevant_docs"])
for expert_id, expert in experts.items()]
@abc.abstractmethod
def _calc_doc_score(self, author, num_authors, doc):
pass
```
#### File: expert_voting/document_score_modifiers/exponential_modifier.py
```python
from typing import List
import numpy as np
from expert_voting.document_score_modifiers.document_score_modifier import \
DocumentScoreModifier
from expert_voting.scored_document import ScoredDocument
class ExponentialModifier(DocumentScoreModifier):
def modify(self, input_list: List[ScoredDocument]) -> List[ScoredDocument]:
result = []
for rank, doc in enumerate(input_list):
new_score = np.exp(doc.score*2)
result.append(doc.change_score(new_score))
return result
```
#### File: paper_retrieval/preprocessing/pipeline.py
```python
import copy
import pickle
from multiprocessing.pool import Pool
from time import time
from contextlib import suppress
import numpy as np
import pandas as pd
import os
import logging
logger = logging.getLogger(__name__)
def apply_pipeline(text, pipeline):
for stage in pipeline:
text = stage(text)
return text
class Corpus:
def __init__(self, file_path, preprocessing_pipeline, load_from_cache=True,
id_column="id", text_column="text", n_jobs=8):
logger.info(f"Start preprocessing pipeline "
f"\"{_get_pipeline_name(preprocessing_pipeline)}\" "
f"for file {file_path}.")
self.pipeline = copy.deepcopy(preprocessing_pipeline)
cache_file_path = self._get_cache_file_path(file_path)
if load_from_cache:
if self._load_from_file(cache_file_path, id_column, text_column):
logger.info(f"Loaded cached preprocessed corpus from {cache_file_path}")
return
df = pd.read_csv(file_path).fillna("")
self.ids = df[id_column]
self.data = df[text_column]
pool = None
for stage in self.pipeline:
logger.info(f"Start stage \"{stage.name}\"")
start_time = time()
with suppress(AttributeError):
stage.fit_corpus(self.data)
if n_jobs > 1:
if pool is None:
pool = Pool(n_jobs)
self.data = pd.concat(
pool.map(ApplyStage(stage), np.array_split(self.data, n_jobs)))
else:
self.data = self.data.progress_apply(stage)
logger.info(f"Finished stage \"{stage.name}\" in "
f"{time() - start_time:.2f} seconds")
if pool is not None:
pool.close()
self._save_to_file(cache_file_path)
self.data = self.data.str.split(" ")
logger.info(f"Finished preprocessing pipeline. "
f"Saved preprocessed corpus to cache file {cache_file_path}")
def _save_to_file(self, file_path):
pd.concat([self.ids, self.data], axis=1).to_csv(file_path + ".csv")
with open(file_path + ".pipeline", "wb") as file:
pickle.dump(self.pipeline, file)
def _load_from_file(self, file_path, id_column, text_column):
data_exists = os.path.isfile(file_path + ".csv")
pipeline_exists = os.path.isfile(file_path + ".pipeline")
if not data_exists or not pipeline_exists:
return False
df = pd.read_csv(file_path + ".csv")
self.ids = df[id_column]
self.data = df[text_column].str.split(" ")
with open(file_path + ".pipeline", "rb") as file:
self.pipeline = pickle.load(file)
return True
def _get_cache_file_path(self, original_file_path):
pipeline_name = _get_pipeline_name(self.pipeline)
root, ext = os.path.splitext(original_file_path)
return root + "_" + pipeline_name
def _get_pipeline_name(pipeline):
return "_".join([stage.name for stage in pipeline])
class ApplyStage:
def __init__(self, stage):
self.stage = stage
def __call__(self, data):
return data.progress_apply(self.stage)
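# Minimal usage sketch (the file path and the stage classes are assumptions,
# not part of this module; each stage must be callable on a string and expose
# a .name attribute):
#
#   pipeline = [LowerCaseStage(), LemmatizerStage()]
#   corpus = Corpus("data/kit_papers.csv", pipeline,
#                   id_column="id", text_column="text", n_jobs=4)
#   print(corpus.ids.iloc[0], corpus.data.iloc[0][:10])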
```
#### File: paper_retrieval/retrieval_algorithms/retrieval_algorithm.py
```python
import abc
import pandas as pd
from preprocessing import Corpus
class RetrievalAlgorithm(abc.ABC):
@abc.abstractmethod
def prepare(self, corpus: Corpus):
pass
@abc.abstractmethod
def get_ranking(self, query: str) -> pd.DataFrame:
pass
```
#### File: paper_retrieval/retrieval_algorithms/tf_idf_retrieval_algorithm.py
```python
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import normalize
from preprocessing import Corpus, apply_pipeline
from retrieval_algorithms import RetrievalAlgorithm
from .identity import identity
class TfIdfRetrievalAlgorithm(RetrievalAlgorithm):
def __init__(self, use_idf=True, sublinear_tf=True, max_ngram=1, min_df=1):
self.pipeline = None
self.ids = None
self.vectorizer = TfidfVectorizer(
analyzer="word",
tokenizer=identity,
preprocessor=identity,
ngram_range=(1, max_ngram),
use_idf=use_idf,
sublinear_tf=sublinear_tf,
min_df=min_df
)
self.vectorized_corpus = None
def prepare(self, corpus: Corpus):
if self.ids is not None:
return
self.pipeline = corpus.pipeline
self.ids = pd.Series(corpus.ids, name="id")
self.vectorized_corpus = self.vectorizer.fit_transform(corpus.data)
self.vectorized_corpus = normalize(self.vectorized_corpus)
def get_ranking(self, query) -> pd.DataFrame:
if self.ids is None:
raise RuntimeError("Prepare of class TfIdfRetrievalAlgorithm has to be "
"called before using get_ranking")
if type(query) is str:
query = [query]
        vectorized_query = np.zeros((1, self.vectorized_corpus.shape[1]))
        for q in query:
            q = apply_pipeline(q, self.pipeline)
            q = q.split(" ")
            vectorized_query += self.vectorizer.transform([q]).toarray()
        vectorized_query = normalize(vectorized_query)
        df = pd.DataFrame(self.ids)
        df["score"] = self.vectorized_corpus.dot(vectorized_query.T).ravel()
df.sort_values(by="score", ascending=False, inplace=True)
return df[df["score"] > 0]
```
#### File: paper_retrieval/server/flask_server.py
```python
import json
import pickle
import runpy
import spacy
from flask import Flask, jsonify, request, abort
from flask_cors import CORS, cross_origin
import sys
sys.path.insert(0, "..")
app = Flask(__name__)
app.config["JSONIFY_PRETTYPRINT_REGULAR"] = True
cors = CORS(app)
try:
spacy.load('en', disable=['ner'])
except IOError:
print("Downloading spacy model...")
spacy.cli.download("en")
file_path = "../../data/models/expert_voting.model"
with open(file_path, "rb") as file:
model = pickle.load(file)
with open("../../data/kit_expert_2019_all_author_info.json") as file:
author_info = json.load(file)
@app.route("/experts", methods=["GET"])
@cross_origin()
def get_experts():
if "query" not in request.args:
return {"error": 'Query parameter "query" is required'}, 400
query = request.args["query"]
    limit = int(request.args.get("limit", 15))
result = model.get_ranking(query, author_info).head(limit)
result = result[["id", "score", "name"]].to_dict(orient="records")
return jsonify({"experts": result})
if __name__ == '__main__':
app.run(port=8071, debug=False, use_reloader=False)
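# Example request once the server is running (illustrative):
#   curl "http://localhost:8071/experts?query=machine%20learning&limit=5"
# which returns a JSON body of the form
#   {"experts": [{"id": ..., "score": ..., "name": ...}, ...]}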
``` |
{
"source": "jnkm/MXFusion",
"score": 3
} |
#### File: components/distributions/gamma.py
```python
import numpy as np
import mxnet as mx
from ...common.config import get_default_MXNet_mode
from ..variables import Variable
from .univariate import UnivariateDistribution, UnivariateLogPDFDecorator, UnivariateDrawSamplesDecorator
from .distribution import Distribution, LogPDFDecorator, DrawSamplesDecorator
from ..variables import is_sampled_array, get_num_samples
from ...util.customop import broadcast_to_w_samples
class Gamma(UnivariateDistribution):
"""
Gamma distribution parameterized using Alpha and Beta.
    The log-gamma term is computed with the MXNet gammaln operator.
:param alpha: the alpha parameter of the Gamma distribution.
:type alpha: Variable
:param beta: beta parameter of the Gamma distribution.
:type beta: Variable
:param rand_gen: the random generator (default: MXNetRandomGenerator).
:type rand_gen: RandomGenerator
:param dtype: the data type for float point numbers.
:type dtype: numpy.float32 or numpy.float64
:param ctx: the mxnet context (default: None/current context).
:type ctx: None or mxnet.cpu or mxnet.gpu
"""
def __init__(self, alpha, beta, rand_gen=None, dtype=None, ctx=None):
if not isinstance(alpha, Variable):
alpha = Variable(value=alpha)
if not isinstance(beta, Variable):
beta = Variable(value=beta)
inputs = [('alpha', alpha), ('beta', beta)]
input_names = [k for k, _ in inputs]
output_names = ['random_variable']
super(Gamma, self).__init__(inputs=inputs, outputs=None,
input_names=input_names,
output_names=output_names,
rand_gen=rand_gen, dtype=dtype, ctx=ctx)
@UnivariateLogPDFDecorator()
def log_pdf(self, alpha, beta, random_variable, F=None):
"""
Computes the logarithm of the probability density function (PDF) of the Gamma distribution.
:param random_variable: the random variable of the Gamma distribution.
:type random_variable: MXNet NDArray or MXNet Symbol
:param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray).
:returns: log pdf of the distribution.
:rtypes: MXNet NDArray or MXNet Symbol
"""
F = get_default_MXNet_mode() if F is None else F
g_alpha = F.gammaln(alpha)
p1 = (alpha - 1.) * F.log(random_variable)
return (p1 - beta * random_variable) - (g_alpha - alpha * F.log(beta))
@UnivariateDrawSamplesDecorator()
def draw_samples(self, alpha, beta, rv_shape, num_samples=1, F=None):
"""
Draw samples from the Gamma distribution.
:param rv_shape: the shape of each sample.
:type rv_shape: tuple
        :param num_samples: the number of drawn samples (default: one).
        :type num_samples: int
:param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray).
:returns: a set samples of the Gamma distribution.
:rtypes: MXNet NDArray or MXNet Symbol
"""
F = get_default_MXNet_mode() if F is None else F
return F.random.gamma(alpha=alpha, beta=beta, dtype=self.dtype, ctx=self.ctx)
@staticmethod
def define_variable(alpha=0., beta=1., shape=None, rand_gen=None,
dtype=None, ctx=None):
"""
        Creates and returns a random variable drawn from a Gamma distribution parameterized with the alpha and beta parameters.
:param shape: the shape of the random variable(s).
:type shape: tuple or [tuple]
:param rand_gen: the random generator (default: MXNetRandomGenerator).
:type rand_gen: RandomGenerator
:param dtype: the data type for float point numbers.
:type dtype: numpy.float32 or numpy.float64
:param ctx: the mxnet context (default: None/current context).
:type ctx: None or mxnet.cpu or mxnet.gpu
:returns: the random variables drawn from the Gamma distribution.
:rtypes: Variable
"""
dist = Gamma(alpha=alpha, beta=beta, rand_gen=rand_gen,
dtype=dtype, ctx=ctx)
dist._generate_outputs(shape=shape)
return dist.random_variable
class GammaMeanVariance(UnivariateDistribution):
"""
Gamma distribution parameterized using Mean and Variance.
    The log-gamma term is computed with the MXNet gammaln operator.
:param mean: the mean parameter of the Gamma distribution.
:type mean: Variable
:param variance: variance parameter of the Gamma distribution.
:type variance: Variable
:param rand_gen: the random generator (default: MXNetRandomGenerator).
:type rand_gen: RandomGenerator
:param dtype: the data type for float point numbers.
:type dtype: numpy.float32 or numpy.float64
:param ctx: the mxnet context (default: None/current context).
:type ctx: None or mxnet.cpu or mxnet.gpu
"""
def __init__(self, mean, variance, rand_gen=None, dtype=None, ctx=None):
if not isinstance(mean, Variable):
mean = Variable(value=mean)
if not isinstance(variance, Variable):
variance = Variable(value=variance)
inputs = [('mean', mean), ('variance', variance)]
input_names = [k for k, _ in inputs]
output_names = ['random_variable']
super(GammaMeanVariance, self).__init__(inputs=inputs, outputs=None,
input_names=input_names,
output_names=output_names,
rand_gen=rand_gen, dtype=dtype, ctx=ctx)
def _get_alpha_beta(self, a, b):
"""
Returns the alpha/beta representation of the input variables.
Based on if the variable was parameterized using alpha/beta or with mean/variance.
:param a: alpha or mean
:type a: mx.ndarray.array or mx.symbol.array
:param b: beta or variance
:type b: mx.ndarray.array or mx.symbol.array
"""
beta = a / b
alpha = a * beta
return alpha, beta
@UnivariateLogPDFDecorator()
def log_pdf(self, mean, variance, random_variable, F=None):
"""
Computes the logarithm of the probability density function (PDF) of the Gamma distribution.
:param random_variable: the random variable of the Gamma distribution.
:type random_variable: MXNet NDArray or MXNet Symbol
:param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray).
:returns: log pdf of the distribution.
:rtypes: MXNet NDArray or MXNet Symbol
"""
F = get_default_MXNet_mode() if F is None else F
alpha, beta = self._get_alpha_beta(mean, variance)
g_alpha = F.gammaln(alpha)
p1 = (alpha - 1.) * F.log(random_variable)
return (p1 - beta * random_variable) - (g_alpha - alpha * F.log(beta))
@UnivariateDrawSamplesDecorator()
def draw_samples(self, mean, variance, rv_shape, num_samples=1, F=None):
"""
Draw samples from the Gamma distribution.
:param rv_shape: the shape of each sample.
:type rv_shape: tuple
        :param num_samples: the number of drawn samples (default: one).
        :type num_samples: int
:param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray).
:returns: a set samples of the Gamma distribution.
:rtypes: MXNet NDArray or MXNet Symbol
"""
F = get_default_MXNet_mode() if F is None else F
alpha, beta = self._get_alpha_beta(mean, variance)
return F.random.gamma(alpha=alpha, beta=beta, dtype=self.dtype, ctx=self.ctx)
@staticmethod
def define_variable(mean=0., variance=1., shape=None, rand_gen=None,
dtype=None, ctx=None):
"""
Creates and returns a random variable drawn from a Gamma distribution parameterized with mean and variance.
:param shape: the shape of the random variable(s).
:type shape: tuple or [tuple]
:param rand_gen: the random generator (default: MXNetRandomGenerator).
:type rand_gen: RandomGenerator
:param dtype: the data type for float point numbers.
:type dtype: numpy.float32 or numpy.float64
:param ctx: the mxnet context (default: None/current context).
:type ctx: None or mxnet.cpu or mxnet.gpu
:returns: the random variables drawn from the Gamma distribution.
:rtypes: Variable
"""
dist = GammaMeanVariance(mean=mean, variance=variance, rand_gen=rand_gen,
dtype=dtype, ctx=ctx)
dist._generate_outputs(shape=shape)
return dist.random_variable
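# Minimal usage sketch (assumes the surrounding MXFusion model API; not part of
# this module):
#
#   from mxfusion import Model
#   m = Model()
#   m.x = GammaMeanVariance.define_variable(mean=mx.nd.array([2.0]),
#                                           variance=mx.nd.array([1.0]),
#                                           shape=(1,))
#
# log_pdf and draw_samples are normally invoked by an inference algorithm
# rather than called directly.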
```
#### File: mxfusion/components/factor.py
```python
from copy import copy
from .model_component import ModelComponent
class Factor(ModelComponent):
"""
A factor represents a relation among multiple variables in a model such as a distribution, a function or a module. It consists of a list of output
variables and optionally a list of input variables.
The ``inputs`` and ``outputs`` argument of ``__init__`` holds the input and output of the factor, which are represented in Python dict. The key of a variable in
the dict is the name of the variable referred in the context of the factor, e.g., the mean and variance of a normal distribution. The value of a
variable is the reference to the variable in memory. Both input and output variables are accessible as class attributes.
The ``inputs`` and ``outputs`` argument of ``__init__`` can be:
* A list of variables
* An empty list (no input/output variables)
* None (the input/output variables are not provided yet.)
Note that the ``outputs`` argument should never be an empty list, as a factor always outputs some variables.
:param inputs: the input variables of the factor.
:type inputs: List of tuples of name to node e.g. [('random_variable': Variable y)] or None
:param outputs: the output variables of the factor.
:type outputs: List of tuples of name to node e.g. [('random_variable': Variable y)] or None
"""
def __getattr__(self, value):
if value.startswith("__"):
"""
When python copies objects, it begins by checking for ``__setstate__()`` which doesn't exist, so it calls ``__getattr__()``. Our implementation then
calls the ``self.inputs`` getter before the object is fully prepared because ``__init__()`` never gets called during the copy. This causes an infinite
recursion to ``__getattr__()``. By skipping magic methods with "__" prefix, we allow the object to initialize correctly during copying.
# TODO this is very inefficient, can be improved.
"""
raise AttributeError(value)
if value in self._input_names:
for name, node in self.inputs:
if name == value:
return node
if value in self._output_names:
for name, node in self.outputs:
if name == value:
return node
else:
raise AttributeError("''%s' object has no attribute '%s'" % (type(self), value))
def __init__(self, inputs, outputs, input_names, output_names):
super(Factor, self).__init__()
self._check_name_conflict(inputs, outputs)
self._input_names = input_names if input_names is not None else []
self._output_names = output_names if output_names is not None else []
self.predecessors = inputs if inputs is not None else []
self.successors = outputs if outputs is not None else []
def __repr__(self):
out_str = str(self.__class__.__name__)
if self.predecessors is not None:
out_str = out_str + \
'(' + ', '.join([str(name) + '=' + str(var) for
name, var in self.predecessors]) + ')'
return out_str
def replicate_self(self, attribute_map=None):
"""
This functions is a copy constructor for the object.
In order to perform copy construction we first call ``__new__()`` on the class which creates a blank object.
We then initialize that object using the method's standard init procedures, and do any extra copying of attributes.
Replicates this Factor, using new inputs, outputs, and a new uuid.
Used during model replication to functionally replicate a factor into a new graph.
:param inputs: new input variables of the factor.
:type inputs: List of tuples of name to node e.g. [('random_variable': Variable y)] or None
:param outputs: new output variables of the factor.
:type outputs: List of tuples of name to node e.g. [('random_variable': Variable y)] or None
"""
replicant = self.__class__.__new__(self.__class__)
Factor.__init__(
replicant, None, None, copy(self.input_names),
copy(self.output_names))
replicant._uuid = self.uuid
return replicant
def _check_name_conflict(self, inputs, outputs):
if inputs is not None and outputs is not None:
intersect = set([v for _, v in inputs]) & set([v for _, v in outputs])
if intersect:
raise RuntimeError("The inputs and outputs variables of " +
self.__class__.__name__ + " have name conflict: " + str(intersect) + ".")
@property
def inputs(self):
"""
Return a list of nodes whose edges point into this node.
"""
if self.graph is not None:
pred = {e['name']: v for v, e in self.graph.pred[self].items()}
return [(name, pred[name]) for name in self.input_names]
else:
return self._predecessors
@property
def outputs(self):
"""
Return a list of nodes pointed to by the edges of this node.
"""
if self.graph is not None:
succ = {e['name']: v for v, e in self.graph.succ[self].items()}
return [(name, succ[name]) for name in self.output_names]
else:
return self._successors
@inputs.setter
def inputs(self, inputs):
"""
Set Input variables of the factor.
:param inputs: Input variables of the factor.
:type inputs: List of tuples of name to node e.g. [('random_variable': Variable y)]
"""
self.predecessors = inputs
@outputs.setter
def outputs(self, outputs):
"""
Set Output variables of the factor.
:param outputs: Output variables of the factor.
:type outputs: List of tuples of name to node e.g. [('random_variable': Variable y)]
"""
self.successors = outputs
def set_outputs(self, variables):
"""
TODO We don't actually support multi-output.
"""
variables = [variables] if not isinstance(variables, (list, tuple)) else variables
self.successors = [(name, variable) for name, variable in zip(self.output_names, variables)]
def set_single_input(self, key, value):
"""
Set a single input variable of a factor.
:param key: the name of the input variable in the factor
:type key: str
:param value: the variable to be set
:type value: Variable
"""
inputs = [(k, value) if k == key else (k, v) for k, v in self.inputs]
self.predecessors = inputs
@property
def input_names(self):
"""
Return Input names.
"""
return self._input_names
@property
def output_names(self):
"""
Return Output names.
"""
return self._output_names
def fetch_runtime_inputs(self, params):
"""
The helper function to fetch the input variables from a set of
variables according to the UUIDs of the input variables. It returns a
dictionary of variables at runtime, where the keys are the name of the
input variables and the values are the MXNet array at runtime. The
returned dict can be directly passed into runtime functions of factors
such as eval for functions and log_pdf and draw_samples for
distributions.
:param params: the set of variables where the input variables are
fetched from.
:type params: {str (UUID): MXNet NDArray or MXNet Symbol}
:return: a dict of the input variables, where the keys are the name
of the input variables and the values are the MXNet array at runtime.
:rtype: {str (kernel name): MXNet NDArray or MXNet Symbol}
"""
return {n: params[v.uuid] for n, v in self.inputs}
def fetch_runtime_outputs(self, params):
"""
The helper function to fetch the output variables from a set of
variables according to the UUIDs of the output variables. It returns a
dictionary of variables at runtime, where the keys are the name of the
output variables and the values are the MXNet array at runtime. The
returned dict can be directly passed into runtime functions of factors
such as eval for functions and log_pdf and draw_samples for
distributions.
:param params: the set of variables where the output variables are
fetched from.
:type params: {str (UUID): MXNet NDArray or MXNet Symbol}
:return: a dict of the output variables, where the keys are the name
of the output variables and the values are the MXNet array at runtime.
:rtype: {str (kernel name): MXNet NDArray or MXNet Symbol}
"""
return {n: params[v.uuid] for n, v in self.outputs}
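# Illustrative sketch (not part of the original module): for a factor `f` with
# input names 'mean' and 'variance', the runtime helpers map a dict keyed by
# variable UUID to a dict keyed by input name:
#
#   variables = {f.mean.uuid: mean_mx_array, f.variance.uuid: variance_mx_array}
#   kwargs = f.fetch_runtime_inputs(variables)  # {'mean': ..., 'variance': ...}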
``` |
{
"source": "jnktsj/fiss",
"score": 3
} |
#### File: fiss/firecloud/entity.py
```python
import json
import firecloud.api as fapi
class Entity(object):
"""A FireCloud Entity
Attributes:
etype (str): Enity type, e.g. "sample". Must be one of
Entity.ENTITY_TYPES
entity_id (str): Unique id of this entity. Becomes the entity's
name in FireCloud.
attrs (dict): Dictionary of attributes and their values
"""
ENTITY_TYPES = { "participant", "participant_set",
"sample", "sample_set",
"pair", "pair_set"
}
def __init__(self, etype, entity_id, attrs=dict()):
"""Create Entity."""
if etype not in Entity.ENTITY_TYPES:
raise ValueError("Invalid entity type: " + etype)
self.entity_id = entity_id
self.etype = etype
self.attrs = attrs
def get_attribute(self, attr):
"""Return attribute value."""
return self.attrs.get(attr, None)
def set_attribute(self, attr, value):
"""Set and return attribute value."""
self.attrs[attr] = value
return value
@staticmethod
def create_payload(entities):
"""Create a tsv payload describing entities.
A TSV payload consists of 1 header row describing entity type
and attribute names. Each subsequent line is an entity_id followed
by attribute values separated by the tab "\\t" character. This
payload can be uploaded to the workspace via
firecloud.api.upload_entities()
"""
#First check that all entities are of the same type
types = {e.etype for e in entities}
if len(types) != 1:
raise ValueError("Can't create payload with " +
str(len(types)) + " types")
all_attrs = set()
for e in entities:
all_attrs.update(set(e.attrs.keys()))
#Write a header line
all_attrs = list(all_attrs)
header = "entity:" + entities[0].etype + "_id"
payload = '\t'.join([header] + all_attrs) + '\n'
for e in entities:
line = e.entity_id
for a in all_attrs:
line += '\t' + e.attrs.get(a, "")
payload += line + '\n'
return payload
@staticmethod
def create_loadfile(entities, f):
"""Create payload and save to file."""
with open(f, 'w') as out:
out.write(Entity.create_payload(entities))
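# Illustrative example (the attribute names and values below are hypothetical):
if __name__ == "__main__":
    samples = [
        Entity("sample", "sample_01", {"bam_path": "gs://bucket/s1.bam"}),
        Entity("sample", "sample_02", {"bam_path": "gs://bucket/s2.bam"}),
    ]
    # Prints a payload whose header line is "entity:sample_id<TAB>bam_path",
    # followed by one tab-separated line per entity.
    print(Entity.create_payload(samples))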
```
#### File: fiss/firecloud/fiss.py
```python
from __future__ import print_function
import json
import sys
import os
import time
from inspect import getsourcelines
from traceback import print_tb as print_traceback
from io import open
from fnmatch import fnmatchcase
import argparse
import subprocess
import re
import collections
from difflib import unified_diff
from six import iteritems, string_types, itervalues, u, text_type
from six.moves import input
from firecloud import api as fapi
from firecloud import fccore
from firecloud.errors import *
from firecloud.__about__ import __version__
from firecloud import supervisor
fcconfig = fccore.config_parse()
def fiss_cmd(function):
""" Decorator to indicate a function is a FISS command """
function.fiss_cmd = True
return function
#################################################
# SubCommands
#################################################
@fiss_cmd
def space_list(args):
''' List accessible workspaces, in TSV form: <namespace><TAB>workspace'''
r = fapi.list_workspaces(fields="workspace.name,workspace.namespace")
fapi._check_response_code(r, 200)
spaces = []
project = args.project
if project:
project = re.compile('^' + project)
for space in r.json():
ns = space['workspace']['namespace']
if project and not project.match(ns):
continue
ws = space['workspace']['name']
spaces.append(ns + '\t' + ws)
# Sort for easier downstream viewing, ignoring case
return sorted(spaces, key=lambda s: s.lower())
@fiss_cmd
def space_exists(args):
""" Determine if the named space exists in the given project (namespace)"""
# The return value is the INVERSE of UNIX exit status semantics, (where
# 0 = good/true, 1 = bad/false), so to check existence in UNIX one would do
# if ! fissfc space_exists blah ; then
# ...
# fi
try:
r = fapi.get_workspace(args.project, args.workspace, fields="workspace.name")
fapi._check_response_code(r, 200)
exists = True
except FireCloudServerError as e:
if e.code == 404:
exists = False
else:
raise
if fcconfig.verbosity:
result = "DOES NOT" if not exists else "DOES"
eprint('Space <%s> %s exist in project <%s>' %
(args.workspace, result, args.project))
return exists
@fiss_cmd
def space_lock(args):
""" Lock a workspace """
r = fapi.lock_workspace(args.project, args.workspace)
fapi._check_response_code(r, 204)
if fcconfig.verbosity:
eprint('Locked workspace {0}/{1}'.format(args.project, args.workspace))
return 0
@fiss_cmd
def space_unlock(args):
""" Unlock a workspace """
r = fapi.unlock_workspace(args.project, args.workspace)
fapi._check_response_code(r, 204)
if fcconfig.verbosity:
eprint('Unlocked workspace {0}/{1}'.format(args.project,args.workspace))
return 0
@fiss_cmd
def space_new(args):
""" Create a new workspace. """
r = fapi.create_workspace(args.project, args.workspace,
args.authdomain, dict())
fapi._check_response_code(r, 201)
if fcconfig.verbosity:
eprint(r.content)
return 0
@fiss_cmd
def space_info(args):
""" Get metadata for a workspace. """
r = fapi.get_workspace(args.project, args.workspace)
fapi._check_response_code(r, 200)
return r.text
@fiss_cmd
def space_delete(args):
""" Delete a workspace. """
message = "WARNING: this will delete workspace: \n\t{0}/{1}".format(
args.project, args.workspace)
if not args.yes and not _confirm_prompt(message):
return 0
r = fapi.delete_workspace(args.project, args.workspace)
fapi._check_response_code(r, [200, 202, 204, 404])
if fcconfig.verbosity:
print('Deleted workspace {0}/{1}'.format(args.project, args.workspace))
return 0
@fiss_cmd
def space_clone(args):
""" Replicate a workspace """
# FIXME: add --deep copy option (shallow by default)
# add aliasing capability, then make space_copy alias
if not args.to_workspace:
args.to_workspace = args.workspace
if not args.to_project:
args.to_project = args.project
if (args.project == args.to_project
and args.workspace == args.to_workspace):
eprint("Error: destination project and namespace must differ from"
" cloned workspace")
return 1
r = fapi.clone_workspace(args.project, args.workspace, args.to_project,
args.to_workspace, args.copyFilesWithPrefix)
fapi._check_response_code(r, 201)
if fcconfig.verbosity:
msg = "{}/{} successfully cloned to {}/{}".format(
args.project, args.workspace,
args.to_project, args.to_workspace)
print(msg)
return 0
@fiss_cmd
def space_acl(args):
''' Retrieve access control list for a workspace'''
r = fapi.get_workspace_acl(args.project, args.workspace)
fapi._check_response_code(r, 200)
result = dict()
for user, info in sorted(r.json()['acl'].items()):
result[user] = info['accessLevel']
return result
@fiss_cmd
def space_set_acl(args):
""" Assign an ACL role to list of users for a workspace """
acl_updates = [{"email": user,
"accessLevel": args.role} for user in args.users]
r = fapi.update_workspace_acl(args.project, args.workspace, acl_updates)
fapi._check_response_code(r, 200)
errors = r.json()['usersNotFound']
if len(errors):
eprint("Unable to assign role for unrecognized users:")
for user in errors:
eprint("\t{0}".format(user['email']))
return 1
if fcconfig.verbosity:
print("Successfully updated {0} role(s)".format(len(acl_updates)))
return 0
@fiss_cmd
def space_search(args):
""" Search for workspaces matching certain criteria """
r = fapi.list_workspaces(fields="workspace.name,workspace.namespace,workspace.bucketName")
fapi._check_response_code(r, 200)
# Parse the JSON for workspace + namespace; then filter by
# search terms: each term is treated as a regular expression
workspaces = r.json()
extra_terms = []
if args.bucket:
workspaces = [w for w in workspaces
if re.search(args.bucket, w['workspace']['bucketName'])]
extra_terms.append('bucket')
# FIXME: add more filter terms
pretty_spaces = []
for space in workspaces:
ns = space['workspace']['namespace']
ws = space['workspace']['name']
pspace = ns + '/' + ws
# Always show workspace storage id
pspace += '\t' + space['workspace']['bucketName']
pretty_spaces.append(pspace)
# Sort for easier viewing, ignore case
return sorted(pretty_spaces, key=lambda s: s.lower())
@fiss_cmd
def entity_import(args):
""" Upload an entity loadfile. """
project = args.project
workspace = args.workspace
chunk_size = args.chunk_size
model = args.model
with open(args.tsvfile) as tsvf:
headerline = tsvf.readline().strip()
entity_data = [l.rstrip('\n') for l in tsvf]
return _batch_load(project, workspace, headerline, entity_data, chunk_size,
model)
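# A minimal sample loadfile accepted by entity_import looks like this
# (illustrative; tabs are shown as <TAB>):
#   entity:sample_id<TAB>participant_id<TAB>bam_path
#   sample_01<TAB>participant_01<TAB>gs://bucket/s1.bam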
@fiss_cmd
def set_export(args):
'''Return a list of lines in TSV form that would suffice to reconstitute a
container (set) entity, if passed to entity_import. The first line in
the list is the header, and subsequent lines are the container members.
'''
r = fapi.get_entity(args.project, args.workspace, args.entity_type, args.entity)
fapi._check_response_code(r, 200)
set_type = args.entity_type
set_name = args.entity
member_type = set_type.split('_')[0]
members = r.json()['attributes'][member_type+'s']['items']
result = ["membership:{}_id\t{}_id".format(set_type, member_type)]
result += ["%s\t%s" % (set_name, m['entityName']) for m in members ]
return result
@fiss_cmd
def entity_types(args):
""" List entity types in a workspace """
r = fapi.list_entity_types(args.project, args.workspace)
fapi._check_response_code(r, 200)
    return list(r.json().keys())
@fiss_cmd
def entity_list(args):
""" List entities in a workspace. """
r = fapi.get_entities_with_type(args.project, args.workspace)
fapi._check_response_code(r, 200)
return [ '{0}\t{1}'.format(e['entityType'], e['name']) for e in r.json() ]
# REMOVED: This now returns a *.zip* file containing two tsvs, which is far
# less useful for FISS users...
@fiss_cmd
def entity_tsv(args):
""" Get list of entities in TSV format. Download files for which the
encoding is undetected (e.g. ZIP archives). """
r = fapi.get_entities_tsv(args.project, args.workspace,
args.entity_type, args.attrs, args.model)
fapi._check_response_code(r, 200)
if r.apparent_encoding is not None:
return r.content.decode(r.apparent_encoding)
else:
content = r.headers['Content-Disposition'].split('; ')[-1].split('=')
if len(content) == 2 and content[0] == 'filename':
filename = content[1]
if os.path.exists(filename) and (args.yes or not _confirm_prompt(
'This will overwrite {}'.format(filename))):
return
with open(filename, 'wb') as outfile:
for chunk in r:
outfile.write(chunk)
print('Downloaded {}.'.format(filename))
return
else:
eprint("Unable to determine name of file to download.")
return 1
def __entity_names(entities):
return [ entity['name'] for entity in entities ]
def __get_entities(args, kind, page_size=1000, handler=__entity_names):
entities = _entity_paginator(args.project, args.workspace, kind,
page_size=page_size)
return handler(entities)
@fiss_cmd
def participant_list(args):
''' List participants within a container'''
# Case 1: retrieve participants within a named data entity
if args.entity_type and args.entity:
# Edge case: caller asked for participant within participant (itself)
if args.entity_type == 'participant':
return [ args.entity.strip() ]
# Otherwise retrieve the container entity
r = fapi.get_entity(args.project, args.workspace, args.entity_type, args.entity)
fapi._check_response_code(r, 200)
participants = r.json()['attributes']["participants"]['items']
return [ participant['entityName'] for participant in participants ]
# Case 2: retrieve all participants within a workspace
return __get_entities(args, "participant", page_size=2000)
@fiss_cmd
def pair_list(args):
''' List pairs within a container. '''
# Case 1: retrieve pairs within a named data entity
if args.entity_type and args.entity:
# Edge case: caller asked for pair within a pair (itself)
if args.entity_type == 'pair':
return [ args.entity.strip() ]
        # Edge case: pairs for a participant, which has to be done the hard
        # way, by iterating over all pairs (see firecloud/discussion/9648)
elif args.entity_type == 'participant':
entities = _entity_paginator(args.project, args.workspace,
'pair', page_size=2000)
return [ e['name'] for e in entities if
e['attributes']['participant']['entityName'] == args.entity]
# Otherwise retrieve the container entity
r = fapi.get_entity(args.project, args.workspace, args.entity_type, args.entity)
fapi._check_response_code(r, 200)
pairs = r.json()['attributes']["pairs"]['items']
return [ pair['entityName'] for pair in pairs]
# Case 2: retrieve all pairs within a workspace
return __get_entities(args, "pair", page_size=2000)
@fiss_cmd
def sample_list(args):
''' List samples within a container. '''
# Case 1: retrieve samples within a named data entity
if args.entity_type and args.entity:
# Edge case: caller asked for samples within a sample (itself)
if args.entity_type == 'sample':
return [ args.entity.strip() ]
        # Edge case: samples for a participant, which has to be done the hard
        # way, by iterating over all samples (see firecloud/discussion/9648)
elif args.entity_type == 'participant':
samples = _entity_paginator(args.project, args.workspace,
'sample', page_size=2000)
return [ e['name'] for e in samples if
e['attributes']['participant']['entityName'] == args.entity]
# Otherwise retrieve the container entity
r = fapi.get_entity(args.project, args.workspace, args.entity_type, args.entity)
fapi._check_response_code(r, 200)
if args.entity_type == 'pair':
pair = r.json()['attributes']
samples = [ pair['case_sample'], pair['control_sample'] ]
else:
samples = r.json()['attributes']["samples"]['items']
return [ sample['entityName'] for sample in samples ]
# Case 2: retrieve all samples within a workspace
return __get_entities(args, "sample", page_size=2000)
@fiss_cmd
def sset_list(args):
""" List sample sets in a workspace """
return __get_entities(args, "sample_set")
@fiss_cmd
def entity_delete(args):
""" Delete entity in a workspace. """
msg = "WARNING: this will delete {0} {1} in {2}/{3}".format(
args.entity_type, args.entity, args.project, args.workspace)
if not (args.yes or _confirm_prompt(msg)):
return
json_body=[{"entityType": args.entity_type,
"entityName": args.entity}]
r = fapi.delete_entities(args.project, args.workspace, json_body)
fapi._check_response_code(r, 204)
if fcconfig.verbosity:
print("Succesfully deleted " + args.type + " " + args.entity)
@fiss_cmd
def participant_delete(args):
args.entity_type = "participant"
return entity_delete(args)
@fiss_cmd
def sample_delete(args):
args.entity_type = "sample"
return entity_delete(args)
@fiss_cmd
def sset_delete(args):
args.entity_type = "sample_set"
return entity_delete(args)
@fiss_cmd
def meth_new(args):
""" Submit a new workflow (or update) to the methods repository. """
r = fapi.update_repository_method(args.namespace, args.method,
args.synopsis, args.wdl, args.doc,
args.comment)
fapi._check_response_code(r, 201)
if fcconfig.verbosity:
print("Method %s installed to project %s" % (args.method,
args.namespace))
return 0
@fiss_cmd
def meth_delete(args):
""" Remove (redact) a method from the method repository """
message = "WARNING: this will delete workflow \n\t{0}/{1}:{2}".format(
args.namespace, args.method, args.snapshot_id)
if not args.yes and not _confirm_prompt(message):
return
r = fapi.delete_repository_method(args.namespace, args.method,
args.snapshot_id)
fapi._check_response_code(r, 200)
if fcconfig.verbosity:
print("Method %s removed from project %s" % (args.method,args.namespace))
return 0
@fiss_cmd
def meth_wdl(args):
''' Retrieve WDL for given version of a repository method'''
r = fapi.get_repository_method(args.namespace, args.method,
args.snapshot_id, True)
fapi._check_response_code(r, 200)
return r.text
@fiss_cmd
def meth_acl(args):
''' Retrieve access control list for given version of a repository method'''
r = fapi.get_repository_method_acl(args.namespace, args.method,
args.snapshot_id)
fapi._check_response_code(r, 200)
acls = sorted(r.json(), key=lambda k: k['user'])
    return ['{0}\t{1}'.format(acl['user'], acl['role']) for acl in acls]
@fiss_cmd
def meth_set_acl(args):
""" Assign an ACL role to a list of users for a workflow. """
acl_updates = [{"user": user, "role": args.role} \
for user in set(expand_fc_groups(args.users)) \
if user != fapi.whoami()]
id = args.snapshot_id
if not id:
# get the latest snapshot_id for this method from the methods repo
r = fapi.list_repository_methods(namespace=args.namespace,
name=args.method)
fapi._check_response_code(r, 200)
versions = r.json()
if len(versions) == 0:
if fcconfig.verbosity:
eprint("method {0}/{1} not found".format(args.namespace,
args.method))
return 1
latest = sorted(versions, key=lambda m: m['snapshotId'])[-1]
id = latest['snapshotId']
r = fapi.update_repository_method_acl(args.namespace, args.method, id,
acl_updates)
fapi._check_response_code(r, 200)
if fcconfig.verbosity:
print("Updated ACL for {0}/{1}:{2}".format(args.namespace, args.method,
id))
return 0
def expand_fc_groups(users, groups=None, seen=None):
    """ For each user in users, yield the user itself unless it is a FireCloud
    group, in which case yield every member of the group. Caveat: only group
    admins may expand a group's membership.
    """
    if seen is None:
        seen = set()
for user in users:
fcgroup = None
if user.lower() == 'public':
yield 'public'
continue
if '@' not in user:
fcgroup = user
elif user.lower().endswith('@firecloud.org'):
if groups is None:
r = fapi.get_groups()
fapi._check_response_code(r, 200)
groups = {group['groupEmail'].lower():group['groupName'] \
for group in r.json() if group['role'] == 'Admin'}
if user.lower() not in groups:
if fcconfig.verbosity:
eprint("You do not have access to the members of {}".format(user))
yield user
continue
else:
fcgroup = groups[user.lower()]
else:
yield user
continue
# Avoid infinite loops due to nested groups
if fcgroup in seen:
continue
r = fapi.get_group(fcgroup)
fapi._check_response_code(r, [200, 403])
if r.status_code == 403:
if fcconfig.verbosity:
eprint("You do not have access to the members of {}".format(fcgroup))
continue
fcgroup_data = r.json()
seen.add(fcgroup)
for admin in expand_fc_groups(fcgroup_data['adminsEmails'], groups, seen):
yield admin
for member in expand_fc_groups(fcgroup_data['membersEmails'], groups, seen):
yield member
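# Illustrative sketch of expand_fc_groups (group/user names are hypothetical):
#   list(expand_fc_groups(['[email protected]', '[email protected]']))
# might yield
#   ['[email protected]', '[email protected]', '[email protected]']
# i.e. plain addresses pass through unchanged, while a group address the caller
# administers is replaced by its admin and member emails; nested groups are
# expanded recursively, and groups already seen are skipped.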
@fiss_cmd
def meth_list(args):
""" List workflows in the methods repository """
r = fapi.list_repository_methods(namespace=args.namespace,
name=args.method,
snapshotId=args.snapshot_id)
fapi._check_response_code(r, 200)
# Parse the JSON for the workspace + namespace
methods = r.json()
results = []
for m in methods:
ns = m['namespace']
n = m['name']
sn_id = m['snapshotId']
results.append('{0}\t{1}\t{2}'.format(ns,n,sn_id))
# Sort for easier viewing, ignore case
return sorted(results, key=lambda s: s.lower())
@fiss_cmd
def meth_exists(args):
'''Determine whether a given workflow is present in methods repo'''
args.namespace = None
args.snapshot_id = None
return len(meth_list(args)) != 0
@fiss_cmd
def config_start(args):
'''Invoke a task (method configuration), on given entity in given space'''
# Try to use call caching (job avoidance)? Flexibly accept range of answers
    cache = getattr(args, "cache", True)
    if not isinstance(cache, bool):
        cache = str(cache).lower() in ["y", "true", "yes", "t", "1"]
if not args.namespace:
args.namespace = fcconfig.method_ns
if not args.namespace:
raise RuntimeError("namespace not provided, or configured by default")
# If no entity name is given, unset entity_type
if args.entity is None:
args.entity_type = None
r = fapi.create_submission(args.project, args.workspace,args.namespace,
args.config, args.entity, args.entity_type,
args.expression, use_callcache=cache)
fapi._check_response_code(r, 201)
id = r.json()['submissionId']
return ("Started {0}/{1} in {2}/{3}: id={4}".format(
args.namespace, args.config, args.project, args.workspace, id)), id
@fiss_cmd
def config_stop(args):
'''Abort a task (method configuration) by submission ID in given space'''
r = fapi.abort_submission(args.project, args.workspace,
args.submission_id)
fapi._check_response_code(r, 204)
return ("Aborted {0} in {1}/{2}".format(args.submission_id,
args.project,
args.workspace))
@fiss_cmd
def config_list(args):
""" List configuration(s) in the methods repository or a workspace. """
verbose = fcconfig.verbosity
if args.workspace:
if verbose:
print("Retrieving method configs from space {0}".format(args.workspace))
if not args.project:
eprint("No project given, and no default project configured")
return 1
r = fapi.list_workspace_configs(args.project, args.workspace)
fapi._check_response_code(r, 200)
else:
if verbose:
print("Retrieving method configs from method repository")
r = fapi.list_repository_configs(namespace=args.namespace,
name=args.config,
snapshotId=args.snapshot_id)
fapi._check_response_code(r, 200)
# Parse the JSON for the workspace + namespace
methods = r.json()
results = []
for m in methods:
ns = m['namespace']
if not ns:
ns = '__EMPTYSTRING__'
name = m['name']
# Ugh: configs in workspaces look different from configs in methodRepo
mver = m.get('methodRepoMethod', None)
if mver:
mver = mver.get('methodVersion', 'unknown') # space config
else:
mver = m.get('snapshotId', 'unknown') # repo config
results.append('{0}\t{1}\tsnapshotId:{2}'.format(ns, name, mver))
# Sort for easier viewing, ignore case
return sorted(results, key=lambda s: s.lower())
@fiss_cmd
def config_acl(args):
''' Retrieve access control list for a method configuration'''
r = fapi.get_repository_config_acl(args.namespace, args.config,
args.snapshot_id)
fapi._check_response_code(r, 200)
acls = sorted(r.json(), key=lambda k: k['user'])
    return ['{0}\t{1}'.format(acl['user'], acl['role']) for acl in acls]
@fiss_cmd
def config_set_acl(args):
""" Assign an ACL role to a list of users for a config. """
acl_updates = [{"user": user, "role": args.role} \
for user in set(expand_fc_groups(args.users)) \
if user != fapi.whoami()]
id = args.snapshot_id
if not id:
# get the latest snapshot_id for this method from the methods repo
r = fapi.list_repository_configs(namespace=args.namespace,
name=args.config)
fapi._check_response_code(r, 200)
versions = r.json()
if len(versions) == 0:
if fcconfig.verbosity:
eprint("Configuration {0}/{1} not found".format(args.namespace,
args.config))
return 1
latest = sorted(versions, key=lambda c: c['snapshotId'])[-1]
id = latest['snapshotId']
r = fapi.update_repository_config_acl(args.namespace, args.config, id,
acl_updates)
fapi._check_response_code(r, 200)
if fcconfig.verbosity:
print("Updated ACL for {0}/{1}:{2}".format(args.namespace, args.config,
id))
return 0
@fiss_cmd
def config_get(args):
""" Retrieve a method config from a workspace, send stdout """
r = fapi.get_workspace_config(args.project, args.workspace,
args.namespace, args.config)
fapi._check_response_code(r, 200)
# Setting ensure_ascii to False ensures unicode string returns
return json.dumps(r.json(), indent=4, separators=(',', ': '),
sort_keys=True, ensure_ascii=False)
@fiss_cmd
def config_wdl(args):
""" Retrieve the WDL for a method config in a workspace, send stdout """
r = fapi.get_workspace_config(args.project, args.workspace,
args.namespace, args.config)
fapi._check_response_code(r, 200)
method = r.json()["methodRepoMethod"]
args.namespace = method["methodNamespace"]
args.method = method["methodName"]
args.snapshot_id = method["methodVersion"]
return meth_wdl(args)
@fiss_cmd
def config_diff(args):
"""Compare method configuration definitions across workspaces. Ignores
methodConfigVersion if the verbose argument is not set"""
config_1 = config_get(args).splitlines()
args.project = args.Project
args.workspace = args.Workspace
cfg_1_name = args.config
if args.Config is not None:
args.config = args.Config
if args.Namespace is not None:
args.namespace = args.Namespace
config_2 = config_get(args).splitlines()
if not args.verbose:
config_1 = skip_cfg_ver(config_1)
config_2 = skip_cfg_ver(config_2)
return list(unified_diff(config_1, config_2, cfg_1_name, args.config, lineterm=''))
def skip_cfg_ver(cfg):
return [line for line in cfg if not line.startswith(' "methodConfigVersion": ')]
@fiss_cmd
def config_put(args):
'''Install a valid method configuration into a workspace, in one of several
ways: from a JSON file containing a config definition (both file names
and objects are supported); as a string representing the content of such
    a JSON file; or as a dict generated from such JSON content, e.g. via
json.loads(). Note that the CLI supports only string & filename input.'''
    config = args.config
    if isinstance(config, dict):
        pass                                   # already a parsed config object
    elif hasattr(config, "read"):
        config = json.loads(config.read())     # file-like object
    elif isinstance(config, str):
        if os.path.isfile(config):             # name of a JSON file
            with open(config, 'r') as fp:
                config = json.loads(fp.read())
        else:                                  # JSON content given as a string
            config = json.loads(config)
    else:
        raise ValueError('Input method config must be filename, string or dict')
r = fapi.create_workspace_config(args.project, args.workspace, config)
fapi._check_response_code(r, [201])
return True
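# Illustrative sketch of the input forms accepted by config_put (names below
# are hypothetical), e.g. when driving this module as a library:
#   args.config = "my_config.json"                      # name of a JSON file
#   args.config = '{"name": "cfg", "namespace": "ns"}'  # JSON content string
#   args.config = {"name": "cfg", "namespace": "ns"}    # already-parsed dict
# In each case the parsed dict is handed to fapi.create_workspace_config();
# whether the server accepts a given config is validated separately.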
__EDITME__ = u'EDITME, or abort edit/install by leaving entire config unchanged'
@fiss_cmd
def config_template(args):
c = fapi.get_config_template(args.namespace, args.method, args.snapshot_id)
fapi._check_response_code(c, 200)
c = c.json()
c[u'name'] = args.configname or __EDITME__
c[u'namespace'] = args.namespace or __EDITME__
c[u'rootEntityType'] = args.entity_type or __EDITME__
outputs = c[u'outputs']
for o in outputs:
outputs[o] = __EDITME__
inputs = c[u'inputs']
for i in inputs:
inputs[i] = __EDITME__
return json.dumps(c, indent=4, separators=(',', ': '), sort_keys=True,
ensure_ascii=False)
@fiss_cmd
def config_edit(args):
# Placeholder: accept either a method config name or a file containing
# a method config definition (e.g. such as returned by config_get)
pass
@fiss_cmd
def config_new(args):
'''Attempt to install a new method config into a workspace, by: generating
a template from a versioned method in the methods repo, then launching
a local editor (respecting the $EDITOR environment variable) to fill in
the incomplete input/output fields. Returns True if the config was
successfully installed, otherwise False'''
cfg = config_template(args)
# Iteratively try to edit/install the config: exit iteration by EITHER
# Successful config_put() after editing
# Leaving config unchanged in editor, e.g. quitting out of VI with :q
    # FIXME: put a small integer upper bound on the # of loops here
while True:
try:
edited = fccore.edit_text(cfg)
if edited == cfg:
eprint("No edits made, method config not installed ...")
break
if __EDITME__ in edited:
eprint("Edit is incomplete, method config not installed ...")
time.sleep(1)
continue
args.config = cfg = edited
config_put(args)
return True
except FireCloudServerError as fce:
__pretty_print_fc_exception(fce)
return False
@fiss_cmd
def config_delete(args):
""" Remove a method config from a workspace """
r = fapi.delete_workspace_config(args.project, args.workspace,
args.namespace, args.config)
fapi._check_response_code(r, [200,204])
return r.text if r.text else None
@fiss_cmd
def config_copy(args):
""" Copy a method config to new name/space/namespace/project (or all 4) """
if not (args.tospace or args.toname or args.toproject or args.tonamespace):
raise RuntimeError('A new config name OR workspace OR project OR ' +
'namespace must be given (or all)')
copy = fapi.get_workspace_config(args.fromproject, args.fromspace,
args.namespace, args.config)
fapi._check_response_code(copy, 200)
copy = copy.json()
if not args.toname:
args.toname = args.config
if not args.tonamespace:
args.tonamespace = args.namespace
if not args.toproject:
args.toproject = args.fromproject
if not args.tospace:
args.tospace = args.fromspace
copy['name'] = args.toname
copy['namespace'] = args.tonamespace
# Instantiate the copy
r = fapi.overwrite_workspace_config(args.toproject, args.tospace,
args.tonamespace, args.toname, copy)
fapi._check_response_code(r, 200)
if fcconfig.verbosity:
print("Method %s/%s:%s copied to %s/%s:%s" % (
args.fromproject, args.fromspace, args.config,
args.toproject, args.tospace, args.toname))
return 0
@fiss_cmd
def attr_get(args):
'''Return a dict of attribute name/value pairs: if entity name & type
are specified then attributes will be retrieved from that entity,
otherwise workspace-level attributes will be returned. By default all
attributes attached to the given object will be returned, but a subset
can be selected by specifying a list of attribute names; names which
    refer to a non-existent attribute will be silently ignored. When entity
    attributes are returned, a special __header__ entry is also added to the
    result, for downstream convenience. '''
if args.entity_type and (args.entity or args.entity_type == "ref"):
if args.entity_type == "ref": # return referenceData attributes
r = fapi.get_workspace(args.project, args.workspace, fields="workspace.attributes")
fapi._check_response_code(r, 200)
ws_attrs = r.json()['workspace']['attributes']
# check for referenceData in workspace
ref_attrs = {attr:ws_attrs[attr] for attr in ws_attrs if attr.startswith('referenceData_')}
if not ref_attrs:
print("There are no reference data available in workspace. Load a reference and try again.")
return {}
if args.entity:
attrs = {attr:ref_attrs[attr] for attr in ref_attrs if attr.startswith('referenceData_{}'.format(args.entity))}
if not attrs: # if chosen referenceData is not in workspace
ref_options = sorted({attr.split('_')[1] for attr in ref_attrs})
print("The given reference is not in workspace. Available option(s): {}.".format(", ".join(ref_options)))
return {}
else:
attrs = ref_attrs
else: # return named entity attrs
r = fapi.get_entity(args.project, args.workspace, args.entity_type, args.entity)
fapi._check_response_code(r, 200)
attrs = r.json()['attributes']
# It is wrong for the members of container objects to appear as metadata
# (attributes) attached to the container, as it conflates fundamentally
# different things: annotation vs membership. This can be seen by using
# a suitcase model of reasoning: ATTRIBUTES are METADATA like the weight
# & height of the suitcase, the shipping label and all of its passport
# stamps; while MEMBERS are the actual CONTENTS INSIDE the suitcase.
# This conflation also contradicts the design & docs (which make a clear
# distinction between MEMBERSHIP and UPDATE loadfiles). For this reason
# a change has been requested of the FireCloud dev team (via forum), and
# until it is implemented we will elide "list of members" attributes here
# (but later may provide a way for users to request such, if wanted)
for k in ["samples", "participants", "pairs"]:
attrs.pop(k, None)
elif args.ws_attrs: # return all workspace attrs (no referenceData attrs)
r = fapi.get_workspace(args.project, args.workspace, fields="workspace.attributes")
fapi._check_response_code(r, 200)
ws_attrs = r.json()['workspace']['attributes']
attrs = {attr:ws_attrs[attr] for attr in ws_attrs if not attr.startswith('referenceData')}
else: # return all attributes (workspace + referenceData attrs)
r = fapi.get_workspace(args.project, args.workspace, fields="workspace.attributes")
fapi._check_response_code(r, 200)
attrs = r.json()['workspace']['attributes']
if args.attributes: # return a subset of attributes, if requested
attrs = {k:attrs[k] for k in set(attrs).intersection(args.attributes)}
# If some attributes have been collected, return in appropriate format
if attrs:
if args.entity: # Entity attributes
def textify(thing):
if isinstance(thing, dict):
thing = thing.get("items", thing.get("entityName", "__UNKNOWN__"))
return "{0}".format(thing)
result = {args.entity : u'\t'.join(map(textify, attrs.values()))}
# Add "hidden" header of attribute names, for downstream convenience
object_id = u'entity:%s_id' % args.entity_type
result['__header__'] = [object_id] + list(attrs.keys())
else:
result = attrs # Workspace attributes
else:
result = {}
return result
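# Illustrative sketch of attr_get return values (attribute names below are
# hypothetical). For workspace-level attributes the result is a plain dict, e.g.
#   {'tag': 'hg38', 'owner': '[email protected]'}
# For a named entity, the attribute values are collapsed into one tab-separated
# row keyed by the entity name, plus the hidden header consumed by printToCLI:
#   {'sample1': 'gs://bucket/s1.bam\thg38',
#    '__header__': ['entity:sample_id', 'bam_path', 'ref']}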
@fiss_cmd
def attr_list(args):
'''Retrieve names of all attributes attached to a given object, either
an entity (if entity type+name is provided) or workspace (if not)'''
args.attributes = None
result = attr_get(args)
names = result.get("__header__",[])
if names:
names = names[1:]
else:
names = result.keys()
return sorted(names)
@fiss_cmd
def attr_set(args):
''' Set key=value attributes: if entity name & type are specified then
attributes will be set upon that entity, otherwise the attribute will
be set at the workspace level'''
if args.entity_type and args.entity:
prompt = "Set {0}={1} for {2}:{3} in {4}/{5}?\n[Y\\n]: ".format(
args.attribute, args.value, args.entity_type,
args.entity, args.project, args.workspace)
if not (args.yes or _confirm_prompt("", prompt)):
return 0
update = fapi._attr_set(args.attribute, args.value)
r = fapi.update_entity(args.project, args.workspace, args.entity_type,
args.entity, [update])
fapi._check_response_code(r, 200)
else:
prompt = "Set {0}={1} in {2}/{3}?\n[Y\\n]: ".format(
args.attribute, args.value, args.project, args.workspace
)
if not (args.yes or _confirm_prompt("", prompt)):
return 0
update = fapi._attr_set(args.attribute, args.value)
r = fapi.update_workspace_attributes(args.project, args.workspace,
[update])
fapi._check_response_code(r, 200)
return 0
@fiss_cmd
def attr_delete(args):
''' Delete key=value attributes: if entity name & type are specified then
attributes will be deleted from that entity, otherwise the attribute will
be removed from the workspace'''
if args.entity_type and args.entities:
# Since there is no attribute deletion endpoint, we must perform 2 steps
# here: first we retrieve the entity_ids, and any foreign keys (e.g.
# participant_id for sample_id); and then construct a loadfile which
# specifies which entities are to have what attributes removed. Note
# that FireCloud uses the magic keyword __DELETE__ to indicate that
# an attribute should be deleted from an entity.
# Step 1: see what entities are present, and filter to those requested
entities = _entity_paginator(args.project, args.workspace,
args.entity_type,
page_size=1000, filter_terms=None,
sort_direction="asc")
if args.entities:
entities = [e for e in entities if e['name'] in args.entities]
# Step 2: construct a loadfile to delete these attributes
attrs = sorted(args.attributes)
etype = args.entity_type
entity_data = []
for entity_dict in entities:
name = entity_dict['name']
line = name
# TODO: Fix other types?
if etype in ("sample", "pair"):
line += "\t" + entity_dict['attributes']['participant']['entityName']
if etype == "pair":
line += "\t" + entity_dict['attributes']['case_sample']['entityName']
line += "\t" + entity_dict['attributes']['control_sample']['entityName']
for _ in attrs:
line += "\t__DELETE__"
            entity_data.append(line)
entity_header = ["entity:" + etype + "_id"]
if etype == "sample":
entity_header.append("participant_id")
if etype == "pair":
entity_header += ["participant", "case_sample", "control_sample"]
entity_header = '\t'.join(entity_header + list(attrs))
# Remove attributes from an entity
message = "WARNING: this will delete these attributes:\n\n" + \
','.join(args.attributes) + "\n\n"
if args.entities:
message += 'on these {0}s:\n\n'.format(args.entity_type) + \
', '.join(args.entities)
else:
message += 'on all {0}s'.format(args.entity_type)
message += "\n\nin workspace {0}/{1}\n".format(args.project, args.workspace)
if not args.yes and not _confirm_prompt(message):
return 0
# TODO: reconcile with other batch updates
# Chunk the entities into batches of 500, and upload to FC
if args.verbose:
print("Batching " + str(len(entity_data)) + " updates to Firecloud...")
chunk_len = 500
total = int(len(entity_data) / chunk_len) + 1
batch = 0
for i in range(0, len(entity_data), chunk_len):
batch += 1
if args.verbose:
print("Updating samples {0}-{1}, batch {2}/{3}".format(
i+1, min(i+chunk_len, len(entity_data)), batch, total
))
this_data = entity_header + '\n' + '\n'.join(entity_data[i:i+chunk_len])
# Now push the entity data back to firecloud
r = fapi.upload_entities(args.project, args.workspace, this_data)
fapi._check_response_code(r, 200)
else:
message = "WARNING: this will delete the following attributes in " + \
"{0}/{1}\n\t".format(args.project, args.workspace) + \
"\n\t".join(args.attributes)
if not (args.yes or _confirm_prompt(message)):
return 0
updates = [fapi._attr_rem(a) for a in args.attributes]
r = fapi.update_workspace_attributes(args.project, args.workspace,
updates)
fapi._check_response_code(r, 200)
return 0
@fiss_cmd
def attr_copy(args):
""" Copy workspace attributes between workspaces. """
if not args.to_workspace:
args.to_workspace = args.workspace
if not args.to_project:
args.to_project = args.project
if (args.project == args.to_project
and args.workspace == args.to_workspace):
eprint("destination project and namespace must differ from"
" source workspace")
return 1
# First get the workspace attributes of the source workspace
r = fapi.get_workspace(args.project, args.workspace, fields="workspace.attributes")
fapi._check_response_code(r, 200)
# Parse the attributes
workspace_attrs = r.json()['workspace']['attributes']
# If we passed attributes, only use those
if args.attributes:
workspace_attrs = {k:v for k, v in iteritems(workspace_attrs)
if k in args.attributes}
if len(workspace_attrs) == 0:
print("No workspace attributes defined in {0}/{1}".format(
args.project, args.workspace))
return 1
message = "This will copy the following workspace attributes to {0}/{1}\n"
message = message.format(args.to_project, args.to_workspace)
for k, v in sorted(iteritems(workspace_attrs)):
message += '\t{0}\t{1}\n'.format(k, v)
if not args.yes and not _confirm_prompt(message):
return 0
# make the attributes into updates
updates = [fapi._attr_set(k,v) for k,v in iteritems(workspace_attrs)]
r = fapi.update_workspace_attributes(args.to_project, args.to_workspace,
updates)
fapi._check_response_code(r, 200)
return 0
@fiss_cmd
def attr_fill_null(args):
"""
Assign the null sentinel value for all entities which do not have a value
for the given attributes.
see gs://broad-institute-gdac/GDAC_FC_NULL for more details
"""
NULL_SENTINEL = "gs://broad-institute-gdac/GDAC_FC_NULL"
attrs = args.attributes
if not attrs:
print("Error: provide at least one attribute to set")
return 1
if 'participant' in attrs or 'samples' in attrs:
print("Error: can't assign null to samples or participant")
return 1
# Set entity attributes
if args.entity_type is not None:
print("Collecting entity data...")
# Get existing attributes
entities = _entity_paginator(args.project, args.workspace,
args.entity_type,
page_size=1000, filter_terms=None,
sort_direction="asc")
# samples need participant_id as well
#TODO: This may need more fixing for other types
orig_attrs = list(attrs)
if args.entity_type == "sample":
attrs.insert(0, "participant_id")
header = "entity:" + args.entity_type + "_id\t" + "\t".join(attrs)
        # Keep track of the number of updates for each attribute
attr_update_counts = {a : 0 for a in orig_attrs}
# construct new entity data by inserting null sentinel, and counting
# the number of updates
entity_data = []
for entity_dict in entities:
name = entity_dict['name']
etype = entity_dict['entityType']
e_attrs = entity_dict['attributes']
line = name
altered = False
for attr in attrs:
if attr == "participant_id":
line += "\t" + e_attrs['participant']['entityName']
continue # This attribute is never updated by fill_null
if attr not in e_attrs:
altered = True
attr_update_counts[attr] += 1
line += "\t" + str(e_attrs.get(attr, NULL_SENTINEL))
# Improve performance by only updating records that have changed
if altered:
entity_data.append(line)
# Check to see if all entities are being set to null for any attributes
# This is usually a mistake, so warn the user
num_entities = len(entities)
prompt = "Continue? [Y\\n]: "
for attr in orig_attrs:
if num_entities == attr_update_counts[attr]:
message = "WARNING: no {0}s with attribute '{1}'\n".format(
args.entity_type, attr
)
if not args.yes and not _confirm_prompt(message, prompt):
return
# check to see if no sentinels are necessary
if not any(c != 0 for c in itervalues(attr_update_counts)):
print("No null sentinels required, exiting...")
return 0
if args.to_loadfile:
print("Saving loadfile to " + args.to_loadfile)
with open(args.to_loadfile, "w") as f:
f.write(header + '\n')
f.write("\n".join(entity_data))
return 0
updates_table = " count attribute\n"
for attr in sorted(attr_update_counts):
count = attr_update_counts[attr]
updates_table += "{0:>10} {1}\n".format(count, attr)
message = "WARNING: This will insert null sentinels for " \
"these attributes:\n" + updates_table
if not args.yes and not _confirm_prompt(message):
return 0
# Chunk the entities into batches of 500, and upload to FC
print("Batching " + str(len(entity_data)) + " updates to Firecloud...")
chunk_len = 500
total = int(len(entity_data) / chunk_len) + 1
batch = 0
for i in range(0, len(entity_data), chunk_len):
batch += 1
print("Updating samples {0}-{1}, batch {2}/{3}".format(
i+1, min(i+chunk_len, len(entity_data)), batch, total
))
this_data = header + '\n' + '\n'.join(entity_data[i:i+chunk_len])
# Now push the entity data back to firecloud
r = fapi.upload_entities(args.project, args.workspace, this_data)
fapi._check_response_code(r, 200)
return 0
else:
# TODO: set workspace attributes
print("attr_fill_null requires an entity type")
return 1
@fiss_cmd
def health(args):
""" Health FireCloud Server """
r = fapi.health()
fapi._check_response_code(r, 200)
return r.content
@fiss_cmd
def mop(args):
''' Clean up unreferenced data in a workspace'''
# First retrieve the workspace to get bucket information
if args.verbose:
print("Retrieving workspace information...")
fields = "workspace.bucketName,workspace.name,workspace.attributes"
r = fapi.get_workspace(args.project, args.workspace, fields=fields)
fapi._check_response_code(r, 200)
workspace = r.json()
bucket = workspace['workspace']['bucketName']
bucket_prefix = 'gs://' + bucket
workspace_name = workspace['workspace']['name']
if args.verbose:
print("{} -- {}".format(workspace_name, bucket_prefix))
# Handle Basic Values, Compound data structures, and Nestings thereof
def update_referenced_files(referenced_files, attrs, bucket_prefix):
for attr in attrs:
# 1-D array attributes are dicts with the values stored in 'items'
if isinstance(attr, dict) and attr.get('itemsType') == 'AttributeValue':
update_referenced_files(referenced_files, attr['items'], bucket_prefix)
# Compound data structures resolve to dicts
elif isinstance(attr, dict):
update_referenced_files(referenced_files, attr.values(), bucket_prefix)
# Nested arrays resolve to lists
elif isinstance(attr, list):
update_referenced_files(referenced_files, attr, bucket_prefix)
elif isinstance(attr, string_types) and attr.startswith(bucket_prefix):
referenced_files.add(attr)
referenced_files = set()
update_referenced_files(referenced_files,
workspace['workspace']['attributes'].values(),
bucket_prefix)
# TODO: Make this more efficient with a native api call?
    # Now run a gsutil ls to list files present in the bucket
try:
gsutil_args = ['gsutil', 'ls', '-l', bucket_prefix + '/**']
if args.verbose:
print(' '.join(gsutil_args))
bucket_files = subprocess.check_output(gsutil_args, stderr=subprocess.STDOUT)
        # check_output returns a str in Py2 and bytes in Py3, so decode if necessary
if type(bucket_files) == bytes:
bucket_files = bucket_files.decode()
# Store size of each file in bucket to report recovered space
bucket_file_sizes = {}
for listing in bucket_files.split('\n'):
listing = listing.strip().split(' ')
if len(listing) != 3:
break
bucket_file_sizes[listing[2]] = int(listing[0])
# Now make a call to the API for the user's submission information.
user_submission_request = fapi.list_submissions(args.project, args.workspace)
        # Verify the API call succeeded; _check_response_code raises on failure
fapi._check_response_code(user_submission_request, 200)
# Sort user submission ids for future bucket file verification
submission_ids = set(item['submissionId'] for item in user_submission_request.json())
        # Check whether each bucket file path contains one of the user's
        # submission ids, so that only files inside submission directories are
        # eligible for deletion. A path of the form
        # "gs://bucket_id/submission_id/remaining/path" split with
        # bucket_file.split('/', 4) yields
        # ['gs:', '', 'bucket_id', 'submission_id', 'remaining/path'],
        # so the submission id is element 3.
        bucket_files = set(bucket_file for bucket_file in bucket_file_sizes if bucket_file.split('/', 4)[3] in submission_ids)
except subprocess.CalledProcessError as e:
eprint("Error retrieving files from bucket:" +
"\n\t{}\n\t{}".format(str(e), e.output))
return 1
    if args.verbose:
        num = len(bucket_files)
        print("Found {} files in bucket {}".format(num, bucket))
# Now build a set of files that are referenced in the bucket
# 1. Get a list of the entity types in the workspace
r = fapi.list_entity_types(args.project, args.workspace)
fapi._check_response_code(r, 200)
entity_types = r.json().keys()
# 2. For each entity type, request all the entities
for etype in entity_types:
if args.verbose:
print("Getting annotations for " + etype + " entities...")
# use the paginated version of the query
entities = _entity_paginator(args.project, args.workspace, etype,
page_size=1000, filter_terms=None,
sort_direction="asc")
for entity in entities:
update_referenced_files(referenced_files,
entity['attributes'].values(),
bucket_prefix)
if args.verbose:
num = len(referenced_files)
print("Found {} referenced files in workspace {}".format(num, workspace_name))
# Set difference shows files in bucket that aren't referenced
unreferenced_files = bucket_files - referenced_files
# Filter out files like .logs and rc.txt
def can_delete(f):
'''Return true if this file should not be deleted in a mop.'''
filename = f.rsplit('/', 1)[-1]
# Don't delete logs
if filename.endswith('.log'):
return False
# Don't delete return codes from jobs
if filename.endswith('-rc.txt'):
return False
if filename == "rc":
return False
# Don't delete tool's exec.sh or script
if filename in ('exec.sh', 'script'):
return False
# keep stdout, stderr, and output
if filename in ('stderr', 'stdout', 'output'):
return False
# Only delete specified unreferenced files
if args.include:
for glob in args.include:
if fnmatchcase(filename, glob):
return True
return False
# Don't delete specified unreferenced files
if args.exclude:
for glob in args.exclude:
if fnmatchcase(filename, glob):
return False
return True
deletable_files = [f for f in unreferenced_files if can_delete(f)]
if len(deletable_files) == 0:
if args.verbose:
print("No files to mop in " + workspace['workspace']['name'])
return 0
units = ['bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB']
def human_readable_size(size_in_bytes):
'''Takes a bytes value and returns a human-readable string with an
appropriate unit conversion'''
reduce_count = 0
while size_in_bytes >= 1024.0 and reduce_count < 5:
size_in_bytes /= 1024.0
reduce_count += 1
size_str = "{:.2f}".format(size_in_bytes) if reduce_count > 0 else str(size_in_bytes)
return "{} {}".format(size_str, units[reduce_count])
deletable_size = human_readable_size(sum(bucket_file_sizes[f]
for f in deletable_files))
if args.verbose or args.dry_run:
print("Found {} files to delete:\n".format(len(deletable_files)) +
"\n".join("{} {}".format(human_readable_size(bucket_file_sizes[f]).rjust(11), f)
for f in deletable_files) +
'\nTotal Size: {}\n'.format(deletable_size))
message = "WARNING: Delete {} files totaling {} in {} ({})".format(
len(deletable_files), deletable_size, bucket_prefix,
workspace['workspace']['name'])
if args.dry_run or (not args.yes and not _confirm_prompt(message)):
return 0
# Pipe the deletable_files into gsutil rm to remove them
gsrm_args = ['gsutil', '-m', 'rm', '-I']
PIPE = subprocess.PIPE
STDOUT=subprocess.STDOUT
if args.verbose:
print("Deleting files with gsutil...")
gsrm_proc = subprocess.Popen(gsrm_args, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
# Pipe the deletable_files into gsutil
result = gsrm_proc.communicate(input='\n'.join(deletable_files).encode())[0]
if args.verbose:
if type(result) == bytes:
result = result.decode()
print(result.rstrip())
return 0
def _call_gsstat(object_list):
"""
Call gsutil stat on a list of objects and return list of ones that can't be
found.
"""
gsstat_args = ['gsutil', '-m', 'stat'] + object_list
no_stats = list()
try:
gsstats = subprocess.check_output(gsstat_args, universal_newlines=True,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
# Account for gsutil stat bug that doesn't emit newlines after missing
# or inaccessible objects
gsstats = re.sub(r"\B(No URLs matched: |You aren't authorized to read |gs://)",
r"\n\1", e.output)
for line in gsstats.split('\n'):
if line.startswith("No URLs matched: "):
no_stats.append("{}\tNot Found".format(line[17:]))
if line.startswith("You aren't authorized to read "):
no_stats.append("{}\tNot Authorized".format(line[30:-11]))
return no_stats
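# Illustrative sketch of _call_gsstat output (object URLs are hypothetical):
#   _call_gsstat(['gs://bucket/exists.bam', 'gs://bucket/missing.bam'])
# An object that stats cleanly contributes nothing, while problem objects are
# reported one per line, e.g.
#   ['gs://bucket/missing.bam\tNot Found']
# or, for an object the caller cannot read, something like
#   ['gs://other-bucket/private.bam\tNot Authorized']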
@fiss_cmd
def validate_file_attrs(args):
bucket_prefix = "gs://"
verbose = fcconfig.verbosity
if verbose:
eprint("Retrieving workspace information...")
r = fapi.get_workspace(args.project, args.workspace, fields="workspace.attributes")
fapi._check_response_code(r, 200)
workspace = r.json()
referenced_files = set()
# Get all workspace file attributes
for value in workspace['workspace']['attributes'].values():
if isinstance(value, str) and value.startswith(bucket_prefix):
referenced_files.add(value)
# Now build a set of files that are referenced in the bucket
# 1. Get a list of the entity types in the workspace
r = fapi.list_entity_types(args.project, args.workspace)
fapi._check_response_code(r, 200)
entity_types = r.json().keys()
# 2. For each entity type, request all the entities
for etype in entity_types:
if verbose:
eprint("Getting annotations for " + etype + " entities...")
# use the paginated version of the query
entities = _entity_paginator(args.project, args.workspace, etype,
page_size=1000, filter_terms=None,
sort_direction="asc")
for entity in entities:
for value in entity['attributes'].values():
if isinstance(value, str) and value.startswith(bucket_prefix):
referenced_files.add(value)
sorted_files = sorted(referenced_files)
chunk_size = 100
total_files = len(sorted_files)
no_stats = list()
if verbose:
eprint("Total files:", total_files)
for idx in range(0, total_files, chunk_size):
if total_files - idx <= chunk_size:
if verbose:
eprint("checking", idx + 1, "to", total_files)
no_stats += _call_gsstat(sorted_files[idx:])
else:
if verbose:
eprint("checking", idx + 1, "to", idx + chunk_size)
no_stats += _call_gsstat(sorted_files[idx:idx+chunk_size])
return no_stats
@fiss_cmd
def noop(args):
if args.verbose:
proj = getattr(args, "project", "unspecified")
space = getattr(args, "workspace", "unspecified")
print('fiss no-op command: Project=%s, Space=%s' % (proj, space))
return 0
@fiss_cmd
def config_cmd(args):
values = fccore.attrdict()
names = args.variables
if not names:
names = list(fcconfig.keys())
for name in names:
values[name] = fcconfig.get(name, "__undefined__")
return values
@fiss_cmd
def sset_loop(args):
''' Loop over all sample sets in a workspace, performing a func '''
# Ensure that the requested action is a valid fiss_cmd
fiss_func = __cmd_to_func(args.action)
if not fiss_func:
eprint("invalid FISS cmd '" + args.action + "'")
return 1
# First get the sample set names
r = fapi.get_entities(args.project, args.workspace, "sample_set")
fapi._check_response_code(r, 200)
sample_sets = [entity['name'] for entity in r.json()]
args.entity_type = "sample_set"
for sset in sample_sets:
print('\n# {0}::{1}/{2} {3}'.format(args.project, args.workspace, sset,
args.action))
args.entity = sset
        # Note that this code is similar to how args.func is called in main(),
        # so it may make sense to factor out a common method for both
try:
result = fiss_func(args)
except Exception as e:
status = __pretty_print_fc_exception(e)
if not args.keep_going:
return status
printToCLI(result)
return 0
@fiss_cmd
def monitor(args):
''' Retrieve status of jobs submitted from a given workspace, as a list
of TSV lines sorted by descending order of job submission date'''
r = fapi.list_submissions(args.project, args.workspace)
fapi._check_response_code(r, 200)
statuses = sorted(r.json(), key=lambda k: k['submissionDate'], reverse=True)
header = '\t'.join(list(statuses[0].keys()))
    def expander(thing):
if isinstance(thing, dict):
entityType = thing.get("entityType", None)
if entityType:
return "{0}:{1}".format(entityType, thing['entityName'])
return "{0}".format(thing)
# FIXME: this will generally return different column order between Python 2/3
return [header] + ['\t'.join( map(expander, v.values())) for v in statuses]
@fiss_cmd
def supervise(args):
''' Run legacy, Firehose-style workflow of workflows'''
project = args.project
workspace = args.workspace
namespace = args.namespace
workflow = args.workflow
sample_sets = args.sample_sets
recovery_file = args.json_checkpoint
# If no sample sets are provided, run on all sample sets
if not sample_sets:
r = fapi.get_entities(args.project, args.workspace, "sample_set")
fapi._check_response_code(r, 200)
sample_sets = [s['name'] for s in r.json()]
message = "Sample Sets ({}):\n\t".format(len(sample_sets)) + \
"\n\t".join(sample_sets)
prompt = "\nLaunch workflow in " + project + "/" + workspace + \
" on these sample sets? [Y\\n]: "
if not args.yes and not _confirm_prompt(message, prompt):
return
return supervisor.supervise(project, workspace, namespace, workflow,
sample_sets, recovery_file)
@fiss_cmd
def supervise_recover(args):
recovery_file = args.recovery_file
return supervisor.recover_and_supervise(recovery_file)
@fiss_cmd
def entity_copy(args):
""" Copy entities from one workspace to another. """
if not args.to_workspace:
args.to_workspace = args.workspace
if not args.to_project:
args.to_project = args.project
if (args.project == args.to_project
and args.workspace == args.to_workspace):
eprint("destination project and namespace must differ from"
" source workspace")
return 1
if not args.entities:
# get a list of entities from source workspace matching entity_type
ents = _entity_paginator(args.project, args.workspace, args.entity_type,
page_size=500, filter_terms=None,
sort_direction='asc')
args.entities = [e['name'] for e in ents]
prompt = "Copy {0} {1}(s) from {2}/{3} to {4}/{5}?\n[Y\\n]: "
prompt = prompt.format(len(args.entities), args.entity_type, args.project,
args.workspace, args.to_project, args.to_workspace)
if not args.yes and not _confirm_prompt("", prompt):
return
r = fapi.copy_entities(args.project, args.workspace, args.to_project,
args.to_workspace, args.entity_type, args.entities,
link_existing_entities=args.link)
fapi._check_response_code(r, 201)
return 0
@fiss_cmd
def proj_list(args):
'''Retrieve the list of billing projects accessible to the caller/user, and
show the level of access granted for each (e.g. Owner, User, ...)'''
projects = fapi.list_billing_projects()
fapi._check_response_code(projects, 200)
projects = sorted(projects.json(), key=lambda d: d['projectName'])
l = map(lambda p: '{0}\t{1}'.format(p['projectName'], p['role']), projects)
# FIXME: add username col to output, for when iterating over multiple users
return ["Project\tRole"] + list(l)
@fiss_cmd
def config_validate(args):
''' Validate a workspace configuration: if an entity was specified (i.e.
upon which the configuration should operate), then also validate that
the entity has the necessary attributes'''
r = fapi.validate_config(args.project, args.workspace, args.namespace,
args.config)
fapi._check_response_code(r, 200)
entity_d = None
config_d = r.json()
if args.entity:
entity_type = config_d['methodConfiguration']['rootEntityType']
entity_r = fapi.get_entity(args.project, args.workspace,
entity_type, args.entity)
fapi._check_response_code(entity_r, [200,404])
if entity_r.status_code == 404:
eprint("Error: No {0} named '{1}'".format(entity_type, args.entity))
return 2
else:
entity_d = entity_r.json()
# also get the workspace info
w = fapi.get_workspace(args.project, args.workspace, fields="workspace.attributes")
fapi._check_response_code(w, 200)
workspace_d = w.json()
ii, io, ma, mwa = _validate_helper(args, config_d, workspace_d, entity_d)
ii_msg = "\nInvalid inputs:"
io_msg = "\nInvalid outputs:"
ma_msg = "\n{0} {1} doesn't satisfy the following inputs:".format(entity_type, args.entity) if args.entity else ""
mwa_msg = "\nWorkspace {0}/{1} doesn't satisfy following inputs:".format(args.project, args.workspace)
for errs, msg in zip([ii, io, ma, mwa], [ii_msg, io_msg, ma_msg, mwa_msg]):
if errs:
print(msg)
for inp, val in errs:
print("{0} -> {1}".format(inp, val))
    if ii + io + ma + mwa:
        return 1
    return 0
def _validate_helper(args, config_d, workspace_d, entity_d=None):
""" Return FISSFC validation information on config for a certain entity """
# 4 ways to have invalid config:
invalid_inputs = sorted(config_d["invalidInputs"])
invalid_outputs = sorted(config_d["invalidOutputs"])
# Also insert values for invalid i/o
invalid_inputs = [(i, config_d['methodConfiguration']['inputs'][i]) for i in invalid_inputs]
invalid_outputs = [(i, config_d['methodConfiguration']['outputs'][i]) for i in invalid_outputs]
missing_attrs = []
missing_wksp_attrs = []
# If an entity was provided, also check to see if that entity has the necessary inputs
if entity_d:
entity_type = config_d['methodConfiguration']['rootEntityType']
# If the attribute is listed here, it has an entry
entity_attrs = set(entity_d['attributes'])
        # Workspace attributes are needed to check workspace.* input expressions
workspace_attrs = workspace_d['workspace']['attributes']
# So now iterate over the inputs
for inp, val in iteritems(config_d['methodConfiguration']['inputs']):
# Must be an attribute on the entity
if val.startswith("this."):
# Normally, the value is of the form 'this.attribute',
# but for operations on sets, e.g. one can also do
# 'this.samples.attr'. But even in this case, there must be a
# 'samples' attribute on the sample set, so checking for the middle
# value works as expected. Other pathological cases would've been
# caught above by the validation endpoint
expected_attr = val.split('.')[1]
# 'name' is special, it really means '_id', which everything has
if expected_attr == "name":
continue
if expected_attr not in entity_attrs:
missing_attrs.append((inp, val))
if val.startswith("workspace."):
# Anything not matching this format will be caught above
expected_attr = val.split('.')[1]
if expected_attr not in workspace_attrs:
missing_wksp_attrs.append((inp, val))
# Anything else is a literal
return invalid_inputs, invalid_outputs, missing_attrs, missing_wksp_attrs
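# Illustrative sketch of what _validate_helper flags (names are hypothetical).
# Given a method configuration with inputs such as
#   {'wf.bam': 'this.bam_path', 'wf.ref': 'workspace.ref_fasta'}
# run against a sample lacking a 'bam_path' attribute and a workspace lacking
# 'ref_fasta', the helper would return something like
#   ([], [], [('wf.bam', 'this.bam_path')], [('wf.ref', 'workspace.ref_fasta')])
# i.e. (invalid_inputs, invalid_outputs, missing_entity_attrs,
# missing_workspace_attrs).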
@fiss_cmd
def runnable(args):
""" Show me what can be run in a given workspace """
w = fapi.get_workspace(args.project, args.workspace)
fapi._check_response_code(w, 200)
workspace_d = w.json()
if args.config and args.namespace and not args.entity:
# See what entities I can run on with this config
r = fapi.validate_config(args.project, args.workspace, args.namespace,
args.config)
fapi._check_response_code(r, 200)
config_d = r.json()
# First validate without any sample sets
errs = sum(_validate_helper(args, config_d, workspace_d, None), [])
if errs:
print("Configuration contains invalid expressions")
return 1
# Now get all the possible entities, and evaluate each
entity_type = config_d['methodConfiguration']['rootEntityType']
ent_r = fapi.get_entities(args.project, args.workspace, entity_type)
        fapi._check_response_code(ent_r, 200)
entities = ent_r.json()
can_run_on = []
cannot_run_on = []
# Validate every entity
for entity_d in entities:
# If there are errors in the validation
if sum(_validate_helper(args, config_d, workspace_d, entity_d), []):
cannot_run_on.append(entity_d['name'])
else:
can_run_on.append(entity_d['name'])
# Print what can be run
if can_run_on:
print("{0} CAN be run on {1} {2}(s):".format(args.config, len(can_run_on), entity_type))
print("\n".join(can_run_on)+"\n")
print("{0} CANNOT be run on {1} {2}(s)".format(args.config, len(cannot_run_on), entity_type))
#print("\n".join(cannot_run_on))
# See what method configs are possible for the given sample set
elif args.entity and args.entity_type and not args.config:
entity_r = fapi.get_entity(args.project, args.workspace,
args.entity_type, args.entity)
fapi._check_response_code(entity_r, [200,404])
if entity_r.status_code == 404:
print("Error: No {0} named '{1}'".format(args.entity_type, args.entity))
return 2
entity_d = entity_r.json()
# Now get all the method configs in the workspace
conf_r = fapi.list_workspace_configs(args.project, args.workspace)
fapi._check_response_code(conf_r, 200)
# Iterate over configs in the workspace, and validate against them
for cfg in conf_r.json():
# If we limit search to a particular namespace, skip ones that don't match
if args.namespace and cfg['namespace'] != args.namespace:
continue
# But we have to get the full description
r = fapi.validate_config(args.project, args.workspace,
cfg['namespace'], cfg['name'])
fapi._check_response_code(r, [200, 404])
if r.status_code == 404:
# Permission error, continue
continue
config_d = r.json()
errs = sum(_validate_helper(args, config_d, workspace_d, entity_d),[])
if not errs:
print(cfg['namespace'] + "/" + cfg['name'])
elif args.entity_type:
        # Last mode: build a validation matrix of method configs vs. entities
        # of the given type. First get all entities of that type
ent_r = fapi.get_entities(args.project, args.workspace, args.entity_type)
fapi._check_response_code(ent_r, 200)
entities = ent_r.json()
entity_names = sorted(e['name'] for e in entities)
conf_r = fapi.list_workspace_configs(args.project, args.workspace)
fapi._check_response_code(conf_r, 200)
conf_list = conf_r.json()
config_names = sorted(c['namespace'] + '/' + c['name'] for c in conf_list)
mat = {c:dict() for c in config_names}
        # Now iterate over configs in the workspace, validating each against
        # every entity to build up the matrix
for cfg in conf_list:
# If we limit search to a particular namespace, skip ones that don't match
if args.namespace and cfg['namespace'] != args.namespace:
continue
# But we have to get the full description
r = fapi.validate_config(args.project, args.workspace,
cfg['namespace'], cfg['name'])
fapi._check_response_code(r, [200, 404])
if r.status_code == 404:
# Permission error, continue
continue
config_d = r.json()
# Validate against every entity
for entity_d in entities:
errs = sum(_validate_helper(args, config_d, workspace_d, entity_d),[])
#TODO: True/False? Y/N?
symbol = "X" if not errs else ""
cfg_name = cfg['namespace'] + '/' + cfg['name']
mat[cfg_name][entity_d['name']] = symbol
# Now print the validation matrix
# headers
print("Namespace/Method Config\t" + "\t".join(entity_names))
for conf in config_names:
print(conf + "\t" + "\t".join(mat[conf][e] for e in entity_names))
else:
print("runnable requires a namespace+configuration or entity type")
return 1
#################################################
# Utilities
#################################################
def _make_set_export_cmd(subparsers, parent_parsers, type, prefix):
subp = subparsers.add_parser(prefix + '_export',
parents = parent_parsers,
description='Export a %s entity from a given workspace, returning '\
'a list of lines in TSV form which completely defines the %s and '\
'is suitable for reloading with entity_import.' % (type, type))
subp.set_defaults(func=set_export, entity_type=type)
def _confirm_prompt(message, prompt="\nAre you sure? [y/yes (default: no)]: ",
affirmations=("Y", "Yes", "yes", "y")):
"""
Display a message, then confirmation prompt, and return true
if the user responds with one of the affirmations.
"""
answer = input(message + prompt)
return answer in affirmations
def _nonempty_project(string):
"""
Argparse validator for ensuring a workspace is provided
"""
value = str(string)
if len(value) == 0:
msg = "No project provided and no default project configured"
raise argparse.ArgumentTypeError(msg)
return value
def _entity_paginator(namespace, workspace, etype, page_size=500,
filter_terms=None, sort_direction="asc"):
"""Pages through the get_entities_query endpoint to get all entities in
the workspace without crashing.
"""
page = 1
all_entities = []
# Make initial request
r = fapi.get_entities_query(namespace, workspace, etype, page=page,
page_size=page_size, sort_direction=sort_direction,
filter_terms=filter_terms)
fapi._check_response_code(r, 200)
response_body = r.json()
# Get the total number of pages
total_pages = response_body['resultMetadata']['filteredPageCount']
# append the first set of results
entities = response_body['results']
all_entities.extend(entities)
# Now iterate over remaining pages to retrieve all the results
page = 2
while page <= total_pages:
r = fapi.get_entities_query(namespace, workspace, etype, page=page,
page_size=page_size, sort_direction=sort_direction,
filter_terms=filter_terms)
fapi._check_response_code(r, 200)
entities = r.json()['results']
all_entities.extend(entities)
page += 1
return all_entities
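# Illustrative sketch of a typical _entity_paginator call (project/workspace
# names are hypothetical): fetch every sample in a workspace 1000 at a time,
# then pull out just the entity names, mirroring how the *_list commands above
# use it:
#   samples = _entity_paginator('my-project', 'my-space', 'sample',
#                               page_size=1000, filter_terms=None,
#                               sort_direction='asc')
#   names = [s['name'] for s in samples]
# Each element is the dict returned by the get_entities_query endpoint, with at
# least 'name', 'entityType' and 'attributes' keys.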
def eprint(*args, **kwargs):
""" Print a message to stderr """
print(*args, file=sys.stderr, **kwargs)
def __cmd_to_func(cmd):
""" Returns the function object in this module matching cmd. """
fiss_module = sys.modules[__name__]
# Returns None if string is not a recognized FISS command
func = getattr(fiss_module, cmd, None)
if func and not hasattr(func, 'fiss_cmd'):
func = None
return func
def _valid_headerline(l, model='firecloud'):
"""return true if the given string is a valid loadfile header"""
if not l:
return False
headers = l.split('\t')
first_col = headers[0]
tsplit = first_col.split(':')
if len(tsplit) != 2:
return False
if tsplit[0] in ('entity', 'update'):
if model == 'flexible':
return tsplit[1].endswith('_id')
else:
return tsplit[1] in ('participant_id', 'participant_set_id',
'sample_id', 'sample_set_id',
'pair_id', 'pair_set_id')
elif tsplit[0] == 'membership':
if len(headers) < 2:
return False
# membership:sample_set_id sample_id, e.g.
return tsplit[1].replace('set_', '') == headers[1]
else:
return False
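# Illustrative examples of headers accepted/rejected by _valid_headerline
# (attribute names are hypothetical):
#   _valid_headerline('entity:sample_id\tbam_path')                 -> True
#   _valid_headerline('update:participant_id\tage')                 -> True
#   _valid_headerline('membership:sample_set_id\tsample_id')        -> True
#   _valid_headerline('entity:widget_id\tcolor')                    -> False
#   _valid_headerline('entity:widget_id\tcolor', model='flexible')  -> True
#   _valid_headerline('sample_id\tbam_path')                        -> False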
def _batch_load(project, workspace, headerline, entity_data, chunk_size=500,
model='firecloud'):
""" Submit a large number of entity updates in batches of chunk_size """
if fcconfig.verbosity:
print("Batching " + str(len(entity_data)) + " updates to Firecloud...")
# Parse the entity type from the first cell, e.g. "entity:sample_id"
# First check that the header is valid
if not _valid_headerline(headerline, model):
eprint("Invalid loadfile header:\n" + headerline)
return 1
update_type = "membership" if headerline.startswith("membership") else "entity"
etype = headerline.split('\t')[0].split(':')[1].replace("_id", "")
# Split entity_data into chunks
    total = (len(entity_data) + chunk_size - 1) // chunk_size  # number of batches (ceiling division)
batch = 0
for i in range(0, len(entity_data), chunk_size):
batch += 1
if fcconfig.verbosity:
print("Updating {0} {1}s {2}-{3}, batch {4}/{5}".format(
etype, update_type, i+1, min(i+chunk_size, len(entity_data)),
batch, total))
this_data = headerline + '\n' + '\n'.join(entity_data[i:i+chunk_size])
# Now push the entity data to firecloud
r = fapi.upload_entities(project, workspace, this_data, model)
fapi._check_response_code(r, 200)
return 0
__PatternsToFilter = [
# This provides a systematic way of turning complex FireCloud messages into
# shorter, more comprehensible feedback: each filter entry has the form
# [regex_to_match, replacement_template, match_groups_to_fill_in_template]
    [r'^(.+)SlickWorkspaceContext\(Workspace\(([^,]+),([^,]*).*$', '%s%s::%s', (1,2,3) ],
]
for i in range(len(__PatternsToFilter)):
__PatternsToFilter[i][0] = re.compile(__PatternsToFilter[i][0])
def __pretty_print_fc_exception(e):
# Look for integer error code, but fallback to the exception's classname
preface = 'Error '
code = getattr(e, "code", type(e).__name__)
if fcconfig.verbosity:
(_, _, trback) = sys.exc_info()
print_traceback(trback)
try:
# Attempt to unpack error message as JSON
e = json.loads(e.args[0])
# Default to 'FireCloud' if component which gave error was not specified
source = ' (' + e.get('source','FireCloud') + ')'
msg = e['message']
for pattern in __PatternsToFilter:
match = pattern[0].match(msg)
if match:
msg = pattern[1] % (match.group(*(pattern[2])))
break
except Exception as je:
# Could not unpack error to JSON, fallback to original message
if isinstance(code, str):
preface = ''
source = ''
msg = "{0}".format(e)
print("{0}{1}{2}: {3}".format(preface, code, source, msg))
return 99
def printToCLI(value):
retval = value if isinstance(value, int) else 0
if isinstance(value, dict):
# See attr_get for genesis of __header__
header = value.pop("__header__", None)
if header:
print('\t'.join(header))
for k, v in sorted(value.items()):
print(u'{0}\t{1}'.format(k, v))
elif isinstance(value, (list, tuple)):
list(map(lambda v: print(v), value))
elif not isinstance(value, int):
if isinstance(value, text_type):
print(value)
else:
print(u("{0}".format(value)))
return retval
#################################################
# Main entrypoints
#################################################
def main(argv=None):
# Use this entry point to call high level api and have objects returned,
# (see firecloud/tests/highlevel_tests.py:call_func for usage examples)
if not argv:
argv = sys.argv
proj_required = not bool(fcconfig.project)
meth_ns_required = not bool(fcconfig.method_ns)
workspace_required = not bool(fcconfig.workspace)
etype_required = not bool(fcconfig.entity_type)
etype_choices = ['participant', 'participant_set', 'sample', 'sample_set',
'pair', 'pair_set']
# Initialize core parser (TODO: Add longer description)
usage = 'fissfc [OPTIONS] [CMD [-h | arg ...]]'
epilog = 'positional arguments:\n' + \
' CMD [-h | arg ...] Command and arguments to run.'
parser = argparse.ArgumentParser(description='FISS: The FireCloud CLI',
formatter_class=argparse.RawDescriptionHelpFormatter,
usage=usage, epilog=epilog)
# Core Flags
parser.add_argument('-u', '--url', dest='api_url', default=None,
help='Firecloud API root URL [default: %s]' % fcconfig.root_url)
parser.add_argument('-c', '--credentials', default=None,
help='Firecloud credentials file')
parser.add_argument("-v", "--version", action='version',version=__version__)
parser.add_argument('-V', '--verbose', action='count', default=0,
help='Emit progressively more detailed feedback during execution, '
'e.g. to confirm when actions have completed or to show URL '
'and parameters of REST calls. Multiple -V may be given.')
parser.add_argument("-y", "--yes", action='store_true',
help="Assume yes for any prompts")
parser.add_argument("-l", "--list", nargs='?', metavar="CMD",
help="list or search available commands and exit")
parser.add_argument("-F", "--function", nargs='+', metavar="CMD",
help="Show the code for the given command(s) and exit")
# Many commands share arguments, and we can make parent parsers to make it
# easier to reuse arguments. Commands that operate on workspaces
# all take a (google) project and a workspace name
workspace_parent = argparse.ArgumentParser(add_help=False)
workspace_parent.add_argument('-w', '--workspace',
default=fcconfig.workspace, required=workspace_required,
help='Workspace name (required if no default workspace configured)')
proj_help = 'Project (workspace namespace). Required if no default ' \
'project was configured'
workspace_parent.add_argument('-p', '--project', default=fcconfig.project,
help=proj_help, required=proj_required)
dest_space_parent = argparse.ArgumentParser(add_help=False)
dest_space_parent.add_argument("-P", "--to-project",
help="Project (Namespace) of clone workspace")
# FIXME: change to --tospace
dest_space_parent.add_argument("-W", "--to-workspace",
help="Name of clone workspace")
# Commands that update ACL roles require a role and list of users
acl_parent = argparse.ArgumentParser(add_help=False)
acl_parent.add_argument('-r', '--role', help='ACL role', required=True,
choices=['OWNER', 'READER', 'WRITER', 'NO ACCESS'])
acl_parent.add_argument('--users', nargs='+', required=True, metavar='user',
help='FireCloud usernames. Use "public" to set global permissions.')
# Commands that operates on entity_types
etype_parent = argparse.ArgumentParser(add_help=False)
etype_help = \
"Entity type, required if no default entity_type was configured"
etype_parent.add_argument('-t', '--entity-type', required=etype_required,
default=fcconfig.entity_type, help=etype_help)
# Commands that require an entity name
entity_parent = argparse.ArgumentParser(add_help=False)
entity_parent.add_argument('-e', '--entity', required=True,
help="Entity name (required)")
# Commands that work with methods
meth_parent = argparse.ArgumentParser(add_help=False)
meth_parent.add_argument('-m', '--method', required=True,
help='method name')
meth_parent.add_argument('-n', '--namespace', help='Method namespace',
default=fcconfig.method_ns,
required=meth_ns_required)
# Commands that work with method configurations
conf_parent = argparse.ArgumentParser(add_help=False)
conf_parent.add_argument('-c', '--config', required=True,
help='Method config name')
conf_parent.add_argument('-n', '--namespace', default=fcconfig.method_ns,
help='Method config namespace',
required=meth_ns_required)
# Commands that need a snapshot_id
snapshot_parent = argparse.ArgumentParser(add_help=False)
snapshot_parent.add_argument('-i', '--snapshot-id', required=True,
help="Snapshot ID (version) of method/config")
# Commands that take an optional list of attributes
attr_parent = argparse.ArgumentParser(add_help=False)
attr_parent.add_argument('-a', '--attributes', nargs='*', metavar='attr',
help='List of attributes')
# Create one subparser for each fiss equivalent
subparsers = parser.add_subparsers(prog='fissfc [OPTIONS]',
help=argparse.SUPPRESS)
# Create Workspace
subp = subparsers.add_parser('space_new', parents=[workspace_parent],
description='Create new workspace')
phelp = 'Limit access to the workspace to a specific authorization ' \
'domain. For dbGaP-controlled access (domain name: ' \
'dbGapAuthorizedUsers) you must have linked NIH credentials to ' \
'your account.'
subp.add_argument('--authdomain', default="", help=phelp)
subp.set_defaults(func=space_new)
# Determine existence of workspace
subp = subparsers.add_parser('space_exists', parents=[workspace_parent],
description='Determine if given workspace exists in given project')
phelp = 'Do not print message, only return numeric status'
subp.add_argument('-q', '--quiet', action='store_true', help=phelp)
subp.set_defaults(func=space_exists)
# Delete workspace
subp = subparsers.add_parser('space_delete', description='Delete workspace')
subp.add_argument('-w', '--workspace', help='Workspace name', required=True)
subp.add_argument('-p', '--project', default=fcconfig.project,
help=proj_help, required=proj_required)
subp.set_defaults(func=space_delete)
# Get workspace information
subp = subparsers.add_parser('space_info', parents=[workspace_parent],
description='Show workspace information')
subp.set_defaults(func=space_info)
# List workspaces
subp = subparsers.add_parser('space_list',
description='List available workspaces in projects (namespaces) ' +
'to which you have access. If you have a config ' +
'file which defines a default project, then only ' +
'the workspaces in that project will be listed.')
subp.add_argument('-p', '--project', default=fcconfig.project,
help='List spaces for projects whose names start with this ' +
'prefix. You may also specify . (a dot), to list everything.')
subp.set_defaults(func=space_list)
# Lock workspace
subp = subparsers.add_parser('space_lock', description='Lock a workspace',
parents=[workspace_parent])
subp.set_defaults(func=space_lock)
# Unlock Workspace
subp = subparsers.add_parser('space_unlock', parents=[workspace_parent],
description='Unlock a workspace')
subp.set_defaults(func=space_unlock)
# Clone workspace
clone_desc = 'Clone a workspace. The destination namespace or name must ' \
'be different from the workspace being cloned'
subp = subparsers.add_parser('space_clone', description=clone_desc,
parents=[workspace_parent, dest_space_parent])
subp.add_argument('-f', '--copyFilesWithPrefix', help='Specify a prefix ' +
'of bucket objects to copy to the destination workspace')
subp.set_defaults(func=space_clone)
# Import data into a workspace
subp = subparsers.add_parser('entity_import', parents=[workspace_parent],
description='Import data into a workspace')
subp.add_argument('-f','--tsvfile', required=True,
help='Tab-delimited loadfile')
subp.add_argument('-C', '--chunk-size', default=500, type=int,
help='Maximum entities to import per api call')
subp.add_argument('-m', '--model', default='firecloud',
choices=['firecloud', 'flexible'], help='Data Model ' +
'type. "%(default)s" (default) or "flexible"')
subp.set_defaults(func=entity_import)
# Export data (set/container entity) from workspace
export_cmd_args = [workspace_parent, entity_parent]
_make_set_export_cmd(subparsers, export_cmd_args, 'sample_set', 'sset')
_make_set_export_cmd(subparsers, export_cmd_args, 'pair_set', 'pset')
_make_set_export_cmd(subparsers, export_cmd_args, 'participant_set','ptset')
# List of entity types in a workspace
subp = subparsers.add_parser(
'entity_types', parents=[workspace_parent],
description='List entity types in a workspace')
subp.set_defaults(func=entity_types)
# List of entities in a workspace
subp = subparsers.add_parser(
        'entity_list', description='List entities in a workspace',
parents=[workspace_parent])
subp.set_defaults(func=entity_list)
# List of entities in a workspace
subp = subparsers.add_parser(
'entity_tsv', description='Get list of entities in TSV format. ' +
'Download files for which the encoding is undetected (e.g. ZIP ' +
'archives).',
parents=[workspace_parent, etype_parent])
subp.add_argument('-a', '--attrs', nargs='*',
help='list of ordered attribute names')
subp.add_argument('-m', '--model', default='firecloud',
choices=['firecloud', 'flexible'], help='Data Model ' +
'type. "%(default)s" (default) or "flexible"')
subp.set_defaults(func=entity_tsv)
# List of participants
subp = subparsers.add_parser(
'participant_list',
parents=[workspace_parent],
description='Return list of participants within a given container, '\
'which by default is the workspace; otherwise, participants in '\
'the named entity will be listed. If an entity is named but no ' \
'type is given, then participant_set is assumed. The containers ' \
'supported are: participant, participant_set, workspace'
)
subp.add_argument('-e', '--entity', default=None,
help='Entity name, to list participants within container entities')
subp.add_argument('-t', '--entity-type', default='participant_set',
                      help='The type for named entity [default: %(default)s]')
subp.set_defaults(func=participant_list)
# List of pairs
subp = subparsers.add_parser(
'pair_list',
parents=[workspace_parent],
description='Return the list of pairs within a given container, ' \
'which by default is the workspace; otherwise, the pairs within '\
'the named entity will be listed. If an entity is named but no '\
'type is given, then pair_set will be assumed. The containers '\
'supported are: pair, pair_set, participant, workspace')
subp.add_argument('-e', '--entity', default=None,
help='Entity name, to list pairs within container entities')
subp.add_argument('-t', '--entity-type', default='pair_set',
                      help='The type for named entity [default: %(default)s]')
subp.set_defaults(func=pair_list)
# List of samples
subp = subparsers.add_parser(
'sample_list',
parents=[workspace_parent],
description='Return the list of samples within a given container, ' \
'which by default is the workspace; otherwise, the samples within '\
'the named entity will be listed. If an entity is named but no '\
        'type is given, then sample_set is assumed. The containers '\
'supported are:\n'\
'sample, sample_set, pair, participant, workspace')
subp.add_argument('-e', '--entity', default=None,
help='Entity name, to list samples within container entities')
subp.add_argument('-t', '--entity-type', default='sample_set',
                      help='The type for named entity [default: %(default)s]')
subp.set_defaults(func=sample_list)
# List of sample sets
subp = subparsers.add_parser(
'sset_list', description='List sample sets in a workspace',
parents=[workspace_parent])
subp.set_defaults(func=sset_list)
# Delete entity in a workspace
subp = subparsers.add_parser(
'entity_delete', description='Delete entity in a workspace',
parents=[workspace_parent, etype_parent, entity_parent])
subp.set_defaults(func=entity_delete)
subp = subparsers.add_parser(
'participant_delete', description='Delete participant in a workspace',
parents=[workspace_parent, entity_parent])
subp.set_defaults(func=participant_delete)
subp = subparsers.add_parser(
'sample_delete', description='Delete sample in a workspace',
parents=[workspace_parent, entity_parent])
subp.set_defaults(func=sample_delete)
subp = subparsers.add_parser(
'sset_delete', description='Delete sample set in a workspace',
parents=[workspace_parent, entity_parent])
subp.set_defaults(func=sset_delete)
# Show workspace roles
subp = subparsers.add_parser(
'space_acl', description='Show users and roles in workspace',
parents=[workspace_parent])
subp.set_defaults(func=space_acl)
# Set workspace roles
subp = subparsers.add_parser('space_set_acl',
        description='Set roles for a list of users in a workspace',
parents=[workspace_parent, acl_parent])
subp.set_defaults(func=space_set_acl)
# Push a new workflow to the methods repo
subp = subparsers.add_parser('meth_new', parents=[meth_parent],
description='Install a method definition to the repository')
subp.add_argument('-d','--wdl', required=True,
help='Method definiton, as a file of WDL (Workflow ' +
'Description Language)')
subp.add_argument('-s', '--synopsis',
help='Short (<80 chars) description of method')
subp.add_argument('--doc', help='Optional documentation file <10Kb')
subp.add_argument('-c', '--comment', metavar='SNAPSHOT_COMMENT',
help='Optional comment specific to this snapshot',
default='')
subp.set_defaults(func=meth_new)
# Redact a method
subp = subparsers.add_parser('meth_delete',
description='Redact method from the methods repository',
parents=[meth_parent, snapshot_parent])
subp.set_defaults(func=meth_delete)
    # Retrieve the WDL of a method
subp = subparsers.add_parser('meth_wdl',
description='Retrieve the WDL of a method',
parents=[meth_parent, snapshot_parent])
subp.set_defaults(func=meth_wdl)
# Access control list operations (upon methods)
# Get ACL
subp = subparsers.add_parser('meth_acl',
description='Show users and roles for a method',
parents=[meth_parent, snapshot_parent])
subp.set_defaults(func=meth_acl)
# Set ACL
subp = subparsers.add_parser('meth_set_acl',
description='Assign an ACL role to a list of users for a workflow',
parents=[meth_parent, acl_parent])
subp.add_argument('-i', '--snapshot-id',
help="Snapshot ID (version) of method/config")
subp.set_defaults(func=meth_set_acl)
# List available methods
subp = subparsers.add_parser('meth_list',
description='List available workflows')
subp.add_argument('-m', '--method', default=None,
help='name of single workflow to search for (optional)')
subp.add_argument('-n', '--namespace', default=None,
                      help='Namespace of the workflow(s) to search for (optional)')
subp.add_argument('-i', '--snapshot-id', default=None,
help="Snapshot ID (version) of method/config")
subp.set_defaults(func=meth_list)
subp = subparsers.add_parser('meth_exists',
description='Determine if named workflow exists in method repository')
subp.add_argument('method', help='name of method to search for in repository')
subp.set_defaults(func=meth_exists)
# Configuration: list
subp = subparsers.add_parser(
'config_list', description='List available configurations')
subp.add_argument('-w', '--workspace', help='Workspace name')
subp.add_argument('-p', '--project', default=fcconfig.project,
help=proj_help)
subp.add_argument('-c', '--config', default=None,
                      help='Name of a single method config to search for (optional)')
subp.add_argument('-n', '--namespace', default=None,
                      help='Namespace of the method config(s) to search for (optional)')
subp.add_argument('-i', '--snapshot-id', default=None,
help="Snapshot ID (version) of config (optional)")
subp.set_defaults(func=config_list)
# Configuration: delete
subp = subparsers.add_parser('config_delete', parents=[conf_parent],
description='Delete a workspace configuration')
subp.add_argument('-w', '--workspace', help='Workspace name',
default=fcconfig.workspace, required=workspace_required)
subp.add_argument('-p', '--project', default=fcconfig.project,
help=proj_help, required=proj_required)
subp.set_defaults(func=config_delete)
# Method configuration commands
subp = subparsers.add_parser('config_get',
description='Retrieve method configuration definition',
parents=[conf_parent])
subp.add_argument('-w', '--workspace', help='Workspace name',
default=fcconfig.workspace, required=workspace_required)
subp.add_argument('-p', '--project', default=fcconfig.project,
help=proj_help, required=proj_required)
subp.set_defaults(func=config_get)
subp = subparsers.add_parser('config_wdl',
description='Retrieve method configuration WDL',
parents=[conf_parent])
subp.add_argument('-w', '--workspace', help='Workspace name',
default=fcconfig.workspace, required=workspace_required)
subp.add_argument('-p', '--project', default=fcconfig.project,
help=proj_help, required=proj_required)
subp.set_defaults(func=config_wdl)
subp = subparsers.add_parser('config_diff',
description='Compare method configuration definitions across workspaces',
parents=[conf_parent])
subp.add_argument('-w', '--workspace', help='First Workspace name',
default=fcconfig.workspace, required=workspace_required)
subp.add_argument('-p', '--project', default=fcconfig.project,
help="First " + proj_help, required=proj_required)
subp.add_argument('-C', '--Config', help="Second method config name")
subp.add_argument('-N', '--Namespace', help="Second method config namespace")
subp.add_argument('-W', '--Workspace', help='Second Workspace name',
default=fcconfig.workspace, required=workspace_required)
subp.add_argument('-P', '--Project', default=fcconfig.project,
help="Second " + proj_help, required=proj_required)
subp.set_defaults(func=config_diff)
subp = subparsers.add_parser('config_copy', description=
'Copy a method config to a new name/space/namespace/project, ' +
'at least one of which MUST be specified.', parents=[conf_parent])
subp.add_argument('-p', '--fromproject', default=fcconfig.project,
help=proj_help, required=proj_required)
subp.add_argument('-s', '--fromspace', help='from workspace',
default=fcconfig.workspace, required=workspace_required)
subp.add_argument('-C', '--toname', help='name of the copied config')
subp.add_argument('-S', '--tospace', help='destination workspace')
subp.add_argument("-N", "--tonamespace", help="destination namespace")
subp.add_argument("-P", "--toproject", help="destination project")
subp.set_defaults(func=config_copy)
subp = subparsers.add_parser('config_new', description=config_new.__doc__,
parents=[meth_parent, snapshot_parent, workspace_parent, etype_parent])
subp.add_argument('-c', '--configname', default=None,
help='name of new config; if unspecified, method name will be used')
subp.set_defaults(func=config_new)
subp = subparsers.add_parser('config_template',
parents=[meth_parent, snapshot_parent],
description='Generate a template method ' +
'configuration, from the given ' +
'repository method')
subp.add_argument('-c', '--configname', default=None,
help='name of new config; if unspecified, method name will be used')
subp.add_argument('-t', '--entity-type', required=False, default='',
help='Root entity type, over which method config will execute')
subp.set_defaults(func=config_template)
subp = subparsers.add_parser('config_put', parents=[workspace_parent],
description=config_put.__doc__)
subp.add_argument('-c', '--config', required=True,
help='Method configuration definition, as described above')
subp.set_defaults(func=config_put)
subp = subparsers.add_parser('config_acl',
description='Show users and roles for a method configuration',
parents=[conf_parent, snapshot_parent])
subp.set_defaults(func=config_acl)
# FIXME: continue subp = ... meme below, instead of uniquely naming each
# subparse; better yet, most of this can be greatly collapsed and
# pushed into a separate function and/or auto-generated
# Set ACL
subp = subparsers.add_parser('config_set_acl', description='Assign an ' +
'ACL role to a list of users for a config',
parents=[conf_parent, acl_parent])
subp.add_argument('-i', '--snapshot-id',
help="Snapshot ID (version) of method/config")
subp.set_defaults(func=config_set_acl)
# Status
subp = subparsers.add_parser('health',
description='Show health of FireCloud services')
subp.set_defaults(func=health)
subp = subparsers.add_parser('attr_get',
description='Retrieve attribute values from an entity identified by ' +
'name and type. If either name or type are omitted then workspace ' +
'attributes will be returned.',
parents=[workspace_parent, attr_parent])
# etype_parent not used for attr_get, because entity type is optional
subp.add_argument('-t', '--entity-type', choices=etype_choices + ['ref'], default='',
required=False, help='Entity type to retrieve ' +
'attributes from.')
subp.add_argument('-e', '--entity',
help="Entity or reference to retrieve attributes from")
subp.add_argument('-s', '--ws_attrs', action='store_true',
                      help="Retrieve workspace attributes only (no referenceData attributes).")
subp.set_defaults(func=attr_get)
subp = subparsers.add_parser('attr_set', parents=[workspace_parent],
description="Set attributes on a workspace")
subp.add_argument('-a', '--attribute', required=True, metavar='attr',
help='Name of attribute to set')
subp.add_argument('-v', '--value', required=True, help='Attribute value')
subp.add_argument('-t', '--entity-type', choices=etype_choices,
required=etype_required, default=fcconfig.entity_type,
help=etype_help)
subp.add_argument('-e', '--entity', help="Entity to set attribute on")
subp.set_defaults(func=attr_set)
subp = subparsers.add_parser('attr_list', parents=[workspace_parent],
description='Retrieve names of attributes attached to given entity. ' +
'If no entity Type+Name is given, workspace-level ' +
'attributes will be listed.')
# FIXME: this should explain that default entity is workspace
subp.add_argument('-e', '--entity', help="Entity name or referenceData name.")
subp.add_argument('-t', '--entity-type', choices=etype_choices + ['ref'],
required=False, default=fcconfig.entity_type,
help='Entity type to retrieve attributes from.')
subp.add_argument('-s', '--ws_attrs', action='store_true',
                      help="Retrieve workspace attributes only (no referenceData attributes).")
subp.set_defaults(func=attr_list)
# Copy attributes
subp = subparsers.add_parser(
'attr_copy', description="Copy workspace attributes between workspaces",
parents=[workspace_parent, dest_space_parent, attr_parent])
subp.set_defaults(func=attr_copy)
# Delete attributes
subp = subparsers.add_parser(
'attr_delete', description="Delete attributes in a workspace",
parents=[workspace_parent, attr_parent])
subp.add_argument('-t', '--entity-type', choices=etype_choices,
required=etype_required, default=fcconfig.entity_type,
help=etype_help)
subp.add_argument('-e', '--entities', nargs='*', help='FireCloud entities')
subp.set_defaults(func=attr_delete)
# Set null sentinel values
subp = subparsers.add_parser(
'attr_fill_null', parents=[workspace_parent, etype_parent, attr_parent],
description='Assign NULL sentinel value to attributes')
subp.add_argument("-o", "--to-loadfile", metavar='loadfile',
help="Save changes to provided loadfile, but do not " +
"perform update")
subp.set_defaults(func=attr_fill_null)
# Delete unreferenced files from a workspace's bucket
subp = subparsers.add_parser(
'mop', description='Remove unused files from a workspace\'s bucket',
parents=[workspace_parent])
subp.add_argument('--dry-run', action='store_true',
help='Show deletions that would be performed')
group = subp.add_mutually_exclusive_group()
group.add_argument('-i', '--include', nargs='+', metavar="glob",
help="Only delete unreferenced files matching the " +
"given UNIX glob-style pattern(s)")
group.add_argument('-x', '--exclude', nargs='+', metavar="glob",
help="Only delete unreferenced files that don't match" +
" the given UNIX glob-style pattern(s)")
subp.set_defaults(func=mop)
    # List all invalid file attributes of a workspace and its entities
subp = subparsers.add_parser('validate_file_attrs',
parents=[workspace_parent],
description='List all invalid file ' + \
                                 'attributes of a workspace and its entities')
subp.set_defaults(func=validate_file_attrs)
subp = subparsers.add_parser('noop',
description='Simple no-op command, for ' +
'exercising interface')
subp.set_defaults(func=noop, proj=fcconfig.project, space=fcconfig.workspace)
subp = subparsers.add_parser('config',
description='Display value(s) of one or more configuration variables')
subp.add_argument('variables', nargs='*',
help='Name of configuration variable(s) (e.g. workspace, project). '
'If no name is given, all config variables will be displayed.')
subp.set_defaults(func=config_cmd)
# Invoke a method configuration
subp = subparsers.add_parser('config_start',
description='Start running workflow in a given space',
parents=[workspace_parent, conf_parent])
subp.add_argument('-e', '--entity', help="Entity name (required if " +
"executing on an entity)")
# Duplicate entity type here since we want sample_set to be default
subp.add_argument('-t', '--entity-type', default='sample_set',
choices=etype_choices,
help='Entity type of specified entity. Not used if no ' +
'entity is named. Default: %(default)s')
    expr_help = "(optional) Entity expression to use when entity type " \
                "doesn't match the method configuration. " \
                "Example: 'this.samples'"
subp.add_argument('-x', '--expression', help=expr_help, default='')
subp.add_argument('-C', '--cache', default=True,
help='boolean: use previously cached results if possible [%(default)s]')
subp.set_defaults(func=config_start)
# Abort a running method configuration
subp = subparsers.add_parser('config_stop',
description='Stop running submission ID in a given space',
parents=[workspace_parent])
subp.add_argument('-i', '--submission_id', required=True)
subp.set_defaults(func=config_stop)
# Loop over sample sets, performing a command
ssloop_help = 'Loop over sample sets in a workspace, performing <action>'
subp = subparsers.add_parser(
'sset_loop', description=ssloop_help,
parents=[workspace_parent, attr_parent])
subp.add_argument('action', help='FISS command to execute')
subp.add_argument('-c', '--config',
help='Method configuration name')
subp.add_argument('-n', '--namespace',
help='Method configuration namespace')
khelp = "Loop through all sample sets, ignoring errors"
subp.add_argument('-k', '--keep-going', action='store_true',
help=khelp)
subp.add_argument('-x', '--expression', help=expr_help)
subp.set_defaults(func=sset_loop)
subp = subparsers.add_parser('monitor', help="Monitor submitted jobs.",
parents=[workspace_parent])
subp.set_defaults(func=monitor)
# Supervisor mode
sup_help = "Run a Firehose-style workflow of workflows specified in DOT"
subp = subparsers.add_parser('supervise', description=sup_help,
parents=[workspace_parent])
subp.add_argument('workflow', help='Workflow description in DOT')
subp.add_argument('-n', '--namespace', default=fcconfig.method_ns,
required=meth_ns_required,
help='Methods namespace')
subp.add_argument('-s', '--sample-sets', nargs='+',
help='Sample sets to run workflow on')
jhelp = "File to save monitor data. This file can be passed to " + \
"fissfc supervise_recover in case the supervisor crashes " + \
"(Default: %(default)s)"
recovery = os.path.expanduser('~/.fiss/monitor_data.json')
subp.add_argument('-j', '--json-checkpoint', default=recovery,
help=jhelp)
subp.set_defaults(func=supervise)
# Recover an old supervisor
rec_help = "Recover a supervisor submission from the checkpoint file"
subp = subparsers.add_parser('supervise_recover', description=rec_help)
subp.add_argument('recovery_file', default=recovery, nargs='?',
help='File where supervisor metadata was stored')
subp.set_defaults(func=supervise_recover)
# Space search
subp = subparsers.add_parser(
'space_search', description="Search for workspaces"
)
subp.add_argument('-b', '--bucket', help='Regex to match bucketName')
subp.set_defaults(func=space_search)
# Entity copy
subp = subparsers.add_parser(
'entity_copy', description='Copy entities from one workspace to another',
parents=[workspace_parent, dest_space_parent, etype_parent])
subp.add_argument('-e', '--entities', nargs='+', metavar='entity',
help='Entities to copy. If omitted, all entities will be copied.')
subp.add_argument('-l', '--link', action='store_true',
help='link new entities to existing entities')
subp.set_defaults(func=entity_copy)
# List billing projects
subp = subparsers.add_parser('proj_list',
description="List available billing projects")
subp.set_defaults(func=proj_list)
# Validate config
subp = subparsers.add_parser('config_validate', parents=[workspace_parent],
description="Validate a workspace configuration")
subp.add_argument('-e', '--entity',
help="Validate config against this entity. Entity is assumed to be " +
"the same type as the config's root entity type")
subp.add_argument('-c', '--config',
help='Method configuration name')
subp.add_argument('-n', '--namespace',
help='Method configuration namespace')
subp.set_defaults(func=config_validate)
subp = subparsers.add_parser('runnable', parents=[workspace_parent],
description="Show what configurations can be run on which entities.")
subp.add_argument('-c', '--config',
help='Method configuration name')
subp.add_argument('-n', '--namespace',
help='Method configuration namespace')
subp.add_argument('-e', '--entity',
help="Show me what configurations can be run on this entity")
subp.add_argument('-t', '--entity-type', choices=etype_choices,
required=etype_required, default=fcconfig.entity_type,
help=etype_help)
subp.set_defaults(func=runnable)
# Create the .fiss directory if it doesn't exist
fiss_home = os.path.expanduser("~/.fiss")
if not os.path.isdir(fiss_home):
os.makedirs(fiss_home)
result = None
# Special cases, print help with no arguments
if len(argv) == 1:
parser.print_help()
elif argv[1] in ('-l', '--list'):
# Print commands in a more readable way
choices=[]
for a in parser._actions:
if isinstance(a, argparse._SubParsersAction):
for choice, _ in a.choices.items():
choices.append(choice)
# next arg is search term, if specified
search = ''
if len(argv) > 2:
search = argv[2]
result = list(filter(lambda c: search in c, sorted(choices)))
elif argv[1] in ('-F', '--function'):
# Show source for remaining args
for fname in argv[2:]:
# Get module name
fiss_module = sys.modules[__name__]
try:
func = getattr(fiss_module, fname)
result = u(''.join(getsourcelines(func)[0]))
except AttributeError:
result = None
else:
# Otherwise parse args & call correct subcommand (skipping argv[0])
args = parser.parse_args(argv[1:])
# Ensure CLI flags have greatest precedence (e.g. over config file)
if args.verbose:
fcconfig.set_verbosity(args.verbose)
if args.api_url:
fcconfig.set_root_url(args.api_url)
if args.credentials:
fcconfig.set_credentials(args.credentials)
result = args.func(args)
if result is None:
result = 0
return result
def main_as_cli(argv=None):
'''Use this entry point to call HL fiss funcs as though from the UNIX CLI.
(see firecloud/tests/highlevel_tests.py:call_cli for usage examples)'''
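    # Illustrative call with hypothetical arguments, mirroring a shell invocation:
    #   main_as_cli(['fissfc', 'space_list', '-p', 'my-project'])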
try:
result = main(argv)
except Exception as e:
result = __pretty_print_fc_exception(e)
# FIXME: we should invert True/False return values to 0/1 here, to comply
# with UNIX exit code semantics (and avoid problems with make, scripts, etc)
return printToCLI(result)
if __name__ == '__main__':
sys.exit( main_as_cli() )
```
#### File: fiss/firecloud/method.py
```python
import json
from os.path import isfile
from firecloud.errors import FireCloudServerError
import firecloud.api as fapi
class Method(object):
"""A FireCloud Method.
Attributes:
namespace (str): Method namespace for this method
name (str): Method name
snapshot_id (int): Version number
wdl (str): WDL description
synopsis (str): Short description of task
documentation (str): Extra documentation for method
api_url (str): FireCloud API root
"""
def __init__(self, namespace, name,
snapshot_id, api_url=fapi.PROD_API_ROOT):
r = fapi.get_method(namespace, name, snapshot_id, api_url)
fapi._check_response_code(r, 200)
data = r.json()
self.namespace = namespace
self.name = name
self.snapshot_id = int(data["snapshotId"])
self.wdl = data["payload"]
self.synopsis = data["synopsis"]
self.documentation = data["documentation"]
self.api_url = api_url
@staticmethod
def new(namespace, name, wdl, synopsis,
documentation=None, api_url=fapi.PROD_API_ROOT):
"""Create new FireCloud method.
If the namespace + name already exists, a new snapshot is created.
Args:
namespace (str): Method namespace for this method
name (str): Method name
wdl (file): WDL description
synopsis (str): Short description of task
documentation (file): Extra documentation for method
"""
r = fapi.update_workflow(namespace, name, synopsis,
wdl, documentation, api_url)
fapi._check_response_code(r, 201)
d = r.json()
        return Method(namespace, name, d["snapshotId"], api_url)
def template(self):
"""Return a method template for this method."""
r = fapi.get_config_template(self.namespace, self.name,
self.snapshot_id, self.api_url)
fapi._check_response_code(r, 200)
return r.json()
def inputs_outputs(self):
"""Get information on method inputs & outputs."""
r = fapi.get_inputs_outputs(self.namespace, self.name,
self.snapshot_id, self.api_url)
fapi._check_response_code(r, 200)
return r.json()
def acl(self):
"""Get the access control list for this method."""
r = fapi.get_repository_method_acl(
self.namespace, self.name, self.snapshot_id, self.api_url)
fapi._check_response_code(r, 200)
return r.json()
def set_acl(self, role, users):
"""Set permissions for this method.
Args:
role (str): Access level
                one of {"OWNER", "READER", "WRITER", "NO ACCESS"}
users (list(str)): List of users to give role to
"""
acl_updates = [{"user": user, "role": role} for user in users]
r = fapi.update_repository_method_acl(
self.namespace, self.name, self.snapshot_id,
acl_updates, self.api_url
)
fapi._check_response_code(r, 200)
```
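A minimal usage sketch for the `Method` wrapper defined above; the namespace, method name, and snapshot id are placeholders, and the calls assume FireCloud credentials are already configured for `firecloud.api`.
```python
from firecloud.method import Method

# Hypothetical coordinates -- substitute a real method from your repository.
m = Method("my_namespace", "my_method", snapshot_id=1)

print(m.synopsis)                 # short description stored with the snapshot
template = m.template()           # dict: a blank method configuration template
io_info = m.inputs_outputs()      # declared workflow inputs and outputs
print(sorted(template.keys()), io_info)
```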
#### File: fiss/firecloud/workspace.py
```python
import json
import os
from firecloud import api as fapi
from firecloud.errors import FireCloudServerError
from firecloud.entity import Entity
class Workspace(object):
"""A FireCloud Workspace.
Attributes:
api_url (str): API root used to interact with FireCloud,
normally https://api.firecloud.org/api
namespace (str): Google project for this workspace
name (str): Workspace name
"""
def __init__(self, namespace, name, api_url=fapi.PROD_API_ROOT):
"""Get an existing workspace from Firecloud by name.
This method assumes that a workspace with the given name and
namespace is present at the api_url given, and raises an error
if it does not exist. To create a new workspace, use
Workspace.new()
Raises:
FireCloudServerError: Workspace does not exist, or
API call fails
"""
self.api_url = api_url
self.namespace = namespace
self.name = name
## Call out to FireCloud
r = fapi.get_workspace(namespace, name, api_url)
fapi._check_response_code(r, 200)
self.data = r.json()
@staticmethod
def new(namespace, name, protected=False,
attributes=dict(), api_url=fapi.PROD_API_ROOT):
"""Create a new FireCloud workspace.
Returns:
Workspace: A new FireCloud workspace
Raises:
FireCloudServerError: API call failed.
"""
r = fapi.create_workspace(namespace, name, protected, attributes, api_url)
fapi._check_response_code(r, 201)
return Workspace(namespace, name, api_url)
def refresh(self):
"""Reload workspace metadata from firecloud.
Workspace metadata is cached in the data attribute of a Workspace,
and may become stale, requiring a refresh().
"""
r = fapi.get_workspace(self.namespace, self.name, self.api_url)
fapi._check_response_code(r, 200)
self.data = r.json()
return self
def delete(self):
"""Delete the workspace from FireCloud.
Note:
This action cannot be undone. Be careful!
"""
r = fapi.delete_workspace(self.namespace, self.name)
fapi._check_response_code(r, 202)
# Getting useful information out of the bucket
def __str__(self):
"""Return a JSON representation of the bucket."""
return json.dumps(self.data, indent=2)
def bucket(self):
"""Return google bucket id for this workspace."""
return str(self.data["workspace"]["bucketName"])
def lock(self):
"""Lock this Workspace.
This causes the workspace to behave in a read-only way,
regardless of access permissions.
"""
r = fapi.lock_workspace(self.namespace, self.name, self.api_url)
fapi._check_response_code(r, 204)
self.data['workspace']['isLocked'] = True
return self
def unlock(self):
"""Unlock this Workspace."""
r = fapi.unlock_workspace(self.namespace, self.name, self.api_url)
fapi._check_response_code(r, 204)
self.data['workspace']['isLocked'] = False
return self
def attributes(self):
"""Return a dictionary of workspace attributes"""
return self.data["workspace"]["attributes"]
def get_attribute(self, attr):
"""Return value of workspace attribute.
If the attribute does not exist, return None
"""
return self.data["workspace"]["attributes"].get(attr, None)
def update_attribute(self, attr, value):
"""Set the value of a workspace attribute."""
update = [fapi._attr_up(attr, value)]
r = fapi.update_workspace_attributes(self.namespace, self.name,
update, self.api_url)
fapi._check_response_code(r, 200)
def remove_attribute(self, attr):
"""Remove attribute from a workspace.
Args:
attr (str): attribute name
"""
update = [fapi._attr_rem(attr)]
r = fapi.update_workspace_attributes(self.namespace, self.name,
update, self.api_url)
        fapi._check_response_code(r, 200)
        self.data["workspace"]["attributes"].pop(attr, None)
def import_tsv(self, tsv_file):
"""Upload entity data to workspace from tsv loadfile.
Args:
tsv_file (file): Tab-delimited file of entity data
"""
r = fapi.upload_entities_tsv(self.namespace, self.name,
                                     tsv_file, self.api_url)
fapi._check_response_code(r, 201)
def get_entity(self, etype, entity_id):
"""Return entity in this workspace.
Args:
etype (str): Entity type
entity_id (str): Entity name/unique id
"""
r = fapi.get_entity(self.namespace, self.name, etype,
entity_id, self.api_url)
fapi._check_response_code(r, 200)
dresp = r.json()
return Entity(etype, entity_id, dresp['attributes'])
def delete_entity(self, etype, entity_id):
"""Delete an entity in this workspace.
Args:
etype (str): Entity type
entity_id (str): Entity name/unique id
"""
r = fapi.delete_entity(self.namespace, self.name, etype,
entity_id, self.api_url)
fapi._check_response_code(r, 202)
def import_entities(self, entities):
"""Upload entity objects.
Args:
entities: iterable of firecloud.Entity objects.
"""
edata = Entity.create_payload(entities)
r = fapi.upload_entities(self.namespace, self.name,
edata, self.api_url)
fapi._check_response_code(r, 201)
def create_set(self, set_id, etype, entities):
"""Create a set of entities and upload to FireCloud.
Args
            etype (str): one of {"sample", "pair", "participant"}
entities: iterable of firecloud.Entity objects.
"""
if etype not in {"sample", "pair", "participant"}:
raise ValueError("Unsupported entity type:" + str(etype))
payload = "membership:" + etype + "_set_id\t" + etype + "_id\n"
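        # The payload below is a tab-separated membership loadfile; e.g. for
        # etype="sample" and set_id="batch1" (illustrative values) it becomes:
        #   membership:sample_set_id<TAB>sample_id
        #   batch1<TAB>sample_A
        #   batch1<TAB>sample_B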
for e in entities:
if e.etype != etype:
msg = "Entity type '" + e.etype + "' does not match "
msg += "set type '" + etype + "'"
raise ValueError(msg)
payload += set_id + '\t' + e.entity_id + '\n'
r = fapi.upload_entities(self.namespace, self.name,
payload, self.api_url)
fapi._check_response_code(r, 201)
def create_sample_set(self, sset_id, samples):
"""Create FireCloud sample_set"""
return self.create_set(sset_id, "sample", samples)
def create_pair_set(self, pset_id, pairs):
"""Create FireCloud pair_set"""
return self.create_set(pset_id, "pair", pairs)
def create_participant_set(self, pset_id, participants):
"""Create FireCloud participant_set"""
return self.create_set(pset_id, "participant", participants)
def submissions(self):
"""List job submissions in workspace."""
r = fapi.get_submissions(self.namespace, self.name, self.api_url)
fapi._check_response_code(r, 200)
return r.json()
def entity_types(self):
"""List entity types in workspace."""
r = fapi.get_entity_types(self.namespace, self.name, self.api_url)
fapi._check_response_code(r, 200)
return r.json().keys()
def entities(self):
"""List all entities in workspace."""
r = fapi.get_entities_with_type(self.namespace,
self.name, self.api_url)
fapi._check_response_code(r, 200)
edicts = r.json()
return [Entity(e['entityType'], e['name'], e['attributes'])
for e in edicts]
def __get_entities(self, etype):
"""Helper to get entities for a given type."""
r = fapi.get_entities(self.namespace, self.name,
etype, self.api_url)
fapi._check_response_code(r, 200)
return [Entity(e['entityType'], e['name'], e['attributes'])
for e in r.json()]
def samples(self):
"""List samples in a workspace."""
return self.__get_entities("sample")
def participants(self):
"""List participants in a workspace."""
return self.__get_entities("participant")
def pairs(self):
"""List pairs in a workspace."""
return self.__get_entities("pair")
def sample_sets(self):
"""List sample sets in a workspace."""
return self.__get_entities("sample_set")
def participant_sets(self):
"""List participant sets in a workspace."""
return self.__get_entities("participant_set")
def pair_sets(self):
"""List pair sets in a workspace."""
return self.__get_entities("pair_set")
def copy_entities(self, from_namespace, from_workspace, etype, enames):
"""Copy entities from another workspace.
Args:
from_namespace (str): Source workspace namespace
from_workspace (str): Source workspace name
etype (str): Entity type
enames (list(str)): List of entity names to copy
"""
r = fapi.copy_entities(from_namespace, from_workspace,
self.namespace, self.name, etype, enames,
self.api_url)
fapi._check_response_code(r, 201)
def configs(self):
"""Get method configurations in a workspace."""
raise NotImplementedError
r = fapi.get_configs(self.namespace, self.name, self.api_url)
fapi._check_response_code(r, 200)
cdata = r.json()
configs = []
for c in cdata:
cnamespace = c['namespace']
cname = c['name']
root_etype = c['rootEntityType']
method_namespace = c['methodRepoMethod']['methodNamespace']
method_name = c['methodRepoMethod']['methodName']
method_version = c['methodRepoMethod']['methodVersion']
def acl(self):
"""Get the access control list for this workspace."""
r = fapi.get_workspace_acl(self.namespace, self.name, self.api_url)
fapi._check_response_code(r, 200)
return r.json()
def set_acl(self, role, users):
"""Set access permissions for this workspace
Args:
role (str): Access level
                one of {"OWNER", "READER", "WRITER", "NO ACCESS"}
users (list(str)): List of users to give role to
"""
acl_updates = [{"email": user, "accessLevel": role} for user in users]
r = fapi.update_workspace_acl(self.namespace, self.name,
acl_updates, self.api_url)
fapi._check_response_code(r, 200)
def clone(self, to_namespace, to_name):
"""Clone this workspace.
Args:
to_namespace (str): Target workspace namespace
to_name (str): Target workspace name
"""
r = fapi.clone_workspace(self.namespace, self.name,
to_namespace, to_name, self.api_url)
fapi._check_response_code(r, 201)
return Workspace(to_namespace, to_name, self.api_url)
``` |
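A brief, hedged usage sketch for the `Workspace` wrapper above; the billing project and workspace names are placeholders, and every call requires access to a live FireCloud API.
```python
from firecloud.workspace import Workspace

# Hypothetical coordinates -- substitute a project/workspace you can access.
ws = Workspace("my-billing-project", "my-workspace")

print(ws.bucket())                     # backing Google bucket id
print(list(ws.entity_types()))         # e.g. ['sample', 'participant']
ws.update_attribute("study", "demo")   # set a workspace-level attribute
print(ws.refresh().get_attribute("study"))
```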
{
"source": "jnlp-snowman/easy-japanese-checker",
"score": 2
} |
#### File: jnlp-snowman/easy-japanese-checker/index.py
```python
import sys
import os
# add libs path
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'libs'))
from bottle import route, static_file, default_app
# load controller files
from app.controllers import *
# static files routing
@route('/static/<filepath:path>')
def static(filepath):
return static_file(filepath, root='./static/')
## Run with bottle http server.
# from bottle import run
# run(host='localhost', port=8080, debug=True, reloader=True)
# e.g. python index.py
## Run with gunicorn
app = default_app()
# e.g. gunicorn -b 127.0.0.1:8080 -w 1 index:app
## Run with PasteServer
# from bottle import run, PasteServer
# run(server=PasteServer, host='localhost', port=8080, debug=True, reloader=True)
# e.g. python index.py
```
#### File: easy-japanese-checker/libs/db_snow.py
```python
import sqlalchemy
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import MetaData, create_engine, orm
from sqlalchemy import CHAR, VARCHAR, Integer, Column, PrimaryKeyConstraint, ForeignKey
from sqlalchemy import exc
from sqlalchemy import event
from sqlalchemy.pool import Pool
import configparser
config = configparser.ConfigParser()
config.sections()
config.read('./config.ini')
sql_DATABASE = config.get('settings', "db_snow")
sql_metadata = MetaData()
sql_engine = create_engine(
sql_DATABASE,
encoding='utf-8',
pool_recycle=5,
)
db_session = orm.scoped_session(
orm.sessionmaker(autocommit=True,
autoflush=True,
bind=sql_engine)
)
sql_Base = declarative_base()
sql_Base.query = db_session.query_property()
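# Pessimistic disconnect handling: on every pool checkout, run a cheap
# "SELECT 1" and raise DisconnectionError so SQLAlchemy discards stale
# connections and retries (comparable in effect to enabling pool_pre_ping).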
@event.listens_for(Pool, "checkout")
def ping_connection(dbapi_connection, connection_record, connection_proxy):
cursor = dbapi_connection.cursor()
try:
cursor.execute("SELECT 1")
except:
# optional - dispose the whole pool
# instead of invalidating one at a time
# connection_proxy._pool.dispose()
# raise DisconnectionError - pool will try
# connecting again up to three times before raising.
raise exc.DisconnectionError()
cursor.close()
class Word(sql_Base):
__tablename__ = 'Word'
__table_args__ = (
PrimaryKeyConstraint('Id'),
)
Id = Column('Id', CHAR(length=5))
SnowmanPOS = Column('SnowmanPOS', VARCHAR(length=40))
POS = Column('POS', VARCHAR(length=20))
POS_s1 = Column('POS_s1', VARCHAR(length=20))
POS_s2 = Column('POS_s2', VARCHAR(length=20))
lemma = Column('lemma', VARCHAR(length=120))
org_lemma = Column('org_lemma', VARCHAR(length=120))
freq = Column('BCCWJ_freq', Integer)
def __repr__(self):
return 'word:{}'.format(self.lemma)
class Morph(sql_Base):
__tablename__ = 'Morph'
__table_args__ = (
PrimaryKeyConstraint('CharId'),
)
CharId = Column('CharId', CHAR(length=5))
lemma = Column('lemma', VARCHAR(length=120))
Id = Column('Id', CHAR(length=5), ForeignKey('Word.Id'))
CharMorph = orm.relationship("CharMorph")
Word = orm.relationship("Word", backref=orm.backref('Morph'))
def __repr__(self):
return 'morph:{}'.format(self.lemma)
class CharMorph(sql_Base):
__tablename__ = 'CharMorph'
__table_args__ = (
PrimaryKeyConstraint('UniDicId'),
)
UniDicId = Column('UniDicId', CHAR(length=5), ForeignKey('UniDic.ID'))
CharId = Column('CharId', CHAR(length=5), ForeignKey('Morph.CharId'))
ambiguity = Column('ambiguity', Integer)
freq = Column('freq', Integer)
UniDic = orm.relationship("UniDic", backref=orm.backref('CharMorph'), order_by=UniDicId)
Morph = orm.relationship("Morph", backref=orm.backref('Morph'))
# Morph = orm.relationship("Morph", back_populates="Morph")
def __repr__(self):
return 'CharMorph:{}'.format(self.CharId)
class UniDic(sql_Base):
__tablename__ = 'UniDic'
__table_args__ = (
PrimaryKeyConstraint('ID'),
)
ID = Column('ID', CHAR(length=6))
POS = Column('POS', VARCHAR(length=20))
POS_s1 = Column('POS_s1', VARCHAR(length=40))
POS_s2 = Column('POS_s2', VARCHAR(length=40))
POS_s3 = Column('POS_s3', VARCHAR(length=40))
Conj_type = Column('Conj_type', VARCHAR(length=40))
Conj_form = Column('Conj_form', VARCHAR(length=40))
org_kana = Column('org_kana', VARCHAR(length=40))
org_kanji = Column('org_kanji', VARCHAR(length=40))
lemma = Column('lemma', VARCHAR(length=40))
reading = Column('reading', VARCHAR(length=40))
org2_kanji = Column('org2_kanji', VARCHAR(length=40))
org2_kana = Column('org2_kana', VARCHAR(length=40))
freq = Column('freq', Integer)
def __repr__(self):
return 'UniDic:{}'.format(self.lemma)
``` |
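A minimal query sketch against the models above, assuming `libs/` is on `sys.path` (as `index.py` arranges) and `config.ini` points at a reachable database; the example lemma is a placeholder.
```python
from db_snow import db_session, Word

# Look up a word by surface form via the query property bound to db_session.
word = Word.query.filter(Word.lemma == '日本').first()
if word is not None:
    print(word.Id, word.POS, word.freq)
    # Walk the backref added by Morph.Word to list its morphological analyses.
    for morph in word.Morph:
        print(' ', morph.CharId, morph.lemma)

# Equivalent session-based form: the five most frequent words.
print(db_session.query(Word).order_by(Word.freq.desc()).limit(5).all())
```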
{
"source": "jnmaloney/aaisd",
"score": 2
} |
#### File: aaisd/chart/process_outliers_locs.py
```python
from bokeh.plotting import figure, show, save, output_file
from bokeh.models.tickers import *
from bokeh.models.axes import *
from bokeh.palettes import brewer
from os import listdir
from os.path import isfile, join
# Open outliers files
years = {
'temp': [],
'dewp': [],
't_min': [],
't_max': [],
'precip': [],
'snow_d': [],
'wind_max': [],
}
values_high = {
'temp': [],
'dewp': [],
't_min': [],
't_max': [],
'precip': [],
'snow_d': [],
'wind_max': [],
}
values_low = {
'temp': [],
'dewp': [],
't_min': [],
't_max': [],
'precip': [],
'snow_d': [],
'wind_max': [],
}
onlyfiles = [f for f in listdir('locs') if isfile(join('locs', f))]
for fname in onlyfiles:
f = open(join('locs', fname), "r") #"locs/833780-99999-outliers.txt", "r")
#f = open("locs/833780-99999-outliers.txt", "r")
for line in f.readlines():
s = line.split()
year = int(s[1])
value = s[0]
r = int(s[2])
u = int(s[3])
l = int(s[4])
years[value].append(year)
values_high[value].append((100.0*u) / r)
values_low[value].append((100.0*l) / r)
# x = []
# for i in range(len(l)):
# x.append( (100.0*(l[i]+u[i])) / r[i] )
colors = brewer['Spectral'][4]
colorsMap = {
'temp': colors[0],
'dewp': colors[1],
't_min': colors[2],
't_max': colors[3],
'precip': 'lightblue',
'snow_d': 'lightgrey',
'wind_max': 'lightsteelblue',
}
# Do figures seperately
# for value in [
# 'temp',
# 'dewp',
# 't_min',
# 't_max',
# 'precip',
# 'snow_d',
# 'wind_max']:
# p = figure(
# title=None,
# x_axis_label='year', #'Month-Day',
# y_axis_label='%',
# background_fill_color="#EFE8E2",
# plot_width=1200, plot_height=600,
# )
# p.xaxis[0].ticker = SingleIntervalTicker(interval=10, num_minor_ticks=0)
#
# poly1 = range(1920, 2021)
# poly2 = values_low[value]
# print len(poly1), len(poly2)
# poly1.insert(0, 1920)
# poly2.insert(0, 0)
# poly1.append(2020)
# poly2.append(0)
#
# p.patch(poly1, poly2,
# #fill_color=colorsMap[value],
# line_color=colorsMap[value])
# #fill_alpha=0.5)
#
# output_file("outlier_count_value_low_%s.html" % value)
#
# show(p)
# save(p)
# y
# p = figure(
# title=None,
# x_axis_label='year', #'Month-Day',
# y_axis_label='%',
# background_fill_color="#EFE8E2",
# plot_width=1200, plot_height=600,
# )
# p.xaxis[0].ticker = SingleIntervalTicker(interval=10, num_minor_ticks=0)
#
# p.patch(poly1, poly2,
# #fill_color=colorsMap[value],
# line_color=colorsMap[value])
# #fill_alpha=0.75)
#
# output_file("outlier_count_value_high_%s.html" % value)
#
# show(p)
# save(p)
def sub(y, x, z, value):
p = figure(
title=None,
x_axis_label='year', #'Month-Day',
y_axis_label='%',
background_fill_color="#EFE8E2",
plot_width=1200, plot_height=600,
)
p.xaxis[0].ticker = SingleIntervalTicker(interval=10, num_minor_ticks=0)
p.patch(y, x,
fill_color=colorsMap[value],
line_color=colorsMap[value],
line_width=2,
fill_alpha=0.5)
output_file("outlier_count_value_low_%s.html" % value)
show(p)
save(p)
p = figure(
title=None,
x_axis_label='year', #'Month-Day',
y_axis_label='%',
background_fill_color="#EFE8E2",
plot_width=1200, plot_height=600,
)
p.xaxis[0].ticker = SingleIntervalTicker(interval=10, num_minor_ticks=0)
p.patch(y, z,
fill_color=colorsMap[value],
line_color=colorsMap[value],
line_width=2,
fill_alpha=0.75)
output_file("outlier_count_value_high_%s.html" % value)
show(p)
save(p)
# All poly
p = figure(
title=None,
x_axis_label='year', #'Month-Day',
y_axis_label='%',
background_fill_color="#EFE8E2",
plot_width=1200, plot_height=600,
)
p.xaxis[0].ticker = SingleIntervalTicker(interval=10, num_minor_ticks=0)
# # Plot 1
# for v in ['temp', 'dewp', 't_min', 't_max', 'precip', 'snow_d', 'wind_max']:
# p.line(years[v], values_low[v])
# p.line(years[v], values_high[v])
# Plot 2
v_store = {}
polys1 = {
'temp': [],
'dewp': [],
't_min': [],
't_max': [],
'precip': [],
'snow_d': [],
'wind_max': [],
}
polys2 = {
'temp': [],
'dewp': [],
't_min': [],
't_max': [],
'precip': [],
'snow_d': [],
'wind_max': [],
}
polysM1 = {
'temp': [],
'dewp': [],
't_min': [],
't_max': [],
'precip': [],
'snow_d': [],
'wind_max': [],
}
polysM2 = {
'temp': [],
'dewp': [],
't_min': [],
't_max': [],
'precip': [],
'snow_d': [],
'wind_max': [],
}
tracko = [0 for i in range(1920, 2021)]
y = []
for i in range(1920, 2021, 1):
y.append(i)
for i in range(2020, 1920, -1):
y.append(i)
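# Build one stacked band per variable: for each v, polys1[v] traces the
# low-outlier band and polys2[v] the high-outlier band on top of it, both drawn
# over the running baseline tracko; the year axis runs forward along the bottom
# edge and backward along the top so p.patch() receives a closed polygon.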
for v in ['temp', 'dewp', 't_min', 't_max', 'precip', 'snow_d', 'wind_max']:
x = []
z = []
for year in range(1920, 2021, 1):
value_low = 0
value_high = 0
for year2 in range(1):
try:
i = years[v].index(year+year2)
value_low = values_low[v][i]
value_high = values_high[v][i]
except Exception as e:
pass
x.append(value_low)
z.append(value_high)
sub(y, x, z, v)
for i in range(len(x)):
polys1[v].append(tracko[i])
polys2[v].append(tracko[i] + x[i])
for i in range(len(x)-1, 0, -1):
polys1[v].append(tracko[i] + x[i])
polys2[v].append(tracko[i] + x[i] + z[i])
# for i in range(len(x)):
#
# polys1[v].append(x[i])
# polys2[v].append(z[i])
#
# polysM1[v].append(tracko[i] + x[i] * 0.5)
# polysM2[v].append(tracko[i] + x[i] + z[i] * 0.5)
tracko[i] += x[i] + z[i]
#p.patch(y, polys1[v])
#p.line(y, polys1[v])
colors = brewer['Spectral'][4]
p.patch(y, polys2['temp'], fill_color=colors[0], fill_alpha=0.75)
p.patch(y, polys1['temp'], fill_color=colors[0], fill_alpha=0.5)
p.patch(y, polys2['dewp'], fill_color=colors[1], fill_alpha=0.75)
p.patch(y, polys1['dewp'], fill_color=colors[1], fill_alpha=0.5)
p.patch(y, polys2['t_min'], fill_color=colors[2], fill_alpha=0.75)
p.patch(y, polys1['t_min'], fill_color=colors[2], fill_alpha=0.5)
p.patch(y, polys2['t_max'], fill_color=colors[3], fill_alpha=0.75)
p.patch(y, polys1['t_max'], fill_color=colors[3], fill_alpha=0.5)
p.patch(y, polys2['precip'], fill_color='lightblue', fill_alpha=0.75)
p.patch(y, polys1['precip'], fill_color='lightblue', fill_alpha=0.5)
p.patch(y, polys2['snow_d'], fill_color='lightgrey', fill_alpha=0.75)
p.patch(y, polys1['snow_d'], fill_color='lightgrey', fill_alpha=0.5)
p.patch(y, polys2['wind_max'], fill_color='lightsteelblue', fill_alpha=0.75)
p.patch(y, polys1['wind_max'], fill_color='lightsteelblue', fill_alpha=0.5)
# p.rect(y, polysM2['wind_max'], 9, polys2['wind_max'],fill_color='lightsteelblue', fill_alpha=0.75, line_color="black", legend="wind_max +")
# p.rect(y, polysM1['wind_max'], 9,polys1['wind_max'],fill_color='lightsteelblue', fill_alpha=0.5, line_color="black", legend="wind_max -")
# p.rect(y, polysM2['snow_d'], 9, polys2['snow_d'],fill_color='lightgrey', fill_alpha=0.75, line_color="black", legend="snow_d +")
# p.rect(y, polysM1['snow_d'], 9, polys1['snow_d'],fill_color='lightgrey', fill_alpha=0.5, line_color="black", legend="snow_d -")
# p.rect(y, polysM2['precip'], 9, polys2['precip'],fill_color='lightblue', fill_alpha=0.75, line_color="black", legend="precip +")
# p.rect(y, polysM1['precip'], 9, polys1['precip'],fill_color='lightblue', fill_alpha=0.5, line_color="black", legend="precip -")
# p.rect(y, polysM2['t_max'], 9, polys2['t_max'], fill_color=colors[3], fill_alpha=0.75, line_color="black", legend="t_max +")
# p.rect(y, polysM1['t_max'], 9, polys1['t_max'], fill_color=colors[3], fill_alpha=0.5, line_color="black", legend="t_max -")
# p.rect(y, polysM2['t_min'], 9, polys2['t_min'], fill_color=colors[2], fill_alpha=0.75, line_color="black", legend="t_min +")
# p.rect(y, polysM1['t_min'], 9, polys1['t_min'], fill_color=colors[2], fill_alpha=0.5, line_color="black", legend="t_min -")
#
# p.rect(y, polysM2['dewp'], 9, polys2['dewp'], fill_color=colors[1], fill_alpha=0.75, line_color="black", legend="dewp +")
# p.rect(y, polysM1['dewp'], 9, polys1['dewp'], fill_color=colors[1], fill_alpha=0.5, line_color="black", legend="dewp -")
# p.rect(y, polysM2['temp'], 9, polys2['temp'], fill_color=colors[0], fill_alpha=0.75, line_color="black", legend="temp +")
# p.rect(y, polysM1['temp'], 9, polys1['temp'], fill_color=colors[0], fill_alpha=0.5, line_color="black", legend="temp -")
#for year, value_low, value_high in years[v], values_low[v], values_high[v]:
# for i in range(len(years[v])):
# year = years[v][i]
# value_low = values_low[v][i]
# value_high = values_high[v][i]
# x.append(year)
# if not v_store.has_key(year):
# v_store[year] = 0
# v_store[year] += value_low
# y.append(v_store[year])
# v_store[year] += value_high
# z.append(v_store[year])
# x.append(x[-1])
# y.append(0)
# z.append(0)
#
# for year in range(1920, 2021):
# value_low = 0
# value_high = 0
# try:
# i = years[v].index(year)
# value_low = values_low[v][i]
# value_high = values_high[v][i]
# except Exception as e:
# pass
#
# y.append(year)
# x.append(value_low)
# z.append(value_high)
#
#
#
# #p.rect(x, [0.5*i for i in y], 1, y, fill_color="lightgrey")
# #p.rect(x, [0.5*i for i in z], 1, z, fill_color="steelblue")
#
# xx = []
# for i in y:
# xx.append(i)
# y.reverse()
# for i in y:
# xx.append(i)
#
# yy = []
# for i in x:
# yy.append(i)
# z.reverse()
# for i in z:
# yy.append(i)
#
# p.line(xx, yy)
# p.line(x, y)
# p.line(x, z)
output_file("outlier_count_master.html")
show(p)
#save(p)
```
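The filled bands in the script above come from a mirrored-list trick: the x values are walked forward and then backward, paired with an upper series followed by the reversed lower series, so a single `patch()` call fills the area between two curves. A minimal, self-contained sketch of that idea (made-up data, only Bokeh assumed):
```python
from bokeh.plotting import figure, output_file, show
x = list(range(5))
lower = [1, 2, 2, 3, 3]
upper = [3, 4, 5, 5, 6]
fig = figure(title="filled band via patch")
# walk right along the upper curve, then back left along the lower curve
fig.patch(x + x[::-1], upper + lower[::-1], fill_alpha=0.4)
output_file("band_sketch.html")
show(fig)
```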
#### File: jnmaloney/aaisd/process_gsod.py
```python
import gzip
from datetime import datetime
import os
import numpy as np
import pandas as pd
from bokeh.plotting import figure, show, output_file
from bokeh.models.tickers import *
from bokeh.models.axes import *
# Data containers
moda_values = {
't_max': {},
't_min': {},
'wind_max': {},
'precip': {},
'snow_d': {},
}
# Conversion
def FStoC(f_string): return (float(f_string) - 32.0) * 5.0 / 9.0
# Read from a single archive
def read_archive(archivename):
if not os.path.isfile(archivename): return
with gzip.open(archivename, 'rt') as f: # text mode, so lines arrive as str rather than bytes
for line in f.readlines():
process_linedata(line)
# Process a line from the data file
def process_linedata(line):
# Ignore first line
if line[0:6] == "STN---": return
# Read data points
line_data = line.split()
yearmoda = line_data[2]
wind_max = line_data[15]
t_max_f = line[103:108]
t_min_f = line[111:116]
precip = line_data[19]
snowd = line_data[20]
# Add to collection for Mo-Da
# Store data for Year-Mo-Da
add_values(yearmoda, wind_max, 'wind_max')
add_values(yearmoda, FStoC(t_max_f), 't_max')
add_values(yearmoda, FStoC(t_min_f), 't_min')
add_values(yearmoda, precip, 'precip')
add_values(yearmoda, snowd, 'snow_d')
def add_values(yearmoda, value, label):
value = float(value) # temps are already floats; other fields arrive as numeric strings
if value > 500: return # 999 / 999.9 mark missing data
store = moda_values[label]
year = yearmoda[0:4]
moda = yearmoda[4:]
if moda not in store:
store[moda] = []
store[moda].append((value, year))
# Read each year's archive
for year in range(1901, 2017+1):
archivename = 'gsod/600600-99999-%s.op.gz' % year
read_archive(archivename)
# Process total statistics
# Box plot demo
values = moda_values['t_max']
modas = []
xx = []
for k, v in values.items():
for i in v:
modas.append(k)
xx.append(i[0])
df = pd.DataFrame(dict(score=xx, group=modas))
select_modas = ['0101', '0201', '0301', '0401', '0501', '0601', '0701', '0801', '0901', '1001','1101', '1201']
#select_modas = ['0101', '0301', '0701', '1001']
#select_modas = ['0301']
all_modas = sorted(values.keys())
# find the quartiles and IQR for each category
groups = df.groupby('group')
q1 = groups.quantile(q=0.25)
q2 = groups.quantile(q=0.5)
q3 = groups.quantile(q=0.75)
iqr = q3 - q1
upper = q3 + 1.5*iqr
lower = q1 - 1.5*iqr
# ?
#print lower.score['0301']
# find the outliers for each category
def outliers(group):
cat = group.name
return group[(group.score > upper.loc[cat]['score']) | (group.score < lower.loc[cat]['score'])]['score']
out = groups.apply(outliers).dropna()
# prepare outlier data for plotting, we need coordinates for every outlier.
if not out.empty:
outx = []
outy = []
for cat in all_modas:
# only add outliers if they exist
if not out.loc[cat].empty:
for value in out[cat]:
outx.append(cat)
outy.append(value)
#p = figure(tools="save", background_fill_color="#EFE8E2", title="", x_range=all_modas)
p = figure(
title="Outlying Temperature Events",
x_axis_label=None, #'Month-Day',
y_axis_label='Daily High Temperature from 1950 - 2017',
background_fill_color="#EFE8E2",
x_minor_ticks=0,
x_range=all_modas)
# ticker = SingleIntervalTicker(interval=5, num_minor_ticks=10)
# xaxis = LinearAxis(ticker=ticker)
# plot.add_layout(xaxis, 'below')
ticker = FixedTicker()
ticker.ticks = [101, 201, 301, 401] # leading-zero int literals are a syntax error in Python 3
xaxis = LinearAxis(ticker=ticker)
p.add_layout(xaxis, 'below')
# if no outliers, shrink lengths of stems to be no longer than the minimums or maximums
qmin = groups.quantile(q=0.00)
qmax = groups.quantile(q=1.00)
upper.score = [min([x,y]) for (x,y) in zip(list(qmax.loc[:,'score']),upper.score)]
lower.score = [max([x,y]) for (x,y) in zip(list(qmin.loc[:,'score']),lower.score)]
# # stems
# p.segment(select_modas, upper.score, select_modas, q3.score, line_color="black")
# p.segment(select_modas, lower.score, select_modas, q1.score, line_color="black")
#
# # boxes
# p.vbar(select_modas, 0.7, q2.score, q3.score, fill_color="#E08E79", line_color="black")
# p.vbar(select_modas, 0.7, q1.score, q2.score, fill_color="#3B8686", line_color="black")
# whiskers (almost-0 height rects simpler than segments)
# p.rect(select_modas, lower.score[select_modas[0]], 0.2, 0.01, line_color="black")
# p.rect(select_modas, upper.score[select_modas[0]], 0.2, 0.01, line_color="black")
# Corrected whisker plots
# for moda in select_modas:
#
# # stems
# p.segment([moda], upper.score[moda], [moda], q3.score[moda], line_color="black")
# p.segment([moda], lower.score[moda], [moda], q1.score[moda], line_color="black")
#
# # boxes
# p.vbar([moda], 0.7, q2.score[moda], q3.score[moda], fill_color="#E08E79", line_color="black")
# p.vbar([moda], 0.7, q1.score[moda], q2.score[moda], fill_color="#3B8686", line_color="black")
#
# # whiskers (almost-0 height rects simpler than segments)
# p.rect([moda], lower.score[moda], 0.2, 0.01, line_color="black")
# p.rect([moda], upper.score[moda], 0.2, 0.01, line_color="black")
# All values continuous box plot
#for moda in moda_values
# for moda in all_modas:
#
# # boxes
# p.vbar([moda], 0.7, q2.score[moda], q3.score[moda], fill_color="#E08E79", line_color="black")
# p.vbar([moda], 0.7, q1.score[moda], q2.score[moda], fill_color="#3B8686", line_color="black")
#
# # whiskers (almost-0 height rects simpler than segments)
# p.rect([moda], lower.score[moda], 0.2, 0.01, line_color="black")
# p.rect([moda], upper.score[moda], 0.2, 0.01, line_color="black")
q1s = [q1.score[i] for i in all_modas]
q2s = [q2.score[i] for i in all_modas]
q3s = [q3.score[i] for i in all_modas]
#p.line(all_modas, q3s, line_color='#E08E79')
#p.line(all_modas, q2s, line_color='black')
#p.line(all_modas, q1s, line_color='#3B8686')
p.patch(all_modas + all_modas[::-1], q3s + q2s[::-1], color='#E08E79')
p.patch(all_modas + all_modas[::-1], q2s + q1s[::-1], color='#3B8686')
#p.line(all_modas, q2s, line_color='black')
lowers = [lower.score[i] for i in all_modas]
uppers = [upper.score[i] for i in all_modas]
#p.line(all_modas, uppers, line_color='black')
#p.line(all_modas, lowers, line_color='black')
p.patch(all_modas + all_modas[::-1], uppers + q3s[::-1], color='white')
p.patch(all_modas + all_modas[::-1], q1s + lowers[::-1], color='white')
# outliers
if not out.empty:
p.circle(outx, outy, size=6, color="#F38630", fill_alpha=0.6)
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = "white"
p.grid.grid_line_width = 2
p.xaxis.major_label_text_font_size="12pt"
output_file("boxplot.html", title="boxplot.py example")
show(p)
``` |
{
"source": "jnmaomao/ploomber",
"score": 3
} |
#### File: ploomber/util/validate.py
```python
from ploomber.exceptions import ValidationError
from ploomber.io import pretty_print
def keys(valid, passed, required=None, name='spec'):
passed = set(passed)
if valid:
extra = passed - set(valid)
if extra:
raise ValidationError(
"Error validating {}, the following keys aren't "
"valid: {}. Valid keys are: {}".format(
name, pretty_print.iterable(extra),
pretty_print.iterable(valid)))
if required:
missing = set(required) - passed
if missing:
raise ValidationError(f"Error validating {name}. Missing "
f"keys: { pretty_print.iterable(missing)}")
```
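A quick usage sketch for the validator above; the import path is assumed from the file location, and the key names are made up:
```python
from ploomber.util.validate import keys
# passes silently: no extra keys, all required keys present
keys(valid={'source', 'product'}, passed={'source', 'product'},
     required={'source'}, name='task spec')
# raises ValidationError: 'produkt' is not among the valid keys
keys(valid={'source', 'product'}, passed={'source', 'produkt'}, name='task spec')
```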
#### File: tests/util/test_mixed_envs.py
```python
import pytest
from ploomber.util import util
def test_mixed_envs():
# Set a pip dep string
reqs = """pyflakes==2.4.0\nPygments==2.11.2\n
pygraphviz @ file:///Users/runner/miniforge3/pygraphviz_1644545996627\n
PyNaCl==1.5.0\npyparsing==3.0.7"""
with pytest.warns(UserWarning):
util.check_mixed_envs(reqs)
``` |
{
"source": "JNMaree/solvdoku",
"score": 4
} |
#### File: src/base/elementspace_1D.py
```python
import numpy
from nodespace_1D import NodeSpace1D
class ElementSpace1D(NodeSpace1D):
"""
An Element space is a collection of elements in a single dimension, X.
The data is stored in a 2-dimensional array.
The value of the array at i and j represents the element at i,
composed of the node j:
ElementSpace[i,j] = Node_j of Element_i
"""
# Element array:
elements = numpy.array([0,0], dtype=int) # Numpy array
# Number of Elements:
n_elements = 0 # int (>0)
# Number of Nodes per Element:
nodes_per_element = 2 # int (>=2)
def __init__(self, elements, dimension=1, start=0, nodes_per_element=2):
# If elements is a number, it specifies the number of elements
if isinstance(elements, int):
self.elements = numpy.zeros((elements, nodes_per_element))
self.n_elements = elements
self.nodes_per_element = nodes_per_element
# Calculate the number of nodes
n_nodes = elements * nodes_per_element - (elements - 1)
super().__init__(n_nodes, dimension, start)
# If elements is a NodeSpace1D, it defines the Nodes used by the Elements,
# - disregards dimension & start
elif isinstance(elements, NodeSpace1D):
self.elements = numpy.empty([elements.n_nodes, nodes_per_element])
self.nodes_per_element = nodes_per_element
self.n_elements = elements.n_nodes - (self.nodes_per_element - 1)
# initialise inherited nodeSpace
super().__init__(elements)
elif isinstance(elements, ElementSpace1D):
self.elements = elements.elements
self.n_elements = elements.n_elements
self.nodes_per_element = elements.nodes_per_element
# Generate Elements from NodeSpace
for i in range(self.n_elements):
for j in range(self.nodes_per_element):
self.elements[i, j] = int( i * (self.nodes_per_element - 1) + j )
# end __init__
def __str__(self) -> str:
ret_str = format(self.n_elements)
ret_str += "\n"
for i in range(self.n_elements):
for j in range(self.nodes_per_element):
ret_str += "[{:n},{:n}]:".format(i,j)
ret_str += "{:n}".format(self.elements[i,j])
if j != (self.nodes_per_element - 1):
if self.elements[i, j] < 10:
ret_str += "\t"
# to get consistent column tabs
ret_str += "\t"
ret_str += "\n"
return ret_str
def __getitem__(self, key):
return self.elements[key]
def __setitem__(self, key, value):
self.elements[key] = value
# Return the NodeSpace the ElementSpace is based on
def nodeSpace_str(self) -> str:
return super().__str__()
def main():
# Test Classes & Functions
print("Test ElementSpace:")
# - Create an ElementSpace of 10 elements
# over a dimension of size 4,
# starting at 3,
# with 2 nodes per element:
e_space2 = ElementSpace1D(10, 4, 3, 2)
print("Nodes_Per_Element:", e_space2.nodes_per_element)
print(e_space2)
# Test get_NodeSpace string
print("Test ElementSpace_2 Parent NodeSpace:")
n_space_str2 = e_space2.nodeSpace_str()
print("N_nodes:", n_space_str2)
print("Test ElementSpace with 4 nodes per element")
# - Create an ElementSpace of 8 elements
# over a dimension of size 10,
# starting at 0,
# with 4 nodes per element:
e_space4 = ElementSpace1D(8, 10, 0, 4)
print("Nodes_Per_Element:", e_space4.nodes_per_element)
print(e_space4)
# Test get_NodeSpace string
print("Test ElementSpace_4 Parent NodeSpace:")
n_space_str4 = e_space4.nodeSpace_str()
print("N_nodes:", n_space_str4)
if __name__ == "__main__":
main()
```
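The connectivity rule in `__init__` above, `elements[i, j] = i*(nodes_per_element - 1) + j`, is what makes adjacent elements share their boundary node. Written out without the class for a 3-element, 2-node mesh:
```python
nodes_per_element = 2
for i in range(3):
    print(i, [i * (nodes_per_element - 1) + j for j in range(nodes_per_element)])
# 0 [0, 1]
# 1 [1, 2]
# 2 [2, 3]   <- interior nodes 1 and 2 are each shared by two neighbouring elements
```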
#### File: src/base/matrix.py
```python
import numpy
class Matrix:
matrix = []
rows = 0
cols = 0
# Error Tolerance for FLoating Points:
# if abs( matrix[i, j] ) < epsilon then value = zero
epsilon = 1e-14
def __init__(self, array):
if isinstance(array, numpy.ndarray):
if array.ndim == 2: # If 2D
self.matrix = array
self.rows = array.shape[0]
self.cols = array.shape[1]
elif array.ndim == 1: # If 1D
self.matrix = array
self.rows = array.shape[0]
self.cols = 1
elif isinstance(array, list):
self.matrix = numpy.array(array)
self.rows = len(array)
self.cols = len(array[0])
# Overload [] operator
def __getitem__(self, key):
return self.matrix[key]
def __setitem__(self, key, value):
self.matrix[key] = value
# Overload 'string' method
def __str__(self):
ret_str = "{}x{}\n".format(self.rows, self.cols)
if self.cols == 1:
for i in range(self.rows):
ret_str += f"{self.matrix[i]:8}\n"
else:
for i in range(self.rows):
for j in range(self.cols):
ret_str += f"{self.matrix[i,j]:>4} "
ret_str += "\n"
return ret_str
# BOOLEAN MATRIX CHECK METHODS:
# Return if matrix is square (n = rows = cols)
def is_square(self):
if self.rows == self.cols:
return True
else:
return False
# Return if specified matrix is equal in size
def is_equal_size(self, other):
if self.cols != other.cols:
return False
elif self.rows != other.rows:
return False
else:
return True
# Return if matrix is a diagonal matrix
def is_diagonal(self):
pass
# Overload mathematical operators
# Matrix multiplication
def __mul__(self, other):
if isinstance(other, int):
return Matrix(self.matrix * other)
else:
if self.cols != other.rows:
raise ArithmeticError(f"Product Matrix AB not defined for A:cols={self.cols} and B:rows={other.rows}")
else:
return Matrix(numpy.matmul(self.matrix, other.matrix))
# Matrix 1-to-1 addition
def __add__(self, other):
if self.is_equal_size(other):
return Matrix(numpy.add(self.matrix, other.matrix))
else:
raise ArithmeticError("Sum Matrix not defined for matrices of inequal size")
# Matrix 1-to-1 substraction
def __sub__(self, other):
if self.is_equal_size(other):
return Matrix(numpy.subtract(self.matrix, other.matrix))
else:
raise ArithmeticError("Difference Matrix not defined for matrices of inequal size")
"""
Class Methods:
Basic Matrix Algebra:
- Append Rows to matrix
- Append Columns to matrix
Matrix Transformations:
- Row Echelon form
- Reduced Row Echelon form
"""
# ROW OPERATIONS:
# Add rows to matrix
def add_rows(self, rows, pos =None):
if isinstance(rows, numpy.ndarray):
addRows = rows
elif isinstance(rows, list):
addRows = numpy.array(rows)
else: #not isinstance(rows, numpy.ndarray):
raise TypeError("list or numpy.ndarray types supported")
if pos == None:
# If position not specified, append row to end of matrix
rowPos = self.rows
else:
rowPos = pos
if addRows.ndim > 1:
self.rows += addRows.shape[0]
else:
self.rows += 1
self.matrix = numpy.insert(self.matrix, rowPos, addRows, axis=0)
def swap_rows(self, row_A_index, row_B_index):
if row_A_index > (self.rows - 1):
raise IndexError("row_A_index:", row_A_index, " out of bounds of row_count:", self.rows)
if row_B_index > (self.rows - 1):
raise IndexError("row_B_index:", row_B_index, " out of bounds of row_count:", self.rows)
temp_row = numpy.array(self.matrix[row_A_index, :])
self.matrix[row_A_index, :] = self.matrix[row_B_index, :]
self.matrix[row_B_index, :] = temp_row
def shift_row(self, row_index, to_index=None):
new_order = numpy.arange(0, self.rows)
if to_index is not None:
# if to_index specified, move row@row_index to row@to_index
new_order[to_index] = row_index
else: # Else move row@row_index to end
for i in range(row_index, self.rows - 1):
new_order[i] = i
new_order[self.rows - 1] = row_index
self.matrix = self.matrix[new_order, :]
# COL OPERATIONS:
# Add columns to matrix
def add_cols(self, cols, pos =None):
if isinstance(cols, numpy.ndarray):
addCols = cols
elif isinstance(cols, list):
addCols = numpy.array(cols)
else:
raise TypeError("list or numpy.ndarray types supported")
if pos == None:
# If position not specified, append col to end of matrix
colPos = self.cols
else:
colPos = pos
if addCols.ndim == 1:
self.cols += 1
else:
self.cols += addCols.shape[0]
self.matrix = numpy.insert(self.matrix, colPos, addCols, axis=1)
def swap_cols(self, col_A_index, col_B_index):
if col_A_index > (self.cols - 1):
raise IndexError("col_A_index:", col_A_index, " out of bounds of row_count:", self.cols)
if col_B_index > (self.cols - 1):
raise IndexError("col_B_index:", col_B_index, " out of bounds of row_count:", self.cols)
temp_row = numpy.array(self.matrix[:, col_A_index])
self.matrix[:, col_A_index] = self.matrix[:, col_B_index]
self.matrix[:, col_B_index] = temp_row
def shift_col(self, col_index, to_index=None):
new_order = numpy.arange(0, self.cols)
if to_index is not None:
# if to_index specified, move col@col_index to col@to_index
new_order[to_index] = col_index
else: # Else move col@col_index to end
for i in range(col_index, self.cols - 1):
new_order[i] = i
new_order[self.cols - 1] = col_index
self.matrix = self.matrix[:, new_order]
# Move Full Zero rows to Bottom of Matrix
# - Used for Reduced row echelon form
def move_full_zero_rows(self):
zeroRows = numpy.all(self.matrix==0, axis=1)
anyZeroRows = numpy.any(zeroRows)
if anyZeroRows:
for i in range(zeroRows.size - 1):
if zeroRows[i]:
self.shift_row(i)
# Return the points at which the matrix's largest absolute value occurs
def abs_argmax(self, row_index=None, col_index=None):
if row_index == None and col_index == None:
# If row or col not specified, return arg max of matrix
return numpy.argmax(self.matrix)
elif col_index == None:
# Return argmax of row_index
return numpy.argmax(self.matrix, axis=0)
else:
# Return argmax of col_index
return numpy.argmax(self.matrix, axis=1)
# MATRIX TRANSFORMATIONS (in_place):
# Absolute value of whole matrix
def to_abs(self):
self.matrix = numpy.absolute(self.matrix)
# Reduce matrix to Row Echelon Form (REF)
# see https://en.wikipedia.org/wiki/Row_echelon_form#Reduced_row_echelon_form
def to_row_echelon(self):
rowJ = 0
colK = 0
while rowJ < self.rows and colK < self.cols:
rowMax = self.abs_argmax(row_index=rowJ)
if rowMax.size > 1: # Catch argmax returning an array
rowMax = rowMax[0]
if self.matrix[rowMax, colK] == 0:
colK += 1
else:
self.swap_rows(rowJ, rowMax)
for i in range(rowJ + 1, self.rows):
f = self.matrix[i, colK] / self.matrix[rowJ, colK]
self.matrix[i, colK] = 0
for j in range(colK, self.cols):
self.matrix[i, j] = self.matrix[i, j] - self.matrix[rowJ, j]*f
rowJ += 1
colK += 1
# Reduce matrix to reduced row echelon form (RREF)
def to_reduced_row_echelon(self):
lead = 0
r = 0
stopCondition = False
# matrix loop
while r < self.rows and not stopCondition:
if self.cols <= lead:
stopCondition = True
i = r
while self.matrix[r, lead] == 0 and not stopCondition:
i += 1
if self.rows == i:
i = r
lead += 1
if self.rows == lead:
stopCondition = True
if i != r:
self.matrix[[i, r],:] = self.matrix[[r, i], :]
self.matrix[r, :] /= self.matrix[r, lead]
for i in range(self.rows):
if i != r:
self.matrix[i] -= self.matrix[i, lead]*self.matrix[r]
lead += 1
r += 1
# Transform matrix to its own transpose
def to_transpose(self):
self.matrix = numpy.transpose(self.matrix)
# GET METHODS:
# Return the inverse of the matrix
def get_inverse(self):
# Check if 2D and nxn (square)
if self.matrix.ndim == 2:
if self.rows == self.cols:
# Using a numpy function
return Matrix(numpy.linalg.inv(self.matrix))
else:
raise ArithmeticError(self.rows, " rows, ", self.cols, " cols. Matrix not square")
else:
raise ArithmeticError("Matrix ndim:", self.matrix.ndim)
# Return an identity matrix for the same size
def get_identity(self, size=0):
# Check if 2D and square (nxn)
if self.matrix.ndim >= 2:
return Matrix(numpy.eye(self.rows, self.cols))
elif size > 0:
return Matrix(numpy.eye(size))
else:
return Matrix(numpy.eye(self.matrix.shape[0]))
# Return the transpose of a matrix
def get_transpose(self):
return Matrix(numpy.transpose(self.matrix))
# Return the numerical determinant of a square matrix
def get_determinant(self):
if not self.is_square():
raise ArithmeticError("Matrix determinant not defined for non-square matrix")
else:
return Matrix(numpy.linalg.det(self.matrix))
# Return the diagonal of a square matrix
def get_diagonal(self):
if not self.is_square():
raise ArithmeticError("Matrix diagonal not defined for non-square matrix")
else:
ret_matrix = Matrix(numpy.zeros(self.rows))
for i in range(self.rows):
ret_matrix[i] += self.matrix[i, i]
return ret_matrix
# Test function
def main():
#test_matrix = numpy.array(([1, 1, 3], [0, 2, 4], [1, 1, 0], [0, 1, 1]))
test_matrix = numpy.array(([1, 1, 3], [0, 4, 2]))
m = Matrix(test_matrix)
print("Test_Matrix:")
print(m)
print("rows:", m.rows)
print("cols:", m.cols)
print("Test Add Rows:")
m_plusRows = [[1, 2, 3], [0, 0, 0]]
m.add_rows(m_plusRows)
print(m)
print("rows:", m.rows)
print("cols:", m.cols)
print("Test Add Cols:")
m_plusCols = numpy.array([[4, 4, 2, 0], [1, 2, 3, 0], [5, 0, 1, 0]])
m.add_cols(m_plusCols)
print(m)
print("rows:", m.rows)
print("cols:", m.cols)
Aindex = 1
Bindex = 2
print("Test Swap Rows(", Aindex, "&", Bindex, "):")
m.swap_rows(Aindex, Bindex)
print(m)
print("Test Swap Cols(", Aindex, "&", Bindex, "):")
m.swap_cols(Aindex, Bindex)
print(m)
print("Test Swap Rows(", m.rows-1, "&", m.rows-2, "):")
m.swap_rows(m.rows - 1, m.rows - 2)
print(m)
print("Test Move Full Zero Rows to Bottom:")
m.move_full_zero_rows()
print(m)
print("Test ShiftRow")
m.shift_row(1, 2)
print(m)
print("Test ShifCol")
m.shift_col(2)
print(m)
"""
print("Test Row Echelon Form:")
mre = m
mre.to_row_echelon()
print(mre)
print("Test Reduced Row Echelon Form:")
mrre = m
mrre.to_reduced_row_echelon()
print(mrre)
print("Test Inverse Matrix:")
minv = m.get_inverse()
print(minv)
"""
if __name__ == "__main__":
main()
```
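The core of `to_row_echelon` above is the update `row_i <- row_i - f*row_pivot` with `f = A[i, k] / A[pivot, k]`. One elimination step by hand, using plain NumPy:
```python
import numpy as np
A = np.array([[2.0, 1.0, -1.0],
              [4.0, 5.0,  3.0]])
f = A[1, 0] / A[0, 0]            # 2.0
A[1, :] = A[1, :] - f * A[0, :]  # zero out column 0 of row 1
print(A)
# [[ 2.  1. -1.]
#  [ 0.  3.  5.]]
```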
#### File: src/base/number_array.py
```python
import copy
import numpy as np
from numpy import random
class NumberArray:
# Define the Number array
ints = []
# Define the number of array entries
n = 0
# Create std class methods
def __init__(self, setup) -> None:
if isinstance(setup, np.ndarray):
self.ints = setup
self.n = setup.size
elif isinstance(setup, list):
self.ints = np.array(setup)
self.n = len(setup)
elif isinstance(setup, int):
self.ints = np.arange(setup)
self.n = setup
else:
raise TypeError("Unknown Type specified as 'setup' parameter")
def __str__(self) -> str:
rstr = format(self.n)
rstr += self.ints.__str__()
return rstr
# Define method to randomise the order of existing array entries
def shuffle(self):
#"""
# Numpy Implementation
#np.random.shuffle(self.numArray) # Numpy function
#"""
# Fisher-Yates Implementation
for i in range(self.n - 1):
endint = self.n-1-i
randint = random.randint(0, endint)
# Swap
self.ints[endint], self.ints[randint] = self.ints[randint], self.ints[endint]
# Remove element from array,
# - index: specify index to remove element from
# - num: specify component data to remove
def remove(self, index=None, num=None):
if index != None:
self.ints = np.delete(self.ints, index)
else:
self.ints = self.ints[~np.isin(self.ints, num)]
self.n = self.ints.size
# Add element to array
def add(self, num):
if isinstance(num, (list, np.ndarray)):
for i in num:
self.ints = np.append(self.ints, i) # numpy arrays have no in-place append
self.n += 1
else:
self.ints = np.append(self.ints, num)
self.n += 1
def calculate_expected_sum(self):
n = self.n
return n * (n + 1)//2
def calculate_actual_sum(self):
return np.sum(self.ints)
def find_missing_entity(self):
return self.calculate_expected_sum() - self.calculate_actual_sum()
# Define a function to swap array entries based on index
def swap(self, i, j):
self.ints[i], self.ints[j] = self.ints[j], self.ints[i]
# Return minimum/maximum numbers present in array
def minimum(self):
return np.amin(self.ints)
def maximum(self):
return np.amax(self.ints)
# Define a function to reverse the array
def reverse(self):
for i in range(self.n//2): # Reverse array
self.swap(i, self.n - 1 - i)
#
#
#
# SORTING ALGORITHMS
# - implemented to favour ascending order
# 1. Selection Sort:
# - find minimum element, move to start
def sort_selection(self):
for i in range(self.n):
min_i = i
for j in range(i + 1, self.n):
if self.ints[j] < self.ints[min_i]:
min_i = j
self.swap(i, min_i)
# 2. Bubble Sort:
# - find min of 2 adjacent elements, swap if larger value precedes
def sort_bubble(self):
not_sorted = True
c = 0
swaps = 0
while not_sorted:
if self.ints[c] > self.ints[c + 1]:
self.swap(c, c + 1)
swaps += 1
c += 1
if c == (self.n - 1):
c = 0
if swaps != 0:
swaps = 0
else:
not_sorted = False
# 3. Recursive Bubble Sort:
# - Same swapping practice as bubble sort, implemented recursively
def sort_bubble_recursive(self, n=0):
for i in range(self.n -1 -n):
if self.ints[i] > self.ints[i + 1]:
self.swap(i, i + 1)
if n != (self.n - 2):
self.sort_bubble_recursive(n + 1)
# 4. Insertion Sort:
# - Array is split into sorted & unsorted partitions
# - First term of the unsorted side is selected and
# placed into correct position in sorted array
def sort_insertion(self):
for i in range(1, self.n):
if self.ints[i] < self.ints[i - 1]:
j = 0
while self.ints[i] < self.ints[i - 1 - j]:
j += 1
if j >= i:
j = i
for k in range(j):
self.swap(i - k, i - k - 1)
# 5. Recursive Insertion Sort
# - Same process as insertion sort, applied recursively
def sort_insertion_recursive(self, index=1):
j = 1
while self.ints[index - j + 1] < self.ints[index - j]:
self.swap(index - j, index - j + 1)
if (index - j) > 0:
j += 1
if index < (self.n - 1):
self.sort_insertion_recursive(index + 1)
else:
return
# 6. Merge Sort
# - Partition the provided array into 2 parts
# - Sort each partition independently
# - Merge Sorted arrays back into a single array
# - TOP-DOWN approach
def sort_merge(self, arr=None):
if arr is None:
self.sort_merge(self.ints)
elif arr.size > 1:
split = arr.size//2
# Split array into 2, Sort each side (L & R) independently
L = np.array(arr[:split])
R = np.array(arr[split:])
self.sort_merge(L)
self.sort_merge(R)
# Merge L & R sides of array as ordered
iL = iR = iA = 0
while iL < L.size and iR < R.size:
if L[iL] < R[iR]:
arr[iA] = L[iL]
iL += 1
else:
arr[iA] = R[iR]
iR += 1
iA += 1
# Catch Remainders
while iL < L.size:
arr[iA] = L[iL]
iL += 1
iA += 1
while iR < R.size:
arr[iA] = R[iR]
iR += 1
iA += 1
# 7. Merge Sort (Iterative)
# - Same implementation as Merge Sort,
# - No Recursive function calls
# - Implements Merge Sort BOTTOM-UP (from small to large arrays)
def sort_merge_iterative(self):
# Set initial block size
interval = 2
while interval <= self.n:
# Partition Array into blocks of specified size (interval)
blocks = self.n//interval
#print(f"interval:{interval}\tblocks:{blocks}| {self.nums}")
# Sort each individual block
for b in range(blocks):
# Set starting index for block
i0 = b * interval
# Loop through all entries in single block
for i in range(interval - 1):
iMod = i + 1
while iMod < interval and self.ints[i0 + i] > self.ints[i0 + iMod]:
self.swap(i0 + i, i0 + iMod)
iMod += 1
#print(f"blocksort:{interval}\tblocks:{blocks}| {self.nums}")
# Merge adjacent blocks (A & B)
for i in range(blocks//2):
iA = i * 2 * interval # Start index of block A
iB = iA + interval # Start index of block B
for a in range(iA, iB):
b = iB
while b < self.n and self.ints[a] > self.ints[b]:
self.swap(a, b)
b += 1
# Double block size
interval *= 2
# 8. Quick Sort
# - Select a pivot (First number used in this case)
# - Partition array into two sections based on values:
# 1. Greater Than Pivot
# 2. Smaller Than Pivot
def sort_quick(self, start=0, end=None):
if end is None:
end = self.n
if start < end:
pivot = self.ints[end - 1]
low = start - 1
for i in range(start, end):
if self.ints[i] < pivot:
low += 1
self.swap(low, i)
low += 1
self.swap(low, end - 1)
self.sort_quick(start, low)
self.sort_quick(low + 1, end)
# 9. Quick Sort (Iterative)
# - Same pivot based algorithm as quick sort
# - Implemented iteratively
# - Use a temporary array to store indices in sorted form
def sort_quick_iterative(self):
stack = self.ints
pivot_pos = 0 # Set pivot as first index
pivot = stack[pivot_pos]
pivot_pos += 1
while pivot_pos < self.n - 1:
# Position the pivot in correct position in stack array
for i in range(pivot_pos, self.n):
if self.ints[i] <= pivot:
self.swap(i, i - 1)
pivot_pos += 1
pivot = stack[pivot_pos]
for l in range(pivot_pos):
if self.ints[l] > self.ints[l + 1]:
self.swap(l, l + 1)
for h in range(pivot_pos, self.n):
if self.ints[h] < self.ints[h - 1]:
self.swap(h, h - 1)
# 10. Heap Sort (Binary Tree)
# - Array is converted to a binary tree format
# - Smallest values are positioned furthest from the root
# - Root assumes value of maximum array value
def sort_binary_heap(self, index=-1, size=0):
if index == -1:
for i in range(self.n//2, -1, -1):
self.sort_binary_heap(i, self.n)
for i in range(self.n - 1, 0, -1):
self.swap(0, i)
self.sort_binary_heap(0, i)
else:
P = index # Parent node index
L = 2*P + 1 # Left node index
R = 2*P + 2 # Right node index
# Order P, L, R nodes so P is the largest value, followed by R then L
if L < size and self.ints[index] < self.ints[L]:
P = L
if R < size and self.ints[P] < self.ints[R]:
P = R
# If larger number detected
if P != index:
self.swap(P, index)
self.sort_binary_heap(P, size)
#else:
# for i in range(self.n//2): # Re-Order descending to ascending sorted form
# self.swap(i, self.n - i - 1)
# 11. Counting Sort
# - Operates by counting instances of occurring numbers
# - Converts count array to the cumulative sum of counts
# - Cumulative count gives each value's final index in the sorted output
def sort_counting(self):
mini = self.minimum()
maxi = self.maximum()
count = np.zeros(maxi - mini + 1)
for i in range(self.n): # Count the occurrences of each value
count[self.ints[i]] += 1
#print("count_array: \t\t", count)
for i in range(1, count.size): # Convert count to cumulative sum
count[i] += count[i - 1]
#print("cumulative_array: \t", count)
output = np.zeros(self.n, dtype=self.ints.dtype)
for i in range(self.n):
output[int (count[self.ints[i]]) - 1] = self.ints[i]
count[self.ints[i]] -= 1
self.ints = output
# 12. Radix Sort
# - An implementation of counting sort that relies on digit values
# - Numbers are grouped by shared digits in the same place value
# - Digits groups are sorted from least significant to most significant
def sort_radix(self):
digits = len(format(abs(self.maximum()))) # Get max number of digits
for i in range(digits): # Loop through all digit positions
dec = 10**(i+1) # Set decimal digit place to sort
# Implement Counting Sort Algorithm for digit range
output = np.zeros(self.n, dtype=self.ints.dtype)
count = np.zeros(10, dtype=int)
for c in range(self.n): # Count digit occurrences
count[(self.ints[c] % dec)//(dec//10)] += 1
for c in range(1, 10): # Convert to cumulative sum
count[c] += count[c - 1]
for j in range(self.n - 1, -1, -1): # Set Output array
index = (self.ints[j] % dec)//(dec//10)
output[count[index] - 1] = self.ints[j]
count[index] -= 1
self.ints = output
# 13. Bucket Sort
# - Applies to sorting uniform distributions over a range
# - Capable of handling floats
def sort_bucket(self, n_bins=0):
if n_bins == 0:
n_bins = self.n//4
bins = [[] for n in range(n_bins)] # Set bin/bucket array
mini = self.minimum()
maxi = self.maximum()
interval = (maxi - mini)//(n_bins) + 1 # Set the interval each bin contains
for i in range(self.n): # Arrange the array into bins/buckets
mov = mini
pos = 0
while pos < n_bins and self.ints[i] >= mov:
mov += interval
pos += 1
bins[pos - 1].append(self.ints[i])
#print(f"bins:{bins}\n")
for b in bins: # Loop through bins & sort each bin independently
for i in range(1, len(b)): # Insertion Sort
comp = b[i]
mov = i - 1
while mov >= 0 and comp < b[mov]:
b[mov + 1], b[mov] = b[mov], b[mov + 1]
mov -= 1
i = 0
for b in bins: # Extract bins back to array
for j in b:
self.ints[i] = j
i += 1
# 14. Shell Sort
# - Operates similarly to insertion sort by comparing elements and swapping
# - Works with intervals instead by comparing elements an interval apart
def sort_shell(self, interval=0):
if interval == 0:
interval = self.n//2 # Initial interval size
while interval > 0:
#print("interval:", format(interval), end = " ")
for i in range(interval, self.n):
hold = self.ints[i]
mov = i
while mov >= interval and self.ints[mov - interval] > hold:
self.ints[mov] = self.ints[mov - interval]
mov -= interval
self.ints[mov] = hold
#print(self.ints)
interval = interval//2 # Set next interval value
# 15. Tim Sort
# - A combination of insertion sort and merge sort algorithms
# - Array divided into 'runs' (smaller arrays)
# - Insertion sort on smaller arrays
# - Merge algorithm from merge sort implemented to combine runs
def sort_tim(self):
run_size = 4 # Define run (sub-array) size
runs = self.n//run_size
# Insertion Sort Function for all runs
for r in range(runs):
r_start = r * run_size
r_end = r_start + run_size
if r == (runs - 1) and r_end > self.n: # Sort remainder
r_end = self.n
for i in range(r_start, r_end): #
mov = i
while mov > r_start and self.ints[mov] < self.ints[mov - 1]:
self.swap(mov, mov - 1)
mov -= 1
#print("post_run: ", self.ints)
# Merge Function for all runs
interval = run_size
n_interval = runs
while True:
for r in range(n_interval//2):
A = r*2 * interval # Start index of run A
B = A + interval # Start index of run B
Amax = B
Bmax = B + interval
arrTemp = np.zeros(Bmax - A) # Resultant array
iT = 0
iA = A
iB = B
#print(f"int:{interval}: \t\t A:{A}|Amax:{Amax}-B:{B}|Bmax:{Bmax} \tTempArr[{Bmax - A}]")
while iA < Amax and iB < Bmax:
if self.ints[iA] < self.ints[iB]:
arrTemp[iT] = self.ints[iA]
iA += 1
else:
arrTemp[iT] = self.ints[iB]
iB += 1
iT += 1
# Catch remaining numbers
while iA < Amax:
arrTemp[iT] = self.ints[iA]
iA += 1
iT += 1
while iB < Bmax:
arrTemp[iT] = self.ints[iB]
iB += 1
iT += 1
# Feed sorted array back into main array
self.ints[A:Bmax] = arrTemp
interval *= 2
if interval > self.n:
break
else:
n_interval = self.n//interval
def main():
# Test Functions
t_n = 16
t = NumberArray(t_n)
#print(t1)
t.shuffle()
#print(t1)
# Test remove, add & sum methods
t_rem = 10
t.remove(num=t_rem)
#print(f"remove:{t1_rem}, new_series:{t1}")
#print(f"expected_sum_of_series:{t1.calculate_expected_sum()}")
#print(f"actual_sum_of_series:{t1.calculate_actual_sum()}")
missing = t.find_missing_entity()
#print(f"missing:{missing}")
t.add(missing)
#print(f"add_missing_entity:{t1}")
# Test Sorting Methods
dashes = '-'*(t.n * 4)
print(dashes)
print("Unsorted: ", t)
print(dashes)
t_sel = copy.deepcopy(t)
t_sel.sort_selection()
#print("sort_sel: ", t_sel, "\n")
t_bub = copy.deepcopy(t)
t_bub.sort_bubble()
#print("sort_bub: ", t_bub, "\n")
t_brc = copy.deepcopy(t)
t_brc.sort_bubble_recursive()
#print("sort_brc: ", t_brc, "\n")
t_ins = copy.deepcopy(t)
t_ins.sort_insertion()
#print("sort_ins: ", t_ins, "\n")
t_irc = copy.deepcopy(t)
t_irc.sort_insertion_recursive()
#print("sort_irc: ", t_irc, "\n")
t_mrg = copy.deepcopy(t)
t_mrg.sort_merge()
#print("sort_mrg: ", t_mrg, "\n")
t_mgi = copy.deepcopy(t)
t_mgi.sort_merge_iterative()
#print("sort_mgi: ", t_mgi, "\n")
t_qui = copy.deepcopy(t)
t_qui.sort_quick()
#print("sort_qui: ", t_qui, "\n")
t_qit = copy.deepcopy(t)
t_qit.sort_quick_iterative()
#print("sort_qit: ", t_qit, "\n")
t_bhp = copy.deepcopy(t)
t_bhp.sort_binary_heap()
#print("sort_bhp: ", t_bhp, "\n")
t_cnt = copy.deepcopy(t)
t_cnt.sort_counting()
#print("sort_cnt: ", t_cnt, "\n")
t_rad = copy.deepcopy(t)
t_rad.sort_radix()
#print("sort_rad: ", t_rad, "\n")
t_buc = copy.deepcopy(t)
t_buc.sort_bucket()
#print("sort_buc: ", t_buc, "\n")
t_shl = copy.deepcopy(t)
t_shl.sort_shell()
print("sort_shl: ", t_shl, "\n")
t_tim = copy.deepcopy(t)
t_tim.sort_tim()
print("sort_tim: ", t_tim, "\n")
if __name__ == "__main__":
main()
```
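The cumulative-count indexing used by `sort_counting` above is easiest to see on a tiny input; this sketch mirrors the same three passes (count, cumulate, place) without the class:
```python
ints = [3, 1, 2, 1]
count = [0] * 4                  # counts for values 0..3
for v in ints:
    count[v] += 1                # [0, 2, 1, 1]
for i in range(1, len(count)):
    count[i] += count[i - 1]     # cumulative: [0, 2, 3, 4]
out = [0] * len(ints)
for v in ints:
    out[count[v] - 1] = v        # cumulative count marks the value's last free slot
    count[v] -= 1
print(out)                       # [1, 1, 2, 3]
```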
#### File: src/data/hash_table.py
```python
import numpy as np
class HashTable:
"""
Associative Array Data Structure
- uses numpy arrays to store data
"""
# Define a dictionary to hold key-value pairs
__elements = {}
# Define an internal counter for the number of elements
__n = 0
# Define a table capacity
# - power of 2 recommended
TABLE_CAP = 64
# Knuth constant
# - applies when TABLE_CAP is a power of 2
random_real_A = 0.5 * (np.sqrt(5) - 1)
def __init__(self, array) -> None:
if isinstance(array, (list)):
self.__elements = np.empty(self.TABLE_CAP)
self.__elements[:] = np.NaN
for i in range(len(array)):
self.insert(array[i]) # insert() already increments __n
elif isinstance(array, np.ndarray):
self.__elements = np.empty(self.TABLE_CAP)
self.__elements[:] = np.NaN
for i in range(array.size):
self.insert(array[i]) # insert() already increments __n
# STR representation method
def __str__(self) -> str:
srt = format(self.__n) + "["
for i in self.__elements:
if not np.isnan(i):
srt += f"{i}, "
srt = srt.removesuffix(", ") # str.removesuffix returns a new string
srt += "]"
return srt
# Hashing function
# - Returns index for hash table where value is stored
def get_index(self, key) -> int:
# Simple Integer Hashing function
# - using Knuth constant on the multiplication method
s = key * self.random_real_A
s = s - np.fix(s) # Get fraction part of s
s = int(self.TABLE_CAP * s) # Get whole part of s as index
#print(f"get_index(key:{key}) -> i:{s}")
return s
# Search for a value in a hash table
# - return index of value
def search(self, value) -> int:
index = self.get_index(value)
while index < self.TABLE_CAP - 1 and self.__elements[index] != value:
index += 1
return index
# Insert a value to a hash table
def insert(self, value):
index = self.get_index(value)
while index < self.TABLE_CAP - 1 and not np.isnan(self.__elements[index]):
index += 1
self.__elements[index] = value
self.__n += 1
# Delete a value from a hash table
def delete(self, value):
index = self.get_index(value)
while index < self.TABLE_CAP - 1 and self.__elements[index] != value:
index += 1
self.__elements = np.delete(self.__elements, index)
self.__n -= 1
def main():
# Test Hash_Table
maxi = 16
tarr = np.arange(1, maxi)
ht = HashTable(tarr)
print("tarr:", tarr)
#print(ht)
# Test Search method
for i in tarr:
print(f"search_for:{i}, result:{ht.search(i)}")
print("HashTable_Printout:\n", ht)
# Test Insert method
it = [0, 16, 32, 64]
for i in it:
ht.insert(i)
print("Insert_HashTable_Printout:\n", ht)
# Test Delete method
dt = []
for i in range(maxi):
if i % 2 != 0: # Delete odd numbers from ht
dt.append(i)
for i in dt:
ht.delete(i)
print("Delete_HashTable_Printout:\n", ht)
if __name__ == "__main__":
main()
```
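The index computation in `get_index` is Knuth's multiplication method: take the fractional part of `key * A` and scale it by the table capacity. The same arithmetic, standalone (capacity of 64 as in the class):
```python
import numpy as np
A = 0.5 * (np.sqrt(5) - 1)           # Knuth's constant, ~0.6180339887
def knuth_index(key, table_cap=64):
    s = key * A
    s = s - np.fix(s)                # fractional part of key*A
    return int(table_cap * s)        # scale into [0, table_cap)
print(knuth_index(1), knuth_index(2))  # 39 15
```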
#### File: src/numerical/euler_method.py
```python
import numpy
from base.polynomial import Polynomial
def eulers_method(multiVarPolynom, y0, h, time_steps):
y_n = y0
# step forward repeatedly: y_{n+1} = y_n + h * f(y_n, t_n)
for i in time_steps:
y_n = y_n + h*multiVarPolynom.calculate(y_n, i)
return y_n
def main():
# Test Euler's Method:
pass
if __name__ == "__main__":
# test function
main()
```
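For reference, the same forward-Euler update applied to a concrete ODE, without the `Polynomial` class (y' = -2y, y(0) = 1, step h = 0.1):
```python
def euler(f, y0, h, ts):
    y = y0
    for t in ts:
        y = y + h * f(y, t)
    return y
ts = [i * 0.1 for i in range(10)]
print(euler(lambda y, t: -2.0 * y, 1.0, 0.1, ts))  # ~0.1074, vs exp(-2) ~ 0.1353
```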
#### File: src/numerical/newtons_method.py
```python
import numpy
from numpy.lib.polynomial import roots
from base.polynomial import Polynomial
# The error_tolerance defines the threshold at which numbers are equal:
# |n1 - n2| < error_tolerance
ERROR_TOLERANCE = 1e-9
# Maximum number of iterations to run if solution does not converge
MAX_ITERATIONS = 50
# Occurrences are counted for each estimate:
# - If convergence for a single estimate is not reached,
# the estimate is slightly increased and Newton's method is re-run on the new estimate
# = This counts as an occurrence.
MAX_OCCURENCES = 10
# Newton's Method: An iterative method for finding roots to polynomials
# - an estimate is required to find the closest root
def Newtons_method(polynomi, estimate = 1, occurence=0):
x = estimate
error = 1
x_n = 0
iteratr = 0
loop = True
while loop:
x_n = approximate_function(x, polynomi)
error = abs(x - x_n)
# Check if necessary to iterate
if error < ERROR_TOLERANCE or iteratr == MAX_ITERATIONS:
loop = False
#print("i:",iTerate, ", x=", x, ", x+=", x_n, ", error=", error)
x = x_n
iteratr += 1
if iteratr < MAX_ITERATIONS:
return x_n # return root
else:
print(f"Max_Iterations:{iteratr} reached without convergence.")
print(f"Error_Tolerance:{ERROR_TOLERANCE:.9f}, error reached:{error:.9f}")
occurence += 1
if occurence < MAX_OCCURENCES:
new_estimate = x_n - occurence*abs(x_n - estimate)
return Newtons_method(polynomi, new_estimate, occurence)
else:
raise RuntimeError("Loop Iteration Ceased & Max Occurrences reached with no roots found!")
# x_n = x - f(x)/f'(x)
def approximate_function(x, polynomial):
return x - polynomial.evaluate(x)/polynomial.derive().evaluate(x)
# Determine if the difference between numbers falls below a threshold,
# making it effectively zero
def is_equal(a, b):
ans = abs(a - b)
if ans < ERROR_TOLERANCE:
return True
else:
return False
# Find all real roots of a polynomial over interval [a, b]
def get_roots(polynomi, n_roots=0, interval_start=-1,interval_end=1):
if n_roots == 0:
# if n_roots not specified
n_estimates = polynomi.degree
else:
n_estimates = n_roots
roots = numpy.zeros(n_estimates)
if n_estimates > 1:
estimate_interval = abs(interval_start - interval_end) / (n_estimates - 1)
else:
estimate_interval = abs(interval_start - interval_end)
#print(f"estimate_interval:{estimate_interval}")
# calculate the roots using Newton's method based on an estimate initial value
# - assuming equally spaced within specified interval
for i in range(n_estimates):
est = interval_start + i * estimate_interval
#print(f"{i}_est:{est}")
potential_root = Newtons_method(polynomi, est)
for j in range(roots.size):
if not is_equal(roots[j], potential_root):
roots[i] = potential_root
return roots
# Test function
def main():
# Test Newton's Method for 1 iteration
polynom = Polynomial([-3,8,-7,1]) # -3 + 8x -7x^2 + x^3
print(f"poly:{polynom}\npoly_derive:{polynom.derive()}")
print(f"poly__REPR__:{repr(polynom)}")
# Test value evaluation
x = 5
fx = polynom.evaluate(x)
fpx = polynom.derive().evaluate(x)
print(f"x: {x}, f(x): {fx}, f'(x): {fpx}")
print("Frac:", approximate_function(x, polynom))
# Test Newton's Method for calculating a root based on an estimate(x)
root = Newtons_method(polynom, x)
print(f"newtons_method_root:{root}")
# Test the application of Newton's Method for finding all roots within a given interval
# Define a Legendre polynomial of degree 3
legendre3 = Polynomial([0, -1.5, 0, 2.5])
print(f"leg3_repr: {legendre3.__repr__()}")
#print(f"leg3_derive: {legendre3.derive().__repr__()}")
# Test root finding methodology
print(f"leg3_roots:{get_roots(legendre3)}")
print(f"leg3_numpy_roots:{numpy.roots(numpy.flip(legendre3.co_array))}")
# Test individual estimates
print(f"leg_est(-1):{Newtons_method(legendre3, -1)}")
print(f"leg_est( 0):{Newtons_method(legendre3, 0)}")
print(f"leg_est(+1):{Newtons_method(legendre3, 1)}")
# Define a Legendre Polynomial of degree 4
legendre4 = Polynomial([3/8, 0, -30/8, 0, 35/8])
print(f"leg4: {legendre4.__repr__()}")
#print(f"leg4_derive: {legendre4.derive().__repr__()}")
# Test root finding method:
print(f"leg4_roots:{get_roots(legendre4)}")
print(f"leg4_numpy_roots:{numpy.roots(numpy.flip(legendre4.co_array))}")
# Test individual estimates
print(f"leg_est( -1):{Newtons_method(legendre4, -1)}")
print(f"leg_est(-0.3):{Newtons_method(legendre4, -0.3)}")
print(f"leg_est(+0.3):{Newtons_method(legendre4, 0.3)}")
print(f"leg_est( +1):{Newtons_method(legendre4, 1)}")
if __name__ == "__main__":
main()
```
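Stripped of the polynomial machinery, the iteration `x_{n+1} = x - f(x)/f'(x)` converges in a handful of steps; here it recovers sqrt(2) from f(x) = x^2 - 2:
```python
x = 1.0
for _ in range(6):
    x = x - (x * x - 2.0) / (2.0 * x)
print(x)  # 1.414213562373095, i.e. sqrt(2)
```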
#### File: tasks/advent_of_code_21/day14.py
```python
class Polymer:
def __init__(self, template, insertions) -> None:
self.insertion_rules = []
self.elements = []
self.quantities = []
self.count_pair = {}
self.first_char = template[0]
for i in insertions:
rule = i.split(' -> ')
self.insertion_rules.append(rule)
if rule[1] not in self.elements:
self.elements.append(rule[1])
self.quantities.append(0)
self.count_pair[rule[0]] = 0
# Set template to init pair count
for i in range(len(template) - 1):
key = f'{template[i]}{template[i+1]}'
self.count_pair[key] += 1
print(self.count_pair)
def __str__(self) -> str:
sret = f'{sum(self.quantities)}\n'
for i in range(len(self.elements)):
sret += f'{self.elements[i]}:{self.quantities[i]}\n'
sret = sret.rstrip()
return sret
def get_rule(self, key) -> str:
for i in self.insertion_rules:
if i[0] == key:
return i[1]
raise ProcessLookupError(f'Unknown Key:{key}')
def step(self):
count = self.count_pair.copy()
for key in count:
rule_char = self.get_rule(key)
key_l = f'{key[0]}{rule_char}' # left key
key_r = f'{rule_char}{key[1]}' # right key
self.count_pair[key_l] += count[key]
self.count_pair[key_r] += count[key]
self.count_pair[key] -= count[key]
#print(f'{key} -> {key_l}:{key_r}')
self.set_quantities()
print(self.count_pair)
def set_quantities(self):
self.quantities.clear()
self.quantities = [0 for i in range(len(self.elements))]
for key in self.count_pair:
self.quantities[self.elements.index(key[1])] += self.count_pair[key]
self.quantities[self.elements.index(self.first_char)] += 1
def get_sorted_quantities(self) -> list:
return sorted(self.quantities)
def main():
template = ''
insertions = []
line_break = False
filepath = 'src/tasks/advent_of_code_21/day14_input.txt'
with open(filepath, 'r') as f:
for line in f:
if not line_break:
if line == "\n":
line_break = True
else:
template = line.strip()
else:
insertions.append(line.strip())
#print(f"template:{template}\npair_insertion_rules:\n{insertions}")
"""
template = 'NNCB'
insertions = ['CH -> B', # 6x B rules
'HH -> N', # 3x N rules
'CB -> H', # 2x H rules
'NH -> C', # 5x C rules
'HB -> C',
'HC -> B',
'HN -> C',
'NN -> C',
'BH -> H',
'NC -> B',
'NB -> B',
'BN -> B',
'BB -> N',
'BC -> B',
'CC -> N',
'CN -> C']
#"""
poly = Polymer(template, insertions)
for i in range(1,11):
poly.step()
print(f'>> step:{i}\n{poly}\n')
q = poly.get_sorted_quantities()
mc_minus_lc = q[len(q) - 1] - q[0]
print(f"\tM_Common - L_Common:{mc_minus_lc}\n")
# Part 2 -----------------------------------------------
for i in range(11,41):
poly.step()
print(f'>> step:{i}\n{poly}\n')
q = poly.get_sorted_quantities()
mc_minus_lc = q[len(q) - 1] - q[0]
print(f"\tM_Common - L_Common:{mc_minus_lc}\n")
if __name__ == "__main__":
main()
```
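The pair-count bookkeeping in `Polymer.step()` never builds the string; each rule `AB -> C` just moves the count of `AB` onto `AC` and `CB`. One step of that update on the puzzle's sample pairs, with plain dicts:
```python
counts = {'NN': 1, 'NC': 1, 'CB': 1}     # pairs of the template 'NNCB'
rules = {'NN': 'C'}                      # only one rule shown for brevity
step = dict(counts)
for pair, n in counts.items():
    if pair in rules and n:
        c = rules[pair]
        step[pair] -= n
        step[pair[0] + c] = step.get(pair[0] + c, 0) + n
        step[c + pair[1]] = step.get(c + pair[1], 0) + n
print(step)  # {'NN': 0, 'NC': 2, 'CB': 1, 'CN': 1}
```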
#### File: tasks/advent_of_code_21/day18.py
```python
class Pair:
def __init__(self, pstr: str, depth=0, parent=None) -> None:
self.d = depth
self.x = None # Default x type is Pair type, otherwise int
self.y = None # Default y type is Pair type, otherwise int
self.is_reduced = False
self.pairent = parent
pstr_cut = ''
# Add x value of pair
if pstr[1] == '[':
bracketed = self.get_bracketed_substr(pstr[1:])
self.x = Pair(bracketed, depth + 1, self)
pstr_cut = pstr.replace(f'[{bracketed}', '', 1)
else:
self.x = int (pstr[1])
pstr_cut = pstr.replace(f'[{self.x}', '', 1)
# Add y value of pair
if pstr_cut[1] == '[':
bracketed = self.get_bracketed_substr(pstr_cut[1:])
self.y = Pair(bracketed, depth + 1, self)
else:
self.y = int(pstr_cut[1])
if isinstance(self.x, int) and isinstance(self.y, int):
self.is_reduced = True
else:
self.to_reduced()
def __str__(self) -> str:
sret = f'[{self.x},{self.y}]'
return sret
# Overload addition operator
def __add__(self, other):
if isinstance(other, Pair):
pstr = f'[{self.__str__()},{other}]'
new_pair = Pair(pstr)
return new_pair
elif isinstance(other, list):
if other[0] == 0: # y specified
if isinstance(self.y, int):
self.y += other[1]
else:
self.y += other
elif other[1] == 0: # x specified
if isinstance(self.x, int):
self.x += other[0]
else:
self.x += other
else:
raise TypeError(f'{other} not a recognized type to add to Pair-object')
# Return a substring enclosed by the first bracket
def get_bracketed_substr(self, pstr) -> str:
bret = []
open_brackets = 0
for s in pstr:
bret.append(s)
if s == '[':
open_brackets += 1
elif s == ']':
open_brackets -= 1
if open_brackets == 0:
sret = ''.join(bret)
#print(f'get_bracketed_of {pstr} -> {sret}')
return sret
raise LookupError(f'Str:{pstr} has unclosed brackets')
def to_reduced(self):
if not self.is_reduced:
op = False
# X reduction
if isinstance(self.x, Pair): # x explode
self.x.to_reduced()
if self.d >= 3:
op = True
else: # x split
if self.x > 9:
op = True
x = self.x
self.x = Pair(f'[{x//2},{int(x/2 + x%2)}]')
# Y reduction
if isinstance(self.y, Pair) and not op: # y explode
self.y.to_reduced()
if self.d >= 3:
op = True
else: # y split
if self.y > 9:
op = True
y = self.y
self.y = Pair(f'[{y//2},{int(y/2 + y%2)}]')
if op: # Track if operation executed
self.to_reduced()
else:
self.is_reduced = True
def add_to_first_regular(self, val, fromXorY):
if self == self.pairent.x:
if fromXorY == 0: # from x
if self.d > 0:
self.pairent.add_to_first_regular(val, fromXorY) # match the (val, fromXorY) signature
elif fromXorY == 1: # from y
if isinstance(self.y, Pair):
self.y.x += val
else:
self.y += val
elif self == self.pairent.y:
if fromXorY == 0: # from x
if isinstance(self.x, Pair):
self.x.y += val
else:
self.x += val # mirrors the x-branch above
elif fromXorY == 1: # from y
if self.d > 0:
self.pairent.add_to_first_regular(val, fromXorY) # match the (val, fromXorY) signature
else:
raise RecursionError('add L_or_R comparison did NOT work')
def get_magnitude(self) -> int:
xm = 0
if isinstance(self.x, Pair):
xm = self.x.get_magnitude()
else:
xm = self.x
ym = 0
if isinstance(self.y, Pair):
ym = self.y.get_magnitude()
else:
ym = self.y
return (3*xm) + (2*ym)
class PairList:
def __init__(self, numbers) -> None:
self.pairs = []
for n in range(len(numbers)):
pair_n = Pair(numbers[n])
self.pairs.append(pair_n)
print(f'{n}: {pair_n}')
def main():
numbers = []
relative_path = 'src/tasks/advent_of_code_21/day18_input.txt'
with open(relative_path, 'r') as f:
for line in f:
numbers.append(line.strip())
#print(f'{numbers}')
#""" Test Cases
numbers = ['[[[[[9,8],1],2],3],4]',
'[7,[6,[5,[4,[3,2]]]]]',
'[[6,[5,[4,[3,2]]]],1]',
'[[3,[2,[1,[7,3]]]],[6,[5,[4,[3,2]]]]]',
'[[3,[2,[8,0]]],[9,[5,[4,[3,2]]]]]']
for n in numbers:
pair_n = Pair(n)
print(pair_n)
"""
numbers = ['[[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]]',
'[[[5,[2,8]],4],[5,[[9,9],0]]]',
'[6,[[[6,2],[5,6]],[[7,6],[4,7]]]]',
'[[[6,[0,7]],[0,9]],[4,[9,[9,0]]]]',
'[[[7,[6,4]],[3,[1,3]]],[[[5,5],1],9]]',
'[[6,[[7,3],[3,2]]],[[[3,8],[5,7]],4]]',
'[[[[5,4],[7,7]],8],[[8,3],8]]',
'[[9,3],[[9,9],[6,[4,9]]]]',
'[[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]]',
'[[[[5,2],5],[8,[3,7]]],[[5,[7,5]],[4,4]]]']
plist = PairList(numbers)
psum = plist[0]
for p in plist[1:]:
psum += p
print(f'{psum} mag:{psum.get_magnitude()}')
"""
if __name__ == '__main__':
main()
```
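The magnitude rule implemented by `get_magnitude` (3*left + 2*right, applied recursively) on the worked example from the puzzle text, using nested lists instead of `Pair` objects:
```python
def magnitude(p):
    if isinstance(p, int):
        return p
    return 3 * magnitude(p[0]) + 2 * magnitude(p[1])
print(magnitude([[1, 2], [[3, 4], 5]]))  # 143
```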
#### File: tasks/advent_of_code_21/day2.py
```python
import numpy as np
class Submarine:
# Define a vertical position
posVERT = None
# Define a horizontal position
posHORZ = None
# PART 2: add aim variable
aim = None
def __init__(self) -> None:
self.posHORZ = 0
self.posVERT = 0
# Part 2
self.aim = 0
def simulate(self, instruction):
direction = instruction[:instruction.find(' ')]
magnitude = int (instruction[(instruction.find(' ') + 1):])
if direction == "forward":
self.posHORZ += magnitude
elif direction == "down":
self.posVERT += magnitude
elif direction == "up":
self.posVERT -= magnitude
else:
raise TypeError("Unkown direction instruction!")
def execute(self, instruction):
direction = instruction[:instruction.find(' ')]
magnitude = int (instruction[(instruction.find(' ') + 1):])
if direction == "forward":
#self.posHORZ += magnitude
# Part2
self.posHORZ += magnitude
self.posVERT += (self.aim * magnitude)
elif direction == "down":
#self.posVERT += magnitude
# Part2
self.aim += magnitude
elif direction == "up":
#self.posVERT -= magnitude
# Part2
self.aim -= magnitude
else:
raise TypeError("Unkown direction instruction!")
def read_input(fname):
with open(fname, 'r') as f:
arr = np.array(f.read().splitlines())
return arr
def main():
# Read input instruction set
relative_path = 'src/tasks/advent_of_code_21/'
intsructionset = read_input(relative_path + 'day2_input.txt')
#print(intsructionset)
print("Part 1:")
sim = Submarine()
for i in intsructionset:
sim.simulate(i)
print(f"horiztontal_pos:{sim.posHORZ}, depth:{sim.posVERT}")
print(f"horizontal_pos * depth:{sim.posHORZ * sim.posVERT}")
# Part 2 -----------------------------------------------------------------
print("Part 2:")
# Create instance of submarine
sub = Submarine()
# Follow the instruction set provided
for i in intsructionset:
sub.execute(i)
print(f"horiztontal_pos:{sub.posHORZ}, depth:{sub.posVERT}")
print(f"horizontal_pos * depth:{sub.posHORZ * sub.posVERT}")
if __name__ == "__main__":
main()
```
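The part-2 bookkeeping in `execute` (forward moves also change depth by aim x magnitude) traced on the puzzle's sample course:
```python
horiz = depth = aim = 0
for d, m in [("forward", 5), ("down", 5), ("forward", 8),
             ("up", 3), ("down", 8), ("forward", 2)]:
    if d == "forward":
        horiz += m
        depth += aim * m
    elif d == "down":
        aim += m
    else:  # "up"
        aim -= m
print(horiz, depth, horiz * depth)  # 15 60 900
```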
#### File: tasks/advent_of_code_21/day8.py
```python
from numpy import zeros
from numpy.core.fromnumeric import sort
class Signal:
pattern = []
output = []
seg7seq = []
def __init__(self, entry) -> None:
signal = entry.strip().split(' | ')
self.pattern = signal[0].split()
self.output = signal[1].split()
self.seg7seq = [''] * 10
def __str__(self) -> str:
sret = format(self.pattern) + " | "
sret += format(self.output)
return sret
def count_output_digit_sequences(self, n_digits):
ct = 0
for i in self.output:
if len(i) == n_digits:
ct += 1
return ct
def set_display_correlation(self):
seq069 = []
seq235 = []
# Set sequences based on length
for i in self.pattern:
seg = len(i)
seq = ''.join(sorted(i))
if seg == 2: # set 1
self.seg7seq[1] = seq
elif seg == 4: # set 4
self.seg7seq[4] = seq
elif seg == 3: # set 7
self.seg7seq[7] = seq
elif seg == 7: # set 8
self.seg7seq[8] = seq
elif seg == 6:
seq069.append(seq)
else:
seq235.append(seq)
# Isolate left-upper and middle segments
lup_mid = self.seg7seq[4]
for i in self.seg7seq[1]:
lup_mid = lup_mid.replace(i, '')
# Set 0 6 9
for i in seq069: # 6 char length
ct = 0
for s in self.seg7seq[1]:
if s in i:
ct += 1
if ct == 2: # 0 or 9
ctn = 0
for s in lup_mid:
if s in i:
ctn += 1
if ctn == 2:
self.seg7seq[9] = i # set 9
else:
self.seg7seq[0] = i # set 0
else:
self.seg7seq[6] = i # set 6
# Set 2 3 5
for i in seq235: # 5 char length
ct = 0
for s in lup_mid:
if s in i:
ct += 1
if ct == 2:
self.seg7seq[5] = i # set 5
else:
ctn = 0
for s in self.seg7seq[1]:
if s in i:
ctn += 1
if ctn == 2:
self.seg7seq[3] = i # set 3
else:
self.seg7seq[2] = i # set 2
#Print correlations
#for i in range(10):
# print(f"{i} seg:{self.seg7seq[i]}")
def get_output(self):
self.set_display_correlation()
sret = ''
for i in self.output:
seq = ''.join(sorted(i))
for s in range(10):
if seq == self.seg7seq[s]:
sret += format(s)
return int(sret)
def main():
signal_entry = []
relative_path = 'src/tasks/advent_of_code_21/day8_input.txt'
with open(relative_path, 'r') as f:
signal_entry = f.readlines()
#print(f"signal_entry: {signal_entry}")
signals = []
for i in signal_entry:
signals.append(Signal(i))
ct = 0
for i in signals:
ct += i.count_output_digit_sequences(2) #1
ct += i.count_output_digit_sequences(4) #4
ct += i.count_output_digit_sequences(3) #7
ct += i.count_output_digit_sequences(7) #8
print(f"[1,4,7,8]'s in output:{ct}")
# Part 2 --------------------------------------------------------
osum = 0
for i in range(len(signals)):
out = signals[i].get_output()
osum += out
print(f"{i}|{signals[i]} :{out}")
print(f"Sum of Outputs:{osum}")
if __name__ == "__main__":
main()
``` |
{
"source": "jnmclarty/fapple",
"score": 2
} |
#### File: jnmclarty/fapple/keyboardeventmaker.py
```python
import ctypes
import time
SendInput = ctypes.windll.user32.SendInput
VKs = {'\t' : (0x09,),
'0' : (0x30,),
'1' : (0x31,),
'2' : (0x32,),
'3' : (0x33,),
'4' : (0x34,),
'5' : (0x35,),
'6' : (0x36,),
'7' : (0x37,),
'8' : (0x38,),
'9' : (0x39,),
'A' : (0x41,'upper'),
'B' : (0x42,'upper'),
'C' : (0x43,'upper'),
'D' : (0x44,'upper'),
'E' : (0x45,'upper'),
'F' : (0x46,'upper'),
'G' : (0x47,'upper'),
'H' : (0x48,'upper'),
'I' : (0x49,'upper'),
'J' : (0x4A,'upper'),
'K' : (0x4B,'upper'),
'L' : (0x4C,'upper'),
'M' : (0x4D,'upper'),
'N' : (0x4E,'upper'),
'O' : (0x4F,'upper'),
'P' : (0x50,'upper'),
'Q' : (0x51,'upper'),
'R' : (0x52,'upper'),
'S' : (0x53,'upper'),
'T' : (0x54,'upper'),
'U' : (0x55,'upper'),
'V' : (0x56,'upper'),
'W' : (0x57,'upper'),
'X' : (0x58,'upper'),
'Y' : (0x59,'upper'),
'Z' : (0x5A,'upper'),
'.' : (0xBE,),
'a' : (0x41,'lower'),
'b' : (0x42,'lower'),
'c' : (0x43,'lower'),
'd' : (0x44,'lower'),
'e' : (0x45,'lower'),
'f' : (0x46,'lower'),
'g' : (0x47,'lower'),
'h' : (0x48,'lower'),
'i' : (0x49,'lower'),
'j' : (0x4A,'lower'),
'k' : (0x4B,'lower'),
'l' : (0x4C,'lower'),
'm' : (0x4D,'lower'),
'n' : (0x4E,'lower'),
'o' : (0x4F,'lower'),
'p' : (0x50,'lower'),
'q' : (0x51,'lower'),
'r' : (0x52,'lower'),
's' : (0x53,'lower'),
't' : (0x54,'lower'),
'u' : (0x55,'lower'),
'v' : (0x56,'lower'),
'w' : (0x57,'lower'),
'x' : (0x58,'lower'),
'y' : (0x59,'lower'),
'z' : (0x5A,'lower')}
# C struct redefinitions
PUL = ctypes.POINTER(ctypes.c_ulong)
class KeyBdInput(ctypes.Structure):
_fields_ = [("wVk", ctypes.c_ushort),
("wScan", ctypes.c_ushort),
("dwFlags", ctypes.c_ulong),
("time", ctypes.c_ulong),
("dwExtraInfo", PUL)]
class HardwareInput(ctypes.Structure):
_fields_ = [("uMsg", ctypes.c_ulong),
("wParamL", ctypes.c_short),
("wParamH", ctypes.c_ushort)]
class MouseInput(ctypes.Structure):
_fields_ = [("dx", ctypes.c_long),
("dy", ctypes.c_long),
("mouseData", ctypes.c_ulong),
("dwFlags", ctypes.c_ulong),
("time",ctypes.c_ulong),
("dwExtraInfo", PUL)]
class Input_I(ctypes.Union):
_fields_ = [("ki", KeyBdInput),
("mi", MouseInput),
("hi", HardwareInput)]
class Input(ctypes.Structure):
_fields_ = [("type", ctypes.c_ulong),
("ii", Input_I)]
# Actual functions
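# PressKey/ReleaseKey wrap Win32 SendInput with a KEYBDINPUT struct:
# dwFlags=0 sends a key-down and dwFlags=0x0002 (KEYEVENTF_KEYUP) a key-up.
# Virtual-key codes used below: 0xA1 is VK_RSHIFT, 0x12 is VK_MENU (Alt),
# 0x09 is VK_TAB.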
def PressKey(hexKeyCode):
extra = ctypes.c_ulong(0)
ii_ = Input_I()
ii_.ki = KeyBdInput( hexKeyCode, 0x48, 0, 0, ctypes.pointer(extra) )
x = Input( ctypes.c_ulong(1), ii_ )
SendInput(1, ctypes.pointer(x), ctypes.sizeof(x))
def ReleaseKey(hexKeyCode):
extra = ctypes.c_ulong(0)
ii_ = Input_I()
ii_.ki = KeyBdInput( hexKeyCode, 0x48, 0x0002, 0, ctypes.pointer(extra) )
x = Input( ctypes.c_ulong(1), ii_ )
SendInput(1, ctypes.pointer(x), ctypes.sizeof(x))
def TypeKey(kexKeyCode):
PressKey(kexKeyCode)
time.sleep(0.01)
ReleaseKey(kexKeyCode)
time.sleep(0.01)
def PressCharacter(c):
if c in VKs:
k = VKs[c]
else:
raise Exception("{} not in Virtual Keyboard Dictionary".format(c))
if len(k) == 1:
TypeKey(k[0])
else: #len is 2
shift = False
if k[1] == 'upper':
shift=True
if shift:
PressKey(0xA1)
TypeKey(k[0])
if shift:
ReleaseKey(0xA1)
def PressString(s):
for c in s:
PressCharacter(c)
time.sleep(0.1)
def TryPassword(passwd, newpwd):
PressString(passwd)
for i in range(2):
PressCharacter("\t")
PressString(newpwd)
def AltTab():
'''
Press Alt+Tab and hold Alt key for 2 seconds in order to see the overlay
'''
PressKey(0x012) #Alt
PressKey(0x09) #Tab
ReleaseKey(0x09) #~Tab
time.sleep(2)
ReleaseKey(0x012) #~Alt
if __name__ =="__main__":
for i in range(10):
PressString('Aa0.\t1')
time.sleep(1)
#AltTab()
``` |
{
"source": "jnmclarty/gazpacho",
"score": 3
} |
#### File: gazpacho/gazpacho/utils.py
```python
def match(a, b, strict=False):
"""Utility function to match two dictionaries
Params:
- a (dict): Query dictionary
- b (dict): Dictionary to match
- strict (bool): Require exact matching
Examples:
```
a = {'foo': 'bar'}
b = {'foo': 'bar baz'}
match(a, b)
# True
a = {'foo': 'bar'}
b = {'foo': 'bar baz'}
match(a, b, strict=True)
# False
a = {}
b = {'foo': 'bar'}
match(a, b)
# True
a = {}
b = {}
match(a, b)
# True
```
"""
if not a:
return True
if not a and not b:
return True
if a and not b:
return False
for k, v in a.items():
if not b.get(k):
return False
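        # strict requires exact value equality; otherwise a substring match
        # (e.g. 'bar' matching 'bar baz') is accepted.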
if strict:
if v == b.get(k):
continue
else:
return False
if v in b.get(k):
continue
else:
return False
return True
def html_starttag_and_attrs(tag, attrs, startendtag=False):
"""Utility functon to reconstruct starttag and attrs
Params:
- tag (str): HTML element tag
- attrs (list): HTML element attributes formatted as a list of tuples
- startendtag (bool, False): Flag to handle startend tags
Example:
```
html_starttag_and_attrs('a', [('href', 'localhost:8000')])
# ('<a href="localhost:8000">', {'href': 'localhost:8000'})
```
"""
if attrs:
attrs = dict(attrs)
af = [f'{k}="{v}"' for k, v in attrs.items()]
af = f' {" ".join(af)}'
else:
attrs = {}
af = ""
if startendtag:
html = f"<{tag}{af} />"
else:
html = f"<{tag}{af}>"
return html, attrs
``` |
{
"source": "jnmclarty/pyenvdiff-lib",
"score": 2
} |
#### File: pyenvdiff-lib/pyenvdiff/hub.py
```python
if __name__ == '__main__':
from pickle import dump, HIGHEST_PROTOCOL
from pyenvdiff.version import __version__ as v
from .bottle.bottle import route, run, response, request, template
from .environment import Environment, EnvironmentDiff
from .import_macros import import_json
from .version import __version__ as v
from datetime import datetime
try:
import ghdiff
GHDIFF_MSG = ""
except:
GHDIFF_MSG = "PyEnvDiff is dependency free, but the diffs are easier to read if you manually install ghdiff from pypi. <code>pip install ghdiff</code> does it."
DEVELOP_MODE = False
title = "PyEnvDiff Environment Diff Tool Hub v" + v
header = "<h1>Welcome to " + title + " for M2M & H2M Environment Information & Difference Server!</h1>For more information see the <a href='http://pyenvdiff.com'>home page</a> or checkout the <a href='http://github.com/jnmclarty/pyenvdiff-lib'>github</a>"
env_info_store = ['The first entry in this list is never used, the rest of the elements are a tuple of the form (environment info, timestamp)']
HOME_TEMPLATE = """<html><h1>""" + title + """</h1>
<p>View Environment Information for the hub at <a href=/environment_info>/environment_info</a>
% for i, (posted_data, ts) in enumerate(stored_envs):
<p>View Environment Information for <a href=/environment_info/{{i+1}}>Env #{{i+1}}</a> from {{ts}}
% end
% for a, b, hub in options:
<p>Compare <a href=/environment_diff_view/{{a}}/{{b}}>Env #{{a}} {{hub}} and Env #{{b}}</a>
% end
</html>"""
@route('/')
def index():
options = []
for i in range(len(env_info_store)):
for j in range(i, len(env_info_store)):
if i != j:
if i == 0:
hub = "(hub)"
else:
hub = ""
options.append((i, j, hub))
return template(HOME_TEMPLATE, stored_envs=env_info_store[1:], options=options)
ENV_TEMPLATE = """<html><title>""" + title + """</title><body>
<h1>""" + title + """</h1><hr>
<h1>Environment #{{env_number}} as of {{ts}}</h1>
<ul>
% for collector in env.keys():
<li> <a href=#{{collector}}>{{collector}}</a>
</li>
% end
</ul>
<hr>
% for collector in env.keys():
<a name={{collector}}></a>
<h1>{{collector}}</h1>
<p><b>{{env[collector]['english']}}</b></p>
<p>{{!env[collector]['info']}}</p>
% end
</body></html>"""
@route('/environment_info', method='POST')
def environment_info_write():
"""Recieves Environment Info from another node."""
global env_info_store
json = import_json()
urlparts = request.urlparts
postdata = request.body.read()
ts = datetime.now().isoformat()
postdata = json.loads(postdata)
postdata['environment'] = Environment.from_dict(postdata['environment'])
env_info_store.append((postdata, ts))
env_info_number = len(env_info_store) - 1
out = {'timestamp': ts,
'environment_number': env_info_number,
'response_type': 'environment_info_write',
'read_url': urlparts.scheme + "://" + urlparts.netloc + "/environment_info/" + str(env_info_number),
'diff_url': urlparts.scheme + "://" + urlparts.netloc + "/environment_diff_view/0/" + str(env_info_number)}
out = json.dumps(out)
out = out.encode('utf-8')
response.content_type = 'application/json'
return out
@route('/environment_info', method='GET')
@route('/environment_info/<env_info_key>', method='GET')
def environment_info_read(env_info_key=0):
"""Recieves Environment Info"""
global env_info_store
env_info_key = int(env_info_key)
if env_info_key == 0:
environment = Environment()
ts = datetime.now().isoformat()
else:
postdata, ts = env_info_store[env_info_key]
environment = postdata['environment']
env_info = environment.for_web()
response.content_type = 'text/html'
return template(ENV_TEMPLATE, env=env_info, env_number=env_info_key, ts=ts)
def parse_input_do_diff(env_1_info_key, env_2_info_key):
def get_env(key):
global env_info_store
if key == 0:
e = Environment()
ts = datetime.now().isoformat()
else:
postdata, ts = env_info_store[key]
e = postdata['environment']
return e, ts
e1, ts1 = get_env(env_1_info_key)
e2, ts2 = get_env(env_2_info_key)
return EnvironmentDiff(e1, e2), ts1, ts2
@route('/environment_diff/<env_1_info_key>', method='GET')
@route('/environment_diff/<env_1_info_key>/<env_2_info_key>', method='GET')
def environment_diff(env_1_info_key, env_2_info_key=0):
env_1_info_key = int(env_1_info_key)
env_2_info_key = int(env_2_info_key)
json = import_json()
ed, ts1, ts2 = parse_input_do_diff(env_1_info_key, env_2_info_key)
ed = ed.for_json()
out = json.dumps(ed)
out = out.encode('utf-8')
response.content_type = 'application/json'
return out
DIFF_TEMPLATE = """<html><title>""" + title + """</title><body>
<h1>""" + title + """</h1>
""" + GHDIFF_MSG + """
<hr>
<h1>Environment #{{e1}} ({{ts1}}) vs #{{e2}} ({{ts2}})</h1>
% if e1 == 0 or e2 == 0:
Note: Environment #0 is the environment the PyEnvDiff hub is running from.
% end
<ul>
% for collector in diff.keys():
<li> <a href=#{{collector}}>{{collector}}</a>
% if not diff[collector]['matching']:
- Differences Detected!
% end
</li>
% end
</ul>
<hr>
% for env_cls in diff.keys():
<a name={{env_cls}}></a>
<h1>{{env_cls}}</h1>
<p><b>{{diff[env_cls]['english']}}</b></p>
% if diff[env_cls]['matching']:
<h2><font color="blue">Matching!</font></h2>
{{!diff[env_cls]['left']}}
% end
% if not diff[env_cls]['matching']:
<h2><font color="blue">Differences Detected!</font></h2>
<h3>Comparison</h3>
{{!diff[env_cls]['comparison']}}
<h3>Left</h3>
{{!diff[env_cls]['left']}}
<h3>Right</h3>
{{!diff[env_cls]['right']}}
% end
% end
</body></html>"""
@route('/environment_diff_view/<env_1_info_key>', method='GET')
@route('/environment_diff_view/<env_1_info_key>/<env_2_info_key>', method='GET')
def environment_diff_view(env_1_info_key, env_2_info_key=0):
env_1_info_key = int(env_1_info_key)
env_2_info_key = int(env_2_info_key)
diff, ts1, ts2 = parse_input_do_diff(env_1_info_key, env_2_info_key)
diff = diff.for_web()
if DEVELOP_MODE:
with open("diff.pickle", "wb+") as f:
dump(diff, f, protocol=HIGHEST_PROTOCOL)
response.content_type = 'text/html'
return template(DIFF_TEMPLATE, diff=diff, e1=env_1_info_key, e2=env_2_info_key, ts1=ts1, ts2=ts2)
if DEVELOP_MODE:
@route('/sample_diff')
def sample_diff():
from pickle import load
with open("diff.pickle", "rb+") as f:
diff = load(f)
response.content_type = 'text/html'
print(diff['SysPath'])
return template(DIFF_TEMPLATE, diff=diff)
SERVER = '0.0.0.0'
from sys import argv
try:
# If we get an integer, use it, else...
PORT = str(int(argv[-1]))
except:
PORT = '8080'
print("Starting up " + title + " running on:")
print()
print(" http://" + SERVER + ":" + PORT + "/")
print()
print("You can view environment information at")
print()
print(" http://" + SERVER + ":" + PORT + "/environment_info")
print()
print("If you run the following command from other environments:")
print(">>>python -m pyenvdiff.post_to_hub http://" + SERVER + ":" + PORT)
print("...")
print("You will be able to view it, or compare it, at the following URLs:")
print(" http://" + SERVER + ":" + PORT + "/environment_info/N where N is the number of environment, shown in the response after posting.")
print(" http://" + SERVER + ":" + PORT + "/environment_diff_view/N/M where N & M are two different environment numbers. Use 0 for the hub's environment.")
run(host=SERVER, port=PORT, quiet=True)
``` |
{
"source": "jnmclarty/pysia",
"score": 2
} |
#### File: pysia/pysia/client.py
```python
import requests as r
from simplejson import JSONDecodeError
GET = 1
POST = 2
class Sia(object):
def __init__(self, host='http://localhost', port='9980'):
self.host = host
self.port = port
@property
def _url_base(self):
return self.host + ":" + str(self.port)
def __call__(self, verb, url, data=None):
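        # The Sia daemon rejects API requests unless the User-Agent
        # identifies itself as "Sia-Agent".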
user_agent = {'User-agent': 'Sia-Agent'}
full_url = self._url_base + url
if verb == GET:
resp = r.get(full_url, headers=user_agent, params=data)
elif verb == POST:
resp = r.post(full_url, headers=user_agent, data=data)
try:
return resp.json()
except JSONDecodeError:
return resp.ok
def get_consensus(self, **data):
url = '/consensus'
return self(GET, url, data)
def set_consensus_validate_transactionset(self, **data):
url = '/consensus/validate/transactionset'
return self(POST, url, data)
def get_daemon_constants(self, **data):
url = '/daemon/constants'
return self(GET, url, data)
def get_daemon_stop(self, **data):
url = '/daemon/stop'
return self(GET, url, data)
def get_daemon_version(self, **data):
url = '/daemon/version'
return self(GET, url, data)
def get_gateway(self, **data):
url = '/gateway'
return self(GET, url, data)
def set_gateway_connect(self, netaddress, **data):
url = '/gateway/connect/{netaddress}'.format(netaddress=netaddress)
return self(POST, url, data)
def set_gateway_disconnect(self, netaddress, **data):
url = '/gateway/disconnect/{netaddress}'.format(netaddress=netaddress)
return self(POST, url, data)
def get_host(self, **data):
url = '/host'
return self(GET, url, data)
def set_host(self, **data):
url = '/host'
return self(POST, url, data)
def set_host_announce(self, **data):
url = '/host/announce'
return self(POST, url, data)
def get_host_storage(self, **data):
url = '/host/storage'
return self(GET, url, data)
def set_host_storage_folders_add(self, **data):
url = '/host/storage/folders/add'
return self(POST, url, data)
def set_host_storage_folders_remove(self, **data):
url = '/host/storage/folders/remove'
return self(POST, url, data)
def set_host_storage_folders_resize(self, **data):
url = '/host/storage/folders/resize'
return self(POST, url, data)
def set_host_storage_sectors_delete(self, merkleroot, **data):
url = '/host/storage/sectors/delete/{merkleroot}'.format(merkleroot=merkleroot)
return self(POST, url, data)
def get_hostdb_active(self, **data):
url = '/hostdb/active'
return self(GET, url, data)
def get_hostdb_all(self, **data):
url = '/hostdb/all'
return self(GET, url, data)
def get_hostdb_hosts(self, pubkey, **data):
url = '/hostdb/hosts/{pubkey}'.format(pubkey=pubkey)
return self(GET, url, data)
def get_miner(self, **data):
url = '/miner'
return self(GET, url, data)
def get_miner_header(self, **data):
url = '/miner/header'
return self(GET, url, data)
def set_miner_header(self, **data):
url = '/miner/header'
return self(POST, url, data)
def get_miner_start(self, **data):
url = '/miner/start'
return self(GET, url, data)
def get_miner_stop(self, **data):
url = '/miner/stop'
return self(GET, url, data)
def get_renter(self, **data):
url = '/renter'
return self(GET, url, data)
def set_renter(self, **data):
url = '/renter'
return self(POST, url, data)
def get_renter_contracts(self, **data):
url = '/renter/contracts'
return self(GET, url, data)
def set_renter_delete(self, siapath, **data):
url = '/renter/delete/{siapath}'.format(siapath=siapath)
return self(POST, url, data)
def get_renter_download(self, siapath, **data):
url = '/renter/download/{siapath}'.format(siapath=siapath)
return self(GET, url, data)
def get_renter_downloadasync(self, siapath, **data):
url = '/renter/downloadasync/{siapath}'.format(siapath=siapath)
return self(GET, url, data)
def get_renter_downloads(self, **data):
url = '/renter/downloads'
return self(GET, url, data)
def get_renter_files(self, **data):
url = '/renter/files'
return self(GET, url, data)
def get_renter_prices(self, **data):
url = '/renter/prices'
return self(GET, url, data)
def set_renter_rename(self, siapath, **data):
url = '/renter/rename/{siapath}'.format(siapath=siapath)
return self(POST, url, data)
def set_renter_upload(self, siapath, **data):
url = '/renter/upload/{siapath}'.format(siapath=siapath)
return self(POST, url, data)
def get_wallet(self, **data):
url = '/wallet'
return self(GET, url, data)
def set_wallet_033x(self, **data):
url = '/wallet/033x'
return self(POST, url, data)
def get_wallet_address(self, **data):
url = '/wallet/address'
return self(GET, url, data)
def get_wallet_addresses(self, **data):
url = '/wallet/addresses'
return self(GET, url, data)
def get_wallet_backup(self, **data):
url = '/wallet/backup'
return self(GET, url, data)
def set_wallet_init(self, **data):
url = '/wallet/init'
return self(POST, url, data)
def set_wallet_init_seed(self, **data):
url = '/wallet/init/seed'
return self(POST, url, data)
def set_wallet_lock(self, **data):
url = '/wallet/lock'
return self(POST, url, data)
def set_wallet_seed(self, **data):
url = '/wallet/seed'
return self(POST, url, data)
def get_wallet_seeds(self, **data):
url = '/wallet/seeds'
return self(GET, url, data)
def set_wallet_siacoins(self, **data):
url = '/wallet/siacoins'
return self(POST, url, data)
def set_wallet_siafunds(self, **data):
url = '/wallet/siafunds'
return self(POST, url, data)
def set_wallet_siagkey(self, **data):
url = '/wallet/siagkey'
return self(POST, url, data)
def set_wallet_sweep_seed(self, **data):
url = '/wallet/sweep/seed'
return self(POST, url, data)
def get_wallet_transaction(self, id, **data):
url = '/wallet/transaction/{id}'.format(id=id)
return self(GET, url, data)
def get_wallet_transactions(self, addr=None, **data):
if addr:
url = '/wallet/transactions/{addr}'.format(addr=addr)
else:
url = '/wallet/transactions'
return self(GET, url, data)
def set_wallet_unlock(self, **data):
url = '/wallet/unlock'
return self(POST, url, data)
if __name__ == '__main__':
sc = Sia()
cs = sc.get_consensus()
print(cs['height'])
# 108060
backup_made = sc.get_wallet_backup(destination=r'd:\siadwallet.dat')
print(backup_made)
# True
backup_made = sc.get_wallet_backup(destination=r'error causing input?@#$!`')
print(backup_made)
# {'message': 'error when calling /wallet/backup: destination must be an absolute path'}
print(sc.get_gateway())
# {'peers': [{'netaddress': '192.168.127.12:9981', 'version': '0.5.2', 'inbound': False, 'local': False}, {'netaddress': '172.16.31.10:9981', 'version': '1.1.2', 'inbound': False, 'local': False}, {'netaddress': '192.168.127.12:9981', 'version': '1.2.0', 'inbound': False, 'local': False}, {'netaddress': '192.168.127.12:9981', 'version': '1.2.1', 'inbound': False, 'local': False}, {'netaddress': '172.16.31.10:9981', 'version': '1.1.2', 'inbound': False, 'local': False}, {'netaddress': '172.16.58.3:9981', 'version': '1.0.0', 'inbound': False, 'local': False}, {'netaddress': '172.16.31.10:9981', 'version': '1.2.2', 'inbound': False, 'local': False}], 'netaddress': '192.168.127.12:9981'}
print(sc.set_gateway_connect('192.168.127.12:9981'))
# True
print(sc.set_gateway_disconnect('192.168.127.12:9981'))
# True
print(sc.set_gateway_disconnect('192.168.127.12:9981'))
# {'message': 'not connected to that node'}
``` |
{
"source": "jnmclarty/sympa",
"score": 2
} |
#### File: jnmclarty/sympa/setup.py
```python
import os
from setuptools import setup
def read(*paths):
"""Build a file path from *paths* and return the contents."""
with open(os.path.join(*paths), 'r') as f:
return f.read()
v = '0.1.0'
print ("Setting up sympa")
setup(
name = 'sympa',
version = v,
packages = ['sympa'],
description = 'Symbolic math for pandas',
long_description=(read('README.rst')),
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/jnmclarty/sympa',
download_url = 'https://github.com/jnmclarty/sympa/tarball/' + v,
keywords = ['symbolic', 'symbol', 'sympa', 'sympy', 'pandas', 'math'],
classifiers = ['Development Status :: 1 - Planning',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Financial and Insurance Industry',
'Intended Audience :: Healthcare Industry',
'Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics'])
``` |
{
"source": "Jn-mic/1m-pitch",
"score": 2
} |
#### File: migrations/versions/f386cd416e5a_update_the_downvote_table.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f386cd416e5a'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('downvotes', 'downvote')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('downvotes', sa.Column('downvote', sa.INTEGER(), autoincrement=False, nullable=True))
# ### end Alembic commands ###
```
#### File: alembic/autogenerate/rewriter.py
```python
from alembic import util
from alembic.operations import ops
class Rewriter(object):
"""A helper object that allows easy 'rewriting' of ops streams.
The :class:`.Rewriter` object is intended to be passed along
to the
:paramref:`.EnvironmentContext.configure.process_revision_directives`
parameter in an ``env.py`` script. Once constructed, any number
of "rewrites" functions can be associated with it, which will be given
the opportunity to modify the structure without having to have explicit
knowledge of the overall structure.
The function is passed the :class:`.MigrationContext` object and
``revision`` tuple that are passed to the :paramref:`.Environment
Context.configure.process_revision_directives` function normally,
and the third argument is an individual directive of the type
noted in the decorator. The function has the choice of returning
a single op directive, which normally can be the directive that
was actually passed, or a new directive to replace it, or a list
of zero or more directives to replace it.
.. seealso::
:ref:`autogen_rewriter` - usage example
"""
_traverse = util.Dispatcher()
_chained = None
def __init__(self):
self.dispatch = util.Dispatcher()
def chain(self, other):
"""Produce a "chain" of this :class:`.Rewriter` to another.
This allows two rewriters to operate serially on a stream,
e.g.::
writer1 = autogenerate.Rewriter()
writer2 = autogenerate.Rewriter()
@writer1.rewrites(ops.AddColumnOp)
def add_column_nullable(context, revision, op):
op.column.nullable = True
return op
@writer2.rewrites(ops.AddColumnOp)
def add_column_idx(context, revision, op):
idx_op = ops.CreateIndexOp(
'ixc', op.table_name, [op.column.name])
return [
op,
idx_op
]
writer = writer1.chain(writer2)
:param other: a :class:`.Rewriter` instance
:return: a new :class:`.Rewriter` that will run the operations
of this writer, then the "other" writer, in succession.
"""
wr = self.__class__.__new__(self.__class__)
wr.__dict__.update(self.__dict__)
wr._chained = other
return wr
def rewrites(self, operator):
"""Register a function as rewriter for a given type.
The function should receive three arguments, which are
the :class:`.MigrationContext`, a ``revision`` tuple, and
an op directive of the type indicated. E.g.::
@writer1.rewrites(ops.AddColumnOp)
def add_column_nullable(context, revision, op):
op.column.nullable = True
return op
"""
return self.dispatch.dispatch_for(operator)
def _rewrite(self, context, revision, directive):
try:
_rewriter = self.dispatch.dispatch(directive)
except ValueError:
_rewriter = None
yield directive
else:
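            # Directives already tagged by this rewriter in _mutations are
            # passed through unchanged so a rewrite is never applied twice.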
if self in directive._mutations:
yield directive
else:
for r_directive in util.to_list(
_rewriter(context, revision, directive)
):
r_directive._mutations = r_directive._mutations.union(
[self]
)
yield r_directive
def __call__(self, context, revision, directives):
self.process_revision_directives(context, revision, directives)
if self._chained:
self._chained(context, revision, directives)
@_traverse.dispatch_for(ops.MigrationScript)
def _traverse_script(self, context, revision, directive):
upgrade_ops_list = []
for upgrade_ops in directive.upgrade_ops_list:
ret = self._traverse_for(context, revision, upgrade_ops)
if len(ret) != 1:
raise ValueError(
"Can only return single object for UpgradeOps traverse"
)
upgrade_ops_list.append(ret[0])
directive.upgrade_ops = upgrade_ops_list
downgrade_ops_list = []
for downgrade_ops in directive.downgrade_ops_list:
ret = self._traverse_for(context, revision, downgrade_ops)
if len(ret) != 1:
raise ValueError(
"Can only return single object for DowngradeOps traverse"
)
downgrade_ops_list.append(ret[0])
directive.downgrade_ops = downgrade_ops_list
@_traverse.dispatch_for(ops.OpContainer)
def _traverse_op_container(self, context, revision, directive):
self._traverse_list(context, revision, directive.ops)
@_traverse.dispatch_for(ops.MigrateOperation)
def _traverse_any_directive(self, context, revision, directive):
pass
def _traverse_for(self, context, revision, directive):
directives = list(self._rewrite(context, revision, directive))
for directive in directives:
traverser = self._traverse.dispatch(directive)
traverser(self, context, revision, directive)
return directives
def _traverse_list(self, context, revision, directives):
dest = []
for directive in directives:
dest.extend(self._traverse_for(context, revision, directive))
directives[:] = dest
def process_revision_directives(self, context, revision, directives):
self._traverse_list(context, revision, directives)
```
#### File: alembic/operations/batch.py
```python
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy import ForeignKeyConstraint
from sqlalchemy import Index
from sqlalchemy import MetaData
from sqlalchemy import PrimaryKeyConstraint
from sqlalchemy import schema as sql_schema
from sqlalchemy import Table
from sqlalchemy import types as sqltypes
from sqlalchemy.events import SchemaEventTarget
from sqlalchemy.util import OrderedDict
from sqlalchemy.util import topological
from ..util import exc
from ..util.sqla_compat import _columns_for_constraint
from ..util.sqla_compat import _copy
from ..util.sqla_compat import _ensure_scope_for_ddl
from ..util.sqla_compat import _fk_is_self_referential
from ..util.sqla_compat import _insert_inline
from ..util.sqla_compat import _is_type_bound
from ..util.sqla_compat import _remove_column_from_collection
from ..util.sqla_compat import _select
class BatchOperationsImpl(object):
def __init__(
self,
operations,
table_name,
schema,
recreate,
copy_from,
table_args,
table_kwargs,
reflect_args,
reflect_kwargs,
naming_convention,
partial_reordering,
):
self.operations = operations
self.table_name = table_name
self.schema = schema
if recreate not in ("auto", "always", "never"):
raise ValueError(
"recreate may be one of 'auto', 'always', or 'never'."
)
self.recreate = recreate
self.copy_from = copy_from
self.table_args = table_args
self.table_kwargs = dict(table_kwargs)
self.reflect_args = reflect_args
self.reflect_kwargs = dict(reflect_kwargs)
self.reflect_kwargs.setdefault(
"listeners", list(self.reflect_kwargs.get("listeners", ()))
)
self.reflect_kwargs["listeners"].append(
("column_reflect", operations.impl.autogen_column_reflect)
)
self.naming_convention = naming_convention
self.partial_reordering = partial_reordering
self.batch = []
@property
def dialect(self):
return self.operations.impl.dialect
@property
def impl(self):
return self.operations.impl
def _should_recreate(self):
if self.recreate == "auto":
return self.operations.impl.requires_recreate_in_batch(self)
elif self.recreate == "always":
return True
else:
return False
def flush(self):
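        # Replay the queued operations either as plain ALTER statements or,
        # when a recreate is required (e.g. on SQLite), via ApplyBatchImpl,
        # which rebuilds the whole table.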
should_recreate = self._should_recreate()
with _ensure_scope_for_ddl(self.impl.connection):
if not should_recreate:
for opname, arg, kw in self.batch:
fn = getattr(self.operations.impl, opname)
fn(*arg, **kw)
else:
if self.naming_convention:
m1 = MetaData(naming_convention=self.naming_convention)
else:
m1 = MetaData()
if self.copy_from is not None:
existing_table = self.copy_from
reflected = False
else:
existing_table = Table(
self.table_name,
m1,
schema=self.schema,
autoload_with=self.operations.get_bind(),
*self.reflect_args,
**self.reflect_kwargs
)
reflected = True
batch_impl = ApplyBatchImpl(
self.impl,
existing_table,
self.table_args,
self.table_kwargs,
reflected,
partial_reordering=self.partial_reordering,
)
for opname, arg, kw in self.batch:
fn = getattr(batch_impl, opname)
fn(*arg, **kw)
batch_impl._create(self.impl)
def alter_column(self, *arg, **kw):
self.batch.append(("alter_column", arg, kw))
def add_column(self, *arg, **kw):
if (
"insert_before" in kw or "insert_after" in kw
) and not self._should_recreate():
raise exc.CommandError(
"Can't specify insert_before or insert_after when using "
"ALTER; please specify recreate='always'"
)
self.batch.append(("add_column", arg, kw))
def drop_column(self, *arg, **kw):
self.batch.append(("drop_column", arg, kw))
def add_constraint(self, const):
self.batch.append(("add_constraint", (const,), {}))
def drop_constraint(self, const):
self.batch.append(("drop_constraint", (const,), {}))
def rename_table(self, *arg, **kw):
self.batch.append(("rename_table", arg, kw))
def create_index(self, idx):
self.batch.append(("create_index", (idx,), {}))
def drop_index(self, idx):
self.batch.append(("drop_index", (idx,), {}))
def create_table_comment(self, table):
self.batch.append(("create_table_comment", (table,), {}))
def drop_table_comment(self, table):
self.batch.append(("drop_table_comment", (table,), {}))
def create_table(self, table):
raise NotImplementedError("Can't create table in batch mode")
def drop_table(self, table):
raise NotImplementedError("Can't drop table in batch mode")
def create_column_comment(self, column):
self.batch.append(("create_column_comment", (column,), {}))
class ApplyBatchImpl(object):
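    # Implements the "move and copy" recreate workflow: build a temporary
    # table named _alembic_tmp_<name>, copy the rows across, drop the
    # original table and rename the temporary one back (see _calc_temp_name()
    # and _create() below).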
def __init__(
self,
impl,
table,
table_args,
table_kwargs,
reflected,
partial_reordering=(),
):
self.impl = impl
self.table = table # this is a Table object
self.table_args = table_args
self.table_kwargs = table_kwargs
self.temp_table_name = self._calc_temp_name(table.name)
self.new_table = None
self.partial_reordering = partial_reordering # tuple of tuples
self.add_col_ordering = () # tuple of tuples
self.column_transfers = OrderedDict(
(c.name, {"expr": c}) for c in self.table.c
)
self.existing_ordering = list(self.column_transfers)
self.reflected = reflected
self._grab_table_elements()
@classmethod
def _calc_temp_name(cls, tablename):
return ("_alembic_tmp_%s" % tablename)[0:50]
def _grab_table_elements(self):
schema = self.table.schema
self.columns = OrderedDict()
for c in self.table.c:
c_copy = _copy(c, schema=schema)
c_copy.unique = c_copy.index = False
# ensure that the type object was copied,
# as we may need to modify it in-place
if isinstance(c.type, SchemaEventTarget):
assert c_copy.type is not c.type
self.columns[c.name] = c_copy
self.named_constraints = {}
self.unnamed_constraints = []
self.col_named_constraints = {}
self.indexes = {}
self.new_indexes = {}
for const in self.table.constraints:
if _is_type_bound(const):
continue
elif self.reflected and isinstance(const, CheckConstraint):
# TODO: we are skipping reflected CheckConstraint because
# we have no way to determine _is_type_bound() for these.
pass
elif const.name:
self.named_constraints[const.name] = const
else:
self.unnamed_constraints.append(const)
if not self.reflected:
for col in self.table.c:
for const in col.constraints:
if const.name:
self.col_named_constraints[const.name] = (col, const)
for idx in self.table.indexes:
self.indexes[idx.name] = idx
for k in self.table.kwargs:
self.table_kwargs.setdefault(k, self.table.kwargs[k])
def _adjust_self_columns_for_partial_reordering(self):
pairs = set()
col_by_idx = list(self.columns)
if self.partial_reordering:
for tuple_ in self.partial_reordering:
for index, elem in enumerate(tuple_):
if index > 0:
pairs.add((tuple_[index - 1], elem))
else:
for index, elem in enumerate(self.existing_ordering):
if index > 0:
pairs.add((col_by_idx[index - 1], elem))
pairs.update(self.add_col_ordering)
# this can happen if some columns were dropped and not removed
# from existing_ordering. this should be prevented already, but
# conservatively making sure this didn't happen
pairs = [p for p in pairs if p[0] != p[1]]
sorted_ = list(
topological.sort(pairs, col_by_idx, deterministic_order=True)
)
self.columns = OrderedDict((k, self.columns[k]) for k in sorted_)
self.column_transfers = OrderedDict(
(k, self.column_transfers[k]) for k in sorted_
)
def _transfer_elements_to_new_table(self):
assert self.new_table is None, "Can only create new table once"
m = MetaData()
schema = self.table.schema
if self.partial_reordering or self.add_col_ordering:
self._adjust_self_columns_for_partial_reordering()
self.new_table = new_table = Table(
self.temp_table_name,
m,
*(list(self.columns.values()) + list(self.table_args)),
schema=schema,
**self.table_kwargs
)
for const in (
list(self.named_constraints.values()) + self.unnamed_constraints
):
const_columns = set(
[c.key for c in _columns_for_constraint(const)]
)
if not const_columns.issubset(self.column_transfers):
continue
if isinstance(const, ForeignKeyConstraint):
if _fk_is_self_referential(const):
# for self-referential constraint, refer to the
# *original* table name, and not _alembic_batch_temp.
# This is consistent with how we're handling
# FK constraints from other tables; we assume SQLite
# no foreign keys just keeps the names unchanged, so
# when we rename back, they match again.
const_copy = _copy(
const, schema=schema, target_table=self.table
)
else:
# "target_table" for ForeignKeyConstraint.copy() is
# only used if the FK is detected as being
# self-referential, which we are handling above.
const_copy = _copy(const, schema=schema)
else:
const_copy = _copy(
const, schema=schema, target_table=new_table
)
if isinstance(const, ForeignKeyConstraint):
self._setup_referent(m, const)
new_table.append_constraint(const_copy)
def _gather_indexes_from_both_tables(self):
idx = []
idx.extend(self.indexes.values())
for index in self.new_indexes.values():
idx.append(
Index(
index.name,
unique=index.unique,
*[self.new_table.c[col] for col in index.columns.keys()],
**index.kwargs
)
)
return idx
def _setup_referent(self, metadata, constraint):
spec = constraint.elements[0]._get_colspec()
parts = spec.split(".")
tname = parts[-2]
if len(parts) == 3:
referent_schema = parts[0]
else:
referent_schema = None
if tname != self.temp_table_name:
key = sql_schema._get_table_key(tname, referent_schema)
if key in metadata.tables:
t = metadata.tables[key]
for elem in constraint.elements:
colname = elem._get_colspec().split(".")[-1]
if colname not in t.c:
t.append_column(Column(colname, sqltypes.NULLTYPE))
else:
Table(
tname,
metadata,
*[
Column(n, sqltypes.NULLTYPE)
for n in [
elem._get_colspec().split(".")[-1]
for elem in constraint.elements
]
],
schema=referent_schema
)
def _create(self, op_impl):
self._transfer_elements_to_new_table()
op_impl.prep_table_for_batch(self, self.table)
op_impl.create_table(self.new_table)
try:
op_impl._exec(
_insert_inline(self.new_table).from_select(
list(
k
for k, transfer in self.column_transfers.items()
if "expr" in transfer
),
_select(
*[
transfer["expr"]
for transfer in self.column_transfers.values()
if "expr" in transfer
]
),
)
)
op_impl.drop_table(self.table)
except:
op_impl.drop_table(self.new_table)
raise
else:
op_impl.rename_table(
self.temp_table_name, self.table.name, schema=self.table.schema
)
self.new_table.name = self.table.name
try:
for idx in self._gather_indexes_from_both_tables():
op_impl.create_index(idx)
finally:
self.new_table.name = self.temp_table_name
def alter_column(
self,
table_name,
column_name,
nullable=None,
server_default=False,
name=None,
type_=None,
autoincrement=None,
comment=False,
**kw
):
existing = self.columns[column_name]
existing_transfer = self.column_transfers[column_name]
if name is not None and name != column_name:
# note that we don't change '.key' - we keep referring
# to the renamed column by its old key in _create(). neat!
existing.name = name
existing_transfer["name"] = name
if type_ is not None:
type_ = sqltypes.to_instance(type_)
# old type is being discarded so turn off eventing
# rules. Alternatively we can
# erase the events set up by this type, but this is simpler.
# we also ignore the drop_constraint that will come here from
# Operations.implementation_for(alter_column)
if isinstance(existing.type, SchemaEventTarget):
existing.type._create_events = (
existing.type.create_constraint
) = False
self.impl.cast_for_batch_migrate(
existing, existing_transfer, type_
)
existing.type = type_
# we *dont* however set events for the new type, because
# alter_column is invoked from
# Operations.implementation_for(alter_column) which already
# will emit an add_constraint()
if nullable is not None:
existing.nullable = nullable
if server_default is not False:
if server_default is None:
existing.server_default = None
else:
sql_schema.DefaultClause(server_default)._set_parent(existing)
if autoincrement is not None:
existing.autoincrement = bool(autoincrement)
if comment is not False:
existing.comment = comment
def _setup_dependencies_for_add_column(
self, colname, insert_before, insert_after
):
index_cols = self.existing_ordering
col_indexes = {name: i for i, name in enumerate(index_cols)}
if not self.partial_reordering:
if insert_after:
if not insert_before:
if insert_after in col_indexes:
# insert after an existing column
idx = col_indexes[insert_after] + 1
if idx < len(index_cols):
insert_before = index_cols[idx]
else:
# insert after a column that is also new
insert_before = dict(self.add_col_ordering)[
insert_after
]
if insert_before:
if not insert_after:
if insert_before in col_indexes:
# insert before an existing column
idx = col_indexes[insert_before] - 1
if idx >= 0:
insert_after = index_cols[idx]
else:
# insert before a column that is also new
insert_after = dict(
(b, a) for a, b in self.add_col_ordering
)[insert_before]
if insert_before:
self.add_col_ordering += ((colname, insert_before),)
if insert_after:
self.add_col_ordering += ((insert_after, colname),)
if (
not self.partial_reordering
and not insert_before
and not insert_after
and col_indexes
):
self.add_col_ordering += ((index_cols[-1], colname),)
def add_column(
self, table_name, column, insert_before=None, insert_after=None, **kw
):
self._setup_dependencies_for_add_column(
column.name, insert_before, insert_after
)
# we copy the column because operations.add_column()
# gives us a Column that is part of a Table already.
self.columns[column.name] = _copy(column, schema=self.table.schema)
self.column_transfers[column.name] = {}
def drop_column(self, table_name, column, **kw):
if column.name in self.table.primary_key.columns:
_remove_column_from_collection(
self.table.primary_key.columns, column
)
del self.columns[column.name]
del self.column_transfers[column.name]
self.existing_ordering.remove(column.name)
def create_column_comment(self, column):
"""the batch table creation function will issue create_column_comment
on the real "impl" as part of the create table process.
That is, the Column object will have the comment on it already,
so when it is received by add_column() it will be a normal part of
the CREATE TABLE and doesn't need an extra step here.
"""
def create_table_comment(self, table):
"""the batch table creation function will issue create_table_comment
on the real "impl" as part of the create table process.
"""
def drop_table_comment(self, table):
"""the batch table creation function will issue drop_table_comment
on the real "impl" as part of the create table process.
"""
def add_constraint(self, const):
if not const.name:
raise ValueError("Constraint must have a name")
if isinstance(const, sql_schema.PrimaryKeyConstraint):
if self.table.primary_key in self.unnamed_constraints:
self.unnamed_constraints.remove(self.table.primary_key)
self.named_constraints[const.name] = const
def drop_constraint(self, const):
if not const.name:
raise ValueError("Constraint must have a name")
try:
if const.name in self.col_named_constraints:
col, const = self.col_named_constraints.pop(const.name)
for col_const in list(self.columns[col.name].constraints):
if col_const.name == const.name:
self.columns[col.name].constraints.remove(col_const)
else:
const = self.named_constraints.pop(const.name)
except KeyError:
if _is_type_bound(const):
# type-bound constraints are only included in the new
# table via their type object in any case, so ignore the
# drop_constraint() that comes here via the
# Operations.implementation_for(alter_column)
return
raise ValueError("No such constraint: '%s'" % const.name)
else:
if isinstance(const, PrimaryKeyConstraint):
for col in const.columns:
self.columns[col.name].primary_key = False
def create_index(self, idx):
self.new_indexes[idx.name] = idx
def drop_index(self, idx):
try:
del self.indexes[idx.name]
except KeyError:
raise ValueError("No such index: '%s'" % idx.name)
def rename_table(self, *arg, **kw):
raise NotImplementedError("TODO")
``` |
{
"source": "Jn-mic/blog",
"score": 2
} |
#### File: blog/app/__init__.py
```python
from flask import Flask
import flask
from config import config_options
from flask_bootstrap import Bootstrap
from flask_login import LoginManager
from flask_uploads import UploadSet,configure_uploads,IMAGES
from flask_mail import Mail
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
db = SQLAlchemy(app)
login_manager = LoginManager()
bootstrap = Bootstrap(app)
mail = Mail(app)
login_manager.login_view = 'auth.login'
login_manager.session_protection = 'strong'
login_manager.login_message_category = 'info'
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config_options[config_name])
#initialize extensions
bootstrap.init_app(app)
db.init_app(app)
mail.init_app(app)
login_manager.init_app(app)
# configure UploadSet
from .auth import auth as auth_blueprint
from .main import main as main_blueprint
app.register_blueprint(auth_blueprint)
app.register_blueprint(main_blueprint)
return app
```
#### File: blog/test/test_blog.py
```python
from app.models import User,Post, Comment,Clap
from app import db
# def setUp(self):
# self.user_Jack = User(username = 'Jack',password = '<PASSWORD>', email = '<EMAIL>')
def tearDown(self):
Post.query.delete()
User.query.delete()
Comment.query.delete()
Clap.query.delete()
```
#### File: site-packages/dscmailer/mailer.py
```python
from decouple import config
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
def send_mail(sender,receiver,subject,html, api_key):
"""
    Use this function to set up the email sender, the recipient, the email subject, the HTML content of the email, and the `api_key` from SendGrid.
"""
message = Mail(
from_email=sender,
to_emails=receiver,
subject=subject,
html_content=html
)
try:
API_KEY = api_key
sg = SendGridAPIClient(API_KEY)
response = sg.send(message)
print('Email sent successfully.')
except Exception as e:
print(f'Error while sending email. {e}')
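# Illustrative call (placeholder values, not from the original source):
# send_mail('sender@example.com', 'recipient@example.com', 'Hello',
#           '<strong>Hi there</strong>', config('SENDGRID_API_KEY'))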
``` |
{
"source": "Jn-mic/Photo-Gallery",
"score": 3
} |
#### File: Photo-Gallery/photo/tests.py
```python
from django.test import TestCase
from .models import Location, Category,Image
def test_delete_image(self):
'''
Test the deletion of an instance
'''
self.image.save_image()
self.image.delete_image()
        self.assertTrue(len(Image.objects.all()) == 0)
def test_get_image_by_id(self):
self.image.save_image()
image_id= self.image.pk
image_by_id = Image.get_image_by_id(image_id)
self.assertEqual(self.image,image_by_id)
def test_search_image(self):
self.image.save_image()
name = self.image.image_name
found_image = Image.search_image(name)
self.assertTrue(len(found_image) == 1)
def test_filter_by_location(self):
self.image.save_image()
location = self.image.location
found_images = Image.filter_by_location(location)
self.assertEqual(self.image,found_images)
def tearDown(self):
Image.objects.all().delete()
Location.objects.all().delete()
Category.objects.all().delete()
```
#### File: Photo-Gallery/photo/views.py
```python
from photo.models import Image
from django.http.response import Http404, HttpResponse
from django.shortcuts import render, redirect
from django.conf import settings
from . import views
from django.conf.urls.static import static
from django.http import HttpResponse
import datetime as dt
from .models import Location, Image, Category
# Create your views here.
# def photo_of_day(request):
# date=dt.date.today()
# images=Image.objects.all()
# return render(request,'all-Photos/today-photos.html',{'date':date, 'images':images})
# view function to present photos from the past days
def photo_list(request):
images = Image.objects.all()
context = {
'images':images
}
return render(request,'all-Photos/today-photos.html',context)
def past_photo(request,past_date):
    try:
        # Convert date from the string url
        date = dt.datetime.strptime(past_date,'%Y-%m-%d').date()
    except ValueError:
        # Raise 404 error when ValueError is thrown
        raise Http404()
    if date == dt.date.today():
        # photo_of_day is commented out above, so fall back to photo_list
        return redirect(photo_list)
    # Assumption: no date field is exposed on Image here, so pass all images
    image = Image.objects.all()
    return render(request,'all-Photos/past-photo.html',{'date':date,'image':image})
def photo(request,photo_id):
try:
photo= Image.objects.get(id = photo_id)
    except Image.DoesNotExist:
raise Http404()
return render(request,"all-Photos/photo.html", {"photo":photo})
def search_results(request):
if 'photo' in request.GET and request.GET["photo"]:
search_term = request.GET.get("photo")
searched_photo = Image.search_by_category(search_term)
message = f"{search_term}"
return render(request, 'all-Photos/search.html',{"message":message,"photos": searched_photo})
else:
message = "You haven't searched for any term"
return render(request, 'all-Photos/search.html',{"message":message})
``` |
{
"source": "Jn-mic/Projects-Portfolio",
"score": 3
} |
#### File: cloudinary_cli/core/config.py
```python
import cloudinary
from click import command, option, echo, BadParameter
from cloudinary_cli.defaults import logger
from cloudinary_cli.utils.config_utils import load_config, verify_cloudinary_url, update_config, remove_config_keys, \
show_cloudinary_config
@command("config", help="Display the current configuration, and manage additional configurations.")
@option("-n", "--new", help="""\b Create and name a configuration from a Cloudinary account environment variable.
e.g. cld config -n <NAME> <CLOUDINARY_URL>""", nargs=2)
@option("-ls", "--ls", help="List all saved configurations.", is_flag=True)
@option("-s", "--show", help="Show details of a specified configuration.", nargs=1)
@option("-rm", "--rm", help="Delete a specified configuration.", nargs=1)
@option("-url", "--from_url",
help="Create a configuration from a Cloudinary account environment variable. "
"The configuration name is the cloud name.",
nargs=1)
def config(new, ls, show, rm, from_url):
if new or from_url:
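        # -n provides (name, url); -url provides only the url, in which case
        # the name falls back to the cloud name parsed from CLOUDINARY_URL.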
config_name, cloudinary_url = new or [None, from_url]
if not verify_cloudinary_url(cloudinary_url):
return
config_name = config_name or cloudinary.config().cloud_name
update_config({config_name: cloudinary_url})
logger.info("Config '{}' saved!".format(config_name))
logger.info("Example usage: cld -C {} <command>".format(config_name))
elif rm:
if remove_config_keys(rm):
logger.warn(f"Configuration '{rm}' not found.")
else:
logger.info(f"Configuration '{rm}' deleted.")
elif ls:
echo("\n".join(load_config().keys()))
elif show:
curr_config = load_config()
if show not in curr_config:
raise BadParameter(f"Configuration {show} does not exist, use -ls to list available configurations.")
config_obj = cloudinary.Config()
# noinspection PyProtectedMember
config_obj._setup_from_parsed_url(config_obj._parse_cloudinary_url(load_config()[show]))
show_cloudinary_config(config_obj)
else:
show_cloudinary_config(cloudinary.config())
``` |
{
"source": "jnm/kobocat-branches-archive-20200507",
"score": 2
} |
#### File: apps/api/urls.py
```python
from django.conf.urls import url
from rest_framework import routers
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.urlpatterns import format_suffix_patterns
from rest_framework.views import APIView
from onadata.apps.api.viewsets.charts_viewset import ChartsViewSet
from onadata.apps.api.viewsets.connect_viewset import ConnectViewSet
from onadata.apps.api.viewsets.data_viewset import DataViewSet
from onadata.apps.api.viewsets.metadata_viewset import MetaDataViewSet
from onadata.apps.api.viewsets.note_viewset import NoteViewSet
from onadata.apps.api.viewsets.organization_profile_viewset import\
OrganizationProfileViewSet
from onadata.apps.api.viewsets.project_viewset import ProjectViewSet
from onadata.apps.api.viewsets.stats_viewset import StatsViewSet
from onadata.apps.api.viewsets.team_viewset import TeamViewSet
from onadata.apps.api.viewsets.xform_viewset import XFormViewSet
from onadata.apps.api.viewsets.user_profile_viewset import UserProfileViewSet
from onadata.apps.api.viewsets.user_viewset import UserViewSet
from onadata.apps.api.viewsets.submissionstats_viewset import\
SubmissionStatsViewSet
from onadata.apps.api.viewsets.attachment_viewset import AttachmentViewSet
from onadata.apps.api.viewsets.xform_list_api import XFormListApi
from onadata.apps.api.viewsets.xform_submission_api import XFormSubmissionApi
from onadata.apps.api.viewsets.briefcase_api import BriefcaseApi
class MultiLookupRouter(routers.DefaultRouter):
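    # Extends DRF's DefaultRouter for viewsets that declare multiple
    # lookup_fields, generating composite URL patterns (several lookup
    # segments in one path) alongside the standard routes.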
def __init__(self, *args, **kwargs):
super(MultiLookupRouter, self).__init__(*args, **kwargs)
self.lookups_routes = []
self.lookups_routes.append(routers.Route(
url=r'^{prefix}/{lookups}{trailing_slash}$',
mapping={
'get': 'retrieve',
'put': 'update',
'patch': 'partial_update',
'delete': 'destroy'
},
name='{basename}-detail',
initkwargs={'suffix': 'Instance'}
))
self.lookups_routes.append(self.make_routes('lookup'))
self.lookups_routes.append(self.make_routes('lookups'))
# Dynamically generated routes.
# Generated using @action or @link decorators on methods of the viewset
self.lookups_routes.append(routers.Route(
url=[
r'^{prefix}/{lookups}/{methodname}{trailing_slash}$',
r'^{prefix}/{lookups}/{methodname}/{extra}{trailing_slash}$'],
mapping={
'{httpmethod}': '{methodname}',
},
name='{basename}-{methodnamehyphen}',
initkwargs={}
))
@staticmethod
def make_routes(template_text):
return routers.Route(
url=r'^{prefix}/{%s}{trailing_slash}$' % template_text,
mapping={
'get': 'list',
'post': 'create'
},
name='{basename}-list',
initkwargs={'suffix': 'List'})
def get_extra_lookup_regexes(self, route):
ret = []
base_regex = '(?P<{lookup_field}>[^/]+)'
if 'extra_lookup_fields' in route.initkwargs:
for lookup_field in route.initkwargs['extra_lookup_fields']:
ret.append(base_regex.format(lookup_field=lookup_field))
return '/'.join(ret)
def get_lookup_regexes(self, viewset):
ret = []
lookup_fields = getattr(viewset, 'lookup_fields', None)
if lookup_fields:
for i in range(1, len(lookup_fields)):
tmp = []
for lookup_field in lookup_fields[:i + 1]:
if lookup_field == lookup_fields[i]:
base_regex = '(?P<{lookup_field}>[^/.]+)'
else:
base_regex = '(?P<{lookup_field}>[^/]+)'
tmp.append(base_regex.format(lookup_field=lookup_field))
ret.append(tmp)
return ret
def get_lookup_routes(self, viewset):
ret = [self.routes[0]]
# Determine any `@action` or `@link` decorated methods on the viewset
dynamic_routes = []
for methodname in dir(viewset):
attr = getattr(viewset, methodname)
httpmethods = getattr(attr, 'bind_to_methods', None)
if httpmethods:
httpmethods = [method.lower() for method in httpmethods]
dynamic_routes.append((httpmethods, methodname))
for route in self.lookups_routes:
if route.mapping == {'{httpmethod}': '{methodname}'}:
# Dynamic routes (@link or @action decorator)
for httpmethods, methodname in dynamic_routes:
initkwargs = route.initkwargs.copy()
initkwargs.update(getattr(viewset, methodname).kwargs)
mapping = dict(
(httpmethod, methodname) for httpmethod in httpmethods)
name = routers.replace_methodname(route.name, methodname)
if 'extra_lookup_fields' in initkwargs:
uri = route.url[1]
uri = routers.replace_methodname(uri, methodname)
ret.append(routers.Route(
url=uri, mapping=mapping, name='%s-extra' % name,
initkwargs=initkwargs,
))
uri = routers.replace_methodname(route.url[0], methodname)
ret.append(routers.Route(
url=uri, mapping=mapping, name=name,
initkwargs=initkwargs,
))
else:
# Standard route
ret.append(route)
return ret
def get_routes(self, viewset):
ret = []
lookup_fields = getattr(viewset, 'lookup_fields', None)
if lookup_fields:
ret = self.get_lookup_routes(viewset)
else:
ret = super(MultiLookupRouter, self).get_routes(viewset)
return ret
def get_api_root_view(self):
"""
Return a view to use as the API root.
"""
api_root_dict = {}
list_name = self.routes[0].name
for prefix, viewset, basename in self.registry:
api_root_dict[prefix] = list_name.format(basename=basename)
class OnaApi(APIView):
"""
## KoBo JSON Rest API endpoints:
### Data
* [/api/v1/charts](/api/v1/charts) - List, Retrieve Charts of collected data
* [/api/v1/data](/api/v1/data) - List, Retrieve submission data
* [/api/v1/stats](/api/v1/stats) - Summary statistics
### Forms
* [/api/v1/forms](/api/v1/forms) - List, Retrieve form information
* [/api/v1/media](/api/v1/media) - List, Retrieve media attachments
* [/api/v1/metadata](/api/v1/metadata) - List, Retrieve form metadata
* [/api/v1/projects](/api/v1/projects) - List, Retrieve, Create,
Update organization projects, forms
* [/api/v1/submissions](/api/v1/submissions) - Submit XForms to a form
### Users and Organizations
* [/api/v1/orgs](/api/v1/orgs) - List, Retrieve, Create,
Update organization and organization info
* [/api/v1/profiles](/api/v1/profiles) - List, Create, Update user information
* [/api/v1/teams](/api/v1/teams) - List, Retrieve, Create, Update teams
* [/api/v1/user](/api/v1/user) - Return authenticated user profile info
* [/api/v1/users](/api/v1/users) - List, Retrieve user data
## Status Codes
* **200** - Successful [`GET`, `PATCH`, `PUT`]
* **201** - Resource successfully created [`POST`]
* **204** - Resource successfully deleted [`DELETE`]
* **403** - Permission denied to resource
* **404** - Resource was not found
## Authentication
KoBo JSON API endpoints support both Basic authentication
and API Token Authentication through the `Authorization` header.
### Basic Authentication
Example using curl:
curl -X GET https://example.com/api/v1/ -u username:password
### Token Authentication
Example using curl:
curl -X GET https://example.com/api/v1/ -H "Authorization: Token TOKEN_KEY"
### KoBo Tagging API
* [Filter form list by tags.](
/api/v1/forms#get-list-of-forms-with-specific-tags)
* [List Tags for a specific form.](
/api/v1/forms#get-list-of-tags-for-a-specific-form)
* [Tag Forms.](/api/v1/forms#tag-forms)
* [Delete a specific tag.](/api/v1/forms#delete-a-specific-tag)
* [List form data by tag.](
/api/v1/data#query-submitted-data-of-a-specific-form-using-tags)
* [Tag a specific submission](/api/v1/data#tag-a-submission-data-point)
## Using Oauth2 with the KoBo API
You can learn more about oauth2 [here](
http://tools.ietf.org/html/rfc6749).
### 1. Register your client application with KoBo - [register](\
/o/applications/register/)
- `name` - name of your application
- `client_type` - Client Type: select confidential
- `authorization_grant_type` - Authorization grant type: Authorization code
- `redirect_uri` - Redirect urls: redirection endpoint
Keep note of the `client_id` and the `client_secret`, it is required when
requesting for an `access_token`.
### 2. Authorize client application.
The authorization url is of the form:
<pre class="prettyprint">
<b>GET</b> /o/authorize?client_id=XXXXXX&response_type=code&state=abc</pre>
example:
http://localhost:8000/o/authorize?client_id=e8&response_type=code&state=xyz
Note: Providing the url to any user will prompt for a password and
request read and write permission for the application whose `client_id` is
specified.
Where:
- `client_id` - is the client application id - ensure its urlencoded
- `response_type` - should be code
- `state` - a random state string that you client application will get when
redirection happens
What happens:
1. a login page is presented; the username used to log in determines the account
that provides access.
2. redirection to the client application occurs, the url is of the form:
> REDIRECT_URI/?state=abc&code=YYYYYYYYY
example redirect uri
http://localhost:30000/?state=xyz&code=SWWk2PN6NdCwfpqiDiPRcLmvkw2uWd
- `code` - is the code to use to request for `access_token`
- `state` - same state string used during authorization request
Your client application should use the `code` to request for an access_token.
### 3. Request for access token.
You need to make a `POST` request with `grant_type`, `code`, `client_id` and
`redirect_uri` as `POST` payload params. You should authenticate the request
with `Basic Authentication` using your `client_id` and `client_secret` as
`username:password` pair.
Request:
<pre class="prettyprint">
<b>POST</b>/o/token</pre>
Payload:
grant_type=authorization_code&code=YYYYYYYYY&client_id=XXXXXX&
redirect_uri=http://redirect/uri/path
curl example:
curl -X POST -d "grant_type=authorization_code&
code=PSwrMilnJESZVFfFsyEmEukNv0sGZ8&
client_id=e8x4zzJJIyOikDqjPcsCJrmnU22QbpfHQo4HhRnv&
redirect_uri=http://localhost:30000" "http://localhost:8000/o/token/"
--user "e8:xo7i4LNpMj"
Response:
{
"access_token": "<KEY>",
"token_type": "Bearer", "expires_in": 36000,
"refresh_token": "<PASSWORD>",
"scope": "read write groups"
}
Where:
- `access_token` - access token - expires
- `refresh_token` - token to use to request a new `access_token` in case it has
expired.
Now that you have an `access_token` you can make API calls.
### 4. Accessing the KoBo API using the `access_token`.
Example using curl:
curl -X GET https://example.com/api/v1
-H "Authorization: Bearer ACCESS_TOKEN"
"""
_ignore_model_permissions = True
def get(self, request, format=None):
ret = {}
for key, url_name in api_root_dict.items():
ret[key] = reverse(
url_name, request=request, format=format)
return Response(ret)
return OnaApi.as_view()
def get_urls(self):
ret = []
if self.include_root_view:
root_url = url(r'^$', self.get_api_root_view(),
name=self.root_view_name)
ret.append(root_url)
for prefix, viewset, basename in self.registry:
lookup = self.get_lookup_regex(viewset)
lookup_list = self.get_lookup_regexes(viewset)
if lookup_list:
# lookup = lookups[0]
lookup_list = [u'/'.join(k) for k in lookup_list]
else:
lookup_list = [u'']
routes = self.get_routes(viewset)
for route in routes:
mapping = self.get_method_map(viewset, route.mapping)
if not mapping:
continue
for lookups in lookup_list:
regex = route.url.format(
prefix=prefix,
lookup=lookup,
lookups=lookups,
trailing_slash=self.trailing_slash,
extra=self.get_extra_lookup_regexes(route)
)
view = viewset.as_view(mapping, **route.initkwargs)
name = route.name.format(basename=basename)
ret.append(url(regex, view, name=name))
if self.include_format_suffixes:
ret = format_suffix_patterns(ret, allowed=['[a-z0-9]+'])
return ret
class MultiLookupRouterWithPatchList(MultiLookupRouter):
"""
    This class extends MultiLookupRouter only to allow the PATCH method on the list endpoint
"""
@staticmethod
def make_routes(template_text):
return routers.Route(
url=r'^{prefix}/{%s}{trailing_slash}$' % template_text,
mapping={
'get': 'list',
'post': 'create',
'patch': 'bulk_validation_status',
'delete': 'bulk_delete'
},
name='{basename}-list',
initkwargs={'suffix': 'List'})
router = MultiLookupRouter(trailing_slash=False)
router.register(r'users', UserViewSet)
router.register(r'user', ConnectViewSet)
router.register(r'profiles', UserProfileViewSet)
router.register(r'orgs', OrganizationProfileViewSet)
router.register(r'forms', XFormViewSet)
router.register(r'projects', ProjectViewSet)
router.register(r'teams', TeamViewSet)
router.register(r'notes', NoteViewSet)
router.register(r'stats', StatsViewSet, base_name='stats')
router.register(r'stats/submissions', SubmissionStatsViewSet,
base_name='submissionstats')
router.register(r'charts', ChartsViewSet, base_name='chart')
router.register(r'metadata', MetaDataViewSet, base_name='metadata')
router.register(r'media', AttachmentViewSet, base_name='attachment')
router.register(r'formlist', XFormListApi, base_name='formlist')
router.register(r'submissions', XFormSubmissionApi, base_name='submissions')
router.register(r'briefcase', BriefcaseApi, base_name='briefcase')
router_with_patch_list = MultiLookupRouterWithPatchList(trailing_slash=False)
router_with_patch_list.register(r'data', DataViewSet, base_name='data')
``` |
{
"source": "jnm/python-throttle",
"score": 3
} |
#### File: python-throttle/limiter/rate_limiter.py
```python
import math
from redis import StrictRedis
from .counter import AbstractionCounter, SlidingRedisCounter, FixedWindowRedisCounter
__all__ = ['FixedWindowLimiter', 'SlidingWindowLimiter']
class RateLimiter(object):
"""rate limiter, please give it a counter implementing all methods of AbstractionCounter"""
def __init__(self, threshold, interval, counter, name_space="default"):
"""
        :param threshold: int, or a zero-argument callable returning one; the
            value is ceiled and must be no smaller than 0
        :param interval: int, or a zero-argument callable returning one; the
            value is ceiled and must be no smaller than 1
        :param counter: subclass of AbstractionCounter, or any object
            implementing all of its methods
"""
try:
threshold()
except TypeError:
self._static_threshold = threshold
else:
self._dynamic_threshold = threshold
try:
interval()
except TypeError:
self._static_interval = interval
else:
self._dynamic_interval = interval
self._prefix = "rate-limiter:" + name_space + "{}"
self._counter = counter
@property
def _threshold(self):
try:
val = self._static_threshold
except AttributeError:
val = self._dynamic_threshold()
assert val >= 0
return int(math.ceil(val))
@property
def _interval(self):
try:
val = self._static_interval
except AttributeError:
val = self._dynamic_interval()
assert val >= 1
return int(math.ceil(val))
def exceeded(self, iid):
current = self._counter.add_key(self._prefix.format(iid), self._interval)
return current >= self._threshold
def current(self, iid):
return self._counter.current(self._prefix.format(iid))
def reset(self, iid):
self._counter.reset(self._prefix.format(iid))
class FixedWindowLimiter(RateLimiter):
"""return RateLimiter with FixedWindowRedisCounter, which 10 times faster than sliding
but it the limit is not smooth, may overflow two threshold near the gap between two interval
"""
def __init__(self, threshold, interval, redis_config, name_space="default"):
super(FixedWindowLimiter, self).__init__(threshold, interval, FixedWindowRedisCounter(StrictRedis(**redis_config)), name_space)
class SlidingWindowLimiter(RateLimiter):
"""return RateLimiter with SlidingRedisCounter, slow
but offer smooth limit, and offer more info
"""
def __init__(self, threshold, interval, redis_config, name_space="default"):
super(SlidingWindowLimiter, self).__init__(threshold, interval, SlidingRedisCounter(StrictRedis(**redis_config)), name_space)
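# A minimal usage sketch (an illustration, not part of the module); the
# redis_config values below are assumptions for a default local Redis server.
if __name__ == "__main__":
    limiter = FixedWindowLimiter(
        threshold=100,   # allow roughly 100 requests ...
        interval=60,     # ... per 60 second window
        redis_config={"host": "localhost", "port": 6379, "db": 0},
        name_space="api",
    )
    if limiter.exceeded("user-42"):
        print("rate limit hit; current count:", limiter.current("user-42"))
    else:
        print("request allowed; current count:", limiter.current("user-42"))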
``` |
{
"source": "jnnccc/Dakota-orb",
"score": 3
} |
#### File: linked_interfaces/Python/rosenbrock.py
```python
def rosenbrock_list(**kwargs):
num_fns = kwargs['functions']
# if num_fns > 1:
# least_sq_flag = true
# else:
# least_sq_flag = false
x = kwargs['cv']
ASV = kwargs['asv']
# get analysis components
#an_comps = kwargs['analysis_components']
#print an_comps
f0 = x[1]-x[0]*x[0]
f1 = 1-x[0]
retval = dict([])
if (ASV[0] & 1): # **** f:
f = [100*f0*f0+f1*f1]
retval['fns'] = f
if (ASV[0] & 2): # **** df/dx:
g = [ [-400*f0*x[0] - 2*f1, 200*f0] ]
retval['fnGrads'] = g
if (ASV[0] & 4): # **** d^2f/dx^2:
fx = x[1]-3*x[0]*x[0]
h = [
[ [-400*fx + 2, -400*x[0]],
[-400*x[0], 200 ] ]
]
retval['fnHessians'] = h
return(retval)
def rosenbrock_numpy(**kwargs):
from numpy import array
num_fns = kwargs['functions']
# if num_fns > 1:
# least_sq_flag = true
# else:
# least_sq_flag = false
x = kwargs['cv']
ASV = kwargs['asv']
f0 = x[1]-x[0]*x[0]
f1 = 1-x[0]
retval = dict([])
if (ASV[0] & 1): # **** f:
f = array([100*f0*f0+f1*f1])
retval['fns'] = f
if (ASV[0] & 2): # **** df/dx:
g = array([[-400*f0*x[0] - 2*f1, 200*f0]])
retval['fnGrads'] = g
if (ASV[0] & 4): # **** d^2f/dx^2:
fx = x[1]-3*x[0]*x[0]
h = array([ [ [-400*fx + 2, -400*x[0]],
[-400*x[0], 200 ] ] ] )
retval['fnHessians'] = h
return(retval)
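# A small standalone check (an illustration, not part of the Dakota
# interface). The kwargs below mimic what Dakota passes in, using the keys
# 'functions', 'cv' and 'asv' seen above; asv=7 requests the value, gradient
# and Hessian at once.
if __name__ == "__main__":
    example_kwargs = {"functions": 1, "cv": [0.5, 0.5], "asv": [7]}
    print(rosenbrock_list(**example_kwargs))
    print(rosenbrock_numpy(**example_kwargs))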
```
#### File: dakota/interfacing/interfacing.py
```python
from __future__ import print_function
import collections
import re
import sys
import copy
__author__ = '<NAME>'
__copyright__ = 'Copyright 2014 Sandia Corporation'
__license__ = 'GNU Lesser General Public License'
#### Exceptions
class ResponseError(Exception):
pass
class MissingSourceError(Exception):
pass
class ParamsFormatError(Exception):
pass
# Constant used to specify an unnamed results file
UNNAMED = True
#### Class definitions
class Parameters(object):
"""Access variables and analysis components from a Dakota parameters file
Parameters objects typically should be constructed by the convenience
function ``dakota.interfacing.read_parameters_file``.
Variable values can be accessed by name or by index using []. Analysis
components are accessible by index only using the an_comp attribute. The
Parameters class supports iteration, yielding the index, variable
descriptor, and variable value.
Attributes:
an_comps: List of strings containing the analysis components.
eval_id: Evaluation id (string).
eval_num: Evaluation number (final token in eval_id) (int).
aprepro_format: Boolean indicating whether the parameters file was in
aprepro (True) or Dakota (False) format.
descriptors: List of the variable descriptors (read-only)
num_variables: Number of variables (read-only)
num_an_comps: Number of analysis components (read-only)
"""
def __init__(self,aprepro_format=None, variables=None, an_comps=None,
eval_id=None):
self.aprepro_format = aprepro_format
self._variables = copy.deepcopy(variables)
self.an_comps = list(an_comps)
self.eval_id = str(eval_id)
self.eval_num = int(eval_id.split(":")[-1])
# Convert variables to the appropriate type. The possible types
# are int, float, and string. The variables are already strings.
# TODO: Consider a user option to override this behavior and keep
# everything as a string, or provide access to the original strings
for k, v in self._variables.items():
try:
self._variables[k] = int(v)
except ValueError:
try:
self._variables[k] = float(v)
except ValueError:
pass
pass
@property
def descriptors(self):
return [k for k in self._variables.keys()]
def __getitem__(self,key):
if type(key) is int:
return self._variables[self.descriptors[key]]
else:
return self._variables[key]
def __setitem__(self,key,value):
if type(key) is int:
self._variables[self.descriptors[key]] = value
else:
self._variables[key] = value
@property
def num_variables(self):
return len(self._variables)
@property
def num_an_comps(self):
return len(self.an_comps)
def __iter__(self):
for index, (name, response) in enumerate(self._variables.items()):
yield index, name, response
# Datatype to hold ASV element for a single response. function, gradient,
# and hessian are set to True or False.
_asvType = collections.namedtuple("ASVType",["function","gradient","hessian"])
# A class to hold the ASV and data for a single response
class Response(object):
"""Active set vector and data for a single response.
Instances of this class are constructed by Results objects.
Attributes:
asv: namedtuple with three members, function, gradient, and hessian.
Each are a boolean indicating whether Dakota requested the
associated information for the response. namedtuples can be
accessed by index or by member.
function: Function value for the response. A ResponseError
is raised if Dakota did not request the function value (and
ignore_asv is False).
gradient: Gradient for the response. Gradients must be a 1D iterable
of values that can be converted to float. A ResponseError
is raised if Dakota did not request the gradient (and ignore_asv is
False), or if the number of elements does not equal the number of
derivative variables.
hessian: Hessian value for the response. Hessians must be an iterable
of iterables (e.g. a 2D numpy array or list of lists). A
ResponseError is raised if Dakota did not request the Hessian
(and ignore_asv is False), or if the dimension does not correspond
correctly with the number of derivative variables.
"""
def __init__(self, descriptor, num_deriv_vars, ignore_asv, asv):
self._descriptor = descriptor
self._num_deriv_vars = num_deriv_vars
int_asv = int(asv)
self.asv = _asvType(int_asv & 1 == 1, int_asv & 2 == 2,
                            int_asv & 4 == 4)
self._function = None
self._gradient = None
self._hessian = None
self._ignore_asv = ignore_asv
@property
def function(self):
return self._function
@function.setter
def function(self,val):
if not (self._ignore_asv or self.asv.function):
raise ResponseError("Function value not requested for '%s'."
% self._descriptor)
self._function = float(val)
@property
def gradient(self):
        # try/except needed because Results._write_results with ignore_asv = True
        # tests for existence of this member
try:
return list(self._gradient) # return a copy
except TypeError:
return None
@gradient.setter
def gradient(self,val):
if not (self._ignore_asv or self.asv.gradient):
raise ResponseError("Gradient not requested for '%s'."
% self._descriptor)
self._gradient = [float(e) for e in val]
if len(self._gradient) != self._num_deriv_vars:
raise ResponseError("Length of gradient must equal number of "
"derivative variables.")
@property
def hessian(self):
        # try/except needed because Results._write_results with ignore_asv = True
        # tests for existence of this member
try:
return copy.deepcopy(self._hessian)
except:
return None
@hessian.setter
def hessian(self,val):
if not (self._ignore_asv or self.asv.hessian):
raise ResponseError("Hessian not requested for '%s'."
% self._descriptor)
### validate dimensions
rctr = 0
for r in val:
rctr += 1
cctr = 0
for c in r:
cctr += 1
if cctr != self._num_deriv_vars:
raise ResponseError("Hessian must be square and size "
"num_deriv_variables.")
if rctr != self._num_deriv_vars:
raise ResponseError("Hessian must be square and size "
"num_deriv_variables.")
### copy the Hessian
self._hessian = []
for r in val:
row = []
for c in r:
row.append(float(c))
self._hessian.append(row)
class Results(object):
"""Collect response data and write to results file.
Results objects typically should be constructed by the convenience function
``dakota.interfacing.read_parameters_file``.
Each response is represented by a Response objected, and can be accessed
by name or by index using []. The Results class supports iteration, yielding
the index, response descriptor, and Response object.
Attributes:
eval_id: Evaluation id (a string).
eval_num: Evaluation number (final token in eval_id) (int).
aprepro_format: Boolean indicating whether the parameters file was in
aprepro (True) or Dakota (False) format.
descriptors: List of the response descriptors (read-only)
        num_responses: Number of responses (read-only)
deriv_vars: List of the derivative variables (read-only)
num_deriv_vars: Number of derivative variables (read-only)
"""
def __init__(self, aprepro_format=None, responses=None,
deriv_vars=None, eval_id=None, ignore_asv=False,
results_file=None):
self.aprepro_format = aprepro_format
self.ignore_asv = ignore_asv
self._deriv_vars = deriv_vars[:]
num_deriv_vars = len(deriv_vars)
self._responses = collections.OrderedDict()
for t, v in responses.items():
self._responses[t] = Response(t, num_deriv_vars, ignore_asv,
int(v))
self.results_file = results_file
self.eval_id = eval_id
self.eval_num = int(eval_id.split(":")[-1])
self._failed = False
def __getitem__(self,key):
if type(key) is int:
return self._responses[self.descriptors[key]]
else:
return self._responses[key]
@property
def num_deriv_vars(self):
return len(self._deriv_vars)
@property
def num_responses(self):
return len(self._responses)
def _write_results(self,stream, ignore_asv):
# Write FAIL if set
if self._failed:
print("FAIL", file=stream)
return
# Write function values
for t, v in self._responses.items():
if (v.asv.function or ignore_asv) and v.function is not None:
print("%24.16E %s" %(v.function, t), file=stream)
# Write gradients
for t, v in self._responses.items():
if (v.asv.gradient or ignore_asv) and v.gradient is not None:
print("[ ",file=stream, end="")
for e in v.gradient:
print("% 24.16E" % e,file=stream,end="")
print(" ]",file=stream)
# Write Hessians
for t, v in self._responses.items():
if (v.asv.hessian or ignore_asv) and v.hessian is not None:
print("[[",file=stream,end="")
first = True
for r in v.hessian:
if not first:
print("\n ",file=stream,end="")
first=False
for c in r:
print("% 24.16E" % c,file=stream, end="")
print(" ]]",file=stream)
def write(self, stream=None, ignore_asv=None):
"""Write the results to the Dakota results file.
Keyword Args:
stream: Write results to this I/O stream. Overrides results_file
specified when the object was constructed.
ignore_asv: Ignore the active set vector while writing the response
data to the results file (or stream). Overrides ignore_asv
setting provided at construct time.
Raises:
dakota.interfacing.MissingSourceError: No results_file was provided at
construct time, and no stream was provided to the method call.
dakota.interfacing.ResponseError: A result requested by Dakota is missing
(and ignore_asv is False).
"""
my_ignore_asv = self.ignore_asv
if ignore_asv is not None:
my_ignore_asv= ignore_asv
## Confirm that user has provided all info requested by Dakota
if not my_ignore_asv and not self._failed:
for t, v in self._responses.items():
if v.asv.function and v.function is None:
raise ResponseError("Response '" + t + "' is missing "
"requested function result.")
if v.asv.gradient and v.gradient is None:
raise ResponseError("Response '" + t + "' is missing "
"requested gradient result.")
if v.asv.hessian and v.hessian is None:
raise ResponseError("Response '" +t + "' is missing "
"requested Hessian result.")
if stream is None:
if self.results_file is None:
raise MissingSourceError("No stream specified and no "
"results_file provided at construct time.")
else:
with open(self.results_file, "w") as ofp:
self._write_results(ofp, my_ignore_asv)
else:
self._write_results(stream, my_ignore_asv)
@property
def descriptors(self):
return [k for k in self._responses.keys()]
@property
def deriv_vars(self):
return list(self._deriv_vars)
def __iter__(self):
for index, (name, response) in enumerate(self._responses.items()):
yield index, name, response
def fail(self):
"""Set the FAIL attribute.
When the results file is written, it will contain only the word FAIL"""
self._failed = True
### Free functions and their helpers for constructing objects
# Collections of regexes for parsing aprepro and dprepro formatted Dakota
# parameter files
_aprepro_re_base = " {{20}}{{ {tag} += +{value} }}\n"
_dakota_re_base = "\s*{value} {tag}\n"
_pRE = {
"APREPRO":{"num_variables":re.compile(_aprepro_re_base.format(
value="(?P<value>\d+)", tag="(?P<tag>DAKOTA_VARS)")),
"num_functions":re.compile(_aprepro_re_base.format(
value="(?P<value>\d+)", tag="(?P<tag>DAKOTA_FNS)")),
"num_deriv_vars":re.compile(_aprepro_re_base.format(
value="(?P<value>\d+)", tag="(?P<tag>DAKOTA_DER_VARS)")),
"num_an_comps":re.compile(_aprepro_re_base.format(
value="(?P<value>\d+)", tag="(?P<tag>DAKOTA_AN_COMPS)")),
"eval_id":re.compile(_aprepro_re_base.format(
value="(?P<value>\d+(?::\d+)*)", tag="(?P<tag>DAKOTA_EVAL_ID)")),
"variable":re.compile(_aprepro_re_base.format(
value="\"?(?P<value>.+?)\"?", tag ="(?P<tag>\S+)")),
"function":re.compile(_aprepro_re_base.format(
value="(?P<value>[0-7])", tag="ASV_\d+:(?P<tag>\S+)")),
"deriv_var":re.compile(_aprepro_re_base.format(
value="(?P<value>\d+)", tag="DVV_\d+:(?P<tag>\S+)")),
"an_comp":re.compile(_aprepro_re_base.format(
value="\"(?P<value>.+?)\"", tag="AC_\d+:(?P<tag>.+?)"))
},
"DAKOTA":{"num_variables":re.compile(_dakota_re_base.format(
value="(?P<value>\d+)", tag="(?P<tag>variables)")),
"num_functions":re.compile(_dakota_re_base.format(
value="(?P<value>\d+)", tag="(?P<tag>functions)")),
"num_deriv_vars":re.compile(_dakota_re_base.format(
value="(?P<value>\d+)", tag="(?P<tag>derivative_variables)")),
"num_an_comps":re.compile(_dakota_re_base.format(
value="(?P<value>\d+)", tag="(?P<tag>analysis_components)")),
"eval_id":re.compile(_dakota_re_base.format(
value="(?P<value>\d+(?::\d+)*)", tag="(?P<tag>eval_id)")),
# A lookahead assertion is required to catch string variables with spaces
"variable":re.compile("\s*(?P<value>.+?)(?= \S+\n) (?P<tag>\S+)\n"),
"function":re.compile(_dakota_re_base.format(
value="(?P<value>[0-7])", tag="ASV_\d+:(?P<tag>\S+)")),
"deriv_var":re.compile(_dakota_re_base.format(
value="(?P<value>\d+)", tag="DVV_\d+:(?P<tag>\S+)")),
"an_comp":re.compile(_dakota_re_base.format(
value="(?P<value>.+?)", tag="AC_\d+:(?P<tag>.+?)"))
}
}
def _extract_block(stream, numRE, dataRE, handle):
"""Extract a block of information from a Dakota parameters file.
stream is expected to be queued up to the beginning of the block.
numRE and dataRE are regexes used to extract the number of items
and the items themselves, respectively. They must support groups
named 'tag' and 'value'.
handle is a function to store the extracted items."""
line = stream.readline()
num = int(numRE.match(line).group("value"))
for i in range(num):
line = stream.readline()
m = dataRE.match(line)
tag = m.group("tag")
value = m.group("value")
handle(tag,value)
def _read_parameters_stream(stream=None, ignore_asv=False, results_file=None):
"""Extract the parameters data from the stream."""
# determine format (dakota or aprepro) by examining the first line
line = stream.readline()
dakota_format_match = _pRE["DAKOTA"]["num_variables"].match(line)
aprepro_format_match = _pRE["APREPRO"]["num_variables"].match(line)
if aprepro_format_match is not None:
aprepro_format = True
useRE = _pRE["APREPRO"]
elif dakota_format_match is not None:
aprepro_format = False
useRE = _pRE["DAKOTA"]
else:
raise ParamsFormatError("Unrecognized parameters file format.")
# Rewind the stream to begin reading blocks
stream.seek(0)
# Read variable values
variables = collections.OrderedDict()
def store_variables(t, v):
variables[t]=v
_extract_block(stream, useRE["num_variables"], useRE["variable"],
store_variables)
# Read functions block
responses = collections.OrderedDict()
def store_responses(t, v):
responses[t]=v
_extract_block(stream, useRE["num_functions"], useRE["function"],
store_responses)
# Read derivative variables block
deriv_vars = []
def store_deriv_vars(t,v):
deriv_vars.append(t)
_extract_block(stream, useRE["num_deriv_vars"], useRE["deriv_var"],
store_deriv_vars)
# Read analysis components
an_comps = []
def store_an_comps(t, v):
an_comps.append(v)
_extract_block(stream, useRE["num_an_comps"], useRE["an_comp"],
store_an_comps)
# Read eval_id
m = useRE["eval_id"].match(stream.readline())
eval_id = m.group("value")
return (Parameters(aprepro_format, variables, an_comps, eval_id),
Results(aprepro_format, responses, deriv_vars, eval_id, ignore_asv,
results_file))
def read_parameters_file(parameters_file=None, results_file=None,
ignore_asv=False):
"""Read and parse the Dakota parameters file.
Keyword Args:
parameters_file: Pathname to the Dakota parameters file. If not
provided, the first command line argument will be used.
results_file: Pathname to the Dakota results file. If not provided
or set to None, the second command line argument will be used.
Setting to dakota.interfacing.UNNAMED leaves the file unnamed,
and a stream must be specified in the call to Results.write().
ignore_asv: If True, ignore the active set vector when setting
responses on the returned Results object.
Returns:
A tuple containing a Parameters object and Results object configured
based on the parameters file.
Raises:
dakota.interfacing.MissingSourceError: Parameters or results filename is
not provided and cannot be read from the command line arguments.
dakota.interfacing.ParamsFormatError: The Dakota parameters file was not
valid.
"""
### Determine the name of the parameters file and read it in
if parameters_file is None:
try:
parameters_file = sys.argv[1]
except IndexError:
raise MissingSourceError("No parameters filename provided and no "
"command line argument.")
with open(parameters_file,"r") as ifp:
parameters_list = ifp.readlines()
### Determine the name of the results file
if results_file is None:
try:
results_file = sys.argv[2]
except IndexError:
raise MissingSourceError("No results filename provided and no "
"command line argument.")
elif results_file == UNNAMED:
results_file = ""
### Open and parse the parameters file
with open(parameters_file, "r") as ifp:
return _read_parameters_stream(ifp, ignore_asv, results_file)
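# A minimal analysis-driver sketch built on the helpers above (an
# illustration, not part of the module). Dakota normally invokes the driver
# with the parameters and results file names as command line arguments, which
# read_parameters_file() picks up from sys.argv.
if __name__ == "__main__":
    params, results = read_parameters_file()
    # Toy response: the sum of all numeric variable values.
    value = sum(v for _, _, v in params if isinstance(v, (int, float)))
    for _, _, response in results:
        if response.asv.function:
            response.function = value
    results.write()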
```
#### File: interfaces/Python/dakota_interfacing_test.py
```python
from __future__ import print_function
import unittest
import os
__author__ = '<NAME>'
__copyright__ = 'Copyright 2014 Sandia Corporation'
__license__ = 'GNU Lesser General Public License'
try: # Python 2/3 compatible import of StringIO
import StringIO
except ImportError:
import io as StringIO
import dakota.interfacing as di
from dakota.interfacing import parallel
apreproParams = """ { DAKOTA_VARS = 3 }
{ x1 = 7.488318331306800e-01 }
{ x2 = 2.188638686202466e-01 }
{ dussv_1 = "foo bar" }
{ DAKOTA_FNS = 1 }
{ ASV_1:response_fn_1 = %d }
{ DAKOTA_DER_VARS = 2 }
{ DVV_1:x1 = 1 }
{ DVV_2:x2 = 2 }
{ DAKOTA_AN_COMPS = 2 }
{ AC_1:first.sh = "a b" }
{ AC_2:first.sh = "b" }
{ DAKOTA_EVAL_ID = 1 }
"""
dakotaParams = """ 3 variables
7.488318331306800e-01 x1
2.188638686202466e-01 x2
foo bar dussv_1
1 functions
%d ASV_1:response_fn_1
2 derivative_variables
1 DVV_1:x1
2 DVV_2:x2
2 analysis_components
a b AC_1:first.sh
b AC_2:first.sh
1 eval_id
"""
# Helper functions needed for Python < 2.7
def set_function(r):
r["response_fn_1"].function = 5.0
def set_gradient(r):
r["response_fn_1"].gradient = [1.0, 2.0]
def set_hessian(r):
r["response_fn_1"].hessian = [[1.0, 2.0],[2.0,3.0]]
class dakotaInterfacingTestCase(unittest.TestCase):
def test_aprepro_format(self):
"""Confirm that aprepro format Parameters files are parsed correctly."""
pio = StringIO.StringIO(apreproParams % 1)
p, r = di.interfacing._read_parameters_stream(stream=pio, results_file="results.out")
self.assertEqual(p.num_variables, 3)
self.assertEqual(p.descriptors, ["x1","x2","dussv_1"])
self.assertAlmostEqual(p["x1"], 7.488318331306800e-01)
self.assertAlmostEqual(p["x2"], 2.188638686202466e-01)
self.assertEqual(p["dussv_1"], "foo bar")
self.assertEqual(p.num_an_comps, 2)
self.assertEqual(p.an_comps[0], "a b")
self.assertEqual(p.an_comps[1], "b")
self.assertEqual(p.eval_id, "1")
self.assertTrue(p.aprepro_format)
self.assertEqual(r.num_responses, 1)
self.assertEqual(r.descriptors, ["response_fn_1"])
self.assertEqual(r["response_fn_1"].asv, (True, False, False))
self.assertEqual(r.num_deriv_vars, 2)
self.assertEqual(r.deriv_vars, ["x1","x2"])
self.assertTrue(r.aprepro_format)
self.assertEqual(r.results_file, "results.out")
def test_dakota_format(self):
"""Confirm that Dakota format Parameters files are parsed correctly."""
pio = StringIO.StringIO(dakotaParams % 1)
p, r = di.interfacing._read_parameters_stream(stream=pio, results_file="results.out")
self.assertEqual(p.num_variables, 3)
self.assertEqual(p.descriptors, ["x1","x2","dussv_1"])
self.assertAlmostEqual(p["x1"], 7.488318331306800e-01)
self.assertAlmostEqual(p["x2"], 2.188638686202466e-01)
self.assertEqual(p["dussv_1"], "foo bar")
self.assertEqual(p.num_an_comps, 2)
self.assertEqual(p.an_comps[0], "a b")
self.assertEqual(p.an_comps[1], "b")
self.assertEqual(p.eval_id, "1")
self.assertFalse(p.aprepro_format)
self.assertEqual(r.num_responses, 1)
self.assertEqual(r.descriptors, ["response_fn_1"])
self.assertEqual(r["response_fn_1"].asv, (True, False, False))
self.assertEqual(r.num_deriv_vars, 2)
self.assertEqual(r.deriv_vars, ["x1","x2"])
self.assertEqual(r.eval_id, "1")
self.assertFalse(r.aprepro_format)
self.assertEqual(r.results_file, "results.out")
def test_asv(self):
"""Results behaves according to the ASV when response data is set."""
# Function only
pio = StringIO.StringIO(dakotaParams % 1)
p, r = di.interfacing._read_parameters_stream(stream=pio)
set_function(r)
self.assertRaises(di.interfacing.ResponseError, set_gradient, r)
self.assertRaises(di.interfacing.ResponseError, set_hessian, r)
r.write(StringIO.StringIO())
# Gradient only
pio = StringIO.StringIO(dakotaParams % 2)
p, r = di.interfacing._read_parameters_stream(stream=pio)
self.assertRaises(di.interfacing.ResponseError, set_function, r)
set_gradient(r)
self.assertRaises(di.interfacing.ResponseError, set_hessian, r)
r.write(StringIO.StringIO())
# Hessian only
pio = StringIO.StringIO(dakotaParams % 4)
p, r = di.interfacing._read_parameters_stream(stream=pio)
self.assertRaises(di.interfacing.ResponseError, set_function, r)
self.assertRaises(di.interfacing.ResponseError, set_gradient, r)
set_hessian(r)
r.write(StringIO.StringIO())
def test_ignore_asv(self):
"""Confirm that ASV is ignored."""
# Test exceptions
for i in range(1,8):
sio = StringIO.StringIO(dakotaParams % i)
p, r = di.interfacing._read_parameters_stream(stream=sio,ignore_asv=True)
set_function(r)
set_gradient(r)
set_hessian(r)
r.write(StringIO.StringIO())
# Test write-time ignoring
sio = StringIO.StringIO(dakotaParams % 3)
p, r = di.interfacing._read_parameters_stream(stream=sio,ignore_asv=False)
set_function(r)
rio = StringIO.StringIO()
r.write(stream=rio,ignore_asv=True)
self.assertEqual(rio.getvalue(), " 5.0000000000000000E+00 response_fn_1\n")
def test_results_write(self):
"""Verify Written test results"""
sio = StringIO.StringIO(dakotaParams % 7)
p, r = di.interfacing._read_parameters_stream(stream=sio)
set_function(r)
set_gradient(r)
set_hessian(r)
rio = StringIO.StringIO()
r.write(stream=rio)
expected = """ 5.0000000000000000E+00 response_fn_1
[ 1.0000000000000000E+00 2.0000000000000000E+00 ]
[[ 1.0000000000000000E+00 2.0000000000000000E+00
2.0000000000000000E+00 3.0000000000000000E+00 ]]
"""
self.assertEqual(rio.getvalue(), expected)
# Test simulation failure flag
r.fail()
rio = StringIO.StringIO()
r.write(stream=rio)
expected = "FAIL\n"
self.assertEqual(rio.getvalue(), expected)
def test_slurm_info(self):
"""Info correctly extracted from the environment."""
# Check env without SLURM_JOBID set
try:
del os.environ["SLURM_JOBID"]
except KeyError:
pass
self.assertRaises(parallel.MgrEnvError,parallel._get_job_info)
# Set JOBID and set TASKS_PER_NODE
os.environ["SLURM_JOBID"] = "1337"
os.environ["SLURM_TASKS_PER_NODE"] = "16(x2)"
nnodes, tpn, job_id = parallel._get_job_info()
self.assertEqual(nnodes, 2)
self.assertEqual(tpn, 16)
self.assertEqual(job_id,"1337")
# Single node
os.environ["SLURM_TASKS_PER_NODE"] = "16"
nnodes, tpn, job_id = parallel._get_job_info()
self.assertEqual(nnodes, 1)
self.assertEqual(tpn, 16)
def test_node_list(self):
"""Relative node list is built correctly."""
self.assertEqual( "+n0",
parallel._get_node_list(tile=0, applic_tasks=16,
tasks_per_node=16))
self.assertEqual( "+n0,+n1",
parallel._get_node_list(tile=0, applic_tasks=32,
tasks_per_node=16))
self.assertEqual( "+n0",
parallel._get_node_list(tile=0, applic_tasks=8,
tasks_per_node=16))
self.assertEqual( "+n0",
parallel._get_node_list(tile=1, applic_tasks=8,
tasks_per_node=16))
self.assertEqual( "+n1",
parallel._get_node_list(tile=0, applic_tasks=16,
tasks_per_node=16, dedicated_master=parallel.NODE))
self.assertEqual( "+n1,+n2",
parallel._get_node_list(tile=0, applic_tasks=32,
tasks_per_node=16, dedicated_master=parallel.NODE))
self.assertEqual( "+n0",
parallel._get_node_list(tile=0, applic_tasks=8,
tasks_per_node=16, dedicated_master=parallel.TILE))
self.assertEqual( "+n1",
parallel._get_node_list(tile=1, applic_tasks=8,
tasks_per_node=16, dedicated_master=parallel.TILE))
self.assertEqual( "+n1",
parallel._get_node_list(tile=0, applic_tasks=16,
tasks_per_node=16, dedicated_master=parallel.TILE))
def test_static_tile(self):
"""Correct tile is returned"""
self.assertEqual(0,
parallel._calc_static_tile(eval_num=1, num_tiles=4))
self.assertEqual(3,
parallel._calc_static_tile(eval_num=4, num_tiles=4))
self.assertEqual(0,
parallel._calc_static_tile(eval_num=5, num_tiles=4))
self.assertEqual(0,
parallel._calc_static_tile(eval_num=9, num_tiles=4))
self.assertEqual(0,
parallel._calc_static_tile(eval_num=1, num_tiles=1))
self.assertEqual(0,
parallel._calc_static_tile(eval_num=100, num_tiles=1))
def test_num_tiles(self):
self.assertEqual(10,
parallel._calc_num_tiles(applic_tasks=16, tasks_per_node=16,
num_nodes=10))
self.assertEqual(1,
parallel._calc_num_tiles(applic_tasks=16, tasks_per_node=16,
num_nodes=1))
self.assertEqual(9,
parallel._calc_num_tiles(applic_tasks=16, tasks_per_node=16,
num_nodes=10, dedicated_master=parallel.NODE))
self.assertEqual(9,
parallel._calc_num_tiles(applic_tasks=16, tasks_per_node=16,
num_nodes=10, dedicated_master=parallel.TILE))
self.assertEqual(18,
parallel._calc_num_tiles(applic_tasks=8, tasks_per_node=16,
num_nodes=10, dedicated_master=parallel.NODE))
self.assertEqual(19,
parallel._calc_num_tiles(applic_tasks=8, tasks_per_node=16,
num_nodes=10, dedicated_master=parallel.TILE))
self.assertEqual(1,
parallel._calc_num_tiles(applic_tasks=8, tasks_per_node=16,
num_nodes=1, dedicated_master=parallel.TILE))
self.assertRaises(parallel.ResourceError,parallel._calc_num_tiles,
applic_tasks=8, tasks_per_node=16, num_nodes=1,
dedicated_master=parallel.NODE)
self.assertRaises(parallel.ResourceError,parallel._calc_num_tiles,
applic_tasks=16, tasks_per_node=16, num_nodes=1,
dedicated_master=parallel.TILE)
# todo: test iteration, integer access
unittest.main()
``` |
{
"source": "jnngu/codingdatingsite",
"score": 2
} |
#### File: styledating/backend/test2.py
```python
def bar():
[line x for x in for range in 5 + 2 * 4 + 1]
```
#### File: styledating/challenges/pal_sc.py
```python
def palindrome(x:int):
print(True)
```
#### File: code/tests/testspaces.py
```python
def foo():
s = [1, 2, 3]
for xd in range(s):
return s
```
#### File: codingdatingsite/styledating/main.py
```python
from flask import Flask, render_template, request, redirect, url_for
from flask_login import LoginManager, UserMixin, login_required, login_user, logout_user, current_user
import sqlite3
import hashlib
import re
from util import *
from user import *
from autograde import *
app = Flask(__name__)
app.secret_key = open('secret_key.txt').read()
login_manager = LoginManager()
login_manager.init_app(app)
@login_manager.user_loader
def load_user(user_id):
return User(user_id)
@app.route('/')
def index():
return render_template("index.html")
@app.route('/register')
def register():
return render_template("registration.html")
@app.route('/process_registration', methods=['POST'])
def process_registration():
f = request.form
if not f['username'].isalnum():
return f"username can only contain letters and numbers"
L = sql_execute('''SELECT * FROM users WHERE username=?''', (f['username'],))
print(L)
if len(L) > 0:
return f"sorry; username {f['username']} already taken"
num = sql_execute('''SELECT MAX(id) from users''')
print(num)
newid = 1 if len(num) == 0 or num[0][0] == None else 1 + num[0][0]
sql_execute('''INSERT INTO users VALUES (?,?,?,?,?,?,?,?)''',\
(newid,f['username'],f['name'],'',hashpw(f['pass']),"","",""))
pfp = request.files['pfp']
pfp.save('static/pfps/'+f['username'])
#return f"registered! You are user #{newid}<br>" + render_template("login.html")
return redirect(url_for('login'))
@app.route('/login')
def login():
return render_template("login.html")
@app.route('/process_login', methods=['POST'])
def process_login():
f = request.form
L = sql_execute('''SELECT name FROM users WHERE username=? AND pass_hash=?''',\
(f['username'], hashpw(f['pass'])))
print(L)
if len(L) == 0 or L[0][0] == None:
return "sorry, login failed\n" + render_template("login.html")
user = User(f['username'])
login_user(user)
return redirect(url_for('evaluations'))
@app.route("/logout")
@login_required
def logout():
logout_user()
return redirect(url_for('index'))
dct = {1:'pal', 2:'robot', 3:'rotated'}
fcn_names = {1:'palindrome', 2:'judge', 3:'rotate'}
@app.route('/code/<int:n>')
@login_required
def coding_chal(n):
if n < 1 or n > 3:
return "???"
else:
sc = open("challenges/"+dct[n]+"_sc.py").read()
question_text = open("challenges/"+dct[n]+"_desc.txt").read().split('\n')
return render_template("code_chall.html",\
n=n,question_text=question_text,code=sc,results=["No results yet!"],done=None)
@app.route('/process_language', methods=['POST'])
def process_language():
n = int(request.form['n'])
if request.form['language'] == "Python":
sc = open("challenges/"+dct[n]+"_sc.py").read()
question_text = open("challenges/"+dct[n]+"_desc.txt").read().split('\n')
return render_template("code_chall.html",\
n=n,question_text=question_text,code=sc,results=["No results yet!"],done=None,lang='python')
else:
sc = open("challenges/"+dct[n]+"_sc.c").read()
question_text = open("challenges/"+dct[n]+"_desc.txt").read().split('\n')
return render_template("code_chall.html",\
n=n,question_text=question_text,code=sc,results=["No results yet!"],done=None,lang='c')
@app.route('/process_code', methods=['POST'])
@login_required
def process_code():
n = int(request.form['cnum'])
if n < 1 or n > 3:
return "???"
else:
question_text = open("challenges/"+dct[n]+"_desc.txt").read().split('\n')
user_code = request.form['code_submission']
tc_filename ="challenges/"+dct[n]+"_testcases.txt"
sol_filename = "challenges/"+dct[n]+"_sol.txt"
lang = request.form['language']
(numWrong,results) = runAutograder(lang.lower(),user_code,fcn_names[n],tc_filename, sol_filename)
results = results.split('\n')
if numWrong == 0:
#TODO: user_code is a string with the user's submitted code.
# current_user.username is the user's username.
# @ Brandon.
current_user.updatedb(n,user_code)
return render_template("code_chall.html",n=n,\
question_text=question_text,code=user_code,results=results,\
done=(numWrong == 0))
@app.route('/matches')
@app.route('/matches/<string:typ>/<string:other>')
@login_required
def matches(typ=None,other=None):
if other != None:
sql_execute('''insert into actions values(?,?,?)''', (typ,current_user.username,other))
if current_user.numdone() < 3:
return render_template("matches.html",user=current_user,curr=current_user)
best_match = current_user.compute_best_match()
return render_template("matches.html",user=best_match,curr=current_user)
@app.route('/evaluations')
@login_required
def evaluations():
return render_template("evaluations.html",user=current_user)
@app.route('/profile')
@login_required
def profile():
return render_template("profile.html", user=current_user, isMe=True)
@app.route('/mutual')
@login_required
def mutual():
matches = current_user.get_mutual_matches()
return render_template("mutual.html", matches=matches, user=current_user)
if __name__ == '__main__':
app.run(debug=False)
``` |
{
"source": "jnnkhrtmnn/ml-blueprint-arch",
"score": 2
} |
#### File: jnnkhrtmnn/ml-blueprint-arch/app.py
```python
import numpy as np
from pathlib import Path
from joblib import load
from flask import Flask, request, jsonify
# initialise path
#path = Path('C:/Users/janni/Desktop/blueprint/ml-blueprint-arch')
# load model
reg = load(Path('models') / 'reg.joblib')
app = Flask(__name__)
@app.route('/predict', methods=['POST'])
def ml_inference():
'''
    Uses JSON input from the POST request,
    returns the prediction and the input as JSON.
    Returns
    -------
    flask.Response
        JSON with the prediction and the echoed input values x1 and x2.
'''
json_ = request.json
X = np.array([[json_['x1'], json_['x2']]])
# Some checks
assert X.shape == (1,2)
pred = reg.predict(X)
return jsonify({'prediction': str(pred[0])
,'input': {'x1' : str(X[0][0])
,'x2' : str(X[0][1])}
})
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
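# Example call against the /predict endpoint above (a sketch; assumes the app
# is running locally on Flask's default port 5000 and that `requests` is
# installed):
#
#   import requests
#   r = requests.post("http://localhost:5000/predict",
#                     json={"x1": 10.0, "x2": 30.0})
#   print(r.json())  # {'prediction': '...', 'input': {'x1': '10.0', 'x2': '30.0'}}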
```
#### File: ml-blueprint-arch/src/data_generation.py
```python
from scipy.stats import norm
import pandas as pd
from pathlib import Path
path = Path('C:/Users/janni/Desktop/blueprint/ml-blueprint-arch')
def generate_data():
'''
Generates random data set with y, x1, x2 and epsilon.
y is a linear combination of iid gaussian x1 and x2
plus the gaussian error term epsilon
Returns
-------
    df : pandas.DataFrame
        DataFrame with columns y, x1 and x2; also pickled to data/dat.pkl.
'''
    n = 1000
x1 = norm.rvs(10,3,n)
x2 = norm.rvs(30,5,n)
epsilon = norm.rvs(0,1,n)
y = x1 + x2 + epsilon
df = pd.DataFrame(list(zip(y,x1,x2)), columns=['y', 'x1', 'x2'])
df.to_pickle(path / 'data' / 'dat.pkl')
print('Data generated!')
return df
if __name__=='__main__':
generate_data()
``` |
{
"source": "jnnl/jc",
"score": 3
} |
#### File: jc/jc/evaluator.py
```python
from __future__ import print_function
import ast
import math
import operator
import re
import sys
from . import completer
from . import __version__
class NamespaceError(Exception):
pass
class Evaluator(object):
'''
Expression evaluator.
Constructs ASTs from expressions and evaluates them.
Supports standard arithmetic and bitwise operations,
variable assignments, basic functions, constants and base conversions.
'''
def __init__(self, base=10, completer=completer.Completer):
self.ans = None
self.operators = {
ast.Add: {'op': operator.add, 'symbol': '+'},
ast.Sub: {'op': operator.sub, 'symbol': '-'},
ast.Mult: {'op': operator.mul, 'symbol': '*'},
ast.Div: {'op': operator.truediv, 'symbol': '/'},
ast.FloorDiv: {'op': operator.floordiv, 'symbol': '//'},
ast.Mod: {'op': operator.mod, 'symbol': '%'},
ast.Pow: {'op': operator.pow, 'symbol': '**'},
ast.BitAnd: {'op': operator.iand, 'symbol': '&'},
ast.BitOr: {'op': operator.ior, 'symbol': '|'},
ast.BitXor: {'op': operator.ixor, 'symbol': '^'},
ast.Invert: {'op': operator.inv, 'symbol': '~'},
            ast.LShift: {'op': operator.lshift, 'symbol': '<<'},
            ast.RShift: {'op': operator.rshift, 'symbol': '>>'},
ast.UAdd: {'op': operator.pos, 'symbol': '+'},
ast.USub: {'op': operator.neg, 'symbol': '-'},
}
self.symbols = [op['symbol'] for op in self.operators.values()]
self.internal_variables = {
'_base': base,
'_python': 'Python ' + sys.version,
'_version': __version__
}
self.variables = {}
self.constants = {
'e': {'value': math.e,
'help': 'e: Euler\'s number'},
'euler': {'value': 0.577215664901533,
'help': 'euler: Euler-Mascheroni constant'},
'pi': {'value': math.pi,
'help': 'pi: 0.5 tau'},
'phi': {'value': (1 + math.sqrt(5)) / 2,
'help': 'phi: the golden ratio'},
'tau': {'value': math.pi * 2,
'help': 'tau: 2 pi'},
}
self.functions = {
'A': {'value': lambda m, n: n + 1 if m == 0 else \
(self.functions['A']['value'](m - 1, 1) if m > 0 and n == 0 else \
self.functions['A']['value'](m - 1, self.functions['A']['value'](m, n - 1))),
'help': 'A(x, y): Ackermann function'},
'abs': {'value': abs,
'help': 'abs(x): absolute value of x'},
'acos': {'value': math.acos,
'help': 'acos(x): inverse trigonometric cosine of x (in radians)'},
'acosh': {'value': math.acosh,
'help': 'acosh(x): inverse hyperbolic cosine of x (in radians)'},
'asin': {'value': math.asin,
'help': 'asin(x): inverse trigonometric sine of x (in radians)'},
'asinh': {'value': math.asinh,
'help': 'asinh(x): inverse hyperbolic sine of x (in radians)'},
'atan': {'value': math.atan,
'help': 'atan(x): inverse trigonometric tangent of x (in radians)'},
'atanh': {'value': math.atanh,
'help': 'atanh(x): inverse hyperbolic tangent of x (in radians)'},
'base': {'value': lambda x=None: self._set_base(x),
'help': 'base(x): set output base to x (2, 8, 10 or 16 allowed)'},
'cbrt': {'value': lambda x: x ** (1. / 3),
'help': 'cbrt(x): cube root of x'},
'ceil': {'value': math.ceil,
'help': 'ceil(x): ceiling function of x'},
'cos': {'value': math.cos,
'help': 'cos(x): cosine of x (in radians)'},
'deg': {'value': math.degrees,
'help': 'deg(x): convert x from radians to degrees'},
'erf': {'value': math.erf,
'help': 'erf(x): error function of x'},
'exp': {'value': math.exp,
'help': 'exp(x): calculate e ** x'},
'fact': {'value': math.factorial,
'help': 'fact(x): factorial of x'},
'fmod': {'value': math.fmod,
'help': 'fmod(x, y): calculate floating point modulo of x % y'},
'floor': {'value': math.floor,
'help': 'floor(x): floor function of x'},
'gamma': {'value': math.gamma,
'help': 'gamma(x): gamma function of x'},
'help': {'value': lambda x: self._help(x),
'help': 'help(x): print help on x'},
'hyp': {'value': math.hypot,
                    'help': 'hyp(x, y): Euclidean norm sqrt(x*x + y*y)'},
'ln': {'value': lambda x: math.log(x, math.e),
'help': 'ln(x): natural logarithm of x'},
'log': {'value': math.log,
                    'help': 'log(x[, base]): logarithm of x to base (default: e)'},
'log2': {'value': lambda x: math.log(x, 2),
'help': 'log2(x): base 2 logarithm of x'},
'log10': {'value': math.log10,
'help': 'log10(x): base 10 logarithm of x'},
'nrt': {'value': lambda n, x: x ** (1. / n),
'help': 'nrt(x): nth root of x'},
'rad': {'value': math.radians,
'help': 'rad(x): convert x from degrees to radians'},
'round': {'value': round,
'help': 'round(x[, n]): round x to n decimal places (default: 0)'},
'sin': {'value': math.sin,
'help': 'sin(x): sine of x'},
'sqrt': {'value': math.sqrt,
'help': 'sqrt(x): square root of x'},
'tan': {'value': math.tan,
'help': 'tan(x): tangent of x'},
}
self.completer = None
if completer is not None:
self.completer = completer(
list([k + '(' for k in self.functions.keys()]) +
list(self.constants.keys()) +
list(self.variables.keys()) +
['ans']
)
def _help(self, args):
''' Print help for given topics. '''
if not len(args):
get_help = lambda x: '\n'.join(c['help'] for c in x.values())
print('jc version %s' % self.internal_variables['_version'])
print('\nAvailable constants:')
print(get_help(self.constants))
print('\nAvailable functions:')
print(get_help(self.functions))
for arg in args:
msg = self.constants.get(arg.id, self.functions.get(arg.id, {})).get('help')
if not msg:
raise ValueError('unknown help topic \'%s\'' % arg.id)
print(msg)
def _set_base(self, base):
if base is None:
print(self.internal_variables['_base'])
return
if base not in (2, 8, 10, 16):
raise ValueError('unsupported base \'%s\' (allowed: 2, 8, 10, 16)' % base)
self.internal_variables['_base'] = base
def _get_op(self, operator):
try:
found_op = self.operators[type(operator.op)]['op']
except KeyError:
raise SyntaxError('unknown operator \'%s\'' % operator.op)
return found_op
def _eval_unary_op(self, operator):
''' Evaluate unary operator (e.g. -1) '''
op = self._get_op(operator)
value = op(self._eval_op(operator.operand))
return value
def _eval_binary_op(self, operator):
''' Evaluate binary operator (e.g. 5 + 5). '''
op = self._get_op(operator)
value = op(self._eval_op(operator.left), self._eval_op(operator.right))
return value
def _eval_name(self, operator):
''' Evaluate name expansion / assignment (e.g. a = 10). '''
if operator.id in self.constants:
return self.constants[operator.id]['value']
elif operator.id in self.internal_variables:
print(self.internal_variables[operator.id])
elif operator.id in self.variables:
return self.variables[operator.id]
elif operator.id == 'ans':
return self.ans
elif operator.id in self.functions:
raise TypeError('cannot evaluate function label \'%s\'' % operator.id)
else:
raise NameError('variable \'%s\' is not defined' % operator.id)
def _eval_func(self, operator):
''' Evaluate function call (e.g. sqrt(16)). '''
if operator.func.id in self.functions:
func = self.functions[operator.func.id]['value']
if operator.func.id == 'help':
return func(operator.args)
value = func(*(self._eval_op(arg) for arg in operator.args))
return value
else:
raise NameError('function \'%s\' is not defined' % operator.func.id)
def _eval_op(self, op):
''' Evaluate expression operator. '''
if isinstance(op, ast.Num):
val = op.n
elif isinstance(op, ast.UnaryOp):
val = self._eval_unary_op(op)
elif isinstance(op, ast.BinOp):
val = self._eval_binary_op(op)
elif isinstance(op, ast.Name):
val = self._eval_name(op)
elif isinstance(op, ast.Call):
val = self._eval_func(op)
else:
raise SyntaxError('unknown operator \'%s\'' % op)
return val
def _validate_var(self, var):
''' Validate a variable assignment. '''
if var in self.constants:
raise NamespaceError('cannot assign to constant \'%s\'' % var)
elif var in self.internal_variables:
raise NamespaceError('cannot assign to internal variable \'%s\'' % var)
elif var in self.functions:
raise NamespaceError('cannot override existing function \'%s\'' % var)
elif var.startswith('_'):
raise NamespaceError('cannot assign to internal variable space (starting with _)')
elif var == 'ans':
raise NamespaceError('cannot assign to reserved variable \'ans\'')
elif var.isdigit():
raise TypeError('cannot assign variable to a numeric literal')
elif re.match(r'\w+', var):
return True
else:
raise NameError('invalid variable name \'%s\', only alphanumerics and underscores are supported' % var)
def _assign_var(self, var_name, expr_value):
''' Assign a value to a variable. '''
self._validate_var(var_name)
tree = ast.parse(expr_value, mode='eval')
value = self._eval_op(tree.body)
self.variables[var_name] = value
if self.completer is not None:
self.completer.add_content(var_name)
return value
def _convert_out(self, result):
''' Convert output result to defined base. '''
base = self.internal_variables['_base']
if base == 2:
return int(bin(int(result))[2:])
elif base == 8:
if sys.version_info >= (3, 0, 0):
return int(oct(int(result))[2:])
else:
if result == 0:
return int(oct(int(result)))
else:
return int(oct(int(result))[1:])
elif base == 10:
if result - int(result) == 0:
return int(result)
else:
return result
elif base == 16:
return hex(result)[2:]
else:
raise ValueError('unsupported base value \'%s\'' % base)
def _eval_expr(self, expr):
''' Evaluate a single expression. '''
if self.ans is not None and len(expr) and expr[0] in self.symbols:
expr = 'ans' + expr
tree = ast.parse(expr, mode='eval')
result = self._eval_op(tree.body)
if result is not None:
self.ans = result
result = self._convert_out(result)
return result
def eval(self, expr, results=None):
''' Build and evaluate the AST for an expression, return the result(s). '''
if results is None:
results = []
expr = expr.strip()
multi_expr = ';' in expr
if multi_expr:
expr, _, rest = expr.partition(';')
if expr.startswith('#') or len(expr) == 0:
return results
var_assign = re.search(r'(?P<var>^\w+\s*)\=(?P<val>\s*(.*?)$)', expr)
if var_assign is not None:
var_assign = var_assign.groupdict()
var_name = var_assign['var'].strip()
var_value = var_assign['val'].strip()
self._assign_var(var_name, var_value)
else:
result = self._eval_expr(expr)
if result is not None:
results.append(result)
if multi_expr:
self.eval(rest, results)
return results
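# A minimal usage sketch (an illustration, not part of the module); because of
# the relative imports it must be run as a module, e.g. `python -m jc.evaluator`.
if __name__ == '__main__':
    ev = Evaluator(completer=None)          # skip readline completion for the demo
    print(ev.eval('1 + 2 * 3'))             # [7]
    print(ev.eval('a = sqrt(16); a + 1'))   # [5]
    print(ev.eval('base(16); 255'))         # ['ff']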
``` |
{
"source": "JnnnLe/intro-to-python",
"score": 4
} |
#### File: masters-intro/pyworkshop/project.py
```python
greetings = ["hello", "world", "Jenn"]
for greeting in greetings:
print(f"{greeting}, World")
def add_number(x, y):
return x + y
add_number(1, 2)
things_to_do = ["pickup meds", "shower", "change bandage", "python", "brush Baby and pack dogs", "Whole Foods", "Jocelyn"]
``` |
{
"source": "jnnnn1999/Online-Thai-Video-Content-Search-System",
"score": 3
} |
#### File: djangojobboard/core/upload_video.py
```python
from django.shortcuts import render, redirect
from pytube import *
import os.path
# defining function
def youtube(link_path=None, url=None):
# checking whether request.method is post or not
if link_path:
# getting link from frontend
link = link_path
video = YouTube(link)
# setting video resolution
stream = video.streams.get_lowest_resolution()
# downloads video
path =stream.download('./media')
return path
``` |
{
"source": "jnnr/oemof",
"score": 2
} |
#### File: oemof/outputlib/processing.py
```python
import pandas as pd
import sys
from oemof.network import Node
from oemof.tools.helpers import flatten
from itertools import groupby
from pyomo.core.base.var import Var
def get_tuple(x):
"""
Get oemof tuple within iterable or create it.
Tuples from Pyomo are of type `(n, n, int)`, `(n, n)` and `(n, int)`.
For single nodes `n` a tuple with one object `(n,)` is created.
"""
for i in x:
if isinstance(i, tuple):
return i
elif issubclass(type(i), Node):
return i,
def get_timestep(x):
"""
Get the timestep from oemof tuples.
The timestep from tuples `(n, n, int)`, `(n, n)`, `(n, int)` and (n,)
is fetched as the last element. For time-independent data (scalars)
zero ist returned.
"""
if all(issubclass(type(n), Node) for n in x):
return 0
else:
return x[-1]
def remove_timestep(x):
"""
Remove the timestep from oemof tuples.
The timestep is removed from tuples of type `(n, n, int)` and `(n, int)`.
"""
if all(issubclass(type(n), Node) for n in x):
return x
else:
return x[:-1]
def create_dataframe(om):
"""
Create a result dataframe with all optimization data.
Results from Pyomo are written into pandas DataFrame where separate columns
are created for the variable index e.g. for tuples of the flows and
components or the timesteps.
"""
# get all pyomo variables including their block
block_vars = []
for bv in om.component_data_objects(Var):
block_vars.append(bv.parent_component())
block_vars = list(set(block_vars))
# write them into a dict with tuples as keys
var_dict = {(str(bv).split('.')[0], str(bv).split('.')[-1], i): bv[i].value
for bv in block_vars for i in getattr(bv, '_index')}
# use this to create a pandas dataframe
df = pd.DataFrame(list(var_dict.items()), columns=['pyomo_tuple', 'value'])
df['variable_name'] = df['pyomo_tuple'].str[1]
# adapt the dataframe by separating tuple data into columns depending
# on which dimension the variable/parameter has (scalar/sequence).
# columns for the oemof tuple and timestep are created
df['oemof_tuple'] = df['pyomo_tuple'].map(get_tuple)
df['timestep'] = df['oemof_tuple'].map(get_timestep)
df['oemof_tuple'] = df['oemof_tuple'].map(remove_timestep)
# order the data by oemof tuple and timestep
df = df.sort_values(['oemof_tuple', 'timestep'], ascending=[True, True])
# drop empty decision variables
df = df.dropna(subset=['value'])
return df
def results(om):
"""
Create a result dictionary from the result DataFrame.
Results from Pyomo are written into a dictionary of pandas objects where
a Series holds all scalar values and a dataframe all sequences for nodes
and flows.
The dictionary is keyed by the nodes e.g. `results[idx]['scalars']`
and flows e.g. `results[n, n]['sequences']`.
"""
df = create_dataframe(om)
# create a dict of dataframes keyed by oemof tuples
df_dict = {k if len(k) > 1 else (k[0], None):
v[['timestep', 'variable_name', 'value']]
for k, v in df.groupby('oemof_tuple')}
# create final result dictionary by splitting up the dataframes in the
# dataframe dict into a series for scalar data and dataframe for sequences
result = {}
for k in df_dict:
df_dict[k].set_index('timestep', inplace=True)
df_dict[k] = df_dict[k].pivot(columns='variable_name', values='value')
try:
df_dict[k].index = om.es.timeindex
except ValueError as e:
msg = ("\nFlow: {0}-{1}. This could be caused by NaN-values in"
" your input data.")
raise type(e)(str(e) + msg.format(k[0].label, k[1].label)
).with_traceback(sys.exc_info()[2])
try:
condition = df_dict[k].isnull().any()
scalars = df_dict[k].loc[:, condition].dropna().iloc[0]
sequences = df_dict[k].loc[:, ~condition]
result[k] = {'scalars': scalars, 'sequences': sequences}
except IndexError:
error_message = ('Cannot access index on result data. ' +
'Did the optimization terminate' +
' without errors?')
raise IndexError(error_message)
# add dual variables for bus constraints
if om.dual is not None:
grouped = groupby(sorted(om.Bus.balance.iterkeys()), lambda p: p[0])
for bus, timesteps in grouped:
duals = [om.dual[om.Bus.balance[bus, t]] for _, t in timesteps]
df = pd.DataFrame({'duals': duals}, index=om.es.timeindex)
if (bus, None) not in result.keys():
result[(bus, None)] = {
'sequences': df, 'scalars': pd.Series()}
else:
result[(bus, None)]['sequences']['duals'] = duals
return result
def convert_keys_to_strings(result, keep_none_type=False):
"""
Convert the dictionary keys to strings.
All (tuple) keys of the result object e.g. results[(pp1, bus1)] are
converted into strings that represent the object labels
e.g. results[('pp1','bus1')].
"""
if keep_none_type:
converted = {
tuple([str(e) if e is not None else None for e in k])
if isinstance(k, tuple)
else str(k) if k is not None else None: v
for k, v in result.items()
}
else:
converted = {
tuple(map(str, k))
if isinstance(k, tuple)
else str(k): v
for k, v in result.items()
}
return converted
def meta_results(om, undefined=False):
"""
Fetch some meta data from the Solver. Feel free to add more keys.
Valid keys of the resulting dictionary are: 'objective', 'problem',
'solver'.
om : oemof.solph.Model
A solved Model.
undefined : bool
By default (False) only defined keys can be found in the dictionary.
Set to True to get also the undefined keys.
Returns
-------
dict
"""
meta_res = {'objective': om.objective()}
for k1 in ['Problem', 'Solver']:
k1 = k1.lower()
meta_res[k1] = {}
for k2, v2 in om.es.results[k1][0].items():
try:
if str(om.es.results[k1][0][k2]) == '<undefined>':
if undefined:
meta_res[k1][k2] = str(
om.es.results[k1][0][k2])
else:
meta_res[k1][k2] = om.es.results[k1][0][k2]
except TypeError:
if undefined:
msg = "Cannot fetch meta results of type {0}"
meta_res[k1][k2] = msg.format(
type(om.es.results[k1][0][k2]))
return meta_res
def __separate_attrs(system, get_flows=False, exclude_none=True):
"""
Create a dictionary with flow scalars and series.
The dictionary is structured with flows as tuples and nested dictionaries
holding the scalars and series e.g.
{(node1, node2): {'scalars': {'attr1': scalar, 'attr2': 'text'},
'sequences': {'attr1': iterable, 'attr2': iterable}}}
om : A solved oemof.solph.Model.
Returns
-------
dict
"""
def detect_scalars_and_sequences(com):
com_data = {'scalars': {}, 'sequences': {}}
exclusions = ('__', '_', 'registry', 'inputs', 'outputs',
'register',
'Label', 'from_object', 'input', 'output',
'constraint_group')
attrs = [i for i in dir(com)
if not (callable(i) or i.startswith(exclusions))]
for a in attrs:
attr_value = getattr(com, a)
# Iterate trough investment and add scalars and sequences with
# "investment" prefix to component data:
if attr_value.__class__.__name__ == 'Investment':
invest_data = detect_scalars_and_sequences(attr_value)
com_data['scalars'].update(
{
'investment_' + str(k): v
for k, v in invest_data['scalars'].items()
}
)
com_data['sequences'].update(
{
'investment_' + str(k): v
for k, v in invest_data['sequences'].items()
}
)
continue
if isinstance(attr_value, str):
com_data['scalars'][a] = attr_value
continue
# If the label is a tuple it is iterable, therefore it should be
# converted to a string. Otherwise it will be a sequence.
if a == 'label':
attr_value = str(attr_value)
# check if attribute is iterable
# see: https://stackoverflow.com/questions/1952464/
# in-python-how-do-i-determine-if-an-object-is-iterable
try:
_ = (e for e in attr_value)
com_data['sequences'][a] = attr_value
except TypeError:
com_data['scalars'][a] = attr_value
com_data['sequences'] = flatten(com_data['sequences'])
move_undetected_scalars(com_data)
if exclude_none:
remove_nones(com_data)
com_data = {
'scalars': pd.Series(com_data['scalars']),
'sequences': pd.DataFrame(com_data['sequences'])
}
return com_data
def move_undetected_scalars(com):
for ckey, value in list(com['sequences'].items()):
if isinstance(value, str):
com['scalars'][ckey] = value
del com['sequences'][ckey]
continue
try:
_ = (e for e in value)
except TypeError:
com['scalars'][ckey] = value
del com['sequences'][ckey]
else:
try:
if not value.default_changed:
com['scalars'][ckey] = value.default
del com['sequences'][ckey]
except AttributeError:
pass
def remove_nones(com):
for ckey, value in list(com['scalars'].items()):
if value is None:
del com['scalars'][ckey]
for ckey, value in list(com['sequences'].items()):
if (
len(value) == 0 or
value[0] is None
):
del com['sequences'][ckey]
# Check if system is es or om:
if system.__class__.__name__ == 'EnergySystem':
components = system.flows() if get_flows else system.nodes
else:
components = system.flows if get_flows else system.es.nodes
data = {}
for com_key in components:
component = components[com_key] if get_flows else com_key
component_data = detect_scalars_and_sequences(component)
comkey = com_key if get_flows else (com_key, None)
data[comkey] = component_data
return data
def parameter_as_dict(system, exclude_none=True):
"""
Create a result dictionary containing node parameters.
Results are written into a dictionary of pandas objects where
a Series holds all scalar values and a dataframe all sequences for nodes
and flows.
The dictionary is keyed by flows (n, n) and nodes (n, None), e.g.
`parameter[(n, n)]['sequences']` or `parameter[(n, n)]['scalars']`.
Parameters
----------
system: energy_system.EnergySystem
A populated energy system.
exclude_none: bool
If True, all scalars and sequences containing None values are excluded
Returns
-------
dict: Parameters for all nodes and flows
"""
flow_data = __separate_attrs(system, True, exclude_none)
node_data = __separate_attrs(system, False, exclude_none)
flow_data.update(node_data)
return flow_data
``` |
{
"source": "jnnr/oemof-tabular",
"score": 3
} |
#### File: tabular/tools/__init__.py
```python
import types
class HSN(types.SimpleNamespace):
""" A hashable variant of `types.Simplenamespace`.
By making it hashable, we can use the instances as dictionary keys, which
is necessary, as this is the default type for flows.
"""
def __hash__(self):
return id(self)
def raisestatement(exception, message=""):
""" A version of `raise` that can be used as a statement.
"""
if message:
raise exception(message)
else:
raise exception()
def remap(mapping, renamings, selection):
""" Change `mapping`'s keys according to the `selection` in `renamings`.
The `renaming` found under `selection` in `renamings` is used to rename the
keys found in `mapping`. I.e., return a copy of `mapping` with every `key`
of `mapping` that is also found in `renaming` replaced with
`renaming[key]`.
If key doesn't have a renaming, it's returned as is. If `selection` doesn't
appear as a key in `renamings`, `mapping` is returned unchanged.
Example
-------
>>> renamings = {'R1': {'zero': 'nada'}, 'R2': {'foo': 'bar'}}
>>> mapping = {'zero': 0, 'foo': 'foobar'}
>>> remap(mapping, renamings, 'R1') == {'nada': 0, 'foo': 'foobar'}
True
>>> remap(mapping, renamings, 'R2') == {'zero': 0, 'bar': 'foobar'}
True
As a special case, if `selection` is a `class`, not only `selection` is
considered to select a renaming, but the classes in `selection`'s `mro` are
considered too. The first class in `selection`'s `mro` which is also found
to be a key in `renamings` is used to determine which renaming to use. The
search starts at `selection`.
Parameters
----------
mapping: `Mapping`
The `Mapping` whose keys should be renamed.
renamings: `Mapping` of `Mappings <collections.abc.Mapping>`
selection: `Hashable`
Key specifying which entry in `renamings` is used to determine the new
keys in the copy of `mapping`. If `selection` is a `class`, the first
entry of `selection`'s `mro` which is found in `renamings` is used to
determine the new keys.
"""
mro = getattr(selection, "mro", lambda: [selection])
for c in mro():
if c in renamings:
break
return {renamings.get(c, {}).get(k, k): v for k, v in mapping.items()}
```
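A small usage sketch for the helpers above (not part of the original module). It assumes the file is importable as `oemof.tabular.tools`, which follows from the path shown but is not verified here; the classes and keys are purely illustrative.

```python
# Illustrative only: the import path is assumed from the file location above.
from oemof.tabular.tools import HSN, remap

node = HSN(label="my-node")      # hashable namespace -> usable as a dict key
flows = {node: 42}
assert flows[node] == 42

class Base:
    pass

class Derived(Base):
    pass

# Because `Derived` is a class, remap walks its MRO: `Derived` has no entry,
# `Base` does, so 'p_nom' is renamed while unknown keys stay untouched.
renamings = {Base: {"p_nom": "capacity"}}
mapping = {"p_nom": 100, "marginal_cost": 5}
print(remap(mapping, renamings, Derived))
# {'capacity': 100, 'marginal_cost': 5}
```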
#### File: tabular/tools/postprocessing.py
```python
import os
import numpy as np
import pandas as pd
from oemof.network import Bus, Sink
from oemof.outputlib import views
from oemof.solph.components import GenericStorage
from oemof.tabular import facades
def component_results(es, results, select="sequences"):
""" Aggregated by component type
"""
c = {}
if not hasattr(es, "typemap"):
setattr(es, "typemap", facades.TYPEMAP)
for k, v in es.typemap.items():
if type(k) == str:
if select == "sequences":
_seq_by_type = [
views.node(results, n, multiindex=True).get("sequences")
for n in es.nodes
if isinstance(n, v) and not isinstance(n, Bus)
]
# check if dataframes / series have been returned
if any(
[
isinstance(i, (pd.DataFrame, pd.Series))
for i in _seq_by_type
]
):
seq_by_type = pd.concat(_seq_by_type, axis=1)
c[str(k)] = seq_by_type
if select == "scalars":
_sca_by_type = [
views.node(results, n, multiindex=True).get("scalars")
for n in es.nodes
if isinstance(n, v) and not isinstance(n, Bus)
]
if [x for x in _sca_by_type if x is not None]:
_sca_by_type = pd.concat(_sca_by_type)
c[str(k)] = _sca_by_type
return c
def bus_results(es, results, select="sequences", concat=False):
""" Aggregated for every bus of the energy system
"""
br = {}
buses = [b for b in es.nodes if isinstance(b, Bus)]
for b in buses:
if select == "sequences":
bus_sequences = pd.concat(
[
views.node(results, b, multiindex=True).get(
"sequences", pd.DataFrame()
)
],
axis=1,
)
br[str(b)] = bus_sequences
if select == "scalars":
br[str(b)] = views.node(results, b, multiindex=True).get("scalars")
if concat:
if select == "sequences":
axis = 1
else:
axis = 0
br = pd.concat([b for b in br.values()], axis=axis)
return br
def supply_results(
types=[
"dispatchable",
"volatile",
"conversion",
"backpressure",
"extraction",
"storage",
"reservoir",
],
bus=None,
results=None,
es=None,
):
"""
"""
if not hasattr(es, "typemap"):
setattr(es, "typemap", facades.TYPEMAP)
selection = pd.DataFrame()
for t in types:
if (
issubclass(es.typemap[t], GenericStorage)
and es.typemap[t] is not facades.Reservoir
):
df = views.net_storage_flow(results, node_type=es.typemap[t])
if df is not None:
selection = pd.concat([selection, df], axis=1)
else:
df = views.node_output_by_type(results, node_type=es.typemap[t])
if df is not None:
selection = pd.concat([selection, df], axis=1)
selection = selection.loc[
:, (slice(None), [es.groups[b] for b in bus], ["flow", "net_flow"])
]
return selection
def demand_results(types=["load"], bus=None, results=None, es=None):
"""
"""
if not hasattr(es, "typemap"):
setattr(es, "typemap", facades.TYPEMAP)
selection = pd.DataFrame()
for t in types:
selection = pd.concat(
[
selection,
views.node_input_by_type(results, node_type=es.typemap[t]),
],
axis=1,
)
selection = selection.loc[
:, ([es.groups[b] for b in bus], slice(None), ["flow"])
]
return selection
def write_results(
m, output_path, raw=False, summary=True, scalars=True, **kwargs
):
"""
"""
def save(df, name, path=output_path):
""" Helper for writing csv files
"""
df.to_csv(os.path.join(path, name + ".csv"))
buses = [b.label for b in m.es.nodes if isinstance(b, Bus)]
link_results = component_results(m.es, m.results).get("link")
if link_results is not None and raw:
save(link_results, "links-oemof")
imports = pd.DataFrame()
for b in buses:
supply = supply_results(results=m.results, es=m.es, bus=[b], **kwargs)
supply.columns = supply.columns.droplevel([1, 2])
demand = demand_results(results=m.results, es=m.es, bus=[b])
excess = component_results(m.es, m.results, select="sequences").get(
"excess"
)
if link_results is not None and m.es.groups[b] in list(
link_results.columns.levels[0]
):
ex = link_results.loc[
:, (m.es.groups[b], slice(None), "flow")
].sum(axis=1)
im = link_results.loc[
:, (slice(None), m.es.groups[b], "flow")
].sum(axis=1)
net_import = im - ex
net_import.name = m.es.groups[b]
imports = pd.concat([imports, net_import], axis=1)
supply["import"] = net_import
if m.es.groups[b] in demand.columns:
_demand = demand.loc[:, (m.es.groups[b], slice(None), "flow")]
_demand.columns = _demand.columns.droplevel([0, 2])
supply = pd.concat([supply, _demand], axis=1)
if excess is not None:
if m.es.groups[b] in excess.columns:
_excess = excess.loc[:, (m.es.groups[b], slice(None), "flow")]
_excess.columns = _excess.columns.droplevel([0, 2])
supply = pd.concat([supply, _excess], axis=1)
save(supply, b)
save(imports, "import")
try:
all = bus_results(m.es, m.results, select="scalars", concat=True)
all.name = "value"
endogenous = all.reset_index()
endogenous["tech"] = [
getattr(t, "tech", np.nan) for t in all.index.get_level_values(0)
]
endogenous["carrier"] = [
getattr(t, "carrier", np.nan)
for t in all.index.get_level_values(0)
]
endogenous.set_index(
["from", "to", "type", "tech", "carrier"], inplace=True
)
except ValueError:
endogenous = pd.DataFrame()
d = dict()
for node in m.es.nodes:
if not isinstance(node, (Bus, Sink, facades.Shortage)):
if getattr(node, "capacity", None) is not None:
if isinstance(node, facades.TYPEMAP["link"]):
pass
else:
key = (
node,
[n for n in node.outputs.keys()][0],
"capacity",
node.tech, # tech & carrier are oemof-tabular specific
node.carrier,
) # for oemof logic
d[key] = {"value": node.capacity}
exogenous = pd.DataFrame.from_dict(d).T # .dropna()
if not exogenous.empty:
exogenous.index = exogenous.index.set_names(
["from", "to", "type", "tech", "carrier"]
)
    capacities = pd.concat([endogenous, exogenous])
save(capacities, "capacities")
bresults = bus_results(m.es, m.results, concat=True)
if "duals" in bresults.columns.levels[2]:
duals = bresults.xs("duals", level=2, axis=1)
duals.columns = duals.columns.droplevel(1)
duals = (duals.T / m.objective_weighting).T
save(duals, "shadow_prices")
# check if storages exist in energy system nodes
if [n for n in m.es.nodes if isinstance(n, GenericStorage)]:
filling_levels = views.node_weight_by_type(m.results, GenericStorage)
filling_levels.columns = filling_levels.columns.droplevel(1)
save(filling_levels, "filling_levels")
```
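A hypothetical wrapper showing how the postprocessing helpers above might be combined; `export_scenario` is not part of the original repository, and it presupposes an already solved `oemof.solph.Model` with populated `es` and `results` attributes. The bus label and output directory are placeholders.

```python
# Sketch only: `m` must be a solved oemof.solph.Model; names are assumptions.
import os
from oemof.tabular.tools.postprocessing import supply_results, write_results

def export_scenario(m, output_path="results/my-scenario", bus="electricity"):
    """Write all csv result files for a solved model and return one bus' supply."""
    os.makedirs(output_path, exist_ok=True)
    # One csv per bus plus capacities, shadow_prices and filling_levels:
    write_results(m, output_path, raw=True)
    # Return the supply at a single bus for quick inspection:
    return supply_results(results=m.results, es=m.es, bus=[bus])
```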
#### File: oemof-tabular/tests/test_examples.py
```python
import pkg_resources as pkg
from oemof.energy_system import EnergySystem as ES
from oemof.tabular.facades import TYPEMAP
# The import below is only used to monkey patch `EnergySystem`.
# Hence the `noqa` because otherwise, style checkers would complain about an
# unused import.
import oemof.tabular.datapackage # noqa: F401
def test_example_datapackage_readability():
""" The example datapackages can be read and loaded.
"""
systems = []
for example in pkg.resource_listdir(
"oemof.tabular", "examples/datapackages"
):
print("Runnig reading datapackage example {} ...".format(example))
systems.append(
ES.from_datapackage(
pkg.resource_filename(
"oemof.tabular",
"examples/datapackages/{}/datapackage.json".format(
example
),
),
typemap=TYPEMAP,
)
)
for system in systems:
assert type(system) is ES
def test_scripting_examples():
"""
"""
exclude = ["plotting.py", "__pycache__"]
for example in pkg.resource_listdir("oemof.tabular", "examples/scripting"):
if not example.endswith(".ipynb") and example not in exclude:
print("Runnig scripting example {} ...".format(example))
exec(
open(
pkg.resource_filename(
"oemof.tabular",
"examples/scripting/{}".format(example),
)
).read()
)
``` |
{
"source": "jnnr/oemof",
"score": 3
} |
#### File: oemof/tests/solph_tests.py
```python
from nose.tools import ok_
from oemof.energy_system import EnergySystem as EnSys
from oemof.network import Node
from oemof.solph.blocks import InvestmentFlow as InvFlow
from oemof.solph import Investment
import oemof.solph as solph
class TestsGrouping:
def setup(self):
self.es = EnSys(groupings=solph.GROUPINGS)
Node.registry = self.es
def test_investment_flow_grouping(self):
""" Flows of investment sink should be grouped.
The constraint tests uncovered a spurious error where the flows of an
        investment `Sink` were not put into the `InvestmentFlow` group,
although the corresponding grouping was present in the energy system.
        The error occurred in the case where the investment `Sink` was not
instantiated directly after the `Bus` it is connected to.
This test recreates this error scenario and makes sure that the
`InvestmentFlow` group is not empty.
"""
b = solph.Bus(label='Bus')
solph.Source(label='Source', outputs={b: solph.Flow(
actual_value=[12, 16, 14], nominal_value=1000000,
fixed=True)})
solph.Sink(label='Sink', inputs={b: solph.Flow(
summed_max=2.3, variable_costs=25, max=0.8,
investment=Investment(ep_costs=500, maximum=10e5))})
ok_(self.es.groups.get(InvFlow),
("Expected InvestmentFlow group to be nonempty.\n" +
"Got: {}").format(self.es.groups.get(InvFlow)))
``` |
{
"source": "jnns/pytest-django-factories",
"score": 2
} |
#### File: jnns/pytest-django-factories/conftest.py
```python
from itertools import count
import django
import pytest
from django.conf import settings
from django.db import models
from django_factories import Factory, SubFactory
settings.configure(DEBUG=False)
django.setup()
class Author(models.Model):
name = models.CharField(max_length=255)
age = models.PositiveSmallIntegerField()
class Meta:
app_label = "dummy_app"
def __str__(self):
return self.name
class Book(models.Model):
author = models.ForeignKey(Author, on_delete=models.CASCADE)
title = models.CharField(max_length=255)
class Meta:
app_label = "dummy_app"
class Chapter(models.Model):
book = models.ForeignKey(Book, on_delete=models.CASCADE)
title = models.CharField(max_length=255, default="Chapter 1")
class Meta:
app_label = "dummy_app"
class ModelA(models.Model):
class Meta:
app_label = "dummy_app"
class ModelB(models.Model):
model_a = models.ForeignKey(ModelA, on_delete=models.CASCADE)
class Meta:
app_label = "dummy_app"
@pytest.fixture
def author_factory(request):
factory = Factory(Author, name="Default Author")
return factory(request)
@pytest.fixture
def book_factory(request):
return Factory(Book, title="Default Title")(request)
@pytest.fixture
def watterson_author_factory(request):
return Factory(Author, name="<NAME>")(request)
@pytest.fixture
def bill_watterson(author_factory):
return author_factory(name="<NAME>")
@pytest.fixture
def watterson_book_factory(request, watterson_author_factory):
"""This factory builds objects with "<NAME>" as default author."""
return Factory(Book, author=SubFactory("watterson_author_factory"))(request)
@pytest.fixture
def broken_factory(request):
return Factory(Book, author=SubFactory("bill_watterson"))(request)
@pytest.fixture
def chapter_factory(request):
return Factory(Chapter)(request)
@pytest.fixture
def model_b_factory(request):
"""Please note that a factory function for ModelA is missing intentionally."""
return Factory(ModelB)(request)
@pytest.fixture
def enumerated_book_factory(request):
counter = count(1)
class BookFactory(Factory):
model = Book
def create(self, **kwargs):
return super().create(**kwargs, title=f"Book {next(counter)}")
return BookFactory()(request)
``` |
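A hypothetical companion test module showing how the fixtures defined in the conftest above could be consumed. The test names are invented; the expected defaults mirror the `Factory(...)` arguments in that file.

```python
# Hypothetical tests (not in the original repo); fixture names and default
# values come from the conftest.py shown above.
def test_author_factory_uses_default_name(author_factory):
    author = author_factory()
    assert author.name == "Default Author"

def test_book_factory_allows_overrides(book_factory):
    book = book_factory(title="Calvin and Hobbes")
    assert book.title == "Calvin and Hobbes"
```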
{
"source": "jnobre/lxmls-toolkit-2017",
"score": 3
} |
#### File: lxmls/classifiers/multinomial_naive_bayes.py
```python
import numpy as np
import scipy as scipy
import lxmls.classifiers.linear_classifier as lc
import sys
from lxmls.distributions.gaussian import *
class MultinomialNaiveBayes(lc.LinearClassifier):
def __init__(self, xtype="gaussian"):
lc.LinearClassifier.__init__(self)
self.trained = False
self.likelihood = 0
self.prior = 0
self.smooth = True
self.smooth_param = 1
def train(self, x, y):
# n_docs = no. of documents
# n_words = no. of unique words
n_docs, n_words = x.shape
# classes = a list of possible classes
classes = np.unique(y)
# n_classes = no. of classes
n_classes = np.unique(y).shape[0]
# initialization of the prior and likelihood variables
prior = np.zeros(n_classes)
likelihood = np.zeros((n_words, n_classes))
# TODO: This is where you have to write your code!
# You need to compute the values of the prior and likelihood parameters
# and place them in the variables called "prior" and "likelihood".
# Examples:
# prior[0] is the prior probability of a document being of class 0
# likelihood[4, 0] is the likelihood of the fifth(*) feature being
# active, given that the document is of class 0
# (*) recall that Python starts indices at 0, so an index of 4
# corresponds to the fifth feature!
# ----------
# Solution to Exercise 1.1
        for i in range(n_classes):
docs_in_class, _ = np.nonzero(y == classes[i]) # docs_in_class = indices of documents in class i
prior[i] = 1.0 * len(docs_in_class) / n_docs # prior = fraction of documents with this class
# word_count_in_class = count of word occurrences in documents of class i
word_count_in_class = x[docs_in_class, :].sum(0)
total_words_in_class = word_count_in_class.sum() # total_words_in_class = total number of words in documents of class i
if not self.smooth:
# likelihood = count of occurrences of a word in a class
likelihood[:, i] = word_count_in_class / total_words_in_class
else:
likelihood[:, i] = (word_count_in_class+self.smooth_param) / (total_words_in_class + self.smooth_param*n_words)
# End solution to Exercise 1.1
# ----------
params = np.zeros((n_words+1, n_classes))
        for i in range(n_classes):
params[0, i] = np.log(prior[i])
params[1:, i] = np.nan_to_num(np.log(likelihood[:, i]))
self.likelihood = likelihood
self.prior = prior
self.trained = True
return params
``` |
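The estimation done in `train` above can be illustrated with a tiny standalone NumPy snippet that is independent of the lxmls classes; the toy count matrix below is made up for demonstration.

```python
# Standalone illustration of the prior and Laplace-smoothed likelihood
# estimates computed in train() above; the data is a made-up toy example.
import numpy as np

x = np.array([[2, 0, 1],   # word counts per document
              [1, 1, 0],
              [0, 3, 1]])
y = np.array([0, 0, 1])    # class label per document
n_docs, n_words = x.shape
classes = np.unique(y)
smooth = 1.0

prior = np.array([np.mean(y == c) for c in classes])
likelihood = np.zeros((n_words, len(classes)))
for i, c in enumerate(classes):
    counts = x[y == c].sum(axis=0)
    likelihood[:, i] = (counts + smooth) / (counts.sum() + smooth * n_words)

print(prior)        # class priors: [2/3, 1/3]
print(likelihood)   # each column sums to 1
```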
{
"source": "jnoddell/LC",
"score": 4
} |
#### File: LC/E/101.py
```python
class Solution(object):
def isSymmetric(self, root):
"""
:type root: TreeNode
:rtype: bool
"""
def dfs(a, b):
if not a and not b:
return True
if not a or not b:
return False
if a.val != b.val:
return False
return dfs(a.left, b.right) and dfs(a.right, b.left)
return dfs(root.left, root.right)
```
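A quick local check for the solution above; LeetCode normally supplies `TreeNode`, so a minimal stand-in is defined here and the `Solution` class from 101.py is assumed to be in scope.

```python
# Minimal TreeNode stand-in for local testing (LeetCode provides its own).
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

#      1
#     / \
#    2   2
#   / \ / \
#  3  4 4  3   -> symmetric
root = TreeNode(1,
                TreeNode(2, TreeNode(3), TreeNode(4)),
                TreeNode(2, TreeNode(4), TreeNode(3)))
print(Solution().isSymmetric(root))  # True
```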
#### File: LC/E/257.py
```python
class Solution(object):
def binaryTreePaths(self, root):
"""
:type root: TreeNode
:rtype: List[str]
"""
rtlPaths = list()
def dfs(node, path):
if node is None:
return
if path:
newPath = path + "->" + str(node.val)
else:
newPath = str(node.val)
if node.left is None and node.right is None:
rtlPaths.append(newPath)
else:
dfs(node.left, newPath)
dfs(node.right, newPath)
dfs(root, "")
return rtlPaths
```
#### File: LC/H/30.py
```python
class Solution(object):
def findSubstring(self, s, words):
"""
:type s: str
:type words: List[str]
:rtype: List[int]
"""
        ret = []
wl = len(words[0])
wordsused = []
for i in range (0, len(s) - len(words) * wl + 1):
test = s[i:i+wl]
if test in words:
for j in range(0, len(words) * wl, wl):
curr = s[i+j:i+j+wl]
if curr in words:
words.remove(curr)
                        wordsused.append(curr)
else:
break
if len(words) == 0:
ret.append(i)
words = wordsused
wordsused = []
else:
for w in wordsused:
words.append(w)
wordsused = []
        return ret
```
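An example run for the substring-concatenation solution above (assuming the `Solution` class from 30.py is in scope); note that the method removes and re-appends entries of `words` while scanning.

```python
sol = Solution()
print(sol.findSubstring("barfoothefoobarman", ["foo", "bar"]))  # [0, 9]
```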
#### File: LC/M/12.py
```python
class Solution(object):
def intToRoman(self, num):
"""
:type num: int
:rtype: str
"""
exchange = [['I', 1], ['V', 5], ['X', 10], ['L', 50], ['C', 100], ['D', 500], ['M', 1000]]
exchange = exchange[::-1]
roman = ""
index = 0
while num > 0:
curr_tuple = exchange[index]
curr_symbol = curr_tuple[0]
curr_value = curr_tuple[1]
increment_index = True
if num - curr_value == 0:
return (roman + curr_symbol)
elif num - curr_value > 0:
num -= curr_value
roman += curr_symbol
increment_index = False
elif increment_index and num - 900 >= 0:
num -= 900
roman += "CM"
elif index > 0 and increment_index and num - 400 >= 0:
num -= 400
roman += "CD"
elif index > 1 and increment_index and num - 90 >= 0:
num -= 90
roman += "XC"
elif index > 2 and increment_index and num - 40 >= 0:
num -= 40
roman += "XL"
elif index > 3 and increment_index and num - 9 >= 0:
num -= 9
roman += "IX"
elif index > 4 and increment_index and num - 4 >= 0:
num -= 4
roman += "IV"
if num == 0:
return roman
elif increment_index:
index += 1
``` |
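A short sanity check for `intToRoman` above (assuming that `Solution` class is in scope).

```python
sol = Solution()
print(sol.intToRoman(58))    # LVIII
print(sol.intToRoman(1994))  # MCMXCIV
```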
{
"source": "jnoelvictorino/rasa",
"score": 2
} |
#### File: featurizers/sparse_featurizer/regex_featurizer.py
```python
import logging
import os
import re
from typing import Any, Dict, List, Optional, Text, Type, Tuple
import numpy as np
import scipy.sparse
import rasa.shared.utils.io
import rasa.utils.io
import rasa.nlu.utils.pattern_utils as pattern_utils
from rasa.nlu import utils
from rasa.nlu.components import Component
from rasa.nlu.config import RasaNLUModelConfig
from rasa.nlu.constants import TOKENS_NAMES, FEATURIZER_CLASS_ALIAS
from rasa.shared.nlu.constants import (
TEXT,
RESPONSE,
FEATURE_TYPE_SENTENCE,
FEATURE_TYPE_SEQUENCE,
ACTION_TEXT,
)
from rasa.nlu.featurizers.featurizer import SparseFeaturizer
from rasa.shared.nlu.training_data.features import Features
from rasa.nlu.model import Metadata
from rasa.nlu.tokenizers.tokenizer import Tokenizer
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.training_data.message import Message
logger = logging.getLogger(__name__)
class RegexFeaturizer(SparseFeaturizer):
@classmethod
def required_components(cls) -> List[Type[Component]]:
return [Tokenizer]
defaults = {
# text will be processed with case sensitive as default
"case_sensitive": True,
# use lookup tables to generate features
"use_lookup_tables": True,
# use regexes to generate features
"use_regexes": True,
# use match word boundaries for lookup table
"use_word_boundaries": True,
}
def __init__(
self,
component_config: Optional[Dict[Text, Any]] = None,
known_patterns: Optional[List[Dict[Text, Text]]] = None,
) -> None:
"""Construct new features for regexes and lookup table using regex expressions."""
super().__init__(component_config)
self.known_patterns = known_patterns if known_patterns else []
self.case_sensitive = self.component_config["case_sensitive"]
def train(
self,
training_data: TrainingData,
config: Optional[RasaNLUModelConfig] = None,
**kwargs: Any,
) -> None:
self.known_patterns = pattern_utils.extract_patterns(
training_data,
use_lookup_tables=self.component_config["use_lookup_tables"],
use_regexes=self.component_config["use_regexes"],
use_word_boundaries=self.component_config["use_word_boundaries"],
)
for example in training_data.training_examples:
for attribute in [TEXT, RESPONSE, ACTION_TEXT]:
self._text_features_with_regex(example, attribute)
def process(self, message: Message, **kwargs: Any) -> None:
self._text_features_with_regex(message, TEXT)
def _text_features_with_regex(self, message: Message, attribute: Text) -> None:
if self.known_patterns:
sequence_features, sentence_features = self._features_for_patterns(
message, attribute
)
if sequence_features is not None:
final_sequence_features = Features(
sequence_features,
FEATURE_TYPE_SEQUENCE,
attribute,
self.component_config[FEATURIZER_CLASS_ALIAS],
)
message.add_features(final_sequence_features)
if sentence_features is not None:
final_sentence_features = Features(
sentence_features,
FEATURE_TYPE_SENTENCE,
attribute,
self.component_config[FEATURIZER_CLASS_ALIAS],
)
message.add_features(final_sentence_features)
def _features_for_patterns(
self, message: Message, attribute: Text
) -> Tuple[Optional[scipy.sparse.coo_matrix], Optional[scipy.sparse.coo_matrix]]:
"""Checks which known patterns match the message.
Given a sentence, returns a vector of {1,0} values indicating which
regexes did match. Furthermore, if the
message is tokenized, the function will mark all tokens with a dict
relating the name of the regex to whether it was matched."""
# Attribute not set (e.g. response not present)
if not message.get(attribute):
return None, None
tokens = message.get(TOKENS_NAMES[attribute], [])
if not tokens:
# nothing to featurize
return None, None
flags = 0 # default flag
if not self.case_sensitive:
flags = re.IGNORECASE
sequence_length = len(tokens)
sequence_features = np.zeros([sequence_length, len(self.known_patterns)])
sentence_features = np.zeros([1, len(self.known_patterns)])
for pattern_index, pattern in enumerate(self.known_patterns):
matches = re.finditer(pattern["pattern"], message.get(TEXT), flags=flags)
matches = list(matches)
for token_index, t in enumerate(tokens):
patterns = t.get("pattern", default={})
patterns[pattern["name"]] = False
for match in matches:
if t.start < match.end() and t.end > match.start():
patterns[pattern["name"]] = True
sequence_features[token_index][pattern_index] = 1.0
if attribute in [RESPONSE, TEXT]:
# sentence vector should contain all patterns
sentence_features[0][pattern_index] = 1.0
t.set("pattern", patterns)
return (
scipy.sparse.coo_matrix(sequence_features),
scipy.sparse.coo_matrix(sentence_features),
)
@classmethod
def load(
cls,
meta: Dict[Text, Any],
model_dir: Optional[Text] = None,
model_metadata: Optional[Metadata] = None,
cached_component: Optional["RegexFeaturizer"] = None,
**kwargs: Any,
) -> "RegexFeaturizer":
file_name = meta.get("file")
regex_file = os.path.join(model_dir, file_name)
if os.path.exists(regex_file):
known_patterns = rasa.shared.utils.io.read_json_file(regex_file)
return RegexFeaturizer(meta, known_patterns=known_patterns)
else:
return RegexFeaturizer(meta)
def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]]:
"""Persist this model into the passed directory.
Return the metadata necessary to load the model again."""
file_name = file_name + ".pkl"
regex_file = os.path.join(model_dir, file_name)
utils.write_json_to_file(regex_file, self.known_patterns, indent=4)
return {"file": file_name}
``` |
{
"source": "jnoennig/aws-account-bootstrap",
"score": 2
} |
#### File: aws-account-bootstrap/scripts/accountBootstrap.py
```python
import os
import logging
import time
import json
from collections import OrderedDict
import boto3
from botocore.exceptions import ClientError
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.INFO)
def assume_role(aws_account_number, role_name, region='us-west-2'):
"""
Assumes the provided role in each account and returns a GuardDuty client
:param aws_account_number: AWS Account Number
:param role_name: Role to assume in target account
:param aws_region: AWS Region for the Client call, not required for IAM calls
:return: GuardDuty client in the specified AWS Account and Region
Modified by <NAME>
"""
# Beginning the assume role process for account
sts_client = boto3.client('sts')
# Get the current partition
partition = sts_client.get_caller_identity()['Arn'].split(":")[1]
response = sts_client.assume_role(
RoleArn='arn:{}:iam::{}:role/{}'.format(
partition,
aws_account_number,
role_name
),
RoleSessionName='AccountBootstrap'
)
# Storing STS credentials
session = boto3.Session(
aws_access_key_id=response['Credentials']['AccessKeyId'],
aws_secret_access_key=response['Credentials']['SecretAccessKey'],
aws_session_token=response['Credentials']['SessionToken'],
region_name=region
)
LOGGER.info("Assumed session for {}.".format(
aws_account_number
))
return session
def get_enabled_regions(enabled_regions, aws_service):
"""
Gets all supported regions for a specified AWS service
If no regions are specified it returns all regions
:param enabled_regions: User specified regions
:param aws_service: AWS service you want to check
:return: list of regions
Modified by <NAME>
"""
session = boto3.session.Session()
region_results = []
if enabled_regions:
region_results = [str(item).lower().strip() for item in enabled_regions.split(',')]
LOGGER.info("Enabling in these regions: {}".format(region_results))
else:
region_results = session.get_available_regions(aws_service)
LOGGER.info("Enabling in all available regions {}".format(region_results))
return region_results
def get_member_email(member_account_id):
'''
Gets the member account email from the Organizations service
:param member_account_id: AWS member account ID
:return member_email: Member email address
'''
try:
orgs = boto3.client('organizations')
response = orgs.describe_account(AccountId=member_account_id)
member_email = response['Account']['Email']
return member_email
except ClientError as e:
LOGGER.error('Failed to get member account email address for account {0}: {1}'.format(member_account_id, e))
def create_alias(session, member_account_name, member_account_id):
'''
Creates an alias for an account
:param session: IAM session
:param member_account_name: AWS target account name
:param member_account_id: AWS target account id
:return: API response
'''
try:
# You can prepend or append a string to make the account_alias globally unique
account_alias = member_account_name
iam = session.client('iam')
response = iam.create_account_alias(
AccountAlias=account_alias
)
LOGGER.info('Set account {0} alias to: {1}'.format(member_account_id, account_alias))
return response
except ClientError as e:
LOGGER.error('Failed setting alias for account {0}: {1}'.format(member_account_id, e))
def update_iam_pwd_policy(session, member_account_id):
'''
Updates the accounts IAM password policy
:param session: IAM session
:param member_account_id: AWS target account id
:return: API response
'''
try:
iam = session.client('iam')
response = iam.update_account_password_policy(
MinimumPasswordLength=15,
RequireSymbols=True,
RequireNumbers=True,
RequireUppercaseCharacters=True,
RequireLowercaseCharacters=True,
AllowUsersToChangePassword=True,
MaxPasswordAge=365,
PasswordReusePrevention=24,
HardExpiry=True
)
LOGGER.info('Set account {0} password policy'.format(member_account_id))
return response
except ClientError as e:
LOGGER.error('Failed to set account {0} IAM password policy: {1}'.format(member_account_id, e))
def enable_s3_pub_block(session, member_account_id):
'''
Enables S3 public access block
:param session: IAM session
:param member_account_id: AWS target account id
:return: API response
'''
try:
s3 = session.client('s3control')
response = s3.put_public_access_block(
PublicAccessBlockConfiguration={
'BlockPublicAcls': True,
'IgnorePublicAcls': True,
'BlockPublicPolicy': True,
'RestrictPublicBuckets': True
},
AccountId=member_account_id
)
LOGGER.info('Set account {0} S3 public access block at account level'.format(member_account_id))
return response
except ClientError as e:
LOGGER.error('Failed setting S3 public access block for account {0}: {1}'.format(member_account_id, e))
def logging_bucket(session, region, member_account_name, member_account_id):
'''
Creates the S3 access logging bucket and returns the responses
:param session: IAM session
:param region: AWS region to deploy bucket
:param member_account_name: AWS target account name
:param member_account_id: AWS target account id
'''
response_dict = {}
# If bucket name needs to be more unique prepend or append string to bucket_name
bucket_name = member_account_name + '-s3accesslogs-' + region
s3 = session.client('s3')
try:
# Creating logging bucket
LOGGER.info('Creating bucket {} in region {}'.format(bucket_name, region))
if region == 'us-east-1':
s3 = session.client('s3')
create_response = s3.create_bucket(
Bucket=bucket_name
)
acl_response = s3.put_bucket_acl(
ACL='log-delivery-write',
Bucket=bucket_name
)
else:
create_response = s3.create_bucket(
ACL='log-delivery-write',
Bucket=bucket_name,
CreateBucketConfiguration={
'LocationConstraint': region
}
)
response_dict.update({'createBucket': create_response})
except ClientError as e:
LOGGER.error('Failed creating S3 logging bucket for account {0}: {1}'.format(member_account_id, e))
LOGGER.info('Created bucket {}'.format(bucket_name))
# Encrypting logging bucket
try:
LOGGER.info('Setting encryption on bucket {}'.format(bucket_name))
encrypt_response = s3.put_bucket_encryption(
Bucket=bucket_name,
ServerSideEncryptionConfiguration={
'Rules':[
{
'ApplyServerSideEncryptionByDefault':{
'SSEAlgorithm': 'AES256'
}
}
]
}
)
response_dict.update({'encryptBucket': encrypt_response})
except ClientError as e:
LOGGER.error('Failed creating S3 logging bucket for account {0}: {1}'.format(member_account_id, e))
# Versioning logging bucket
try:
LOGGER.info('Enabling bucket versioning on bucket {}'.format(bucket_name))
ver_response = s3.put_bucket_versioning(
Bucket=bucket_name,
VersioningConfiguration={
'Status':'Enabled'
}
)
response_dict.update({'versionBucket': ver_response})
except ClientError as e:
LOGGER.error('Failed enabling versioning on S3 logging bucket for account {0}: {1}'.format(member_account_id, e))
# Deny public access
try:
LOGGER.info('Denying public access on bucket {}'.format(bucket_name))
public_response = s3.put_public_access_block(
Bucket=bucket_name,
PublicAccessBlockConfiguration={
'BlockPublicAcls': True,
'IgnorePublicAcls': True,
'BlockPublicPolicy': True,
'RestrictPublicBuckets': True
}
)
response_dict.update({'publicBucket': public_response})
except ClientError as e:
LOGGER.error('Failed denying public access on S3 logging bucket for account {0}: {1}'.format(member_account_id, e))
# Bucket lifecycle configuration
try:
LOGGER.info('Enabling lifecycle configuration on bucket {}'.format(bucket_name))
lifecycle_response = s3.put_bucket_lifecycle_configuration(
Bucket=bucket_name,
LifecycleConfiguration={
'Rules': [
{
'Expiration': {
'Days': 365
},
'ID': 'cleanup',
'Prefix': '',
'Status': 'Enabled',
'NoncurrentVersionExpiration': {
'NoncurrentDays': 90
},
'AbortIncompleteMultipartUpload': {
'DaysAfterInitiation': 7
}
}
]
}
)
response_dict.update({'lifecycleBucket': lifecycle_response})
except ClientError as e:
LOGGER.error('Failed setting lifecycle configuration on S3 logging bucket for account {0}: {1}'.format(member_account_id, e))
return response_dict
def cf_stacksets(stackset_name, member_account_id, region_list):
'''
Updates a StackSet with a new name.
:param stackset_name: Name of the StackSet
:param member_account_id: AWS target account id
:param region_list: List of AWS regions
:return: response
'''
try:
cf = boto3.client('cloudformation')
response = cf.create_stack_instances(
StackSetName=stackset_name,
Accounts=[member_account_id],
Regions=region_list
)
LOGGER.info('Deployed stack in StackSet {0} for account {1}'.format(stackset_name, member_account_id))
return response
except ClientError as e:
LOGGER.error('Error updating CloudFormation StackSet {0}: {1}'.format(stackset_name, e))
# Set up GuardDuty in a master-member relationship
def get_master_members(aws_region, detector_id):
"""
Returns a list of current members of the GuardDuty master account
:param aws_region: AWS Region of the GuardDuty master account
:param detector_id: DetectorId of the GuardDuty master account in the AWS Region
:return: dict of AwsAccountId:RelationshipStatus
"""
member_dict = dict()
gd_client = boto3.client('guardduty', region_name=aws_region)
# Need to paginate and iterate over results
paginator = gd_client.get_paginator('list_members')
operation_parameters = {
'DetectorId': detector_id,
'OnlyAssociated': 'false'
}
page_iterator = paginator.paginate(**operation_parameters)
for page in page_iterator:
if page['Members']:
for member in page['Members']:
member_dict.update({member['AccountId']: member['RelationshipStatus']})
return member_dict
def get_members(aws_region, detector_id):
'''
Returns a dictionary of account IDs and emails
    :param aws_region: AWS Region of the GuardDuty master account (this script defaults to us-west-2)
:param detector_id: DetectorId of the GuardDuty master account in the AWS Region
:return: dict of AwsAccountId:email
'''
member_dict = dict()
gd_client = boto3.client('guardduty', region_name=aws_region)
# Need to paginate and iterate over results
paginator = gd_client.get_paginator('list_members')
operation_parameters = {
'DetectorId': detector_id,
'OnlyAssociated': 'false'
}
page_iterator = paginator.paginate(**operation_parameters)
for page in page_iterator:
if page['Members']:
for member in page['Members']:
# Accounts that were added at the Organization level do not have associated emails
try:
member_email = member['Email']
except:
member_email = None
member_dict.update({member['AccountId']: member_email})
return member_dict
def list_detectors(client, aws_region):
"""
Lists the detectors in a given Account/Region
Used to detect if a detector exists already
:param client: GuardDuty client
:param aws_region: AWS Region
:return: Dictionary of AWS_Region: DetectorId
"""
detector_dict = client.list_detectors()
if detector_dict['DetectorIds']:
for detector in detector_dict['DetectorIds']:
detector_dict.update({aws_region: detector})
else:
detector_dict.update({aws_region: ''})
return detector_dict
def master_account_check(master_account, guardduty_regions):
"""
Enables GuardDuty in the regions specified
:param master_account: Master Account ID
:param guardduty_regions: Regions to enable GuardDuty
:returns: A tuple with failed regions and detector id dict
"""
failed_master_regions = []
master_detector_id_dict = dict()
# Processing Master account
for aws_region in guardduty_regions:
try:
aws_region = aws_region.lower().strip()
gd_client = boto3.client('guardduty', region_name=aws_region)
detector_dict = list_detectors(gd_client, aws_region)
if detector_dict[aws_region]:
# a detector exists
LOGGER.info('Found existing detector {detector} in {region} for {account}'.format(
detector=detector_dict[aws_region],
region=aws_region,
account=master_account
))
master_detector_id_dict.update({aws_region: detector_dict[aws_region]})
else:
# create a detector
detector_str = gd_client.create_detector(Enable=True)['DetectorId']
LOGGER.info('Created detector {detector} in {region} for {account}'.format(
detector=detector_str,
region=aws_region,
account=master_account
))
master_detector_id_dict.update({aws_region: detector_str})
except ClientError as err:
if err.response['ResponseMetadata']['HTTPStatusCode'] == 403:
LOGGER.error("Failed to list detectors in Master account for region: {} due to an authentication error. Either your credentials are not correctly configured or the region is an OptIn region that is not enabled on the master account. Skipping {} and attempting to continue").format(aws_region,aws_region)
failed_master_regions.append(aws_region)
return (failed_master_regions, master_detector_id_dict)
def add_config_rule(session, master_account, aws_region):
"""
Adds guardduty-enabled-centralized AWS Config rule
:param session: AWS session object
:param master_account: GuardDuty master account
:param aws_region: AWS region
:returns: response of API call
"""
config = session.client('config', region_name=aws_region)
try:
input_parameters = {'CentralMonitoringAccount': master_account}
response = config.put_config_rule(
ConfigRule={
'ConfigRuleName': 'guardduty-enabled-centralized',
'Description': 'Checks whether Amazon GuardDuty is enabled in your AWS account and region. If you provide an AWS account for centralization, the rule evaluates the GuardDuty results in that account. The rule is compliant when GuardDuty is enabled.',
'Scope': {},
'Source': {
'Owner': 'AWS',
'SourceIdentifier': 'GUARDDUTY_ENABLED_CENTRALIZED',
},
'InputParameters': json.dumps(input_parameters),
'MaximumExecutionFrequency': 'TwentyFour_Hours',
'ConfigRuleState': 'ACTIVE'
}
)
LOGGER.info('AWS Config GuardDuty rule enabled')
return response
except ClientError as e:
LOGGER.error(e)
def invite_members(master_account, aws_account_dict, assume_role_name, guardduty_regions, master_detector_id_dict):
"""
Invites member accounts to the GuardDuty master
:param master_account: Master Account ID
:param aws_account_dict: Dictionary of member Account IDs and email addresses
:param assume_role_name: AWS IAM role to assume in the member accounts
:param guardduty_regions: Regions to enable GuardDuty in
    :param master_detector_id_dict: Dictionary of master account GuardDuty detector IDs
:returns: List of failed accounts
"""
    # Setting the invitation message
gd_invite_message = 'Account {account} invites you to join GuardDuty.'.format(account=master_account)
failed_accounts = []
for account in aws_account_dict.keys():
try:
session = assume_role(account, assume_role_name)
for aws_region in guardduty_regions:
LOGGER.info('Beginning {account} in {region}'.format(
account=account,
region=aws_region
))
gd_client = session.client('guardduty', region_name=aws_region)
# get detectors for this region
detector_dict = list_detectors(gd_client, aws_region)
detector_id = detector_dict[aws_region]
# If detector does not exist, create it
if detector_id:
# a detector exists
LOGGER.info('Found existing detector {detector} in {region} for {account}'.format(
detector=detector_id,
region=aws_region,
account=account
))
else:
# create a detector
detector_str = gd_client.create_detector(Enable=True)['DetectorId']
LOGGER.info('Created detector {detector} in {region} for {account}'.format(
detector=detector_str,
region=aws_region,
account=account
))
detector_id = detector_str
master_detector_id = master_detector_id_dict[aws_region]
member_dict = get_master_members(aws_region, master_detector_id)
# If detector is not a member of the GuardDuty master account, add it
if account not in member_dict:
gd_client = boto3.client('guardduty', region_name=aws_region)
gd_client.create_members(
AccountDetails=[
{
'AccountId': account,
'Email': aws_account_dict[account]
}
],
DetectorId=master_detector_id
)
LOGGER.info('Added Account {monitored} to member list in GuardDuty master account {master} for region {region}'.format(
monitored=account,
master=master_account,
region=aws_region
))
start_time = int(time.time())
while account not in member_dict:
if (int(time.time()) - start_time) > 180:
LOGGER.warning("Membership did not show up for account {}, skipping".format(account))
break
time.sleep(5)
member_dict = get_master_members(aws_region, master_detector_id)
else:
LOGGER.info('Account {monitored} is already a member of {master} in region {region}'.format(
monitored=account,
master=master_account,
region=aws_region
))
                # If email verification failed previously, delete the member and add it again.
if member_dict[account] == 'EmailVerificationFailed':
# Member is enabled and already being monitored
LOGGER.error('Account {account} Error: EmailVerificationFailed'.format(account=account))
gd_client = boto3.client('guardduty', region_name=aws_region)
gd_client.disassociate_members(
AccountIds=[
account
],
DetectorId=master_detector_id
)
gd_client.delete_members(
AccountIds=[
account
],
DetectorId=master_detector_id
)
LOGGER.warning('Deleting members for {account} in {region}'.format(
account=account,
region=aws_region
))
gd_client.create_members(
AccountDetails=[
{
'AccountId': account,
'Email': aws_account_dict[account]
}
],
DetectorId=master_detector_id
)
LOGGER.info('Added Account {monitored} to member list in GuardDuty master account {master} for region {region}'.format(
monitored=account,
master=master_account,
region=aws_region
))
start_time = int(time.time())
while account not in member_dict:
if (int(time.time()) - start_time) > 300:
LOGGER.warning("Membership did not show up for account {}, skipping".format(account))
break
time.sleep(5)
member_dict = get_master_members(aws_region, master_detector_id)
if member_dict[account] == 'Enabled':
# Member is enabled and already being monitored
LOGGER.info('Account {account} is already enabled'.format(account=account))
else:
master_gd_client = boto3.client('guardduty', region_name=aws_region)
gd_client = session.client('guardduty', region_name=aws_region)
if member_dict[account] == 'Disabled' :
# Member was disabled
LOGGER.error('Account {account} Error: Disabled'.format(account=account))
master_gd_client.start_monitoring_members(
AccountIds=[
account
],
DetectorId=master_detector_id
)
LOGGER.info('Account {account} Re-Enabled'.format(account=account))
while member_dict[account] != 'Enabled':
if member_dict[account] == 'Created' :
# Member has been created in the GuardDuty master account but not invited yet
master_gd_client = boto3.client('guardduty', region_name=aws_region)
master_gd_client.invite_members(
AccountIds=[
account
],
DetectorId=master_detector_id,
Message=gd_invite_message
)
LOGGER.info('Invited Account {monitored} to GuardDuty master account {master} in region {region}'.format(
monitored=account,
master=master_account,
region=aws_region
))
if member_dict[account] == 'Invited' or member_dict[account] == 'Resigned':
# member has been invited so accept the invite
response = gd_client.list_invitations()
invitation_dict = dict()
invitation_id = None
for invitation in response['Invitations']:
invitation_id = invitation['InvitationId']
if invitation_id is not None:
gd_client.accept_invitation(
DetectorId=detector_id,
InvitationId=invitation_id,
MasterId=str(master_account)
)
LOGGER.info('Accepting Account {monitored} to GuardDuty master account {master} in region {region}'.format(
monitored=account,
master=master_account,
region=aws_region
))
# Refresh the member dictionary
member_dict = get_master_members(aws_region, master_detector_id)
LOGGER.info('Finished {account} in {region}'.format(account=account, region=aws_region))
add_config_rule(session, master_account, aws_region)
except ClientError as e:
LOGGER.error("Error Processing Account {}".format(account))
failed_accounts.append({
account: repr(e)
})
return failed_accounts
def lambda_handler(event, context):
'''
    Lambda entry point that bootstraps a newly created AWS member account.
'''
member_account_id = event['detail']['serviceEventDetails']['createManagedAccountStatus']['account']['accountId']
member_account_name = event['detail']['serviceEventDetails']['createManagedAccountStatus']['account']['accountName']
master_account = boto3.client('sts').get_caller_identity().get('Account')
assume_role_name = os.environ['assumeRoleName']
enabled_regions = os.environ['enabledRegions']
stackset_names = os.environ['stackSetNames']
stackset_names_or = os.environ['stackSetNamesOneRegion']
session = assume_role(member_account_id, assume_role_name)
# Create resources
# Alias
create_alias(session, member_account_name, member_account_id)
# IAM password policy
update_iam_pwd_policy(session, member_account_id)
    # Enable S3 public access block at the account level
enable_s3_pub_block(session, member_account_id)
# S3 logging buckets
s3_regions = get_enabled_regions(enabled_regions, 's3')
for region in s3_regions:
session = assume_role(member_account_id, assume_role_name, region=region)
logging_bucket(session, region, member_account_name, member_account_id)
# CloudFormation
cf_regions = get_enabled_regions(enabled_regions, 'cloudformation')
stackset_names = [str(item).strip() for item in stackset_names.split(',')]
for stackset_name in stackset_names:
if stackset_name:
cf_stacksets(stackset_name, member_account_id, cf_regions)
else:
LOGGER.info('CloudFormation StackSet list empty')
stackset_names_or = [str(item).strip() for item in stackset_names_or.split(',')]
for stackset_name in stackset_names_or:
if stackset_name:
default_region = [os.environ['defaultRegion']]
cf_stacksets(stackset_name, member_account_id, default_region)
else:
LOGGER.info('CloudFormation StackSet one region empty')
# GuardDuty
# Collecting data to enable GD in member accounts
# Change default_gd_region to your main region
default_gd_region = 'us-west-2'
aws_account_dict = OrderedDict()
gd_regions = get_enabled_regions(enabled_regions, 'guardduty')
gd_client = boto3.client('guardduty', region_name=default_gd_region)
detector_dict = list_detectors(gd_client, default_gd_region)
detector_id = detector_dict[default_gd_region]
members = get_members(default_gd_region, detector_id)
member_email = get_member_email(member_account_id)
aws_account_dict.update({member_account_id: member_email})
if len(members) > 1000:
raise Exception("Only 1000 accounts can be linked to a single master account")
master_account_checked = master_account_check(master_account, gd_regions)
master_detector_id_dict = master_account_checked[1]
failed_master_regions = master_account_checked[0]
# Removes regions the detector failed to enable in the master account
for failed_region in failed_master_regions:
gd_regions.remove(failed_region)
# Processing accounts to be linked/invited
failed_accounts = invite_members(
master_account,
aws_account_dict,
assume_role_name,
gd_regions,
master_detector_id_dict
)
if len(failed_accounts) > 0:
for account in failed_accounts:
LOGGER.error("GuardDuty failed to enable accounts")
LOGGER.error("{}: \n\t{}".format(
list(account.keys())[0],
account[list(account.keys())[0]]
)
)
``` |
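For local experimentation, a minimal event mirroring the fields `lambda_handler` reads could look like the sketch below. The account id and name are placeholders, and actually invoking the handler additionally needs AWS credentials plus the environment variables it references (`assumeRoleName`, `enabledRegions`, `stackSetNames`, `stackSetNamesOneRegion`, `defaultRegion`).

```python
# Hypothetical Control Tower CreateManagedAccount event for local testing;
# all values are placeholders.
sample_event = {
    "detail": {
        "serviceEventDetails": {
            "createManagedAccountStatus": {
                "account": {
                    "accountId": "123456789012",
                    "accountName": "sandbox-account",
                }
            }
        }
    }
}
# lambda_handler(sample_event, context=None)  # requires AWS credentials/env vars
```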
{
"source": "jnoennig/crest",
"score": 3
} |
#### File: crest/Extensions/exifData.py
```python
import appex
from PIL import Image
from PIL.ExifTags import TAGS
def get_exif(fn):
ret = {}
i = Image.open(fn)
info = i._getexif()
for tag, value in info.items():
decoded = TAGS.get(tag, tag)
ret[decoded] = value
return ret
def main():
if not appex.is_running_extension():
print('This script is intended to be run from the sharing extension.')
return
images = appex.get_attachments('public.jpeg')
if images:
a = get_exif(images[0])
if a:
print('EXIF data:')
for k, v in a.items():
print('{0}: {1}'.format(k, v))
else:
print('No exif data found')
else:
print('No input image found')
if __name__ == '__main__':
main()
```
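Outside the Pythonista share sheet, `get_exif` can be exercised in a plain Python session with Pillow installed (copy the function over, since importing the whole script pulls in `appex`); the file name below is a placeholder for any JPEG that carries EXIF data.

```python
# 'photo.jpg' is a placeholder path; get_exif from the script above is assumed
# to be defined in the current session.
for tag, value in get_exif('photo.jpg').items():
    print('{0}: {1}'.format(tag, value))
```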
#### File: crest/Extensions/hash-md5.py
```python
import appex
import hashlib
import clipboard
def md5_hash(file_path):
try:
hash_md5 = hashlib.md5()
with open(file_path, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
except:
return "Error the file was not hashed."
def main():
if not appex.is_running_extension():
print('This script is intended to be run from the sharing extension.')
return
myfile = appex.get_attachments('public.data')[0]
if myfile:
md5 = md5_hash(myfile)
print('md5: {}\n'.format(md5))
clipboard.set(md5)
print('The md5 hash has been copied to the clipboard.')
if __name__ == '__main__':
main()
```
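`md5_hash` itself only needs the standard library, so it can be checked without Pythonista, assuming the function above is in scope; the well-known digest of b"hello world" is computed directly for comparison.

```python
import hashlib

# Write a small sample file, hash it with md5_hash, and compare with hashlib.
with open("sample.bin", "wb") as f:
    f.write(b"hello world")

print(md5_hash("sample.bin"))
print(hashlib.md5(b"hello world").hexdigest())  # 5eb63bbbe01eeed093cb22bb8f5acdc3
```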
#### File: crest/Extensions/pdfMetadata.py
```python
import appex
import PyPDF2
from PyPDF2 import PdfFileReader
def main():
if not appex.is_running_extension():
print('This script is intended to be run from the sharing extension.')
return
pdfs = appex.get_attachments(uti='public.data')
if pdfs:
pdf_file = PdfFileReader(open(pdfs[0], "rb"))
if pdf_file:
metadata = pdf_file.getDocumentInfo()
for k, v in metadata.items():
if type(v) == PyPDF2.generic.IndirectObject:
print('{0} - {1}'.format(k, metadata[k]))
else:
print('{0} - {1}'.format(k, v))
else:
print('No metadata found')
else:
print('No input PDF found')
if __name__ == '__main__':
main()
``` |
{
"source": "jnohlgard/projector-installer",
"score": 2
} |
#### File: projector-installer/projector_installer/ide_update.py
```python
from distutils.version import LooseVersion
from typing import Optional, List
import click
from .config_generator import save_config
from .global_config import SHORT_NETWORK_TIMEOUT, LONG_NETWORK_TIMEOUT
from .products import Product, get_product_releases, IDEKind, load_compatible_apps, \
COMPATIBLE_IDE_FILE, EAP_PRODUCTS
from .apps import is_projector_installed_ide, get_product_info, download_and_install, ProductInfo
from .run_config import RunConfig
from .timeout import TimeoutException, timeout
# This map differs from products.CODE2KIND in several positions.
IDE_UPDATE_CODE2KIND = {
'IC': IDEKind.Idea_Community,
'IU': IDEKind.Idea_Ultimate,
'PC': IDEKind.PyCharm_Community,
'PY': IDEKind.PyCharm_Professional,
'CL': IDEKind.CLion,
'GO': IDEKind.GoLand,
'DB': IDEKind.DataGrip,
'PS': IDEKind.PhpStorm,
'WS': IDEKind.WebStorm,
'RM': IDEKind.RubyMine,
'RD': IDEKind.Rider,
'PD': IDEKind.DataSpell
}
def is_updatable_ide(path_to_ide: str) -> bool:
"""Returns true if IDE updatable"""
return is_projector_installed_ide(path_to_ide)
def get_product_list_from_file(kind: IDEKind) -> List[Product]:
"""Get product list from compatible IDE json"""
return [prod for prod in load_compatible_apps(COMPATIBLE_IDE_FILE) if prod.kind == kind]
def is_tested_ide(run_config: RunConfig) -> bool:
"""Returns True if given IDE is from compatible list"""
if run_config.update_channel == RunConfig.UNKNOWN:
prod_info = get_product_info(run_config.path_to_app)
kind: IDEKind = IDE_UPDATE_CODE2KIND.get(prod_info.product_code, IDEKind.Unknown)
ver = LooseVersion(prod_info.version)
for prod in load_compatible_apps(COMPATIBLE_IDE_FILE):
if prod.kind == kind and prod.ver == ver:
return True
return False
return run_config.update_channel == RunConfig.TESTED
def get_product_version(prod_info: ProductInfo) -> LooseVersion:
""""Return version for given product"""
kind: IDEKind = IDE_UPDATE_CODE2KIND.get(prod_info.product_code, IDEKind.Unknown)
if kind in EAP_PRODUCTS:
return LooseVersion(f'{prod_info.version}.{prod_info.build_number}')
return LooseVersion(prod_info.version)
def get_update(run_config: RunConfig) -> Optional[Product]:
"""Returns update for given app if available"""
prod_info = get_product_info(run_config.path_to_app)
kind: IDEKind = IDE_UPDATE_CODE2KIND.get(prod_info.product_code, IDEKind.Unknown)
if kind == IDEKind.Unknown:
return None
current = get_product_version(prod_info)
prod_list = get_product_list_from_file(kind) \
if is_tested_ide(run_config) else get_product_releases(kind, timeout=LONG_NETWORK_TIMEOUT)
product = None
for prod in prod_list:
if prod.ver > current and (product is None or prod.ver > product.ver):
product = prod
return product
def update_config(run_config: RunConfig, product: Product) -> None:
"""Updates IDE in given config"""
run_config.path_to_app = download_and_install(product.url)
save_config(run_config)
@timeout(SHORT_NETWORK_TIMEOUT)
def get_fast_update(run_config: RunConfig) -> Optional[Product]:
"""Checks available updates with short network timeout"""
return get_update(run_config)
def check_ide_update(run_config: RunConfig) -> None:
"""Check if update is available and prints message if yes"""
if is_updatable_ide(run_config.path_to_app):
try:
product = get_fast_update(run_config)
except TimeoutException:
click.echo('Checking for updates ... ', nl=False)
product = get_update(run_config)
click.echo('done.')
if product is not None:
prod_info = get_product_info(run_config.path_to_app)
msg = f'\nNew version {product.name} is available.\n' \
f'Current version {prod_info.version}.\n' \
f'To update use command: projector config update {run_config.name}\n'
click.echo(click.style(msg, bold=True))
```
#### File: projector-installer/test/config_generator_test.py
```python
from unittest import TestCase
from projector_installer.config_generator import token_quote
class ConfigGeneratorTest(TestCase):
"""Test config_generator.py module"""
def test_token_quote(self) -> None:
"""The token_quote method must return the same token in quotes"""
self.assertEqual(token_quote('some_token'), '\"some_token\"')
```
#### File: projector-installer/test/dialogs_test.py
```python
from unittest import TestCase
from unittest import mock
import sys
import pytest
from projector_installer.dialogs import get_user_input, is_boolean_input, ask, \
prompt_with_default, get_all_listening_ports
class DialogsTests(TestCase):
"""Test dialogs.py module"""
def test_get_user_input(self): # type: ignore
"""The get_user_input method must return the user's input"""
some_input = 'some_input'
original_input = mock.builtins.input
mock.builtins.input = lambda _: some_input
self.assertEqual(get_user_input('prompt', 'default'), some_input)
mock.builtins.input = original_input
def test_get_user_input_default(self): # type: ignore
"""The get_user_input method must return default if the input is empty"""
original_input = mock.builtins.input
mock.builtins.input = lambda _: ''
self.assertEqual(get_user_input('prompt', 'default'), 'default')
mock.builtins.input = original_input
def test_is_boolean_input_true(self): # type: ignore
"""
The is_boolean_input method must return true
if the user's input is in ['Y', 'y', 'N', 'n']
"""
self.assertTrue(is_boolean_input('Y'))
self.assertTrue(is_boolean_input('y'))
self.assertTrue(is_boolean_input('N'))
self.assertTrue(is_boolean_input('n'))
def test_is_boolean_input_false(self): # type: ignore
"""
The is_boolean_input method must return false
if the user's input is not in ['Y', 'y', 'N', 'n']
"""
self.assertFalse(is_boolean_input('123'))
self.assertFalse(is_boolean_input('true'))
self.assertFalse(is_boolean_input(''))
def test_ask_yes(self): # type: ignore
"""The ask method must return true if the user's input is 'y'"""
original_input = mock.builtins.input
mock.builtins.input = lambda _: 'y'
self.assertTrue(ask('prompt', True))
mock.builtins.input = original_input
def test_ask_no(self): # type: ignore
"""The ask method must return false if the user's input is 'n'"""
original_input = mock.builtins.input
mock.builtins.input = lambda _: 'n'
self.assertFalse(ask('prompt', False))
mock.builtins.input = original_input
def test_ask_empty(self): # type: ignore
"""The ask method must return true if the user's input is empty"""
original_input = mock.builtins.input
mock.builtins.input = lambda _: ''
self.assertTrue(ask('prompt', True))
mock.builtins.input = original_input
def test_prompt_with_default(self): # type: ignore
"""The prompt_with_default method must return the user's input"""
some_input = 'non empty input'
original_input = mock.builtins.input
mock.builtins.input = lambda _: some_input
self.assertEqual(prompt_with_default(prompt='prompt', default='default'), some_input)
mock.builtins.input = original_input
def test_prompt_with_default_empty_input(self): # type: ignore
"""The prompt_with_default method must return 'default' if the user's input is empty"""
empty_input = ''
original_input = mock.builtins.input
mock.builtins.input = lambda _: empty_input
self.assertEqual(prompt_with_default(prompt='prompt', default='default'), 'default')
mock.builtins.input = original_input
@pytest.mark.skipif(sys.platform == "linux", reason="test for non-linux only")
def test_get_all_listening_ports(self) -> None:
"""
The get_all_listening_ports method must return an empty array
if the platform is not Linux
"""
self.assertEqual(get_all_listening_ports(), [])
```
#### File: projector-installer/test/secure_config_test.py
```python
from unittest import TestCase
from projector_installer.secure_config import get_ca_password
class SecureConfigTest(TestCase):
"""Test secure_config.py module"""
def test_get_ca_password(self) -> None:
"""The get_ca_password method must return ca password"""
self.assertEqual(get_ca_password(), '<PASSWORD>')
``` |
{
"source": "jnojek/Interview",
"score": 4
} |
#### File: Interview/JamesNojekChallenge/Database.py
```python
import json
def oldToNewDB(filename):
try:
with open(filename, "r") as f:
text = json.load(f) #get data from file
print("Input the new database name: ")
DBname = str(input()) #select database to edit
for i in range (0, len(text['Customer Records'])):
#define database being edited and column names being edited
print("INSERT INTO " + DBname + " (Record ID, Name, Cell Phone, Work Phone, Email, Address, Basic Widget Order, Advanced Widget Order, Protection Plan)")
if text['Customer Records'][i]['Protection Plan'] == True: #determine boolean of Protection Plan to make sure the keyword is the correct format for SQL
print('VALUES (' + str(text['Customer Records'][i]['ID']) + ', \'' + str(text['Customer Records'][i]['Name']) + '\', \'' + str(text['Customer Records'][i]['Cell Phone']) + '\', \'' +
str(text['Customer Records'][i]['Work Phone']) + '\', \'' + str(text['Customer Records'][i]['Email']) + '\', \'' + str(text['Customer Records'][i]['Address']) + '\', ' +
str(text['Customer Records'][i]['Basic Widget Order']) + ', ' + str(text['Customer Records'][i]['Advanced Widget Order']) + ', TRUE)')
else: #if false
print('VALUES (' + str(text['Customer Records'][i]['ID']) + ', \'' + str(text['Customer Records'][i]['Name']) + '\', \'' + str(text['Customer Records'][i]['Cell Phone']) + '\', \'' +
str(text['Customer Records'][i]['Work Phone']) + '\', \'' + str(text['Customer Records'][i]['Email']) + '\', \'' + str(text['Customer Records'][i]['Address']) + '\', ' +
str(text['Customer Records'][i]['Basic Widget Order']) + ', ' + str(text['Customer Records'][i]['Advanced Widget Order']) + ', FALSE)')
except:
return("Your file was not found")
#test driver
print("Input the old data file name")
filename = str(input())
oldToNewDB(filename)
#{ Sample json used for testing
# "Customer Records": [
# {
# "ID": 1234,
# "Name": "<NAME>",
# "Cell Phone": "405.867.5309",
# "Work Phone": "123.123.1234",
# "Email": "<EMAIL>",
# "Address": "123 Vic Way, Dallas TX 75001",
# "Basic Widget Order": 37,
# "Advanced Widget Order": 12,
# "Protection Plan": true
# }
# ]
#}
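# For the sample record above the script would print roughly the following
# (the database name "Customers" is illustrative user input, not part of the source):
#   INSERT INTO Customers (Record ID, Name, Cell Phone, Work Phone, Email, Address, Basic Widget Order, Advanced Widget Order, Protection Plan)
#   VALUES (1234, '<NAME>', '405.867.5309', '123.123.1234', '<EMAIL>', '123 Vic Way, Dallas TX 75001', 37, 12, TRUE)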
``` |
{
"source": "J-Nokwal/AlgorithmsAndDataStructure",
"score": 4
} |
#### File: Algorithms/InfixToPostfix/algo.py
```python
class node:
def __init__(self,data=None,next=None):
self.data=data
self.next=next
class Stack:
def __init__(self):
self.start=None
self.pushOnTop('(')
def pushOnTop(self,data):
p=node(data,None)
if self.start==None:
self.start=p
return
else:
p.next=self.start
self.start=p
def pop(self):
temp=self.start.data
self.start=self.start.next
return temp
def peek(self):
if self.start==None:
print("Stack is Empty")
else:return (self.start.data)
def display(self):
q=self.start
while(q):
print(q.data,end=" ")
q=q.next
print("Left side is Top")
newStack = Stack()
data=[]
string = input("Enter the String: ")+')'
for i in string:
if i in ['+','-','*','/','^']:
if i in ['+','-']:
while True:
                if newStack.peek() in ['*','/','^','+','-']:
                    data.append(newStack.pop())
                else:
                    break
newStack.pushOnTop(i)
elif i in ['*','/']:
while True:
if newStack.peek() in ['*','/','^']:
data.append(newStack.pop())
else:
break
newStack.pushOnTop(i)
        else:
            while True:
                if newStack.peek() == '^':
                    data.append(newStack.pop())
                else:
                    break
            newStack.pushOnTop('^')
elif i==')':
while True:
if newStack.peek() == '(':
newStack.pop()
break
else:
data.append(newStack.pop())
elif i=='(':
newStack.pushOnTop('(')
else:
data.append(i)
for i in data: print(i,end=" ")
# a+(b*c-(d/e^f)*g)*h
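# expected postfix for the example above (standard precedence, parentheses handled via the stack):
#   a b c * d e f ^ / g * - h * +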
``` |
{
"source": "jnolan40/husky_mixed_reality_ws",
"score": 2
} |
#### File: simulated_gps/src/more_gps_data.py
```python
from ubxtranslator.msg import hpposllh, relpos2D
from geometry_msgs.msg import Twist
import rospy
class message_combiner:
def __init__(self) :
self.msg = Twist()
self.Pub = rospy.Publisher('/UBX/Combined', Twist, queue_size=10)
def run(self) :
rospy.init_node('message_combiner_node')
rospy.Subscriber('/UBX/relpos2D', relpos2D, self.hdg_cb)
rospy.Subscriber('/UBX/hpposllh', hpposllh, self.llh_cb)
rospy.spin()
def hdg_cb(self, msg):
# Convert to ENU
theta = 90 - msg.pos.theta
while theta > 180:
theta -= 360
while theta < -180:
theta += 360
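        # e.g. (assuming msg.pos.theta is a clockwise-from-north heading in degrees):
        # 0 deg (north) -> +90 deg ENU, 90 deg (east) -> 0 deg, 270 deg (west) -> -180 deg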
self.msg.linear.z = theta
def llh_cb(self, msg) :
self.msg.linear.x = msg.llh.lat
self.msg.linear.y = msg.llh.lon
self.msg.angular.x = msg.err.lat
self.msg.angular.y = msg.err.lon
self.Pub.publish(self.msg)
if __name__ == '__main__' :
mc = message_combiner()
mc.run()
``` |
{
"source": "jnolan-poloniex/vennt-server",
"score": 3
} |
#### File: jnolan-poloniex/vennt-server/api_campaigns.py
```python
import venntdb
import uuid
from constants import *
# VenntHandler methods
def has_campaign_permissions(self, username, campaign_id, owner_only=False):
campaign = self.server.db.get_campaign(campaign_id)
if campaign is None:
return False
if owner_only:
return campaign["owner"] == username
return username in campaign["members"]
def has_gm_permissions(self, username, campaign_id):
campaign = self.server.db.get_campaign(campaign_id)
if campaign is None:
return False
return username in campaign["members"] and campaign["members"][username] == "GM"
def get_campaigns(self, args, username):
return self.respond({"success": True, "value": self.server.db.get_campaigns(username)})
def create_campaign(self, args, username):
name = args[KEY_NAME]
id = IDType.CAMPAIGN + str(uuid.uuid4())
self.server.db.create_campaign(username, id, name)
ret = {"success": True, "campaign_id": id}
return self.respond(ret)
def send_campaign_invite(self, args, username):
user_to = args[KEY_USERNAME]
user_from = username
campaign_id = args[KEY_CAMPAIGN_ID]
if not has_campaign_permissions(self, user_from, campaign_id, owner_only=True):
return self.respond({"success": False, "info": MSG_BAD_CAMP})
if has_campaign_permissions(self, user_to, campaign_id):
return self.respond({"success": False, "info": MSG_DID_JOIN})
if not self.server.db.is_valid("accounts", user_to):
return self.respond({"success": False, "info": MSG_NO_USER})
success = self.server.db.send_campaign_invite(
user_from, user_to, campaign_id)
if not success:
return self.respond({"success": False, "info": MSG_INVITE_EXISTS})
return self.respond({"success": success})
def accept_campaign_invite(self, args, username):
invites = self.server.db.get_campaign_invites(username)
campaign_id = args[KEY_CAMPAIGN_ID]
campaign_owner = None
for inv in invites:
if campaign_id == inv["id"]:
campaign_owner = inv["from"]
if campaign_owner is None:
return self.respond({"success": False, "info": MSG_BAD_CAMP})
# Clear campaign invite even if adding the user fails
self.server.db.remove_campaign_invite(username, campaign_id)
if self.server.db.is_valid("campaigns", campaign_id, "members", username):
return self.respond({"success": False, "info": MSG_DID_JOIN})
self.server.db.add_user_to_campaign(username, campaign_id)
return self.respond({"success": True})
def decline_campaign_invite(self, args, username):
invites = self.server.db.get_campaign_invites(username)
campaign_id = args[KEY_CAMPAIGN_ID]
campaign_owner = None
for inv in invites:
if campaign_id == inv["id"]:
campaign_owner = inv["from"]
if campaign_owner is None:
return self.respond({"success": False, "info": MSG_BAD_CAMP})
self.server.db.remove_campaign_invite(username, campaign_id)
return self.respond({"success": True})
def set_role(self, args, username):
if args[KEY_ROLE] not in ROLES:
return self.respond({"success": False, "info": MSG_BAD_ROLE})
campaign_id = args[KEY_CAMPAIGN_ID]
if not has_campaign_permissions(self, username, campaign_id, owner_only=True):
return self.respond({"success": False, "info": MSG_BAD_CAMP})
member = args[KEY_USERNAME]
if not self.server.db.is_valid("campaigns", campaign_id, "members", member):
return self.respond({"success": False, "info": MSG_NO_USER})
self.server.db.set_role(member, campaign_id, args[KEY_ROLE])
return self.respond({"success": True})
def get_role(self, args, username):
campaign_id = args[KEY_CAMPAIGN_ID]
if not has_campaign_permissions(self, username, campaign_id):
return self.respond({"success": False, "info": MSG_BAD_CAMP})
member = args[KEY_USERNAME]
if not self.server.db.is_valid("campaigns", campaign_id, "members", member):
return self.respond({"success": False, "info": MSG_NO_USER})
role = self.server.db.get_role(member, campaign_id)
return self.respond({"success": True, "value": role})
def get_campaign(self, args, username):
campaign_id = args[KEY_CAMPAIGN_ID]
    campaign = self.server.db.get_campaign(campaign_id)
    if campaign is None or not username in campaign["members"]:
        return self.respond({"success": False, "info": MSG_BAD_CAMP})
    # copy so that the filtering below does not mutate the stored campaign
    campaign = campaign.copy()
role = campaign["members"][username]
if role == ROLE_GM:
return self.respond({"success": True, "value": campaign})
elif role in [ROLE_PLAYER, ROLE_SPECTATOR]:
# Filter out gm_only entities (mostly enemies to allow GMs to plan easily)
filtered_entities = {id: entity for id,
entity in campaign["entities"].items() if not entity["gm_only"]}
campaign["entities"] = filtered_entities
return self.respond({"success": True, "value": campaign})
return self.respond({"success": False, "info": MSG_BAD_CAMP})
def add_entity_to_campaign(self, args, username):
campaign_id = args[KEY_CAMPAIGN_ID]
entity_id = args[KEY_ID]
if len(entity_id) != FULL_UUID_LEN:
return self.respond({"success": False, "info": MSG_NO_ENTITY})
campaign = self.server.db.get_campaign(campaign_id)
if campaign is None or not username in campaign["members"]:
return self.respond({"success": False, "info": MSG_BAD_CAMP})
role = campaign["members"][username]
if entity_id[0] == IDType.CHARACTER and not role in [ROLE_PLAYER, ROLE_GM]:
return self.respond({"success": False, "info": MSG_NO_PERMISSION})
elif entity_id[0] == IDType.ENEMY and role != ROLE_GM:
return self.respond({"success": False, "info": MSG_NO_PERMISSION})
if not self.server.db.is_valid("accounts", username, "characters", entity_id):
return self.respond({"success": False, "info": MSG_NO_ENTITY})
self.server.db.add_to_campaign(
campaign_id, username, entity_id, gm_only=(entity_id[0] == IDType.ENEMY))
return self.respond({"success": True})
def remove_entity_from_campaign(self, args, username):
campaign_id = args[KEY_CAMPAIGN_ID]
entity_id = args[KEY_ID]
if not self.server.db.is_valid("campaigns", campaign_id, "entities", entity_id):
return self.respond({"success": False, "info": MSG_NO_ENTITY})
campaign = self.server.db.get_campaign(campaign_id)
if campaign is None or not username in campaign["members"] or not entity_id in campaign["entities"]:
return self.respond({"success": False, "info": MSG_BAD_CAMP})
role = campaign["members"][username]
entity = campaign["entities"][entity_id]
if (role == ROLE_PLAYER and entity["owner"] == username and not entity["gm_only"]) or role == ROLE_GM:
# players can only remove their own entities - GMs can remove any entity
self.server.db.remove_from_campaign(campaign_id, entity_id)
return self.respond({"success": True})
return self.respond({"success": False, "info": MSG_NO_PERMISSION})
```
#### File: jnolan-poloniex/vennt-server/api_characters.py
```python
import venntdb
import uuid
from constants import *
# VenntHandler methods
def get_characters(self, args, username):
return self.respond({"success": True, "value": self.server.db.get_characters(username)})
def get_character(self, args, username):
return self.respond({"success": True, "value": self.server.db.get_character(username, args[KEY_ID])})
def create_character(self, args, username):
name = args[KEY_NAME]
if len(name) > MAX_NAME_LENGTH:
return self.respond({"success": False, "info": MSG_NAME_LONG})
if KEY_GIFT not in args:
args[KEY_GIFT] = "None"
elif args[KEY_GIFT] not in GIFTS:
return self.respond({"success": False, "info": MSG_INVALID_GIFT})
id = IDType.CHARACTER + str(uuid.uuid4())
character = {"name": name, "id": id}
for key in args:
if key in ATTRIBUTES:
try:
character[key] = int(args[key])
except ValueError:
return self.respond({"success": False, "info": MSG_INVALID_ATTRIBUTE})
character[KEY_GIFT] = args[KEY_GIFT]
self.server.db.create_character(username, character)
ret = {"success": True, "id": id}
return self.respond(ret)
def set_attr(self, args, username):
char_id = args[KEY_ID]
attr = args[KEY_ATTR]
try:
val = int(args[KEY_VAL])
except ValueError:
return self.respond({"success": False, "info": MSG_INVALID_ATTRIBUTE})
if attr not in ATTRIBUTES:
return self.respond({"success": False, "info": MSG_NO_ATTR})
if not self.server.db.character_exists(username, char_id):
return self.respond({"success": False, "info": MSG_NO_CHAR})
self.server.db.set_attr(username, char_id, attr, val)
return self.respond({"success": True})
def get_attr(self, args, username):
char_id = args[KEY_ID]
attr = args[KEY_ATTR]
if attr not in ATTRIBUTES:
return self.respond({"success": False, "info": MSG_NO_ATTR})
if not self.server.db.character_exists(username, char_id):
return self.respond({"success": False, "info": MSG_NO_CHAR})
val = self.server.db.get_attr(username, char_id, attr)
return self.respond({"success": True, "value": val})
```
#### File: jnolan-poloniex/vennt-server/api_initiative.py
```python
import venntdb
import d20
from api_campaigns import *
from constants import *
import importlib
logClass = importlib.import_module("logger")
logger = logClass.Logger("api_initiative")
# VenntHandler methods
def entity_in_init_list(campaign, entity_id):
for init in campaign["init"]:
if init["entity_id"] == entity_id:
return True
return False
def add_entity_to_combat(self, args, username):
campaign_id = args[KEY_CAMPAIGN_ID]
entity_id = args[KEY_ID]
campaign = self.server.db.get_campaign(campaign_id)
if campaign is None or not username in campaign["members"]:
return self.respond({"success": False, "info": MSG_BAD_CAMP})
role = campaign["members"][username]
if not entity_id in campaign["entities"]:
return self.respond({"success": False, "info": MSG_NO_ENTITY})
entity = campaign["entities"][entity_id]
if entity_id[0] == IDType.CHARACTER and entity_in_init_list(campaign, entity_id):
# characters cannot go into the init list more than once a round (enemies can though)
return self.respond({"success": False, "info": MSG_NO_PERMISSION})
if (role == ROLE_PLAYER and entity_id[0] == IDType.CHARACTER and entity["owner"] == username and not entity["gm_only"]) or role == ROLE_GM:
owner_username = entity["owner"]
character = self.server.db.get_character(owner_username, entity_id)
bonus = int(character["INIT"])
else:
return self.respond({"success": False, "info": MSG_NO_PERMISSION})
response = {}
if KEY_ROLL in args:
try:
roll = int(args[KEY_ROLL])
except ValueError:
return self.respond({"success": False, "info": MSG_INVALID_ATTRIBUTE})
else:
roll_result = d20.roll("3d6 + " + str(bonus))
roll = roll_result.total
response["roll_str"] = str(roll_result)
    # TODO: if the enemy is already in the campaign, ensure it has been separated by a character
self.server.db.add_to_combat(campaign_id, entity_id, roll, bonus)
response["success"] = True
return self.respond(response)
def remove_entity_from_combat(self, args, username):
campaign_id = args[KEY_CAMPAIGN_ID]
entity_id = args[KEY_ID]
campaign = self.server.db.get_campaign(campaign_id)
if campaign is None or not username in campaign["members"]:
return self.respond({"success": False, "info": MSG_BAD_CAMP})
role = campaign["members"][username]
if not entity_id in campaign["entities"] or not entity_in_init_list(campaign, entity_id):
# the entity needs to be in the list for us to be able to remove it
return self.respond({"success": False, "info": MSG_NO_ENTITY})
entity = campaign["entities"][entity_id]
if (role == ROLE_PLAYER and entity_id[0] == IDType.CHARACTER and entity["owner"] == username) or role == ROLE_GM:
self.server.db.remove_from_combat(campaign_id, entity_id)
return self.respond({"success": True})
return self.respond({"success": False, "info": MSG_NO_PERMISSION})
def start_combat(self, args, username):
campaign_id = args[KEY_CAMPAIGN_ID]
if has_gm_permissions(self, username, campaign_id) and self.server.db.start_combat(campaign_id):
return self.respond({"success": True})
return self.respond({"success": False, "info": MSG_NO_PERMISSION})
def end_combat(self, args, username):
campaign_id = args[KEY_CAMPAIGN_ID]
if has_gm_permissions(self, username, campaign_id) and self.server.db.end_combat(campaign_id):
return self.respond({"success": True})
return self.respond({"success": False, "info": MSG_NO_PERMISSION})
def update_initiative_style(self, args, username):
campaign_id = args[KEY_CAMPAIGN_ID]
style = args[KEY_STYLE]
if style not in INIT_STYLES:
return self.respond({"success": False, "info": MSG_NO_INIT_STYLE})
if has_gm_permissions(self, username, campaign_id):
self.server.db.update_initiative_style(campaign_id, style)
return self.respond({"success": True})
return self.respond({"success": False, "info": MSG_NO_PERMISSION})
def end_turn(self, args, username):
campaign_id = args[KEY_CAMPAIGN_ID]
entity_id = args[KEY_ID]
campaign = self.server.db.get_campaign(campaign_id)
if campaign is None or not username in campaign["members"]:
return self.respond({"success": False, "info": MSG_BAD_CAMP})
role = campaign["members"][username]
if not entity_id in campaign["entities"] or not entity_in_init_list(campaign, entity_id):
# the entity needs to be in the init list in order for us to be able to end its turn!
return self.respond({"success": False, "info": MSG_NO_ENTITY})
entity = campaign["entities"][entity_id]
if (role == ROLE_PLAYER and entity_id[0] == IDType.CHARACTER and entity["owner"] == username) or role == ROLE_GM:
self.server.db.reset_actions(campaign_id, entity_id)
self.server.db.next_turn(campaign_id)
return self.respond({"success": True})
return self.respond({"success": False, "info": MSG_NO_PERMISSION})
```
#### File: jnolan-poloniex/vennt-server/db_characters.py
```python
import venntdb
from constants import *
# VenntDB Methods
def character_exists(self, username, char_id):
return self.get_character(username, char_id) is not None
def get_character(self, username, char_id):
self.assert_valid("accounts", username, "characters")
if self.is_valid("accounts", username, "characters", char_id):
return self.db["accounts"][username]["characters"][char_id]
return None
def create_character(self, username, character, is_enemy=False):
self.assert_valid("accounts", username, "characters")
for attr in ATTRIBUTES:
if attr not in character:
character[attr] = 0
character["items"] = []
character["abilities"] = []
character["is_enemy"] = is_enemy
self.db["accounts"][username]["characters"][character["id"]] = character
self.save_db()
def get_characters(self, username):
self.assert_valid("accounts", username, "characters")
return self.db["accounts"][username]["characters"]
def get_attr(self, username, char_id, attr):
self.assert_valid("accounts", username, "characters", char_id)
return self.get_character(username, char_id)[attr]
def set_attr(self, username, char_id, attr, val):
self.assert_valid("accounts", username, "characters", char_id)
self.get_character(username, char_id)[attr] = val
self.save_db()
```
#### File: jnolan-poloniex/vennt-server/db_initiative.py
```python
import venntdb
from constants import *
import importlib
logClass = importlib.import_module("logger")
logger = logClass.Logger("db_initiative")
# VenntDB Methods
def add_to_combat(self, campaign_id, entity_id, roll, bonus):
init_list = self.db["campaigns"][campaign_id]["init"]
# walk through init list and look for correct place to insert the new row
insert_index = len(init_list)
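    # ordering, as implemented below: higher roll first, ties broken by higher bonus,
    # and on a full tie characters are placed ahead of enemies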
for i in range(len(init_list)):
if init_list[i]["roll"] == roll:
if init_list[i]["bonus"] == bonus:
if init_list[i]["entity_id"][0] == IDType.ENEMY and entity_id[0] == IDType.CHARACTER:
insert_index = i
break
elif init_list[i]["bonus"] < bonus:
insert_index = i
break
elif init_list[i]["roll"] < roll:
insert_index = i
break
self.db["campaigns"][campaign_id]["init"].insert(
insert_index, {"roll": roll, "bonus": bonus, "entity_id": entity_id})
# shift the current index forward to account for new item in the init list
if self.db["campaigns"][campaign_id]["in_combat"]:
current_index = self.db["campaigns"][campaign_id]["init_index"]
if current_index >= insert_index:
self.db["campaigns"][campaign_id]["init_index"] = current_index + 1
else:
# entities start combat with 1 reaction (only when not in combat)
self.db["campaigns"][campaign_id]["entities"][entity_id]["reactions"] = 1
# if the entity is gm_only - make it visible to everyone
if self.db["campaigns"][campaign_id]["entities"][entity_id]["gm_only"]:
self.db["campaigns"][campaign_id]["entities"][entity_id]["gm_only"] = False
self.save_db()
return True
def remove_from_combat(self, campaign_id, entity_id):
if len(self.db["campaigns"][campaign_id]["init"]) == 0:
return False
init_list = self.db["campaigns"][campaign_id]["init"]
filtered_init_list = []
init_list_len = len(init_list)
removed_count = 0
for index, init in enumerate(init_list):
if init["entity_id"] != entity_id:
filtered_init_list.append(init)
else:
removed_count += 1
# need to update the init_index accordingly
if index == 0 and self.db["campaigns"][campaign_id]["init_index"] == 0:
# need to wrap around backwards
self.db["campaigns"][campaign_id]["init_index"] = init_list_len - \
1 - removed_count
self.db["campaigns"][campaign_id]["init_round"] -= 1
elif index <= self.db["campaigns"][campaign_id]["init_index"]:
self.db["campaigns"][campaign_id]["init_index"] -= 1
if len(filtered_init_list) == 0:
# If the initiative order is empty, we can automatically end combat
return self.end_combat(campaign_id)
self.db["campaigns"][campaign_id]["init"] = filtered_init_list
# Increment turn if needed
next_turn(self, campaign_id, save_db=False)
self.save_db()
return True
def start_combat(self, campaign_id):
if self.db["campaigns"][campaign_id]["in_combat"] or len(self.db["campaigns"][campaign_id]["init"]) == 0:
return False
self.db["campaigns"][campaign_id]["in_combat"] = True
self.db["campaigns"][campaign_id]["init_index"] = 0
self.db["campaigns"][campaign_id]["init_round"] = 0
    # grab initiative from the top of the order
entity = self.db["campaigns"][campaign_id]["init"][0]
entity_id = entity["entity_id"]
__give_entity_actions(self, campaign_id, entity_id)
# increment additional times following init style rules
while __should_keep_incrementing_init(self, campaign_id, entity):
__increment_init_index(self, campaign_id)
self.save_db()
return True
def end_combat(self, campaign_id):
self.db["campaigns"][campaign_id]["in_combat"] = False
self.db["campaigns"][campaign_id]["init"] = []
self.db["campaigns"][campaign_id]["init_index"] = 0
self.db["campaigns"][campaign_id]["init_round"] = 0
# Reset actions and reactions
for entity in self.db["campaigns"][campaign_id]["entities"]:
self.db["campaigns"][campaign_id]["entities"][entity]["actions"] = 0
self.db["campaigns"][campaign_id]["entities"][entity]["reactions"] = 0
# do not clear any pending attacks since those still need to be resolved
self.save_db()
return True
def update_initiative_style(self, campaign_id, style):
self.db["campaigns"][campaign_id]["init_styles"] = style
self.save_db()
def reset_actions(self, campaign_id, entity_id):
self.db["campaigns"][campaign_id]["entities"][entity_id]["actions"] = 0
def next_turn(self, campaign_id, save_db=True):
if len(self.db["campaigns"][campaign_id]["init"]) == 0 and not self.db["campaigns"][campaign_id]["in_combat"]:
return False
# Check if we are ready to increment (no one has actions any more)
for init in self.db["campaigns"][campaign_id]["init"]:
entity = self.db["campaigns"][campaign_id]["entities"][init["entity_id"]]
if entity["actions"] > 0 and not entity["delayed_actions"]:
return False
# always increment once
init_index = __increment_init_index(self, campaign_id)
# increment additional times following init style rules
init_details = self.db["campaigns"][campaign_id]["init"][init_index]
while __should_keep_incrementing_init(self, campaign_id, init_details):
__increment_init_index(self, campaign_id)
if save_db:
self.save_db()
return True
# Helper functions
def __get_next_init(self, campaign_id):
next_init = self.db["campaigns"][campaign_id]["init_index"] + 1
init_length = len(self.db["campaigns"][campaign_id]["init"])
if (next_init >= init_length):
# wrap around
return 0
return next_init
def __should_keep_incrementing_init(self, campaign_id, starting_init_details):
# TODO: Should keep incrementing init if the entity is dead (maybe in this case it should just get removed from combat init though)
next_init = __get_next_init(self, campaign_id)
next_init_details = self.db["campaigns"][campaign_id]["init"][next_init]
starting_entity_id = starting_init_details["entity_id"]
next_entity_id = next_init_details["entity_id"]
init_style = self.db["campaigns"][campaign_id]["init_styles"]
style_detail = init_style == INIT_ASYNC or (
next_init_details["roll"] == starting_init_details["roll"] and next_init_details["bonus"] == starting_init_details["bonus"])
return next_entity_id[0] == starting_entity_id[0] and next_entity_id != starting_entity_id and style_detail
def __increment_init_index(self, campaign_id):
next_init = __get_next_init(self, campaign_id)
if next_init == 0:
# wrap around
self.db["campaigns"][campaign_id]["init_round"] += 1
self.db["campaigns"][campaign_id]["init_index"] = next_init
entity_id = self.db["campaigns"][campaign_id]["init"][next_init]["entity_id"]
__give_entity_actions(self, campaign_id, entity_id)
return next_init
def __give_entity_actions(self, campaign_id, entity_id):
# TODO: Do not reset actions / reactions if stunned / paralyzed
# TODO: Do not reset actions if the entity is dead
# TODO: Reset only 1 action / 1 reaction is the entity is dying
# Do not reset actions of users who delayed actions
if self.db["campaigns"][campaign_id]["entities"][entity_id]["actions"] > 1 and self.db["campaigns"][campaign_id]["entities"][entity_id]["delayed_actions"]:
return
self.db["campaigns"][campaign_id]["entities"][entity_id]["actions"] = 3
self.db["campaigns"][campaign_id]["entities"][entity_id]["reactions"] = 1
```
#### File: jnolan-poloniex/vennt-server/example.py
```python
import requests
import json
import uuid
import argparse
import time
url = 'http://localhost:3004/'
parser = argparse.ArgumentParser(description='Vennt API example client.')
parser.add_argument('--verify', action='store_true',
help="Stop running the example when a failure occurs")
parser.add_argument('--quiet', action='store_true',
help="Don't print server responses")
parser.add_argument('--nocert', action='store_true',
help="Don't use a secure connection")
args = parser.parse_args()
do_ssl = not args.nocert
def check_continue(response, expectError=False):
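    # Prints the server response; with --verify it exits unless the call succeeded
    # (or failed, when expectError=True)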
time.sleep(1) # need to sleep so we don't get rate limited
if not args.quiet:
print(response.text)
if not args.verify:
return True
data = json.loads(response.text)
if "success" not in data:
print("No success key received")
exit(1)
if data["success"] and expectError:
print("Operation succeeded but was expected to fail")
        print(data.get("info", ""))
exit(1)
if not data["success"]:
if expectError:
return True
print("Unsuccessful operation")
print(data["info"])
exit(1)
#################### ACCOUNT APIS ####################
print("New account")
gm_username = str(uuid.uuid4())
gm_password = str(uuid.uuid4())
data = {"register": gm_username, "password": <PASSWORD>}
response = requests.post(
url, json=data, verify=do_ssl)
check_continue(response)
response = json.loads(response.text)
old_auth_token = response["auth_token"]
print("Logout")
data = {"auth_token": old_auth_token}
response = requests.get(
url + 'logout?', params=data, verify=do_ssl)
check_continue(response)
print("Login")
data = {"login": gm_username, "password": <PASSWORD>}
response = requests.post(
url, json=data, verify=do_ssl)
check_continue(response)
response = json.loads(response.text)
gm_token = response["auth_token"]
assert(not args.verify or old_auth_token != gm_token)
print("New account 2")
player_username = str(uuid.uuid4())
player_password = str(uuid.uuid4())
data = {"register": player_username, "password": player_password}
response = requests.post(
url, json=data, verify=do_ssl)
check_continue(response)
response = json.loads(response.text)
player_token = response["auth_token"]
assert(not args.verify or player_token != gm_token)
#################### CHARACTER APIS ####################
print("create character")
random_name = str(uuid.uuid4())
data = {"auth_token": gm_token, "name": random_name,
"PER": 3, "MAX_HP": 10, "HP": 5, "gift": "Mind"}
response = requests.get(url + 'create_character', params=data, verify=do_ssl)
check_continue(response)
response = json.loads(response.text)
my_character_id = response["id"]
print("create character - invalid gift value - will fail")
data = {"auth_token": gm_token, "name": "bad character", "gift": "fake gift"}
response = requests.get(url + 'create_character', params=data, verify=do_ssl)
check_continue(response, expectError=True)
print("create character - invalid attribute value - will fail")
data = {"auth_token": gm_token,
"name": "bad character", "INT": "not a number"}
response = requests.get(url + 'create_character', params=data, verify=do_ssl)
check_continue(response, expectError=True)
print("Get characters")
data = {"auth_token": gm_token}
response = requests.get(url + 'get_characters', params=data, verify=do_ssl)
check_continue(response)
response = json.loads(response.text)
assert(not args.verify or len(response["value"]) == 1)
assert(not args.verify or response["value"]
[my_character_id]["name"] == random_name)
assert(not args.verify or response["value"][my_character_id]["PER"] == 3)
assert(not args.verify or response["value"][my_character_id]["INT"] == 0)
assert(not args.verify or response["value"][my_character_id]["gift"] == "Mind")
assert(not args.verify or response["value"]
[my_character_id]["is_enemy"] == False)
print("Get character")
data = {"auth_token": gm_token, "id": my_character_id}
response = requests.get(url + 'get_character', params=data, verify=do_ssl)
check_continue(response)
response = json.loads(response.text)
assert(not args.verify or response["value"]["name"] == random_name)
# ATTRIBUTES
print("set attribute")
data = {"auth_token": gm_token,
"id": my_character_id, "attr": "STR", "value": 3}
response = requests.get(url + 'set_attr', params=data, verify=do_ssl)
check_continue(response)
print("get attribute")
data = {"auth_token": gm_token, "id": my_character_id, "attr": "STR"}
response = requests.get(url + 'get_attr', params=data, verify=do_ssl)
check_continue(response)
response = json.loads(response.text)
assert(not args.verify or response["value"] == 3)
# ABILITIES
print("Lookup ability")
data = {"auth_token": gm_token, "name": "Basic Cooking"}
response = requests.get(url + 'lookup_ability', params=data, verify=do_ssl)
check_continue(response)
print("add ability")
data = {"auth_token": gm_token,
"name": "Basic Cooking", "id": my_character_id}
response = requests.get(url + 'add_ability', params=data, verify=do_ssl)
check_continue(response)
print("get abilities")
data = {"auth_token": gm_token, "id": my_character_id}
response = requests.get(url + 'get_abilities',
params=data, verify=do_ssl)
check_continue(response)
print("get ability")
data = {"auth_token": gm_token,
"name": "Basic Cooking", "id": my_character_id}
response = requests.get(url + 'get_ability', params=data, verify=do_ssl)
check_continue(response)
# ITEMS
print("Add item")
data = {"auth_token": gm_token, "id": my_character_id,
"name": "donut", "bulk": "1", "desc": "Just a donut"}
response = requests.get(url + 'add_item', params=data, verify=do_ssl)
check_continue(response)
response = json.loads(response.text)
item_id = response["id"]
print("View items")
data = {"auth_token": gm_token, "id": my_character_id}
response = requests.get(url + 'view_items', params=data, verify=do_ssl)
check_continue(response)
print("Remove item")
data = {"auth_token": gm_token, "id": my_character_id, "id2": item_id}
response = requests.get(url + 'remove_item', params=data, verify=do_ssl)
check_continue(response)
# WEAPONS (WIP)
print("Add weapon")
data = {"auth_token": gm_token, "name": "myfirstweapon",
"attr": "STR", "dmg": "1d6+6", "mods": {"burning": "1d6"}}
response = requests.get(url + 'add_weapon', params=data, verify=do_ssl)
check_continue(response)
print("Get weapon")
data = {"auth_token": gm_token, "name": "myfirstweapon"}
response = requests.get(url + 'get_weapon', params=data, verify=do_ssl)
check_continue(response)
print("Remove weapon")
data = {"auth_token": gm_token, "name": "myfirstweapon"}
response = requests.get(url + 'remove_weapon', params=data, verify=do_ssl)
check_continue(response)
# ENEMIES
print("create enemy")
data = {"auth_token": gm_token, "name": "myfirstenemy",
"WIS": 3, "MAX_HP": 10, "HP": 10}
response = requests.get(url + 'create_enemy', params=data, verify=do_ssl)
check_continue(response)
response = json.loads(response.text)
enemy_id = response["id"]
data = {"auth_token": gm_token, "id": enemy_id}
response = requests.get(url + 'get_character', params=data, verify=do_ssl)
check_continue(response)
response = json.loads(response.text)
assert(not args.verify or response["value"]["name"] == "myfirstenemy")
assert(not args.verify or response["value"]["WIS"] == 3)
assert(not args.verify or response["value"]["INT"] == 0)
assert(not args.verify or response["value"]["is_enemy"] == True)
#################### CAMPAIGN APIS ####################
print("Create campaign")
data = {"auth_token": gm_token, "name": "myfirstcampaign"}
response = requests.get(url + 'create_campaign?', params=data, verify=do_ssl)
check_continue(response)
response = json.loads(response.text)
campaign_id = response["campaign_id"]
# CAMPAIGN INVITES
print("Send campaign invite - self invite - will fail")
data = {"auth_token": gm_token,
"username": gm_username, "campaign_id": campaign_id}
response = requests.get(url + 'send_campaign_invite',
params=data, verify=do_ssl)
check_continue(response, expectError=True)
print("Send campaign invite - not owner of campaign - will fail")
data = {"auth_token": player_token,
"username": gm_username, "campaign_id": campaign_id}
response = requests.get(url + 'send_campaign_invite',
params=data, verify=do_ssl)
check_continue(response, expectError=True)
print("Send campaign invite")
data = {"auth_token": gm_token,
"username": player_username, "campaign_id": campaign_id}
response = requests.get(url + 'send_campaign_invite',
params=data, verify=do_ssl)
check_continue(response)
print("View campaign invites")
data = {"auth_token": player_token}
response = requests.get(url + 'view_campaign_invites',
params=data, verify=do_ssl)
check_continue(response)
print("Decline campaign invite")
data = {"auth_token": player_token, "campaign_id": campaign_id}
response = requests.get(url + 'decline_campaign_invite',
params=data, verify=do_ssl)
check_continue(response)
print("Accept campaign invite")
data = {"auth_token": gm_token,
"username": player_username, "campaign_id": campaign_id}
response = requests.get(url + 'send_campaign_invite',
params=data, verify=do_ssl)
check_continue(response)
data = {"auth_token": player_token, "campaign_id": campaign_id}
response = requests.get(url + 'accept_campaign_invite?',
params=data, verify=do_ssl)
check_continue(response)
print("Set role")
data = {"auth_token": gm_token, "campaign_id": campaign_id,
"username": gm_username, "role": "GM"}
response = requests.get(url + 'set_role', params=data, verify=do_ssl)
check_continue(response)
print("Get role")
data = {"auth_token": gm_token,
"campaign_id": campaign_id, "username": gm_username}
response = requests.get(url + 'get_role', params=data, verify=do_ssl)
check_continue(response)
print("Get campaigns")
data = {"auth_token": player_token, "name": "mySecondCampaign"}
response = requests.get(url + 'create_campaign?', params=data, verify=do_ssl)
check_continue(response)
response = json.loads(response.text)
campaign_id_2 = response["campaign_id"]
data = {"auth_token": player_token}
response = requests.get(url + 'get_campaigns', params=data, verify=do_ssl)
check_continue(response)
response = json.loads(response.text)
assert(not args.verify or len(response["value"]) == 2)
assert(not args.verify or response["value"][0]["id"] == campaign_id)
assert(not args.verify or response["value"][0]["name"] == "myfirstcampaign")
assert(not args.verify or response["value"][1]["id"] == campaign_id_2)
assert(not args.verify or response["value"][1]["name"] == "mySecondCampaign")
# ENTITY MANAGEMENT
data = {"auth_token": gm_token, "campaign_id": campaign_id,
"username": player_username, "role": "player"}
response = requests.get(url + 'set_role', params=data, verify=do_ssl)
check_continue(response)
spectator_username = str(uuid.uuid4())
spectator_password = str(uuid.uuid4())
data = {"register": spectator_username, "password": <PASSWORD>}
response = requests.post(
url, json=data, verify=do_ssl)
check_continue(response)
response = json.loads(response.text)
spectator_token = response["auth_token"]
no_perms_username = str(uuid.uuid4())
no_perms_password = str(uuid.uuid4())
data = {"register": no_perms_username, "password": <PASSWORD>perms_password}
response = requests.post(
url, json=data, verify=do_ssl)
check_continue(response)
response = json.loads(response.text)
no_perms_token = response["auth_token"]
data = {"auth_token": gm_token,
"username": spectator_username, "campaign_id": campaign_id}
response = requests.get(url + 'send_campaign_invite',
params=data, verify=do_ssl)
check_continue(response)
data = {"auth_token": spectator_token, "campaign_id": campaign_id}
response = requests.get(url + 'accept_campaign_invite?',
params=data, verify=do_ssl)
check_continue(response)
print("Add character to the campaign - GM")
data = {"auth_token": gm_token,
"campaign_id": campaign_id, "id": my_character_id}
response = requests.get(url + 'add_to_campaign', params=data, verify=do_ssl)
check_continue(response)
print("Add character to the campaign - player")
data = {"auth_token": player_token,
"name": "player character!", "INIT": 5, "SPEED": 6, "MAX_HP": 100, "HP": 52}
response = requests.get(url + 'create_character', params=data, verify=do_ssl)
check_continue(response)
response = json.loads(response.text)
player_character_id = response["id"]
data = {"auth_token": player_token,
"campaign_id": campaign_id, "id": player_character_id}
response = requests.get(url + 'add_to_campaign', params=data, verify=do_ssl)
check_continue(response)
print("Add character to the campaign - spectator - will fail")
data = {"auth_token": spectator_token, "name": random_name}
response = requests.get(url + 'create_character', params=data, verify=do_ssl)
check_continue(response)
response = json.loads(response.text)
spectator_character_id = response["id"]
data = {"auth_token": spectator_token,
"campaign_id": campaign_id, "id": spectator_character_id}
response = requests.get(url + 'add_to_campaign', params=data, verify=do_ssl)
check_continue(response, expectError=True)
print("Add enemy to the campaign - GM")
data = {"auth_token": gm_token,
"campaign_id": campaign_id, "id": enemy_id}
response = requests.get(url + 'add_to_campaign', params=data, verify=do_ssl)
check_continue(response)
print("Add enemy to the campaign - player - will fail")
data = {"auth_token": player_token, "name": "player enemy"}
response = requests.get(url + 'create_enemy', params=data, verify=do_ssl)
check_continue(response)
response = json.loads(response.text)
player_enemy_id = response["id"]
data = {"auth_token": player_token,
"campaign_id": campaign_id, "id": player_enemy_id}
response = requests.get(url + 'add_to_campaign', params=data, verify=do_ssl)
check_continue(response, expectError=True)
print("Get campaign - player")
data = {"auth_token": player_token, "campaign_id": campaign_id}
response = requests.get(url + 'get_campaign', params=data, verify=do_ssl)
check_continue(response)
response = json.loads(response.text)
player_campaign = response["value"]
assert(not args.verify or player_campaign["name"] == "myfirstcampaign")
assert(not args.verify or player_campaign["owner"] == gm_username)
assert(not args.verify or len(player_campaign["members"]) == 3)
assert(not args.verify or player_campaign["members"][gm_username] == "GM")
assert(
not args.verify or player_campaign["members"][player_username] == "player")
assert(
not args.verify or player_campaign["members"][spectator_username] == "spectator")
assert(not args.verify or len(player_campaign["entities"]) == 2)
assert(
not args.verify or player_campaign["entities"][my_character_id]["owner"] == gm_username)
assert(
not args.verify or player_campaign["entities"][my_character_id]["name"] == random_name)
assert(
not args.verify or player_campaign["entities"][my_character_id]["gm_only"] == False)
assert(
not args.verify or player_campaign["entities"][my_character_id]["actions"] == 0)
assert(
not args.verify or player_campaign["entities"][my_character_id]["reactions"] == 0)
assert(
not args.verify or player_campaign["entities"][my_character_id]["health"] == "bloodied")
assert(
not args.verify or player_campaign["entities"][player_character_id]["owner"] == player_username)
assert(not args.verify or player_campaign["init_styles"] == "traditional")
assert(not args.verify or len(player_campaign["init"]) == 0)
assert(not args.verify or player_campaign["init_index"] == 0)
assert(not args.verify or player_campaign["init_round"] == 0)
assert(not args.verify or player_campaign["in_combat"] == False)
print("Get campaign - spectator")
data = {"auth_token": spectator_token, "campaign_id": campaign_id}
response = requests.get(url + 'get_campaign', params=data, verify=do_ssl)
check_continue(response)
response = json.loads(response.text)
assert(not args.verify or response["value"] == player_campaign)
print("Get campaign - no permission - will fail")
data = {"auth_token": no_perms_token, "campaign_id": campaign_id}
response = requests.get(url + 'get_campaign', params=data, verify=do_ssl)
check_continue(response, expectError=True)
print("Get campaign - GM")
data = {"auth_token": gm_token, "campaign_id": campaign_id}
response = requests.get(url + 'get_campaign', params=data, verify=do_ssl)
check_continue(response)
response = json.loads(response.text)
gm_campaign = response["value"]
assert(not args.verify or gm_campaign != player_campaign)
assert(not args.verify or len(gm_campaign["entities"]) == 3)
assert(not args.verify or gm_campaign["entities"]
[enemy_id]["owner"] == gm_username)
assert(not args.verify or gm_campaign["entities"]
[enemy_id]["name"] == "myfirstenemy")
assert(not args.verify or gm_campaign["entities"][enemy_id]["gm_only"] == True)
assert(not args.verify or gm_campaign["entities"][enemy_id]["actions"] == 0)
assert(not args.verify or gm_campaign["entities"][enemy_id]["reactions"] == 0)
assert(not args.verify or gm_campaign["entities"]
[enemy_id]["health"] == "healthy")
#################### INITIATIVE APIS ####################
print("Add character to combat - GM")
data = {"auth_token": gm_token, "campaign_id": campaign_id,
"id": my_character_id, "roll": 10}
response = requests.get(url + 'add_to_combat', params=data, verify=do_ssl)
check_continue(response)
print("Add character to combat - GM - adds player character")
data = {"auth_token": gm_token, "campaign_id": campaign_id,
"id": player_character_id, "roll": 10}
response = requests.get(url + 'add_to_combat', params=data, verify=do_ssl)
check_continue(response)
print("Add enemies to combat - GM")
data = {"auth_token": gm_token, "campaign_id": campaign_id,
"id": enemy_id, "roll": 10}
response = requests.get(url + 'add_to_combat', params=data, verify=do_ssl)
check_continue(response)
data = {"auth_token": gm_token, "name": "second enemy",
"INIT": 1, "MAX_HP": 10, "HP": 5, "campaign_id": campaign_id}
response = requests.get(url + 'create_enemy', params=data, verify=do_ssl)
check_continue(response)
response = json.loads(response.text)
second_enemy_id = response["id"]
data = {"auth_token": gm_token, "campaign_id": campaign_id,
"id": second_enemy_id, "roll": 18}
response = requests.get(url + 'add_to_combat', params=data, verify=do_ssl)
check_continue(response)
print("Add character to combat - Player")
data = {"auth_token": player_token,
"name": "player character 2!", "STR": 6, "MP": 12, "MAX_MP": 12, "MAX_HP": 50, "HP": 50}
response = requests.get(url + 'create_character', params=data, verify=do_ssl)
check_continue(response)
response = json.loads(response.text)
player_second_character_id = response["id"]
data = {"auth_token": player_token,
"campaign_id": campaign_id, "id": player_second_character_id}
response = requests.get(url + 'add_to_campaign', params=data, verify=do_ssl)
check_continue(response)
data = {"auth_token": <PASSWORD>_token, "campaign_id": campaign_id,
"id": player_second_character_id, "roll": 10}
response = requests.get(url + 'add_to_combat', params=data, verify=do_ssl)
check_continue(response)
data = {"auth_token": <PASSWORD>ator_token, "campaign_id": campaign_id}
response = requests.get(url + 'get_campaign', params=data, verify=do_ssl)
check_continue(response)
response = json.loads(response.text)
assert(not args.verify or response["value"]
["entities"][enemy_id]["gm_only"] == False)
assert(not args.verify or len(response["value"]["init"]) == 5)
assert(not args.verify or response["value"]
["init"][0]["entity_id"] == second_enemy_id)
assert(not args.verify or response["value"]
["init"][1]["entity_id"] == player_character_id)
assert(not args.verify or response["value"]
["init"][2]["entity_id"] == my_character_id)
assert(not args.verify or response["value"]
["init"][3]["entity_id"] == player_second_character_id)
assert(not args.verify or response["value"]
["init"][4]["entity_id"] == enemy_id)
assert(not args.verify or response["value"]
["entities"][enemy_id]["reactions"] == 1)
assert(not args.verify or response["value"]["init_index"] == 0)
assert(not args.verify or response["value"]["init_round"] == 0)
assert(not args.verify or response["value"]["in_combat"] == False)
print("Start combat")
data = {"auth_token": gm_token, "campaign_id": campaign_id}
response = requests.get(url + 'start_combat', params=data, verify=do_ssl)
check_continue(response)
data = {"auth_token": spectator_token, "campaign_id": campaign_id}
response = requests.get(url + 'get_campaign', params=data, verify=do_ssl)
check_continue(response)
response = json.loads(response.text)
assert(not args.verify or response["value"]["init_index"] == 0)
assert(not args.verify or response["value"]["init_round"] == 0)
assert(not args.verify or response["value"]["in_combat"] == True)
assert(not args.verify or response["value"]
["entities"][second_enemy_id]["actions"] == 3)
assert(not args.verify or response["value"]
["entities"][second_enemy_id]["reactions"] == 1)
print("End turn - GM")
data = {"auth_token": gm_token,
"campaign_id": campaign_id, "id": second_enemy_id}
response = requests.get(url + 'end_turn', params=data, verify=do_ssl)
check_continue(response)
data = {"auth_token": spectator_token, "campaign_id": campaign_id}
response = requests.get(url + 'get_campaign', params=data, verify=do_ssl)
check_continue(response)
response = json.loads(response.text)
assert(not args.verify or response["value"]["init_index"] == 1)
assert(not args.verify or response["value"]["init_round"] == 0)
assert(not args.verify or response["value"]["in_combat"] == True)
assert(not args.verify or response["value"]
["entities"][second_enemy_id]["actions"] == 0)
assert(not args.verify or response["value"]
["entities"][second_enemy_id]["reactions"] == 1)
assert(not args.verify or response["value"]
["entities"][player_character_id]["actions"] == 3)
assert(not args.verify or response["value"]
["entities"][player_character_id]["reactions"] == 1)
print("End turn - player - init jumps forward 2")
data = {"auth_token": player_token,
"campaign_id": campaign_id, "id": player_character_id}
response = requests.get(url + 'end_turn', params=data, verify=do_ssl)
check_continue(response)
data = {"auth_token": spectator_token, "campaign_id": campaign_id}
response = requests.get(url + 'get_campaign', params=data, verify=do_ssl)
check_continue(response)
response = json.loads(response.text)
assert(not args.verify or response["value"]["init_index"] == 3)
assert(not args.verify or response["value"]["init_round"] == 0)
assert(not args.verify or response["value"]["in_combat"] == True)
assert(not args.verify or response["value"]
["entities"][player_character_id]["actions"] == 0)
assert(not args.verify or response["value"]
["entities"][player_character_id]["reactions"] == 1)
assert(not args.verify or response["value"]
["entities"][my_character_id]["actions"] == 3)
assert(not args.verify or response["value"]
["entities"][my_character_id]["reactions"] == 1)
assert(not args.verify or response["value"]
["entities"][player_second_character_id]["actions"] == 3)
assert(not args.verify or response["value"]
["entities"][player_second_character_id]["reactions"] == 1)
print("End turn - player - init does not move")
data = {"auth_token": <PASSWORD>_token,
"campaign_id": campaign_id, "id": player_second_character_id}
response = requests.get(url + 'end_turn', params=data, verify=do_ssl)
check_continue(response)
data = {"auth_token": <PASSWORD>, "campaign_id": campaign_id}
response = requests.get(url + 'get_campaign', params=data, verify=do_ssl)
check_continue(response)
response = json.loads(response.text)
assert(not args.verify or response["value"]["init_index"] == 3)
assert(not args.verify or response["value"]["init_round"] == 0)
assert(not args.verify or response["value"]["in_combat"] == True)
assert(not args.verify or response["value"]
["entities"][player_character_id]["actions"] == 0)
assert(not args.verify or response["value"]
["entities"][player_character_id]["reactions"] == 1)
assert(not args.verify or response["value"]
["entities"][my_character_id]["actions"] == 3)
assert(not args.verify or response["value"]
["entities"][my_character_id]["reactions"] == 1)
assert(not args.verify or response["value"]
["entities"][player_second_character_id]["actions"] == 0)
assert(not args.verify or response["value"]
["entities"][player_second_character_id]["reactions"] == 1)
print("End turn - GM - init moves 1")
data = {"auth_token": gm_token,
"campaign_id": campaign_id, "id": my_character_id}
response = requests.get(url + 'end_turn', params=data, verify=do_ssl)
check_continue(response)
data = {"auth_token": <PASSWORD>_token, "campaign_id": campaign_id}
response = requests.get(url + 'get_campaign', params=data, verify=do_ssl)
check_continue(response)
response = json.loads(response.text)
assert(not args.verify or response["value"]["init_index"] == 4)
assert(not args.verify or response["value"]["init_round"] == 0)
assert(not args.verify or response["value"]["in_combat"] == True)
assert(not args.verify or response["value"]
["entities"][player_character_id]["actions"] == 0)
assert(not args.verify or response["value"]
["entities"][player_character_id]["reactions"] == 1)
assert(not args.verify or response["value"]
["entities"][my_character_id]["actions"] == 0)
assert(not args.verify or response["value"]
["entities"][my_character_id]["reactions"] == 1)
assert(not args.verify or response["value"]
["entities"][player_second_character_id]["actions"] == 0)
assert(not args.verify or response["value"]
["entities"][player_second_character_id]["reactions"] == 1)
assert(not args.verify or response["value"]
["entities"][enemy_id]["actions"] == 3)
assert(not args.verify or response["value"]
["entities"][enemy_id]["reactions"] == 1)
print("End turn - GM - init wraps around again")
data = {"auth_token": gm_token,
"campaign_id": campaign_id, "id": enemy_id}
response = requests.get(url + 'end_turn', params=data, verify=do_ssl)
check_continue(response)
data = {"auth_token": spectator_token, "campaign_id": campaign_id}
response = requests.get(url + 'get_campaign', params=data, verify=do_ssl)
check_continue(response)
response = json.loads(response.text)
assert(not args.verify or response["value"]["init_index"] == 0)
assert(not args.verify or response["value"]["init_round"] == 1)
assert(not args.verify or response["value"]["in_combat"] == True)
assert(not args.verify or response["value"]
["entities"][second_enemy_id]["actions"] == 3)
assert(not args.verify or response["value"]
["entities"][second_enemy_id]["reactions"] == 1)
assert(not args.verify or response["value"]
["entities"][enemy_id]["actions"] == 0)
assert(not args.verify or response["value"]
["entities"][enemy_id]["reactions"] == 1)
print("Remove from combat - GM")
data = {"auth_token": gm_token,
"campaign_id": campaign_id, "id": player_second_character_id}
response = requests.get(url + 'remove_from_combat', params=data, verify=do_ssl)
check_continue(response)
data = {"auth_token": <PASSWORD>_token, "campaign_id": campaign_id}
response = requests.get(url + 'get_campaign', params=data, verify=do_ssl)
check_continue(response)
response = json.loads(response.text)
assert(not args.verify or response["value"]["init_index"] == 0)
assert(not args.verify or response["value"]["init_round"] == 1)
assert(not args.verify or response["value"]["in_combat"] == True)
assert(not args.verify or len(list(filter(
lambda init: init["entity_id"] == player_second_character_id, response["value"]["init"]))) == 0)
print("Remove from combat - GM - init_index remains the same")
data = {"auth_token": gm_token,
"campaign_id": campaign_id, "id": second_enemy_id}
response = requests.get(url + 'remove_from_combat', params=data, verify=do_ssl)
check_continue(response)
data = {"auth_token": <PASSWORD>ator_token, "campaign_id": campaign_id}
response = requests.get(url + 'get_campaign', params=data, verify=do_ssl)
check_continue(response)
response = json.loads(response.text)
assert(not args.verify or response["value"]["init_index"] == 0)
assert(not args.verify or response["value"]["init_round"] == 1)
assert(not args.verify or response["value"]["in_combat"] == True)
assert(not args.verify or len(list(filter(
lambda init: init["entity_id"] == second_enemy_id, response["value"]["init"]))) == 0)
print("End combat")
data = {"auth_token": gm_token, "campaign_id": campaign_id}
response = requests.get(url + 'end_combat', params=data, verify=do_ssl)
check_continue(response)
data = {"auth_token": spectator_token, "campaign_id": campaign_id}
response = requests.get(url + 'get_campaign', params=data, verify=do_ssl)
check_continue(response)
response = json.loads(response.text)
assert(not args.verify or response["value"]["init_index"] == 0)
assert(not args.verify or response["value"]["init_round"] == 0)
assert(not args.verify or response["value"]["in_combat"] == False)
assert(not args.verify or len(response["value"]["init"]) == 0)
# Print user information for further testing purposes
print("----------------------------------------------------------------------------------")
print("GM username: '{}' password: '{}' auth_token: '{}'".format(
gm_username, gm_password, gm_token))
print("Player username: '{}' password: '{}' auth_token: '{}'".format(
player_username, player_password, player_token))
print("Spectator username: '{}' password: '{}' auth_token: '{}'".format(
spectator_username, spectator_password, spectator_token))
print("No Perms username: '{}' password: '{}' auth_token: '{}'".format(
no_perms_username, no_perms_password, no_perms_token))
```
#### File: jnolan-poloniex/vennt-server/webscraper.py
```python
import time, requests, datetime, re
from bs4 import BeautifulSoup
import venntdb
from constants import *
import importlib
logClass = importlib.import_module("logger")
logger = logClass.Logger("webscraper")
# VenntHandler methods
def lookup_ability(self, args):
name = args[KEY_NAME]
abiDict = self.server.db.get_cached_ability(name)
if abiDict is not None:
logger.log("lookup_ability", "cache hit")
return self.respond({"success":True, "value":abiDict["contents"]})
logger.log("lookup_ability", "cache miss")
approximations, URL = find_ability(self, name)
if len(approximations) == 0:
return self.respond({"success":False, "info":"No such ability"})
elif len(approximations) > 1:
return self.respond({"success":False, "info":"More than one match", "matches":approximations})
else:
ability = "\n".join(get_ability_contents(approximations[0], URL))
# should add to cache here or no?
return self.respond({"success":True, "value":ability})
# Returns list of matches and URL string (if found)
def find_ability(self, name):
name = name.replace("'", "’") # Wiki uses smart quotes
logger.log("find_ability", "Looking for " + name)
return self.server.db.find_ability(name)
def ability_exists(self, *args):
approximations, URL = self.find_ability(" ".join(args[:]))
return len(approximations) == 1
# Returns contents of ability as list of lines
def get_ability_contents(ability, URL):
page = requests.get(URL)
soup = BeautifulSoup(page.content, 'html.parser')
found = False
contents = []
last_had_newline = False
best_match = 999
for hit in soup.find_all('p'):
text = hit.get_text()
text = text.replace("’", "'") # I hate smart quotes
if ability in text and len(text) < best_match: # Goes through the whole page, takes the *shortest* valid line which matches the given description
found = True
best_match = len(text)
contents = []
if found and (text.isspace() or (text.startswith('\n') and last_had_newline)):
found = False
if found:
contents.append(text)
if text.endswith('\n'):
last_had_newline = True
else:
last_had_newline = False
return contents
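# Illustrative usage sketch (the URL below is a placeholder, not the real wiki endpoint):
# get_ability_contents() fetches the page, finds the shortest <p> paragraph that mentions
# the ability name, and collects that paragraph plus the ones that follow it.
#
# contents = get_ability_contents("Fireball", "https://example.com/wiki/abilities")
# print("\n".join(contents))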
``` |
{
"source": "jnolis/dask",
"score": 2
} |
#### File: array/tests/test_stats.py
```python
import pytest
scipy = pytest.importorskip("scipy")
import numpy as np
import dask.array as da
import dask.array.stats
from dask.array.utils import allclose, assert_eq
from dask.delayed import Delayed
@pytest.mark.parametrize(
"kind, kwargs", [("skew", {}), ("kurtosis", {}), ("kurtosis", {"fisher": False})]
)
@pytest.mark.parametrize("single_dim", [True, False])
def test_measures(kind, kwargs, single_dim):
np.random.seed(seed=1337)
if single_dim:
x = np.random.random(size=(30,))
else:
x = np.random.random(size=(30, 2))
y = da.from_array(x, 3)
dfunc = getattr(dask.array.stats, kind)
sfunc = getattr(scipy.stats, kind)
expected = sfunc(x, **kwargs)
result = dfunc(y, **kwargs)
if np.isscalar(expected):
# make it an array to account for possible numeric errors
expected = np.array(expected)
assert_eq(result, expected)
assert isinstance(result, da.Array)
def test_bias_raises():
x = np.random.random(size=(30, 2))
y = da.from_array(x, 3)
with pytest.raises(NotImplementedError):
dask.array.stats.skew(y, bias=False)
with pytest.raises(NotImplementedError):
dask.array.stats.kurtosis(y, bias=False)
@pytest.mark.parametrize(
"kind", ["chisquare", "power_divergence", "normaltest", "skewtest", "kurtosistest"]
)
def test_one(kind):
a = np.random.random(size=30)
a_ = da.from_array(a, 3)
dask_test = getattr(dask.array.stats, kind)
scipy_test = getattr(scipy.stats, kind)
result = dask_test(a_)
expected = scipy_test(a)
assert isinstance(result, Delayed)
assert allclose(result.compute(), expected)
@pytest.mark.parametrize(
"kind, kwargs",
[
("ttest_ind", {}),
("ttest_ind", {"equal_var": False}),
("ttest_1samp", {}),
("ttest_rel", {}),
("chisquare", {}),
("power_divergence", {}),
("power_divergence", {"lambda_": 0}),
("power_divergence", {"lambda_": -1}),
("power_divergence", {"lambda_": "neyman"}),
],
)
def test_two(kind, kwargs):
a = np.random.random(size=30)
b = np.random.random(size=30)
a_ = da.from_array(a, 3)
b_ = da.from_array(b, 3)
dask_test = getattr(dask.array.stats, kind)
scipy_test = getattr(scipy.stats, kind)
with pytest.warns(None):  # maybe overflow warning (power_divergence)
result = dask_test(a_, b_, **kwargs)
expected = scipy_test(a, b, **kwargs)
assert isinstance(result, Delayed)
assert allclose(result.compute(), expected)
# fails occasionally. shouldn't this be exact?
# assert dask.compute(*result) == expected
@pytest.mark.parametrize("k", range(5))
def test_moments(k):
x = np.random.random(size=(30, 2))
y = da.from_array(x, 3)
expected = scipy.stats.moment(x, k)
result = dask.array.stats.moment(y, k)
assert_eq(result, expected)
def test_anova():
np_args = [i * np.random.random(size=(30,)) for i in range(4)]
da_args = [da.from_array(x, chunks=10) for x in np_args]
result = dask.array.stats.f_oneway(*da_args)
expected = scipy.stats.f_oneway(*np_args)
assert allclose(result.compute(), expected)
@pytest.mark.parametrize(
"func, nargs",
[
(dask.array.stats.ttest_1samp, 2),
(dask.array.stats.ttest_rel, 2),
(dask.array.stats.skewtest, 1),
(dask.array.stats.kurtosis, 1),
(dask.array.stats.kurtosistest, 1),
(dask.array.stats.normaltest, 1),
(dask.array.stats.moment, 1),
],
)
@pytest.mark.parametrize("nan_policy", ["omit", "raise"])
def test_nan_raises(func, nargs, nan_policy):
with pytest.raises(NotImplementedError):
func(*(None,) * nargs, nan_policy=nan_policy)
def test_power_divergence_invalid():
a = np.random.random(size=30)
a_ = da.from_array(a, 3)
with pytest.raises(ValueError):
dask.array.stats.power_divergence(a_, lambda_="wrong")
def test_skew_raises():
a = da.ones((7,), chunks=(7,))
with pytest.raises(ValueError, match="7 samples"):
dask.array.stats.skewtest(a)
def test_skew_single_return_type():
"""This function tests the return type for the skew method for a 1d array."""
numpy_array = np.random.random(size=(30,))
dask_array = da.from_array(numpy_array, 3)
result = dask.array.stats.skew(dask_array).compute()
assert isinstance(result, np.float64)
def test_kurtosis_single_return_type():
"""This function tests the return type for the kurtosis method for a 1d array."""
numpy_array = np.random.random(size=(30,))
dask_array = da.from_array(numpy_array, 3)
result = dask.array.stats.kurtosis(dask_array).compute()
result_non_fisher = dask.array.stats.kurtosis(dask_array, fisher=False).compute()
assert isinstance(result, np.float64)
assert isinstance(result_non_fisher, np.float64)
```
#### File: dask/dataframe/backends.py
```python
import warnings
import numpy as np
import pandas as pd
from pandas.api.types import (
is_categorical_dtype,
is_datetime64tz_dtype,
is_interval_dtype,
is_period_dtype,
is_scalar,
is_sparse,
union_categoricals,
)
from ..utils import is_arraylike, typename
from ._compat import PANDAS_GT_100
from .core import DataFrame, Index, Scalar, Series, _Frame
from .dispatch import (
categorical_dtype_dispatch,
concat,
concat_dispatch,
get_parallel_type,
group_split_dispatch,
hash_object_dispatch,
is_categorical_dtype_dispatch,
make_meta,
make_meta_obj,
meta_nonempty,
tolist_dispatch,
union_categoricals_dispatch,
)
from .extensions import make_array_nonempty, make_scalar
from .utils import (
_empty_series,
_nonempty_scalar,
_scalar_from_dtype,
is_categorical_dtype,
is_float_na_dtype,
is_integer_na_dtype,
)
##########
# Pandas #
##########
@make_scalar.register(np.dtype)
def _(dtype):
return _scalar_from_dtype(dtype)
@make_scalar.register(pd.Timestamp)
@make_scalar.register(pd.Timedelta)
@make_scalar.register(pd.Period)
@make_scalar.register(pd.Interval)
def _(x):
return x
@make_meta.register((pd.Series, pd.DataFrame))
def make_meta_pandas(x, index=None):
return x.iloc[:0]
@make_meta.register(pd.Index)
def make_meta_index(x, index=None):
return x[0:0]
meta_object_types = (pd.Series, pd.DataFrame, pd.Index, pd.MultiIndex)
try:
import scipy.sparse as sp
meta_object_types += (sp.spmatrix,)
except ImportError:
pass
@make_meta_obj.register(meta_object_types)
def make_meta_object(x, index=None):
"""Create an empty pandas object containing the desired metadata.
Parameters
----------
x : dict, tuple, list, pd.Series, pd.DataFrame, pd.Index, dtype, scalar
To create a DataFrame, provide a `dict` mapping of `{name: dtype}`, or
an iterable of `(name, dtype)` tuples. To create a `Series`, provide a
tuple of `(name, dtype)`. If a pandas object, names, dtypes, and index
should match the desired output. If a dtype or scalar, a scalar of the
same dtype is returned.
index : pd.Index, optional
Any pandas index to use in the metadata. If none provided, a
`RangeIndex` will be used.
Examples
--------
>>> make_meta([('a', 'i8'), ('b', 'O')]) # doctest: +SKIP
Empty DataFrame
Columns: [a, b]
Index: []
>>> make_meta(('a', 'f8')) # doctest: +SKIP
Series([], Name: a, dtype: float64)
>>> make_meta('i8') # doctest: +SKIP
1
"""
if is_arraylike(x) and x.shape:
return x[:0]
if index is not None:
index = make_meta(index)
if isinstance(x, dict):
return pd.DataFrame(
{c: _empty_series(c, d, index=index) for (c, d) in x.items()}, index=index
)
if isinstance(x, tuple) and len(x) == 2:
return _empty_series(x[0], x[1], index=index)
elif isinstance(x, (list, tuple)):
if not all(isinstance(i, tuple) and len(i) == 2 for i in x):
raise ValueError(
"Expected iterable of tuples of (name, dtype), got {0}".format(x)
)
return pd.DataFrame(
{c: _empty_series(c, d, index=index) for (c, d) in x},
columns=[c for c, d in x],
index=index,
)
elif not hasattr(x, "dtype") and x is not None:
# could be a string, a dtype object, or a python type. Skip `None`,
# because it is implicitly converted to `dtype('f8')`, which we don't
# want here.
try:
dtype = np.dtype(x)
return _scalar_from_dtype(dtype)
except Exception:
# Continue on to next check
pass
if is_scalar(x):
return _nonempty_scalar(x)
raise TypeError("Don't know how to create metadata from {0}".format(x))
@meta_nonempty.register(object)
def meta_nonempty_object(x):
"""Create a nonempty pandas object from the given metadata.
Returns a pandas DataFrame, Series, or Index that contains two rows
of fake data.
"""
if is_scalar(x):
return _nonempty_scalar(x)
else:
raise TypeError(
"Expected Pandas-like Index, Series, DataFrame, or scalar, "
"got {0}".format(typename(type(x)))
)
@meta_nonempty.register(pd.DataFrame)
def meta_nonempty_dataframe(x):
idx = meta_nonempty(x.index)
dt_s_dict = dict()
data = dict()
for i, c in enumerate(x.columns):
series = x.iloc[:, i]
dt = series.dtype
if dt not in dt_s_dict:
dt_s_dict[dt] = _nonempty_series(x.iloc[:, i], idx=idx)
data[i] = dt_s_dict[dt]
res = pd.DataFrame(data, index=idx, columns=np.arange(len(x.columns)))
res.columns = x.columns
if PANDAS_GT_100:
res.attrs = x.attrs
return res
_numeric_index_types = (pd.Int64Index, pd.Float64Index, pd.UInt64Index)
@meta_nonempty.register(pd.Index)
def _nonempty_index(idx):
typ = type(idx)
if typ is pd.RangeIndex:
return pd.RangeIndex(2, name=idx.name)
elif typ in _numeric_index_types:
return typ([1, 2], name=idx.name)
elif typ is pd.Index:
return pd.Index(["a", "b"], name=idx.name)
elif typ is pd.DatetimeIndex:
start = "1970-01-01"
# Need a non-monotonic decreasing index to avoid issues with
# partial string indexing see https://github.com/dask/dask/issues/2389
# and https://github.com/pandas-dev/pandas/issues/16515
# This doesn't mean `_meta_nonempty` should ever rely on
# `self.monotonic_increasing` or `self.monotonic_decreasing`
try:
return pd.date_range(
start=start, periods=2, freq=idx.freq, tz=idx.tz, name=idx.name
)
except ValueError: # older pandas versions
data = [start, "1970-01-02"] if idx.freq is None else None
return pd.DatetimeIndex(
data, start=start, periods=2, freq=idx.freq, tz=idx.tz, name=idx.name
)
elif typ is pd.PeriodIndex:
return pd.period_range(
start="1970-01-01", periods=2, freq=idx.freq, name=idx.name
)
elif typ is pd.TimedeltaIndex:
start = np.timedelta64(1, "D")
try:
return pd.timedelta_range(
start=start, periods=2, freq=idx.freq, name=idx.name
)
except ValueError: # older pandas versions
start = np.timedelta64(1, "D")
data = [start, start + 1] if idx.freq is None else None
return pd.TimedeltaIndex(
data, start=start, periods=2, freq=idx.freq, name=idx.name
)
elif typ is pd.CategoricalIndex:
if len(idx.categories) == 0:
data = pd.Categorical(_nonempty_index(idx.categories), ordered=idx.ordered)
else:
data = pd.Categorical.from_codes(
[-1, 0], categories=idx.categories, ordered=idx.ordered
)
return pd.CategoricalIndex(data, name=idx.name)
elif typ is pd.MultiIndex:
levels = [_nonempty_index(l) for l in idx.levels]
codes = [[0, 0] for i in idx.levels]
try:
return pd.MultiIndex(levels=levels, codes=codes, names=idx.names)
except TypeError: # older pandas versions
return pd.MultiIndex(levels=levels, labels=codes, names=idx.names)
raise TypeError(
"Don't know how to handle index of type {0}".format(typename(type(idx)))
)
@meta_nonempty.register(pd.Series)
def _nonempty_series(s, idx=None):
# TODO: Use register dtypes with make_array_nonempty
if idx is None:
idx = _nonempty_index(s.index)
dtype = s.dtype
if len(s) > 0:
# use value from meta if provided
data = [s.iloc[0]] * 2
elif is_datetime64tz_dtype(dtype):
entry = pd.Timestamp("1970-01-01", tz=dtype.tz)
data = [entry, entry]
elif is_categorical_dtype(dtype):
if len(s.cat.categories):
data = [s.cat.categories[0]] * 2
cats = s.cat.categories
else:
data = _nonempty_index(s.cat.categories)
cats = s.cat.categories[:0]
data = pd.Categorical(data, categories=cats, ordered=s.cat.ordered)
elif is_integer_na_dtype(dtype):
data = pd.array([1, None], dtype=dtype)
elif is_float_na_dtype(dtype):
data = pd.array([1.0, None], dtype=dtype)
elif is_period_dtype(dtype):
# pandas 0.24.0+ should infer this to be Series[Period[freq]]
freq = dtype.freq
data = [pd.Period("2000", freq), pd.Period("2001", freq)]
elif is_sparse(dtype):
entry = _scalar_from_dtype(dtype.subtype)
if PANDAS_GT_100:
data = pd.array([entry, entry], dtype=dtype)
else:
data = pd.SparseArray([entry, entry], dtype=dtype)
elif is_interval_dtype(dtype):
entry = _scalar_from_dtype(dtype.subtype)
data = pd.array([entry, entry], dtype=dtype)
elif type(dtype) in make_array_nonempty._lookup:
data = make_array_nonempty(dtype)
else:
entry = _scalar_from_dtype(dtype)
data = np.array([entry, entry], dtype=dtype)
out = pd.Series(data, name=s.name, index=idx)
if PANDAS_GT_100:
out.attrs = s.attrs
return out
@union_categoricals_dispatch.register(
(pd.DataFrame, pd.Series, pd.Index, pd.Categorical)
)
def union_categoricals_pandas(to_union, sort_categories=False, ignore_order=False):
return pd.api.types.union_categoricals(
to_union, sort_categories=sort_categories, ignore_order=ignore_order
)
@get_parallel_type.register(pd.Series)
def get_parallel_type_series(_):
return Series
@get_parallel_type.register(pd.DataFrame)
def get_parallel_type_dataframe(_):
return DataFrame
@get_parallel_type.register(pd.Index)
def get_parallel_type_index(_):
return Index
@get_parallel_type.register(_Frame)
def get_parallel_type_frame(o):
return get_parallel_type(o._meta)
@get_parallel_type.register(object)
def get_parallel_type_object(_):
return Scalar
@hash_object_dispatch.register((pd.DataFrame, pd.Series, pd.Index))
def hash_object_pandas(
obj, index=True, encoding="utf8", hash_key=None, categorize=True
):
return pd.util.hash_pandas_object(
obj, index=index, encoding=encoding, hash_key=hash_key, categorize=categorize
)
@group_split_dispatch.register((pd.DataFrame, pd.Series, pd.Index))
def group_split_pandas(df, c, k, ignore_index=False):
indexer, locations = pd._libs.algos.groupsort_indexer(
c.astype(np.int64, copy=False), k
)
df2 = df.take(indexer)
locations = locations.cumsum()
parts = [
df2.iloc[a:b].reset_index(drop=True) if ignore_index else df2.iloc[a:b]
for a, b in zip(locations[:-1], locations[1:])
]
return dict(zip(range(k), parts))
@concat_dispatch.register((pd.DataFrame, pd.Series, pd.Index))
def concat_pandas(
dfs,
axis=0,
join="outer",
uniform=False,
filter_warning=True,
ignore_index=False,
**kwargs
):
ignore_order = kwargs.pop("ignore_order", False)
if axis == 1:
return pd.concat(dfs, axis=axis, join=join, **kwargs)
# Support concatenating indices along axis 0
if isinstance(dfs[0], pd.Index):
if isinstance(dfs[0], pd.CategoricalIndex):
for i in range(1, len(dfs)):
if not isinstance(dfs[i], pd.CategoricalIndex):
dfs[i] = dfs[i].astype("category")
return pd.CategoricalIndex(
union_categoricals(dfs, ignore_order=ignore_order), name=dfs[0].name
)
elif isinstance(dfs[0], pd.MultiIndex):
first, rest = dfs[0], dfs[1:]
if all(
(isinstance(o, pd.MultiIndex) and o.nlevels >= first.nlevels)
for o in rest
):
arrays = [
concat([i._get_level_values(n) for i in dfs])
for n in range(first.nlevels)
]
return pd.MultiIndex.from_arrays(arrays, names=first.names)
to_concat = (first.values,) + tuple(k._values for k in rest)
new_tuples = np.concatenate(to_concat)
try:
return pd.MultiIndex.from_tuples(new_tuples, names=first.names)
except Exception:
return pd.Index(new_tuples)
return dfs[0].append(dfs[1:])
# Handle categorical index separately
dfs0_index = dfs[0].index
has_categoricalindex = isinstance(dfs0_index, pd.CategoricalIndex) or (
isinstance(dfs0_index, pd.MultiIndex)
and any(isinstance(i, pd.CategoricalIndex) for i in dfs0_index.levels)
)
if has_categoricalindex:
dfs2 = [df.reset_index(drop=True) for df in dfs]
ind = concat([df.index for df in dfs])
else:
dfs2 = dfs
ind = None
# Concatenate the partitions together, handling categories as needed
if (
isinstance(dfs2[0], pd.DataFrame)
if uniform
else any(isinstance(df, pd.DataFrame) for df in dfs2)
):
if uniform:
dfs3 = dfs2
cat_mask = dfs2[0].dtypes == "category"
else:
# When concatenating mixed dataframes and series on axis 1, Pandas
# converts series to dataframes with a single column named 0, then
# concatenates.
dfs3 = [
df
if isinstance(df, pd.DataFrame)
else df.to_frame().rename(columns={df.name: 0})
for df in dfs2
]
# pandas may raise a RuntimeWarning for comparing ints and strs
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
if filter_warning:
warnings.simplefilter("ignore", FutureWarning)
cat_mask = pd.concat(
[(df.dtypes == "category").to_frame().T for df in dfs3],
join=join,
**kwargs
).any()
if cat_mask.any():
not_cat = cat_mask[~cat_mask].index
# this should be aligned, so no need to filter warning
out = pd.concat(
[df[df.columns.intersection(not_cat)] for df in dfs3],
join=join,
**kwargs
)
temp_ind = out.index
for col in cat_mask.index.difference(not_cat):
# Find an example of categoricals in this column
for df in dfs3:
sample = df.get(col)
if sample is not None:
break
# Extract partitions, subbing in missing if needed
parts = []
for df in dfs3:
if col in df.columns:
parts.append(df[col])
else:
codes = np.full(len(df), -1, dtype="i8")
data = pd.Categorical.from_codes(
codes, sample.cat.categories, sample.cat.ordered
)
parts.append(data)
out[col] = union_categoricals(parts, ignore_order=ignore_order)
# Pandas resets index type on assignment if frame is empty
# https://github.com/pandas-dev/pandas/issues/17101
if not len(temp_ind):
out.index = temp_ind
out = out.reindex(columns=cat_mask.index)
else:
# pandas may raise a RuntimeWarning for comparing ints and strs
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
if filter_warning:
warnings.simplefilter("ignore", FutureWarning)
out = pd.concat(dfs3, join=join, sort=False)
else:
if is_categorical_dtype(dfs2[0].dtype):
if ind is None:
ind = concat([df.index for df in dfs2])
return pd.Series(
union_categoricals(dfs2, ignore_order=ignore_order),
index=ind,
name=dfs2[0].name,
)
with warnings.catch_warnings():
if filter_warning:
warnings.simplefilter("ignore", FutureWarning)
out = pd.concat(dfs2, join=join, **kwargs)
# Re-add the index if needed
if ind is not None:
out.index = ind
return out
@categorical_dtype_dispatch.register((pd.DataFrame, pd.Series, pd.Index))
def categorical_dtype_pandas(categories=None, ordered=False):
return pd.api.types.CategoricalDtype(categories=categories, ordered=ordered)
@tolist_dispatch.register((pd.Series, pd.Index, pd.Categorical))
def tolist_pandas(obj):
return obj.tolist()
@is_categorical_dtype_dispatch.register(
(pd.Series, pd.Index, pd.api.extensions.ExtensionDtype, np.dtype)
)
def is_categorical_dtype_pandas(obj):
return pd.api.types.is_categorical_dtype(obj)
######################################
# cuDF: Pandas Dataframes on the GPU #
######################################
@concat_dispatch.register_lazy("cudf")
@hash_object_dispatch.register_lazy("cudf")
@group_split_dispatch.register_lazy("cudf")
@get_parallel_type.register_lazy("cudf")
@meta_nonempty.register_lazy("cudf")
@make_meta.register_lazy("cudf")
@make_meta_obj.register_lazy("cudf")
def _register_cudf():
import dask_cudf # noqa: F401
``` |
{
"source": "jnomani/locust-plugins",
"score": 3
} |
#### File: locust-plugins/locust_plugins/mongoreader.py
```python
from pymongo import MongoClient
from datetime import datetime
import logging
import time
from contextlib import contextmanager
import os
class MongoReader:
def __init__(self, id_column, uri=None, database=None, collection=None, filters=[]):
uri = uri or os.environ["LOCUST_MONGO"]
database = database or os.environ["LOCUST_MONGO_DATABASE"]
collection = collection or os.environ["LOCUST_MONGO_COLLECTION"]
self.coll = MongoClient(uri)[database][collection]
self.id_column = id_column
self.delay_warning = 0.5
self.query = {"$and": filters + [{"logged_in": 0}]}
@contextmanager
def user(self):
start_at = time.time()
user = self.coll.find_one_and_update(
self.query, {"$set": {"last_login": datetime.now(), "logged_in": 1}}, sort=[("last_login", 1)]
)
if user is None:
raise Exception(f"Didnt get any user from db ({self.coll}) using query {self.query}")
if start_at + self.delay_warning < time.time():
logging.warning(
f"Getting a user took more than {self.delay_warning} seconds (doubling warning threshold for next time)"
)
self.delay_warning *= 2
try:
yield user
finally:
releasessn = self.coll.find_one_and_update(
{"$and": [{self.id_column: user[self.id_column]}, {"logged_in": 1}]}, {"$set": {"logged_in": 0}}
)
if releasessn is None:
raise Exception("Couldnt release lock for user in db. ")
``` |
{
"source": "jnoms/virID",
"score": 3
} |
#### File: bin/python/get_counts.py
```python
import pandas as pd
import pathlib
import inspect
from ete3 import NCBITaxa
ncbi = NCBITaxa()
import argparse
import time
import os
import csv
import sys
# Allow input fields to be extremely large.
csv.field_size_limit(sys.maxsize)
#------------------------------------------------------------------------------#
# Defining functions
#------------------------------------------------------------------------------#
#Ete3 functions
def get_level(taxonID):
level = list(ncbi.get_rank([taxonID]).values())
# Unknown taxonID would yield [], which can't be indexed by [0] to get the
# string
if level == []:
level = "UNKNOWN"
else:
level = level[0]
return level
def get_name(taxonID):
name = list(ncbi.get_taxid_translator([taxonID]).values())
# Unknown taxonID would yield [], which can't be indexed by [0] to get the
# string
if name == []:
name = "UNKNOWN"
else:
name = name[0]
name = name.replace(" ", "_")
return name
def get_lineage(taxonID):
try:
lineage = ncbi.get_lineage(taxonID)
except ValueError:
print("Cannot find taxonID " + str(taxonID))
lineage = [taxonID]
if lineage is None:
lineage = [0]
return lineage
def get_named_lineage_from_lineage(lineage, prefix_dictionary =
{'strain': 'st__',
'species': 's__',
'genus': 'g__',
'family': 'f__',
'order': 'o__',
'class': 'c__',
'phylum': 'p__',
'kingdom': 'k__',
'superkingdom': 'sk__'}
):
named_lineage = []
conversion_dict = ncbi.get_taxid_translator(lineage)
for taxonID in lineage:
level = get_level(taxonID)
prefix = prefix_dictionary.get(level, 'UNKNOWN__')
name = prefix + conversion_dict.get(taxonID, "UNKNOWN").replace(" ", "_")
named_lineage.append(name)
return named_lineage
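# Illustrative example (names come from the local ete3 NCBI taxonomy database, so exact
# output may differ): get_named_lineage_from_lineage([2, 1224, 1236]) would return
# something like ['sk__Bacteria', 'p__Proteobacteria', 'c__Gammaproteobacteria'].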
def write_to_log(log_file, message):
current_date_time = time.strftime("%c")
#if the log_file wasn't input, just print message to screen
if log_file == '':
print(message)
print(current_date_time)
return None
#Check if the file exists. If not, make the directory if necessary
log_file_directory = os.path.dirname(log_file)
pathlib.Path(log_file_directory).mkdir(parents=True, exist_ok=True)
#Open the log_file in append mode and write message to it
with open(log_file, 'a') as infile:
infile.write(message + '\n')
infile.write(current_date_time + '\n')
def read_data_file(input_data_file, sep = '\t', column_names = ''):
"""
This function reads in a tab-delimited file and assigns the column names
appropriately. If column names are provided they're used, else it uses the
first line of the file as the header. column_names is a string that is a
space-delimited list.
"""
#State progress
function_name = inspect.stack()[0][3]
print(function_name + ': Reading the input file ' + input_data_file)
#Main
if column_names == '':
DF = pd.read_csv(input_data_file, engine = 'python', sep = sep,
header = 0).fillna(0)
else:
DF = pd.read_csv(input_data_file, engine = 'python', sep = sep,
header = None).fillna(0)
DF.columns = column_names.split(" ")
#Make sure the LCA_taxonID column is the correct datatype
DF['LCA_taxonID'] = DF['LCA_taxonID'].astype(int)
print(function_name + ': Finished.')
return DF
def make_counts_dictionary_from_counts_file(counts_file, sep = '\t'):
"""
The purpose of this function is to read in a counts file and make it a
dictionary. The structure of the counts file must be a two column file with
'query_ID' and 'counts' as the columns. There should be no colnames in
the file.
"""
#State progress
function_name = inspect.stack()[0][3]
print(function_name + ''': Reading in the counts file to make a counts
dictionary.''')
counts_dictionary = dict()
with open(counts_file, 'r') as infile:
for line in infile:
line = line.split(sep)
if len(line) != 2:
raise ValueError(function_name + """: The counts_file should
have two columns. Make sure the delimiter, sep, is correct.""")
query_ID = line[0]
count = int(line[1])
if query_ID == '*':
continue
counts_dictionary[query_ID] = count
print(function_name + ': Finished.')
return counts_dictionary
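# Illustrative counts file (tab-delimited, no header; names and numbers are made up):
#   contig_1    120
#   contig_2    45
# make_counts_dictionary_from_counts_file() would return {"contig_1": 120, "contig_2": 45}.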
def add_read_counts_to_dataframe(input_DF, counts_dictionary):
'''
The purpose of this function is to add a column, labeled counts, to the
input dataframe. This assumes that the dictionary keys of the
counts_dictionary are equal to values in the query_ID column of the input
dataframe.
'''
#State progress
function_name = inspect.stack()[0][3]
print(function_name + ': Adding read counts to dataframe.')
#Make an output DF that is a copy of the input dataframe
output_DF = input_DF.copy()
#Get a list of query_IDs in the input_DF and make sure they are unique
output_DF_query_IDs = list(output_DF['query_ID'].unique())
if output_DF_query_IDs != list(output_DF['query_ID']):
raise ValueError(function_name + """: The query_ID's present in the
input_DF are not unique.""")
#Add a read_count column to the input dataframe
output_DF['read_count'] = 0
#For each query_ID, add counts to the output dictionary
for query_ID in output_DF_query_IDs:
if not query_ID in counts_dictionary:
raise ValueError("Cannot find the query_ID {0} in the counts dict".format(query_ID))
output_DF.loc[output_DF['query_ID'] == query_ID, 'read_count'] = counts_dictionary[query_ID]
print(function_name + ': Finished.')
return output_DF
def get_taxonID_counts(df):
'''
Get total counts for each LCA_taxonID. Required colnames are 'LCA_taxonID'
and 'read_count'. This takes the sum of read_counts for each taxonID and
returns a dictionary of structure taxonID:counts. Does NOT do any assigning
to lineages and whatnot.
Input:
- df: A dataframe that contains, at least, the columns "LCA_taxonID" and
  "read_count"
Output:
- taxon_counts_dict: A dictionary of structure taxonID:read_count, where
  read_count is the sum of all read counts of that taxonID
'''
def column_names_in_DF(input_DF, required_column_names):
output = True
for column_name in required_column_names:
if column_name not in input_DF.columns:
output = False
return output
if not column_names_in_DF(df, ['LCA_taxonID', 'read_count']):
raise ValueError("The columns LCA_taxonID and read_count are required.")
query_ID_to_taxon = dict(zip(df['query_ID'], df['LCA_taxonID']))
query_ID_to_counts = dict(zip(df['query_ID'], df['read_count']))
#Get total counts
taxon_counts_dict = dict()
for query_ID in query_ID_to_taxon:
taxon = query_ID_to_taxon[query_ID]
counts = query_ID_to_counts[query_ID]
taxon_counts_dict[taxon] = taxon_counts_dict.get(taxon, 0) + counts
return taxon_counts_dict
def assign_counts(counts_dict, prefix_dictionary = {'strain': 'st__',
'species': 's__',
'genus': 'g__',
'family': 'f__',
'order': 'o__',
'class': 'c__',
'phylum': 'p__',
'kingdom': 'k__',
'superkingdom': 'sk__'}
):
def distribute_counts(counts_dict):
read_counts = dict()
#Iterate over each taxonID
for LCA_taxonID, counts in counts_dict.items():
#Check if nan - it will not be equal to itself
if LCA_taxonID != LCA_taxonID:
continue
#Check if taxonID is 0 - no need to count
if LCA_taxonID == 0:
continue
#Get lineage
lineage = get_lineage(LCA_taxonID)
#Assign counts to each taxonID in lineage
for taxonID in lineage:
read_counts[taxonID] = read_counts.get(taxonID, 0) + counts
return read_counts
def generate_output_df(read_counts):
out_df = pd.DataFrame(columns = ['lineage',
'superkingdom',
'taxon',
'level',
'count'])
out_df['taxon'] = read_counts.keys()
out_df['taxon'] = out_df['taxon'].astype(str)
out_df.index = read_counts.keys()
out_df.index.name = "taxonID"
return out_df
def is_cannonical_level(taxonID, cannonical_levels):
level = get_level(taxonID)
if level in cannonical_levels:
return True
else:
return False
def get_cannonical_lineage(taxonID, cannonical_levels):
lineage = get_lineage(taxonID)
cannonical_lineage = []
for taxon in lineage:
if is_cannonical_level(taxon, cannonical_levels):
cannonical_lineage.append(taxon)
return cannonical_lineage
def get_superkingdom_from_named_lineage(named_lineage):
superkingdom = "UNKNOWN"
for taxon in named_lineage:
if taxon.startswith("sk__"):
superkingdom = taxon
return superkingdom
return superkingdom
#State progress
function_name = inspect.stack()[0][3]
print(function_name + ': Generating counts dataframe.')
cannonical_levels = list(prefix_dictionary.keys())
read_counts = distribute_counts(counts_dict)
#generate output_df
out_df = generate_output_df(read_counts)
#For each taxon, check if it is a cannonical level. If so, write to output df
for taxonID, count in read_counts.items():
#Check if the taxonID is cannonical. If not, continue
if not is_cannonical_level(taxonID, cannonical_levels):
continue
level = get_level(taxonID)
prefix = prefix_dictionary.get(level, "unknown__")
name = prefix + get_name(taxonID)
cannonical_lineage = get_cannonical_lineage(taxonID, cannonical_levels)
named_lineage = get_named_lineage_from_lineage(cannonical_lineage, prefix_dictionary)
superkingdom = get_superkingdom_from_named_lineage(named_lineage)
#Write to output dictionary -
# 'lineage', 'superkingdom', 'taxon', 'level', 'count'
out_df.at[taxonID, 'lineage'] = named_lineage
out_df.at[taxonID, 'superkingdom'] = superkingdom
out_df.at[taxonID, 'taxon'] = name
out_df.at[taxonID, 'level'] = level
out_df.at[taxonID, 'count'] = count
#Remove rows that have missing data
out_df = out_df.dropna()
print(function_name + ': Finished.')
return out_df
def write_output(output_DF, output_path):
#State progress
function_name = inspect.stack()[0][3]
print(function_name + ': Writing output to ' + output_path)
#Make the output directory if necessary
output_directory = os.path.dirname(output_path)
pathlib.Path(output_directory).mkdir(parents=True, exist_ok=True)
#Write the output file
output_DF.to_csv(output_path, sep='\t', index=True)
print('write_output(): Finished.')
def main(infile, outfile, counts_file, log_file):
write_to_log(log_file, "Starting.")
#import data
data = read_data_file(infile)
#if there are no counts, assume these are reads
if counts_file == '':
print('''Did not detect a counts file. Assuming the input dataset
is queried reads.''')
data['read_count'] = 1
else:
#Make counts_dictionary from counts_file
counts_dictionary = make_counts_dictionary_from_counts_file(counts_file)
#Add counts to the dataframe
data = add_read_counts_to_dataframe(data, counts_dictionary)
#Make a dict containing the aggregate counts for each taxonID
counts_dict = get_taxonID_counts(data)
#Assign counts to each taxonID's phylogeny
counts = assign_counts(counts_dict)
#write output
write_output(counts, outfile)
write_to_log(log_file, "Finished.")
if __name__ == '__main__':
#Argparse input
parser = argparse.ArgumentParser(description="""
The purpose of this script is to take a converted DIAMOND or BLAST output
file (in csv format) and generate a counts file at each taxon level.
Input:
- infile: A converted DIAMOND or blast output file. This script assumes
there ARE colnames. Required column is 'LCA_taxonID'. If there is no
counts_file, this input is assumed to be from reads, meaning each line has
a read_count of 1.
- outfile: Path to the output file, which will be tab-delimited.
*Optional*
- counts_file: A tab-delimited file of structure <sequence name> <count>. If
this is provided, the counts for each entry in the infile will come from this
file.
- log_file: An optional file provided for logging purposes (start/end of the script).
""")
parser.add_argument('-i', '--infile', type=str, required=True,
help='''Path to the input data file. See -h for more information.''')
parser.add_argument('-o', '--outfile', type=str, required=True,
help='''Path to the output file (tab-delimited).''')
parser.add_argument('-c', '--counts_file', type=str, required=False, default = '',
help='''Path to the counts file. See -h for more information.''')
parser.add_argument('-l', '--log_file', type=str, required=False, default = '',
help='''Path to the log file.''')
args = parser.parse_args()
infile = args.infile
outfile = args.outfile
counts_file = args.counts_file
log_file = args.log_file
#Run script.
main(infile, outfile, counts_file, log_file)
```
#### File: bin/python/get_enriched_kmers.py
```python
from glob import glob
from Bio.Seq import Seq
import os
import argparse
import time
import pathlib
#------------------------------------------------------------------------------#
# Define functions
#------------------------------------------------------------------------------#
def write_to_log(log_file, message):
current_date_time = time.strftime("%c")
#if the log_file wasn't input, just print message to screen
if log_file == '':
print(message)
print(current_date_time)
return None
#Check if the file exists. If not, make the directory if necessary
log_file_directory = os.path.dirname(log_file)
pathlib.Path(log_file_directory).mkdir(parents=True, exist_ok=True)
#Open the log_file in append mode and write message to it
with open(log_file, 'a') as infile:
infile.write(message + '\n')
infile.write(current_date_time + '\n')
def extract_kmers(infile_path):
output_set = set()
with open(infile_path) as infile:
for line in infile:
if line.startswith(">"):
continue
kmer = line.rstrip('\n')
output_set.add(kmer)
# also add its reverse complement
rev_complement = str(Seq(kmer).reverse_complement())
output_set.add(rev_complement)
return output_set
def get_dict_from_infile_list(infile_list):
"""
infile_list is a list of infiles from which to extract kmers and add to the
output dictionary.
"""
out_dict = dict()
out_set = set()
for infile_path in infile_list:
print('Processing the infile ' + infile_path)
sample_name = os.path.basename(infile_path).split(".")[0]
kmer_set = extract_kmers(infile_path)
out_dict[sample_name] = kmer_set
out_set.update(kmer_set)
return out_dict, out_set
#------------------------------------------------------------------------------#
# Main
#------------------------------------------------------------------------------#
def main():
parser = argparse.ArgumentParser(description="""
The purpose of this script is to find which kmers are enriched in
experimental samples compared to control samples.
This script takes in globs specifying fastas that contain kmers present in
experimentals and controls. It then identifies kmers that are present in at
least EXPERIMENTAL_THRESHOLD of the experimental files, and determines which
of those kmers are not present in any of the control samples.
""")
parser.add_argument(
'-e',
'--experimental_files_glob',
type=str,
required=True,
help="""
A glob that specifies fastas containing the kmers from experimental
files.
"""
)
parser.add_argument(
'-c',
'--control_files_glob',
type=str,
required=True,
help="""
A glob that specifies fastas containing the kmers from control
files.
"""
)
parser.add_argument(
'-o',
'--outfile_path',
type=str,
required=True,
help="""
Path to the output file, which is a txt file where each line is one
enriched kmer.
"""
)
parser.add_argument(
'-t',
'--threshold',
type=int,
required=True,
help="""
The number of experimental samples that should have the kmer for it to
be considered for enrichment.
"""
)
parser.add_argument(
'-l',
'--log_file',
type=str,
required=False,
default="",
help="""
Path to the log file. If blank, log will be written to screen. Log is in
tsv format.
"""
)
args = parser.parse_args()
experimental_files_glob = args.experimental_files_glob
control_files_glob = args.control_files_glob
outfile_path = args.outfile_path
threshold = args.threshold
log_file = args.log_file
# Gen dictionaries, where sampleID:kmers, as well as all the kmers in a set
experimental_dict, experimental_kmers = get_dict_from_infile_list(glob(experimental_files_glob))
control_dict, control_kmers = get_dict_from_infile_list(glob(control_files_glob))
write_to_log(log_file, "experimental_group_total_kmers\t" + str(len(experimental_kmers)))
write_to_log(log_file, "control_group_total_kmers\t" + str(len(control_kmers)))
# For each kmer, count how many experimental samples it is present in.
# If the count reaches the threshold, add it to common_experimental_kmers.
common_experimental_kmers = set()
for kmer in experimental_kmers:
count = 0
for sample, kmers in experimental_dict.items():
if count >= threshold:
continue
if kmer in kmers:
count += 1
if count >= threshold:
common_experimental_kmers.add(kmer)
write_to_log(log_file, "common_experimental_kmers\t" + str(len(common_experimental_kmers)))
# Determine which of the common experimental kmers are not present in the control kmers
enriched_kmers = common_experimental_kmers - control_kmers
write_to_log(log_file, "enriched_kmers\t" + str(len(enriched_kmers)))
# write enriched kmers to outfile
with open(outfile_path, "w") as outfile:
for kmer in enriched_kmers:
outfile.write(kmer + '\n')
if __name__ == '__main__':
main()
``` |
{
"source": "jnoortheen/arger",
"score": 3
} |
#### File: arger/arger/docstring.py
```python
import inspect
import re
import typing as tp
class ParamDocTp(tp.NamedTuple):
type_hint: tp.Any
flags: tp.Tuple[str, ...]
doc: str
@classmethod
def init(cls, type_hint: tp.Any, doc: str, flag_symbol="-"):
"""Parse flags defined in param's doc
Examples:
''':param arg1: -a --arg this is the document'''
"""
doc_parts: tp.List[str] = []
flags: tp.List[str] = []
for part in doc.split():
if part.startswith(flag_symbol) and not doc_parts:
# strip both commas and spaces
flags.append(part.strip(", ").strip())
else:
doc_parts.append(part)
return cls(type_hint, flags=tuple(flags), doc=" ".join(doc_parts))
class DocstringTp(tp.NamedTuple):
description: str
epilog: str
params: tp.Dict[str, ParamDocTp]
class DocstringParser:
"""Abstract class"""
pattern: tp.Pattern
section_ptrn: tp.Pattern
param_ptrn: tp.Pattern
_parsers: tp.List["DocstringParser"] = []
def __init_subclass__(cls, **_):
# Cache costly init phase per session.
cls._parsers.append(cls())
@classmethod
def parse(cls, func: tp.Optional[tp.Callable]) -> DocstringTp:
doc = (inspect.getdoc(func) or "") if func else ""
if doc:
for parser in cls._parsers:
if parser.matches(doc):
return parser._parse(doc)
return DocstringTp(description=doc, epilog="", params={})
def _parse(self, doc: str) -> DocstringTp:
raise NotImplementedError
def matches(self, doc: str) -> bool:
return bool(self.pattern.search(doc))
class NumpyDocParser(DocstringParser):
"""Implement Numpy Docstring format parser
`Example <https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_numpy.html#example-numpy>`_
"""
def __init__(self):
self.pattern = re.compile(r"(Parameters\n[-]+)")
self.section_ptrn = re.compile(r"\n\s*(?P<section>\w+)\n\s*[-]+\n+")
self.param_ptrn = re.compile(r"^(?P<param>\w+)[ \t]*:?[ \t]*(?P<type>\w+)?")
def get_rest_of_section(self, params: str) -> tp.Tuple[str, str]:
other_sect = self.section_ptrn.search(params)
if other_sect:
pos = other_sect.start()
return params[pos:].strip(), params[:pos]
return "", params
def parse_params(self, params: str) -> tp.Dict[str, ParamDocTp]:
docs = []
for line in params.splitlines():
match = self.param_ptrn.search(line)
if match:
result = match.groupdict()
doc = result.get("doc", "")
docs.append([result["param"], result["type"], doc])
elif docs:
docs[-1][-1] += line
return {
param.strip("*"): ParamDocTp.init(tphint, doc)
for param, tphint, doc in docs
}
def _parse(self, doc: str) -> DocstringTp:
desc, _, params = self.pattern.split(doc, maxsplit=1)
other_sect, params = self.get_rest_of_section(params)
return DocstringTp(desc.strip(), other_sect, params=self.parse_params(params))
class GoogleDocParser(NumpyDocParser):
"""Implement Google Docstring format parser
`Example <https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html>`_
"""
def __init__(self): # pylint: disable=super-init-not-called
self.pattern = re.compile(r"\s(Args|Arguments):\s")
self.section_ptrn = re.compile(r"\n(?P<section>[A-Z]\w+):\n+")
self.param_ptrn = re.compile(
r"^\s+(?P<param>[*\w]+)\s*(\((?P<type>[\s,`:\w]+)\))?:\s*(?P<doc>[\s\S]+)"
) # matches parameter_name e.g. param1 (type): description
class RstDocParser(DocstringParser):
"""Sphinx rst-style docstrings parser
`Example <https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html>`_
"""
def __init__(self):
self.pattern = re.compile(r":param")
self.section_ptrn = re.compile(r"\n:[\w]+") # matches any start of the section
self.param_ptrn = re.compile(r"^[ ]+(?P<tp_param>.+):[ ]*(?P<doc>[\s\S]+)")
def parse_doc(self, line: str, params: tp.Dict[str, ParamDocTp]):
match = self.param_ptrn.match(line)
if match:
tp_param, doc = match.groups() # type: str, str
parts = tp_param.strip().rsplit(" ", maxsplit=1)
param = parts[-1].strip()
type_hint = None
if len(parts) > 1:
type_hint = parts[0].strip()
params[param] = ParamDocTp.init(type_hint, doc)
def _parse(self, doc: str) -> DocstringTp:
lines = self.pattern.split(doc)
long_desc = lines.pop(0)
epilog = ""
params: tp.Dict[str, ParamDocTp] = {}
for idx, lin in enumerate(lines):
sections = self.section_ptrn.split(lin, maxsplit=1)
if idx + 1 == len(lines) and len(sections) > 1:
epilog = sections[-1]
self.parse_doc(sections[0], params)
return DocstringTp(long_desc.strip(), epilog, params)
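# Illustrative sketch of how the parsers above are used (the function below is hypothetical):
#
# def greet(name):
#     """Say hello.
#
#     :param str name: -n --name person to greet
#     """
#
# DocstringParser.parse(greet) matches the RstDocParser and returns roughly
# DocstringTp(description='Say hello.', epilog='',
#             params={'name': ParamDocTp(type_hint='str', flags=('-n', '--name'), doc='person to greet')})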
```
#### File: tests/test_args_opts/conftest.py
```python
import inspect
import pytest
from arger import Arger, Argument
from arger.docstring import ParamDocTp
from arger.main import FlagsGenerator
@pytest.fixture
def param_doc(hlp=''):
return ParamDocTp.init('', hlp)
@pytest.fixture
def add_arger():
def _add(argument):
par = Arger()
argument.add_to(par)
return par
return _add
@pytest.fixture
def gen_options():
return FlagsGenerator('-')
@pytest.fixture
def parameter(name, tp):
return inspect.Parameter(
name,
inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=tp,
)
@pytest.fixture
def argument(parameter, param_doc, gen_options) -> Argument:
return Argument.create(parameter, param_doc, gen_options)
@pytest.fixture
def parser(add_arger, argument):
return add_arger(argument)
```
#### File: arger/tests/test_dispatcher.py
```python
import re
import sys
from pathlib import Path
import pytest
from colorama import Back, Fore, Style
from arger import Arger
@pytest.fixture
def arger(pyfile: Path) -> Arger:
ctx: dict = {}
exec(pyfile.read_text(), ctx)
return ctx["arger"]
@pytest.fixture
def args(cmd: str):
args = cmd.strip("$").strip().split(" ")
args.pop(0) # python
args.pop(0) # filename
return args
def test_example(capsys, arger, args, expected: str):
if "error" in expected or expected.startswith("usage:"):
with pytest.raises(SystemExit):
arger.run(*args, capture_sys=False) # start function
else:
arger.run(*args, capture_sys=False)
if sys.version_info >= (3, 9):
expected = re.sub(
r"\[(?P<arg>\w+) \[(?P=arg) ...]]", r"[\g<arg> ...]", expected
)
if sys.version_info >= (3, 10):
expected = expected.replace("optional arguments:", "options:")
capture = capsys.readouterr()
out = capture.err or capture.out
assert out.split() == expected.split(), "".join(
[
f"\n{Fore.BLUE}{Back.WHITE}cmd: {Style.RESET_ALL}\n{args}",
f"\n{Fore.BLUE}{Back.WHITE}out: {Style.RESET_ALL}\n{out}",
f"\n{Fore.BLUE}{Back.WHITE}expected: {Style.RESET_ALL}\n{expected}",
]
)
``` |
{
"source": "jnoortheen/django-utils-plus",
"score": 2
} |
#### File: jnoortheen/django-utils-plus/tasks.py
```python
import invoke
@invoke.task
def release(c):
cversion: invoke.Result = c.run("poetry version")
version: str = cversion.stdout.replace("django-utils-plus", "").strip()
c.run('git push')
c.run('git tag v{}'.format(version))
c.run('git push --tags')
# dont forget to have this file
# ~/.pypirc
# [distutils]
# index-servers =
# pypi
c.run('poetry publish --build')
@invoke.task
def test(c):
c.run(
'pytest '
'--cov utils_plus '
)
@invoke.task
def lint(c):
c.run("mypy utils_plus")
c.run("pylint utils_plus")
```
#### File: django-utils-plus/tests/conftest.py
```python
from contextlib import contextmanager
import pytest
@pytest.fixture
def records(db):
from .test_app.models import Book
from django_dynamic_fixture import G
# create three records
for i in range(3):
G(Book, authors=3, name='filter')
# copied from pytest-django as it is not available in the PyPI version
@pytest.fixture(scope='function')
def django_assert_num_queries(pytestconfig):
from django.db import connection
from django.test.utils import CaptureQueriesContext
@contextmanager
def _assert_num_queries(num):
with CaptureQueriesContext(connection) as context:
yield
if num != len(context):
msg = "Expected to perform %s queries but %s were done" % (num, len(context))
if pytestconfig.getoption('verbose') > 0:
sqls = (q['sql'] for q in context.captured_queries)
msg += '\n\nQueries:\n========\n\n%s' % '\n\n'.join(sqls)
else:
msg += " (add -v option to show queries)"
pytest.fail(msg)
return _assert_num_queries
```
#### File: tests/test_app/models.py
```python
from django.db import models
# Create your models here.
from utils_plus.abstracts import CheckDeletableModelMixin
from utils_plus.choices import ChoicesEnum
from utils_plus.models import QueryManager
from utils_plus.fields import ChoicesEnumField
class Title(ChoicesEnum):
mr = 'Mr.'
ms = 'Ms.'
mrs = 'Mrs.'
class Author(CheckDeletableModelMixin, models.Model, ):
title = ChoicesEnumField(Title, default=Title.mr)
name = models.CharField(max_length=20)
objects = QueryManager()
def __str__(self):
return self.name
class Publisher(models.Model):
name = models.CharField(max_length=20)
created_on = models.DateTimeField(auto_now_add=True)
class Book(models.Model):
authors = models.ManyToManyField(Author)
publisher = models.ForeignKey(Publisher, on_delete=models.CASCADE)
name = models.CharField(max_length=40)
created_on = models.DateTimeField(auto_now_add=True)
objects = models.Manager()
rel_objects = QueryManager(name__icontains='filter').selects('publisher').prefetches('authors').order_by('-id')
```
#### File: django-utils-plus/tests/test_mixins.py
```python
from unittest.mock import Mock
from django.urls import reverse
from django.views import View
from utils_plus.mixins import ContextMixinPlus, CreateUpdateMixin, ProcessFormView
def test_generic_view():
class GenericView(ContextMixinPlus, View):
three = 0
four = 0
kwargs = {'one': 1, 'two': 2}
view = GenericView()
assert set(view.get_context_data(six=6, five=5).keys()) == {'six', 'five', 'view', 'one', 'two'}
class test_CreateUpdateMixin:
def test_get(self, monkeypatch):
c = CreateUpdateMixin()
monkeypatch.setattr(c, "get_object", Mock())
monkeypatch.setattr(ProcessFormView, "get", Mock())
c.get(request='')
c.get_object.assert_called_once()
def test_post(self, monkeypatch):
c = CreateUpdateMixin()
monkeypatch.setattr(c, "get_object", Mock())
monkeypatch.setattr(ProcessFormView, "post", Mock())
c.post(request='', pk=1)
c.get_object.assert_called_once()
def test_urls(self):
# the urls are defined in test-app
assert reverse('author:add') == '/blog/author_profile/add/'
assert reverse('author:edit', args=[12]) == '/blog/author_profile/edit/12/'
```
#### File: django-utils-plus/tests/test_router.py
```python
from utils_plus.router import url
from utils_plus.views import return_path_view as view
str_iter = lambda x: list(map(str, x))
def test_nesting_levels():
urls = list(
url("home")[
url("p1", view, "report1"),
url("p2", view, "report2"),
url("level1", view, "sub1")[
url("p1", view, "level1"),
url("p2", view, "level2"),
url("level2", view, "sub2")[url("p1", view, "lp1"), url("p2", view, "lp2")],
],
url("p3", view, "report3"),
]
)
assert str_iter(urls) == [
"<URLPattern 'home/p1/' [name='report1']>",
"<URLPattern 'home/p2/' [name='report2']>",
"<URLPattern 'home/level1/' [name='sub1']>",
"<URLPattern 'home/level1/p1/' [name='level1']>",
"<URLPattern 'home/level1/p2/' [name='level2']>",
"<URLPattern 'home/level1/level2/' [name='sub2']>",
"<URLPattern 'home/level1/level2/p1/' [name='lp1']>",
"<URLPattern 'home/level1/level2/p2/' [name='lp2']>",
"<URLPattern 'home/p3/' [name='report3']>",
]
assert len(urls) == 9
def test_variable_regex():
g = url("home")[
url.pk(view, "pk"),
url.int("int_var", view, "int"),
url.var("str_var", view, "str"),
url.re("reg_x", r"[x]+", view, "regex"),
]
assert str_iter(g) == [
"<URLPattern 'home/<int:pk>/' [name='pk']>",
"<URLPattern 'home/<int:int_var>/' [name='int']>",
"<URLPattern 'home/<str_var>/' [name='str']>",
"<URLPattern 'home/(?P<reg_x>[x]+)/' [name='regex']>",
]
def test_same_level_urls():
g = url("home", view)[
url.pk(view)
] + url("about", view)[
url.pk(view)[
url("pdf", view)
]
] + url("contact", view)[
url.pk(view)
]
assert str_iter(g) == ["<URLPattern 'home/'>",
"<URLPattern 'home/<int:pk>/'>",
"<URLPattern 'about/'>",
"<URLPattern 'about/<int:pk>/'>",
"<URLPattern 'about/<int:pk>/pdf/'>",
"<URLPattern 'contact/'>",
"<URLPattern 'contact/<int:pk>/'>"]
def test_paths_without_views():
g = url("home")[
url.pk()[url("edit", view)],
url.int("integer")[url("edit", view)],
url.var("variable")[url("edit", view)],
url.re("regex", r"\.+")[url("edit", view)],
url("home2")[url.pk(view, "pk")],
url("home3", view),
]
assert str_iter(g) == [
"<URLPattern 'home/<int:pk>/edit/'>",
"<URLPattern 'home/<int:integer>/edit/'>",
"<URLPattern 'home/<variable>/edit/'>",
"<URLPattern 'home/(?P<regex>\\.+)/edit/'>",
"<URLPattern 'home/home2/<int:pk>/' [name='pk']>",
"<URLPattern 'home/home3/'>",
]
def test_include_patterns():
# app1 urls
app2 = url("app2/")[url("post")[url.pk(view)[url("edit", view)]]]
# app1 urls
app1 = url("app1/")[url("post")[url.pk(view)[url("edit", view)]]]
# root url definition
app = url("app/")[app1, app2]
assert str_iter(app) == [
"<URLPattern 'app/app1/post/<int:pk>/'>",
"<URLPattern 'app/app1/post/<int:pk>/edit/'>",
"<URLPattern 'app/app2/post/<int:pk>/'>",
"<URLPattern 'app/app2/post/<int:pk>/edit/'>",
]
def test_paths_that_starts_with_a_blank_root():
g = url("", view)[
url("home")[url.pk()[url("edit", view)], url.int("integer")[url("edit", view)]],
url("home2")[url.pk(view, "pk")],
url("home3", view),
]
assert str_iter(g) == [
"<URLPattern ''>",
"<URLPattern 'home/<int:pk>/edit/'>",
"<URLPattern 'home/<int:integer>/edit/'>",
"<URLPattern 'home2/<int:pk>/' [name='pk']>",
"<URLPattern 'home3/'>"
]
def test_multi_part_single_entry():
g = url("nest1/nest2", view)[
url("home/coming")[
url.pk()[
url("edit", view)
],
url.int("integer")[
url("edit", view),
],
],
url("first/nest3")[
url.pk(view, "pk")
],
url("home3", view),
]
assert str_iter(g) == [
"<URLPattern 'nest1/nest2/'>",
"<URLPattern 'nest1/nest2/home/coming/<int:pk>/edit/'>",
"<URLPattern 'nest1/nest2/home/coming/<int:integer>/edit/'>",
"<URLPattern 'nest1/nest2/first/nest3/<int:pk>/' [name='pk']>",
"<URLPattern 'nest1/nest2/home3/'>"
]
```
#### File: django-utils-plus/utils_plus/abstracts.py
```python
from django.db.models import Model
class CheckDeletableModelMixin:
def is_deletable(self: Model): # type: ignore
# get all the related object
for rel in self._meta.get_fields():
try:
# check if there is a relationship with at least one related object
if rel.related_model:
related = rel.related_model.objects.filter(**{rel.field.name: self}) # type: ignore
if related.exists():
# if there is, return a tuple of (False, the related queryset)
return False, related
except AttributeError: # an attribute error for field occurs when checking for AutoField
pass  # just pass as we don't need to check for AutoField
return True, None
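# Illustrative usage (e.g. with the Author model from tests/test_app/models.py, which
# mixes this class in):
#
# deletable, related = author.is_deletable()
# if not deletable:
#     ...  # `related` is the queryset of objects blocking the delete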
```
#### File: management/commands/create_middleware.py
```python
import os
from django.core.management.base import AppCommand
from utils_plus.utils.misc import read_if_exists
from .create_admin import render_template
CLASS_TMPL = """\
def {{name}}(get_response):
def middleware(request):
# do your processing here
return get_response(request)
return middleware
"""
class Command(AppCommand):
help = "Create new middleware"
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument('-n', '--name', type=str, help='Name of the middleware', default='sample')
def handle_app_config(self, app_config, **options):
file = os.path.join(app_config.path, 'middleware.py')
content = read_if_exists(file)
mware_name = "{}_middleware".format(options['name'])
if mware_name not in content:
content += render_template(CLASS_TMPL, name=mware_name)
# finally write the file contents
if content:
with open(file, 'w') as writer:
writer.write(content)
```
#### File: django-utils-plus/utils_plus/middleware.py
```python
import re
from django.conf import settings
from django.contrib.auth.decorators import login_required
LOGIN_REQ_URLS = []
if hasattr(settings, 'LOGIN_REQUIRED_URLS'):
LOGIN_REQ_URLS += [re.compile(expr) for expr in settings.LOGIN_REQUIRED_URLS]
def login_required_middleware(get_response):
"""
``settings.LOGIN_REQUIRED_URLS`` holds a list of ``re`` patterns. Requests whose path matches any of
them are wrapped with the ``login_required`` decorator, so you don't have to litter your views with
decorators and per-view permission checks.
Args:
get_response: the next middleware or view callable in the chain.
Returns:
the middleware callable.
"""
def middleware(request):
if any(m.match(request.path) for m in LOGIN_REQ_URLS):
return login_required(get_response)(request)
return get_response(request)
return middleware
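# Illustrative settings snippet (the URL patterns are hypothetical):
#
# LOGIN_REQUIRED_URLS = [r"^/reports/", r"^/account/"]
# MIDDLEWARE = [
#     ...,
#     "utils_plus.middleware.login_required_middleware",
# ]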
```
#### File: utils_plus/templatetags/utils_plus_tags.py
```python
from __future__ import absolute_import
from django import template
from django.conf import settings
from django.templatetags.static import static
from django.utils.safestring import mark_safe
from utils_plus.utils import get_node_modules_dir
from utils_plus.utils.html import script_tag, link_css_tag
from ..utils import get_npm_pkg_path
register = template.Library()
@register.filter
def klass(obj) -> str:
"""
returns class name of the object. Might be useful when rendering widget class names
Args:
obj: any python class object
Returns:
str: name of the class
>>> from tests.test_app.models import Author
>>> klass(Author)
'ModelBase'
"""
return obj.__class__.__name__
if settings.DEBUG:
# todo: replace this with finder implementation
# add node_modules to list of static folders to find and load in local development
settings.STATICFILES_DIRS = settings.STATICFILES_DIRS + [
get_node_modules_dir()
]
@register.simple_tag
def npmcdn(cdn_url, path) -> str:
"""
Args:
cdn_url: URL of CDN like `unpkg.com` or `cdn.jsdelivr.net/npm`
path: path to js/css file. should follow the directory structure as inside ``node_modules``
Returns:
str: URL
"""
return (
static(path)
if settings.DEBUG
else '//{}/{}'.format(cdn_url, get_npm_pkg_path(path))
)
@register.simple_tag
def unpkg(path) -> str:
"""
Alternative to standard `static` template tag. When you are using external static files/libraries
like bootstrap, jquery you may want to load them from CDNs instead of managing them yourself in production.
This tag helps you to do that. When `settings.DEBUG` is false, it returns a versioned `unpkg.com` URL
resolved from the package version pinned in `package.json`. Otherwise it resolves to the local `node_modules`.
Args:
path (str):
Returns:
str: assets path resolved either to node_modules directory or https://unpkg.com/#/ in production.
Usage:
load the template tags and use `unpkg` like `static` tag,
```
{% load static utils_plus_tags %}
<link rel="stylesheet" type="text/css" href="{% unpkg 'bootstrap/dist/css/bootstrap.min.css' %}"/>
<script src="{% unpkg 'bootstrap/dist/js/bootstrap.min.js' %}"></script>
<script src="{% unpkg 'jquery/dist/jquery.min.js' %}"></script>
```
Note:
1. the package.json should be present in the project ROOT DIR.
2. When DEBUG is True the packages must be installed and should be available already inside `node_modules`.
"""
return npmcdn('unpkg.com', path)
@register.simple_tag
def jsdelivr(path) -> str:
"""
same as above with CDN as jsdelivr
"""
return npmcdn('cdn.jsdelivr.net/npm', path)
def jsdelivr_combined_tags_helper(tag_template, *paths) -> str:
if settings.DEBUG:
tag_str = "".join([
tag_template.format(static(path)) for path in paths
])
else:
npm_paths = ','.join(["npm/" + get_npm_pkg_path(path) for path in paths])
cdn_url = '//{}/{}'.format('cdn.jsdelivr.net/combine/', npm_paths)
tag_str = mark_safe(tag_template.format(cdn_url))
return mark_safe(tag_str)
@register.simple_tag
def jsdelivr_combine_js(*paths, **attributes):
"""
same as above, using jsdelivr's combine feature.
In debug mode it returns multiple script tags; otherwise it returns a single script tag with the combined JS.
Args:
*paths: asset paths relative to ``node_modules``
**attributes: attributes to set on the script tag
"""
script_template = script_tag(
**attributes,
src='{}', # URL placeholder
)
return jsdelivr_combined_tags_helper(script_template, *paths)
@register.simple_tag
def jsdelivr_combine_css(*paths, **attributes):
"""
    same as above, using jsdelivr's combine feature.
    In debug mode it returns multiple link tags. Otherwise it returns a single link tag with the combined CSS.
    Args:
        *paths: asset paths relative to ``node_modules``
        **attributes: extra attributes for the link tag
"""
tmpl = link_css_tag(
**attributes,
href='{}', # URL placeholder
)
return jsdelivr_combined_tags_helper(tmpl, *paths)
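# A hedged usage sketch for the combine tags (template syntax shown as a comment; the
# package paths are illustrative):
#
#   {% load utils_plus_tags %}
#   {% jsdelivr_combine_js 'jquery/dist/jquery.min.js' 'bootstrap/dist/js/bootstrap.min.js' %}
#   {% jsdelivr_combine_css 'bootstrap/dist/css/bootstrap.min.css' %}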
``` |
{
"source": "jnoortheen/flake8-import-style",
"score": 3
} |
#### File: flake8-import-style/tests/test_module.py
```python
import ast
import unittest
import flake8_typing_import_style
class I8Test(unittest.TestCase):
def test_run(self):
tree = ast.parse("import typing")
i8 = flake8_typing_import_style.I9(tree)
self.assertEqual(list(i8.run()), [(1, 0, "I902 use 'import typing as tp' instead of 'import typing'", 'I902')])
tree = ast.parse("import typing as tp")
i8 = flake8_typing_import_style.I9(tree)
self.assertEqual(list(i8.run()), [])
tree = ast.parse("from typing import obj")
i8 = flake8_typing_import_style.I9(tree)
self.assertEqual(list(i8.run()), [(
1,
0,
"I901 use 'import typing as tp' instead of 'from typing import obj'",
"I901",
)])
``` |
{
"source": "jnoortheen/graphql-pynamodb",
"score": 2
} |
#### File: graphql-pynamodb/graphene_pynamodb/types.py
```python
from collections import OrderedDict
from inspect import isclass
from graphene import Field, Connection, Node
from graphene.relay import is_node
from graphene.types.objecttype import ObjectType, ObjectTypeOptions
from graphene.types.utils import yank_fields_from_attrs
from pynamodb.attributes import Attribute, NumberAttribute
from pynamodb.models import Model
from .converter import convert_pynamo_attribute
from .registry import Registry, get_global_registry
from .relationships import RelationshipResult
from .utils import get_key_name, connection_for_type
def get_model_fields(model, excluding=None):
if excluding is None:
excluding = []
attributes = dict()
for attr_name, attr in model.get_attributes().items():
if attr_name in excluding:
continue
attributes[attr_name] = attr
return OrderedDict(sorted(attributes.items(), key=lambda t: t[0]))
def construct_fields(model, registry, only_fields, exclude_fields):
inspected_model = get_model_fields(model)
fields = OrderedDict()
for name, attribute in inspected_model.items():
is_not_in_only = only_fields and name not in only_fields
is_already_created = name in fields
is_excluded = name in exclude_fields or is_already_created
if is_not_in_only or is_excluded:
            # We skip this field if only_fields is specified and the field is not
            # in there, or when the field is listed in exclude_fields
continue
converted_column = convert_pynamo_attribute(attribute, attribute, registry)
fields[name] = converted_column
return fields
class PynamoObjectTypeOptions(ObjectTypeOptions):
model = None # type: Model
registry = None # type: Registry
connection = None # type: Type[Connection]
id = None # type: str
class PynamoObjectType(ObjectType):
@classmethod
def __init_subclass_with_meta__(cls, model=None, registry=None, skip_registry=False,
only_fields=(), exclude_fields=(), connection=None,
use_connection=None, interfaces=(), id=None, **options):
assert model and isclass(model) and issubclass(model, Model), (
'You need to pass a valid PynamoDB Model in '
'{}.Meta, received "{}".'
).format(cls.__name__, model)
if not registry:
registry = get_global_registry()
assert isinstance(registry, Registry), (
'The attribute registry in {} needs to be an instance of '
'Registry, received "{}".'
).format(cls.__name__, registry)
pynamo_fields = yank_fields_from_attrs(
construct_fields(model, registry, only_fields, exclude_fields),
_as=Field,
)
if use_connection is None and interfaces:
use_connection = any((issubclass(interface, Node) for interface in interfaces))
if use_connection and not connection:
# We create the connection automatically
connection = Connection.create_type('{}Connection'.format(cls.__name__), node=cls)
if connection is not None:
assert issubclass(connection, Connection), (
"The connection must be a Connection. Received {}"
).format(connection.__name__)
_meta = PynamoObjectTypeOptions(cls)
_meta.model = model
_meta.registry = registry
_meta.fields = pynamo_fields
_meta.connection = connection
_meta.id = id or 'id'
super(PynamoObjectType, cls).__init_subclass_with_meta__(_meta=_meta, interfaces=interfaces, **options)
if not skip_registry:
registry.register(cls)
@classmethod
def is_type_of(cls, root, info):
if isinstance(root, RelationshipResult) and root.__wrapped__ == cls._meta.model:
return True
return isinstance(root, cls._meta.model)
@classmethod
def get_node(cls, info, id):
if isinstance(getattr(cls._meta.model, get_key_name(cls._meta.model)), NumberAttribute):
return cls._meta.model.get(int(id))
else:
return cls._meta.model.get(id)
def resolve_id(self, info):
graphene_type = info.parent_type.graphene_type
if is_node(graphene_type):
return getattr(self, get_key_name(graphene_type._meta.model))
@classmethod
def get_connection(cls):
return connection_for_type(cls)
```
#### File: graphql-pynamodb/graphene_pynamodb/utils.py
```python
import json
from typing import Tuple
from graphql_relay import from_global_id, to_global_id
from pynamodb.attributes import Attribute
from pynamodb.models import Model
import graphene
MODEL_KEY_REGISTRY = {}
def get_key_name(model):
if not issubclass(model, Model):
raise TypeError("Invalid type passed to get_key_name: %s" % model.__class__)
if model in MODEL_KEY_REGISTRY:
return MODEL_KEY_REGISTRY[model]
for attr in model.get_attributes().values():
if isinstance(attr, Attribute) and attr.is_hash_key:
MODEL_KEY_REGISTRY[model] = attr.attr_name
return attr.attr_name
def connection_for_type(_type):
class Connection(graphene.relay.Connection):
total_count = graphene.Int()
class Meta:
name = _type._meta.name + "Connection"
node = _type
def resolve_total_count(self, args, context, info):
return self.total_count if hasattr(self, "total_count") else len(self.edges)
return Connection
def to_cursor(item: Model) -> str:
data = {} # this will be same as last_evaluated_key returned by PageIterator
for name, attr in item.get_attributes().items():
if attr.is_hash_key or attr.is_range_key:
data[name] = item._serialize_value(attr, getattr(item, name))
return to_global_id(type(item).__name__, json.dumps(data))
def from_cursor(cursor: str) -> Tuple[str, dict]:
model, data = from_global_id(cursor)
return model, json.loads(data)
``` |
{
"source": "jnoortheen/pipm",
"score": 2
} |
#### File: pipm/pipm/classes.py
```python
from pip._internal.req import InstallRequirement
from .file_utils import parse_comes_from
class FileRequirement:
def __init__(self, req, env):
# type: (InstallRequirement, str) -> None
filename, line_num = parse_comes_from(req.comes_from, env)
self.req = req
self.filename = filename
self.line_num = line_num
def __repr__(self):
return "{}".format(self.req)
```
#### File: pipm/pipm/setup_cfg.py
```python
import codecs
import os
try:
from typing import Dict, Iterable, Set, List
except ImportError:
pass
from pip._internal.req import InstallRequirement
from pip._vendor.packaging.requirements import Requirement
from pip._vendor.packaging.specifiers import SpecifierSet
from six.moves.configparser import ConfigParser
from . import operations
SETUP_FILE_NAME = "setup.cfg"
def _req_list_to_str(reqs):
# type: (Iterable) -> str
return "".join(map(lambda x: "\n" + x, sorted(reqs, key=lambda x: x.lower())))
def _req_str_to_list(reqs):
# type: (str) -> list
if reqs and isinstance(reqs, str):
reqs = reqs.strip()
if "\n" in reqs:
return reqs.splitlines()
elif "," in reqs:
return reqs.split(",")
else:
return [reqs]
return []
def _req_str_to_dict(config, base_key, key):
# type: (ConfigParser, str, str) -> Dict[str, str]
reqs = (
config.get(base_key, key)
if config.has_section(base_key) and config.has_option(base_key, key)
else ""
)
return dict(map(lambda x: (Requirement(x).name, x), _req_str_to_list(reqs)))
def update_config(config, env, new_reqs):
"""
    updates the parsed ``setup.cfg`` config with newly installed requirements
    Args:
        config (ConfigParser): parsed config file
        env (str): requirement environment (e.g. ``dev``, ``test``); ``None`` maps to ``install_requires``
new_reqs (Dict[str, str]): a dict of newly installed/updated requirements.
Key is the package name and value is the full name with version and markers
"""
base_key, key = get_keys(env)
if not config.has_section(base_key):
config.add_section(base_key)
reqs = _req_str_to_dict(config, base_key, key)
reqs.update(new_reqs)
if reqs:
config.set(base_key, key, _req_list_to_str(reqs.values()))
def get_keys(env=None):
"""return key combination for adding new RequirementSet"""
if env:
base_key = "options.extras_require"
key = "testing" if env and "test" in env else str(env)
else:
base_key = "options"
key = "install_requires"
return base_key, key
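# Illustrative setup.cfg layout these helpers read and write (package names/versions are examples):
#
#   [options]
#   install_requires =
#       requests~=2.22.0
#
#   [options.extras_require]
#   testing =
#       pytest~=5.4.0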
def _read_config():
"""read from existing file"""
config = ConfigParser()
if os.path.exists(SETUP_FILE_NAME):
config.read(SETUP_FILE_NAME)
return config
def _write_to_file(config):
with codecs.open(SETUP_FILE_NAME, "w") as file_obj:
config.write(file_obj)
def get_requirements(env=None):
# (str) -> dict
config = _read_config()
base_key, key = get_keys(env)
return _req_str_to_dict(config, base_key, key)
def add_requirements(user_reqs, env=None):
# type: (List[InstallRequirement], str) -> ConfigParser
"""
create/update setup.cfg file
Args:
user_reqs: list of user requirements
        env: requirement environment the packages belong to (e.g. ``dev``, ``test``); ``None`` for ``install_requires``
"""
config = _read_config()
reqs = {}
for req in user_reqs: # type: InstallRequirement
if not req.comes_from: # add only top-level dependencies
if not req.req.specifier and req.installed_version:
req.req.specifier = SpecifierSet("~=" + str(req.installed_version))
reqs[req.req.name] = str(req.req)
if reqs:
update_config(config, env, reqs)
_write_to_file(config)
return config
def _remove_requirements(config, base_key, key, installed_reqs):
# (ConfigParser, str, str, Set[str]) -> None
    # keep only the requirements that are still installed and drop the rest
reqs = _req_str_to_dict(config, base_key, key)
filtered = (req for req_name, req in reqs.items() if req_name in installed_reqs)
config.set(base_key, key, _req_list_to_str(filtered))
def remove_requirements(installed_reqs=None):
# type: (Set[str]) -> ConfigParser
"""
remove requirements from `setup.cfg` after `pip uninstall`
Args:
installed_reqs (set): set of requirements name strings
"""
installed_reqs = installed_reqs or set(operations.get_frozen_reqs().keys())
config = _read_config()
    # check all the sections and remove requirements that are no longer installed
for name in config.sections():
if name == "options":
section, option = get_keys()
_remove_requirements(config, section, option, installed_reqs)
elif name == "options.extras_require":
for key in config.options(name):
_remove_requirements(config, name, key, installed_reqs)
_write_to_file(config)
return config
```
#### File: pipm/tests/test_commands.py
```python
from pipm import commands
from pipm.file import get_req_filenames
from pipm.file_utils import get_req_filename
def test_fill_args_when_no_args_given(chdir):
"""
Args:
        chdir: fixture
"""
f = get_req_filename()
cmd = commands.InstallCommandPlus("i", "i")
opts, args = cmd.parse_args([])
assert opts.requirements == [f]
def test_fill_args_when_no_args_given_dev(chdir):
"""
Args:
        chdir: fixture
"""
for env in ["dev", "test", "prod"]:
f = get_req_filename(env)
cmd = commands.InstallCommandPlus("i", "i")
opts, args = cmd.parse_args(["--{}".format(env)])
assert opts.requirements == [f]
def test_fill_args_when_no_args_given_all(chdir):
"""
Args:
        chdir: fixture
"""
# create the below two files
get_req_filename("dev")
get_req_filename("test")
# start test
fset = get_req_filenames()
cmd = commands.InstallCommandPlus("i", "i")
opts, args = cmd.parse_args(["--all"])
assert set(opts.requirements) == fset
def test_fill_args_when_no_args_given_environ(chdir):
"""
Args:
        chdir: fixture
"""
env = "staging"
f = get_req_filename(env)
cmd = commands.InstallCommandPlus("i", "i")
opts, args = cmd.parse_args(["--env", env])
assert opts.requirements == [f]
def test_req_environment():
cmd = commands.InstallCommandPlus("i", "i")
opts, args = cmd.parse_args(["--dev"])
assert opts.req_environment == "dev"
opts, args = cmd.parse_args(["--test"])
assert opts.req_environment == "test"
opts, args = cmd.parse_args(["pkg_name", "--prod"])
assert opts.req_environment == "prod"
opts, args = cmd.parse_args(["pkg_name", "--env", "staging"])
assert opts.req_environment == "staging"
opts, args = cmd.parse_args([])
assert opts.req_environment is None
def test_update_command_parse_args():
cmd = commands.UpdateCommand("u", "u")
opts, args = cmd.parse_args([])
assert opts.upgrade
def test_install_cmd_run(mocker):
"""
"""
run = mocker.patch.object(commands.install.InstallCommand, "run") # type: MagicMock
save = mocker.patch.object(commands.file, "save") # type: MagicMock
# run method
cmd = commands.InstallCommandPlus("i", "i")
cmd.main(["--dev", "pkg"])
run.assert_called_once()
save.assert_called_once()
def test_remove_cmd_run(mocker):
run = mocker.patch.object(commands.UninstallCommand, "run") # type: MagicMock
save = mocker.patch.object(commands.file, "save") # type: MagicMock
mocker.patch.object(
commands, "get_orphaned_packages", return_value=["pkg1", "pkg2"]
) # type: MagicMock
# run method
cmd = commands.UninstallCommandPlus("u", "u")
opts, args = cmd.parse_args(["dev"])
cmd.run(opts, args)
run.assert_called_once_with(opts, ["dev", "pkg1", "pkg2"])
save.assert_called_once()
def test_freeze_cmd_run(mocker):
run = mocker.patch.object(commands.FreezeCommand, "run") # type: MagicMock
save = mocker.patch.object(commands.file, "save") # type: MagicMock
mocker.patch.object(
commands, "get_orphaned_packages", return_value=["pkg1", "pkg2"]
) # type: MagicMock
# run method
cmd = commands.FreezeCommandPlus("f", "f")
opts, args = cmd.parse_args([])
cmd.run(opts, args)
run.assert_called_once()
save.assert_called_once()
``` |
{
"source": "jnoortheen/xontrib-avox-poetry",
"score": 2
} |
#### File: xontrib/avox_poetry/venv_poetry.py
```python
import builtins
import typing as tp
from pathlib import Path
from .utils import deep_get
def run(*args) -> str:
from subprocess import check_output
return check_output(args).decode().strip()
def poetry_venv_path(pkg_name: str, cwd: str) -> str:
import hashlib
import base64
import re
# taken from
# https://github.com/python-poetry/poetry/blob/1.0.3/poetry/utils/env.py#L693
name = pkg_name.lower()
sanitized_name = re.sub(r'[ $`!*@"\\\r\n\t]', "_", name)[:42]
h = hashlib.sha256(str(cwd).encode()).digest()
h = base64.urlsafe_b64encode(h).decode()[:8]
return "{}-{}".format(sanitized_name, h)
def find_project_name(proj_file: Path) -> tp.Optional[str]:
import tomlkit
proj = tomlkit.parse(proj_file.read_text())
return deep_get(proj, "tool", "poetry", "name")
def iter_venvs():
path = Path(
builtins.__xonsh__.env.get("VIRTUALENV_HOME", "~/.virtualenvs")
).expanduser()
if path.exists():
yield from path.iterdir()
def iter_venvs_proj(proj_name):
for env in iter_venvs():
if env.name.startswith(proj_name) or env.name.startswith(
proj_name.replace("_", "-")
):
yield env
def find_venv_path(cwd: Path) -> tp.Optional[Path]:
# durations
# plain-function: ~40ms
# using subprocess: ~760ms
# using xonsh builtins: ~640ms
proj_toml = cwd.joinpath("pyproject.toml")
if not proj_toml.exists():
return None
proj_name = find_project_name(proj_toml)
if not proj_name:
return
venvs = list(iter_venvs_proj(proj_name))
if len(venvs) == 1:
return venvs[0]
if not venvs:
return None
# if there are multiple venv for the same project then resolve it actually
# using poetry itself
return Path(run("poetry", "env", "info", "-p"))
``` |
{
"source": "jnoortheen/xontrib-broot",
"score": 2
} |
#### File: xontrib-broot/xontrib/broot.py
```python
import builtins
import os
import subprocess
import tempfile
from xonsh.built_ins import XSH
def _br(args, stdin=None, stdout=None, stderr=None):
cmd_file = tempfile.NamedTemporaryFile(delete=False)
try:
cmds = ("broot", "--outcmd", cmd_file.name) + tuple(args)
if builtins.__xonsh__.env.get("XONSH_INTERACTIVE"):
cmds += ("--color", "yes")
subprocess.call(
cmds,
stdin=stdin,
stderr=stderr,
stdout=stdout,
)
builtins.evalx(cmd_file.read().decode())
finally:
os.remove(cmd_file.name)
builtins.aliases["br"] = _br
@XSH.builtins.events.on_ptk_create
def custom_keybindings(bindings, **kw):
def handler(key_name: str, default: str):
def do_nothing(_):
pass
if key_name not in XSH.env:
key = default
else:
key = XSH.env.get(key_name)
if key:
return bindings.add(key)
return do_nothing
@handler("XONSH_BROOT_KEY", "c-n")
def start_broot(event):
_br([])
``` |
{
"source": "jnoortheen/xontrib-commands",
"score": 2
} |
#### File: xontrib/commands/dev_cmd.py
```python
import operator
import os
import typing as tp
from pathlib import Path
from arger import Argument
import xonsh.tools as xt
from xonsh.completers.tools import RichCompletion
from xonsh.parsers.completion_context import CommandContext
from .utils import Command, xsh
ENVS = {}
@xt.lazyobject
def added_file_path() -> Path:
return (
        # expanduser() so the "~" fallback resolves to $HOME when XDG_DATA_HOME is unset
        Path(xsh.env.get("XDG_DATA_HOME", "~/.local/share")).expanduser().resolve()
/ "dev-paths.json"
)
def update_saved_paths(paths=()):
import json
file = tp.cast(Path, added_file_path)
if paths:
content = json.dumps(list(set(paths)))
file.write_text(content)
def get_added_paths(add_path: str = None) -> tp.Dict[str, str]:
import json
file = tp.cast(Path, added_file_path)
if file.exists():
paths = json.loads(file.read_text())
else:
paths = []
if add_path:
paths.append(add_path)
update_saved_paths(paths)
return {os.path.split(p)[-1]: p for p in paths}
def register_project(fn: tp.Callable = None):
"""Register new project.
Parameters
----------
    fn
        This function will get invoked upon finding the project path.
        The function name will be used to search in $PROJECT_PATHS.
"""
def _wrapper():
path = _start_proj_shell(fn.__name__)
result = fn(path)
return result
ENVS[fn.__name__] = _wrapper
return _wrapper
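# Hedged usage sketch (the project name "blog" and the callback body are assumptions):
#
#   @register_project
#   def blog(path):
#       # runs after `dev blog` has cd'ed into the matching folder under $PROJECT_PATHS
#       print("entered", path)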
def _start_proj_shell(env: tp.Union[str, Path]):
if isinstance(env, Path) and env.exists():
path = str(env)
else:
path = find_proj_path(env)
if not path:
raise Exception("Project not found")
os.chdir(path)
return path
def _find_proj_path(name, op):
for direc in xsh.env.get("PROJECT_PATHS", []):
for path in Path(direc).glob("*"):
if op(path.name, name):
yield path
def find_proj_path(name):
for op in [operator.eq, str.startswith, operator.contains]:
for path in _find_proj_path(name, op):
return path
def _list_cmds():
from rich.console import Console
c = Console()
paths = get_added_paths()
c.print("Paths", paths)
def _add_current_path():
from rich.console import Console
c = Console()
path = Path.cwd().resolve()
c.log("Adding cwd to project-paths", {path.name: path})
get_added_paths(str(path))
def proj_name_completer(**kwargs):
command: CommandContext = kwargs.pop("command")
for name, path in get_added_paths().items():
yield RichCompletion(name, description=path)
yield from ENVS
for path in _find_proj_path(command.prefix, str.startswith):
yield RichCompletion(path.name, description=str(path))
@Command.reg
def _dev(
name: tp.cast(str, Argument(nargs="?", completer=proj_name_completer)),
add=False,
ls=False,
):
"""A command to cd into a directory
Inspired from my own workflow and these commands
- https://github.com/ohmyzsh/ohmyzsh/tree/master/plugins/pj
- https://github.com/ohmyzsh/ohmyzsh/tree/master/plugins/wd
Parameters
----------
name
name of the folder to cd into.
This searches for names under $PROJECT_PATHS
or the ones registered with `dev --add`
add
register the current folder to dev command.
When using this, it will get saved in a file,
also that is used during completions.
ls
show currently registered paths
Notes
-----
    One can use the `register_project` function to register custom callbacks to run on cd to a project path
Examples
-------
> $PROJECT_PATHS = ["~/src/"]
> dev proj-name # --> will cd into ~/src/proj-name
> dev --add
"""
if ls or (name is None):
_list_cmds()
elif add:
_add_current_path()
else:
added_paths = get_added_paths()
if name in ENVS:
ENVS[name]()
elif name in added_paths:
path = added_paths[name]
if os.path.exists(path):
return _start_proj_shell(Path(path))
else:
# old/expired link
added_paths.pop(name)
update_saved_paths(tuple(added_paths.values()))
return _start_proj_shell(name)
``` |
{
"source": "jnoortheen/xontrib-term-integrations",
"score": 2
} |
#### File: xontrib-term-integrations/xontrib_term_integrations/utils.py
```python
import sys
from xonsh.environ import Env
class Codes:
# https://iterm2.com/documentation-escape-codes.html equivalents
ESC = "\x1b"
ST = "\x07"
OSC = ESC + "]" # \x5d
CSI = ESC + "["
class ShellIntegrationPrompt:
def __init__(self, env: "Env"):
self.env = env
self.old_prompt = env["PROMPT"]
def __call__(self, **_):
prompt = self.old_prompt() if callable(self.old_prompt) else self.old_prompt
prefix, suffix = [
ansi_esc(form())
for form in [form_term_prompt_prefix, form_term_prompt_suffix]
]
return prefix + prompt + suffix
def ansi_esc(code: str):
return "\001" + code + "\002"
def term_mark(code):
return f"{Codes.OSC}133;{code}{Codes.ST}"
def term_osc_cmd(code):
return f"{Codes.OSC}1337;{code}{Codes.ST}"
def form_term_prompt_prefix():
return term_mark("A")
def form_term_prompt_suffix():
return term_mark("B")
def write_term_mark(code):
return write_to_out(term_mark(code))
def write_osc_cmd(code):
return write_to_out(term_osc_cmd(code))
def write_osc_output_prefix():
return write_term_mark("C")
def write_osc_cmd_status(status):
return write_term_mark(f"D;{status}")
def write_to_out(code: str):
sys.stdout.write(code)
sys.stdout.flush()
def write_osc_shell_integration():
# OSC 1337 ; ShellIntegrationVersion=[Pn] ; [Ps] ST
write_osc_cmd("ShellIntegrationVersion=15;shell=xonsh")
def write_osc_cwd(newdir):
# OSC 1337 ; CurrentDir=[current directory] ST
write_osc_cmd(f"CurrentDir={newdir}")
def write_osc_user_host(env):
user = env.get("USER")
host = env.get("HOSTNAME")
if user and host:
write_osc_cmd(f"RemoteHost={user}@{host}")
``` |
{
"source": "jnordberg/benchmarking",
"score": 2
} |
#### File: evm/scripts/bench_evm384.py
```python
import re
import subprocess
import nanodurationpy as durationpy
import csv
import time
import datetime
import os
import shutil
import shlex
import json
# output paths should be mounted docker volumes
RESULT_CSV_OUTPUT_PATH = "/evmraceresults"
RESULT_CSV_FILENAME = "evm_benchmarks_evmone384.csv"
EVMONE_BENCH_INFOS = [
{
"command": "/root/evmone-evm384-v1/build/bin/evmone-bench --benchmark_format=json --benchmark_color=false --benchmark_min_time=5 /root/evm384_f6m_mul/build/v1-f6m_mul_bench.bin 00 74229fc665e6c3f4401905c1a454ea57c8931739d05a074fd60400f19684d680a9e1305c25f13613dcc6cdd6e6e57d0800000000000000000000000000000000",
"bench_name": "evm384-synth-loop-v1"
},
{
"command": "/root/evmone-evm384-v2/build/bin/evmone-bench --benchmark_format=json --benchmark_color=false --benchmark_min_time=5 /root/evm384_f6m_mul/build/v2-f6m_mul_bench.bin 00 74229fc665e6c3f4401905c1a454ea57c8931739d05a074fd60400f19684d680a9e1305c25f13613dcc6cdd6e6e57d0800000000000000000000000000000000",
"bench_name": "evm384-synth-loop-v2"
},
{
"command": "/root/evmone-evm384-v2-unsafe/build/bin/evmone-bench --benchmark_format=json --benchmark_color=false --benchmark_min_time=5 /root/mem-check-disable-evm384_f6m_mul/build/v2-f6m_mul_bench.bin 00 74229fc665e6c3f4401905c1a454ea57c8931739d05a074fd60400f19684d680a9e1305c25f13613dcc6cdd6e6e57d0800000000000000000000000000000000",
"bench_name": "evm384-synth-loop-v3"
}
]
"""
root@472ab2fd1fc1:~/evm384_f6m_mul# /root/evmone-evm384-v2/build/bin/evmone-bench ~/evm384_f6m_mul/build/v2-f6m_mul_bench.bin "00" "74229fc665e6c3f4401905c1a454ea57c8931739d05a074fd60400f19684d680a9e1305c25f13613dcc6cdd6e6e57d0800000000000000000000000000000000"
Benchmarking evmone
2020-06-18 20:52:56
Running /root/evmone-evm384-v2/build/bin/evmone-bench
Run on (4 X 2294.68 MHz CPU s)
CPU Caches:
L1 Data 32K (x2)
L1 Instruction 32K (x2)
L2 Unified 256K (x2)
L3 Unified 51200K (x2)
-------------------------------------------------------------------------------------------------------
Benchmark Time CPU Iterations UserCounters...
-------------------------------------------------------------------------------------------------------
/root/evm384_f6m_mul/build/v2-f6m_mul_bench.bin 18156 us 18156 us 39 gas_rate=322.266M/s gas_used=5.85118M
"""
def do_evmone_bench(evmone_bench_cmd):
evmone_cmd = shlex.split(evmone_bench_cmd)
print("running evmone benchmark...\n{}".format(evmone_bench_cmd))
stdoutlines = []
with subprocess.Popen(evmone_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, universal_newlines=True) as p:
for line in p.stdout: # b'\n'-separated lines
print(line, end='')
stdoutlines.append(line) # pass bytes as is
p.wait()
json_result = json.loads("".join(stdoutlines[2:]))
benchmarks = json_result['benchmarks']
benchmark_results = benchmarks[0]
gasused = int(benchmark_results['gas_used'])
total_time = str(benchmark_results['real_time']) + benchmark_results['time_unit']
time = durationpy.from_str(total_time)
return {'gas_used': gasused, 'time': time.total_seconds()}
def saveResults(precompile_benchmarks):
# move existing csv file to backup-datetime-folder
ts = time.time()
date_str = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
ts_folder_name = "backup-{}-{}".format(date_str, round(ts))
dest_backup_path = os.path.join(RESULT_CSV_OUTPUT_PATH, ts_folder_name)
result_file = "{}/{}".format(RESULT_CSV_OUTPUT_PATH, RESULT_CSV_FILENAME)
# back up existing result csv file
if os.path.isfile(result_file):
os.makedirs(dest_backup_path)
shutil.move(result_file, dest_backup_path)
print("existing {} moved to {}".format(RESULT_CSV_FILENAME, dest_backup_path))
with open(result_file, 'w', newline='') as bench_result_file:
fieldnames = ['engine', 'test_name', 'total_time', 'gas_used']
writer = csv.DictWriter(bench_result_file, fieldnames=fieldnames)
writer.writeheader()
for test_result in precompile_benchmarks:
writer.writerow({"engine": test_result['engine'], "test_name" : test_result['bench_name'], "gas_used" : test_result['gas_used'], "total_time" : test_result['time']})
def main():
all_bench_resuls = []
for evmone_bench_info in EVMONE_BENCH_INFOS:
evmone_cmd = evmone_bench_info['command']
bench_result = do_evmone_bench(evmone_cmd)
bench_result['bench_name'] = evmone_bench_info['bench_name']
bench_result['engine'] = "evmone384"
all_bench_resuls.append(bench_result)
saveResults(all_bench_resuls)
if __name__ == "__main__":
main()
```
#### File: evm/scripts/benchevm.py
```python
import json, re
import subprocess
import nanodurationpy as durationpy
import csv
import time
import datetime
import os
import sys
import shutil
import shlex
RESULT_CSV_OUTPUT_PATH = "/evmraceresults"
# must be an absolute path to evm code dir
EVM_CODE_DIR = "/input_data/evmcode"
INPUT_VECTORS_DIR = "/input_data/input_vectors"
EVMONE_BUILD_DIR = "/root/evmone/build"
PARITY_EVM_DIR = "/parity/target/release"
CITA_EVM_DIR = "/cita-vm/target/release"
GETH_EVM_DIR = "/root/go/src/github.com/ethereum/go-ethereum/core/vm/runtime"
def save_results(evm_name, evm_benchmarks):
result_file = os.path.join(RESULT_CSV_OUTPUT_PATH, "evm_benchmarks_{}.csv".format(evm_name))
# move existing files to old-datetime-folder
ts = time.time()
date_str = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
ts_folder_name = "{}-{}".format(date_str, round(ts))
dest_backup_path = os.path.join(RESULT_CSV_OUTPUT_PATH, ts_folder_name)
#for file in glob.glob(r"{}/*.csv".format(RESULT_CSV_OUTPUT_PATH)):
if os.path.isfile(result_file):
os.makedirs(dest_backup_path)
print("backing up existing {}".format(result_file))
shutil.move(result_file, dest_backup_path)
print("existing csv files backed up to {}".format(dest_backup_path))
# will always be a new file after this.
# might move this backup routine to a bash script
fieldnames = ['engine', 'test_name', 'total_time', 'gas_used']
# write header if new file
if not os.path.isfile(result_file):
with open(result_file, 'w', newline='') as bench_result_file:
writer = csv.DictWriter(bench_result_file, fieldnames=fieldnames)
writer.writeheader()
# append to existing file
with open(result_file, 'a', newline='') as bench_result_file:
writer = csv.DictWriter(bench_result_file, fieldnames=fieldnames)
for row in evm_benchmarks:
writer.writerow(row)
def get_evmone_cmd(codefile, calldata, expected):
cmd_str = "bin/evmone-bench {} {} {} --benchmark_color=false --benchmark_min_time=7 --benchmark_format=json".format(codefile, calldata, expected)
return cmd_str
def get_parity_cmd(codefile, calldata, expected):
cmd_str = "./parity-evm --code-file {} --input {} --expected {} ".format(codefile, calldata, expected)
return cmd_str
def get_geth_cmd(codefile, calldata, expected):
cmd_str = "/go-ethereum/build/bin/evm --codefile {} --statdump --input {} --bench run".format(codefile, calldata)
return cmd_str
def get_cita_cmd(codefile, calldata, expected):
cmd_str = "./cita-evm --code-file {} --input {} --expected {} ".format(codefile, calldata, expected)
return cmd_str
def do_evmone_bench(evmone_cmd):
print("running evmone benchmark...\n{}\n".format(evmone_cmd))
evmone_cmd = shlex.split(evmone_cmd)
stdoutlines = []
with subprocess.Popen(evmone_cmd, cwd=EVMONE_BUILD_DIR, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, universal_newlines=True) as p:
for line in p.stdout: # b'\n'-separated lines
print(line, end='')
stdoutlines.append(line) # pass bytes as is
p.wait()
json_result = json.loads("".join(stdoutlines[2:]))
benchmarks = json_result['benchmarks']
benchmark_results = benchmarks[0]
gasused = int(benchmark_results['gas_used'])
total_time = str(benchmark_results['real_time']) + benchmark_results['time_unit']
time = durationpy.from_str(total_time)
return { 'gas_used': gasused, 'time': time.total_seconds() }
def do_parity_bench(parity_cmd):
print("running parity-evm benchmark...\n{}\n".format(parity_cmd))
parity_cmd = shlex.split(parity_cmd)
stdoutlines = []
with subprocess.Popen(parity_cmd, cwd=PARITY_EVM_DIR, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, universal_newlines=True) as p:
for line in p.stdout: # b'\n'-separated lines
print(line, end='')
stdoutlines.append(line) # pass bytes as is
p.wait()
    timeregex = r"code avg run time: ([\d\w\.]+)"
    gasregex = r"gas used: (\d+)"
    # maybe --benchmark_format=json would be better so we don't have to parse values like "36.775k"
time_line = stdoutlines[-1]
gas_line = stdoutlines[-2]
time_match = re.search(timeregex, time_line)
time = durationpy.from_str(time_match.group(1))
gas_match = re.search(gasregex, gas_line)
gasused = gas_match.group(1)
return {'gas_used': gasused, 'time': time.total_seconds()}
def do_geth_bench(geth_cmd):
print("running geth-evm benchmark...\n{}\n".format(geth_cmd))
geth_cmd = shlex.split(geth_cmd)
stdoutlines = []
with subprocess.Popen(geth_cmd, cwd=GETH_EVM_DIR, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1) as p:
for line in p.stdout: # b'\n'-separated lines
print(line.decode(), end='')
stdoutlines.append(line.decode()) # pass bytes as is
p.wait()
    msOpRegex = r"execution time:\s+([\d]+.[\d]+)ms"
    qsOpRegex = r"execution time:\s+([\d]+.[\d]+)µs"
    gasregex = r"EVM gas used:\s+(\d+)"
    # maybe --benchmark_format=json would be better so we don't have to parse values like "36.775k"
time_line = stdoutlines[1]
gas_line = stdoutlines[0]
time_match = re.search(msOpRegex, time_line)
time = None
if time_match is None:
time_match = re.search(qsOpRegex, time_line)
time = durationpy.from_str("{}µs".format(time_match.group(1)))
else:
time = durationpy.from_str("{}ms".format(time_match.group(1)))
gas_match = re.search(gasregex, gas_line)
gasused = gas_match.group(1)
return {'gas_used': gasused, 'time': time.total_seconds()}
def do_cita_bench(cita_cmd):
print("running cita-evm benchmark...\n{}\n".format(cita_cmd))
cita_cmd = shlex.split(cita_cmd)
stdoutlines = []
with subprocess.Popen(cita_cmd, cwd=CITA_EVM_DIR, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, universal_newlines=True) as p:
for line in p.stdout: # b'\n'-separated lines
print(line, end='')
stdoutlines.append(line) # pass bytes as is
p.wait()
    timeregex = r"code avg run time: ([\d\w\.]+)"
    gasregex = r"gas_used: (\d+)"
time_line = stdoutlines[-1]
gas_line = stdoutlines[-2]
time_match = re.search(timeregex, time_line)
time = durationpy.from_str(time_match.group(1))
gas_match = re.search(gasregex, gas_line)
gasused = gas_match.group(1)
return {'gas_used': gasused, 'time': time.total_seconds()}
def bench_evm(evm_name, input, codefilepath, shift_suffix):
calldata = input['input']
expected = input['expected']
test_name = input['name'] + shift_suffix
evm_result = {}
if evm_name == 'evmone':
evmone_bench_cmd = get_evmone_cmd(codefilepath, calldata, expected)
evmone_bench_result = do_evmone_bench(evmone_bench_cmd)
print("got evmone_bench_result:", evmone_bench_result)
evm_result['engine'] = 'evmone'
evm_result['test_name'] = test_name
evm_result['total_time'] = evmone_bench_result['time']
evm_result['gas_used'] = evmone_bench_result['gas_used']
if evm_name == "parity":
parity_bench_cmd = get_parity_cmd(codefilepath, calldata, expected)
parity_bench_result = do_parity_bench(parity_bench_cmd)
evm_result['engine'] = 'parity-evm'
evm_result['test_name'] = test_name
evm_result['total_time'] = parity_bench_result['time']
evm_result['gas_used'] = parity_bench_result['gas_used']
if evm_name == "geth":
geth_bench_cmd = get_geth_cmd(codefilepath, calldata, expected)
geth_bench_result = do_geth_bench(geth_bench_cmd)
evm_result['engine'] = "geth-evm"
evm_result['test_name'] = test_name
evm_result['total_time'] = geth_bench_result['time']
evm_result['gas_used'] = geth_bench_result['gas_used']
if evm_name == "cita-vm":
cita_bench_cmd = get_cita_cmd(codefilepath, calldata, expected)
cita_bench_result = do_cita_bench(cita_bench_cmd)
evm_result['engine'] = "cita-evm"
evm_result['test_name'] = test_name
evm_result['total_time'] = cita_bench_result['time']
evm_result['gas_used'] = cita_bench_result['gas_used']
return evm_result
def main(evm_name):
evmcodefiles = [fname for fname in os.listdir(EVM_CODE_DIR) if fname.endswith('.hex')]
evm_benchmarks = []
for codefile in evmcodefiles:
print('start benching: ', codefile)
codefilepath = os.path.join(EVM_CODE_DIR, codefile)
benchname = codefile.replace(".hex", "")
inputsfilename = benchname
shift_suffix = ""
if benchname.endswith("_shift"):
inputsfilename = benchname.replace("_shift", "")
shift_suffix = "-shiftopt"
file_name = "{}-inputs.json".format(inputsfilename)
inputs_file_path = os.path.join(INPUT_VECTORS_DIR, file_name)
with open(inputs_file_path) as f:
bench_inputs = json.load(f)
for input in bench_inputs:
print("bench input: ", input['name'])
evm_result = bench_evm(evm_name, input, codefilepath, shift_suffix)
evm_benchmarks.append(evm_result)
save_results(evm_name, evm_benchmarks)
def usage():
print("newbench.py <evm_name>")
if __name__ == "__main__":
    if len(sys.argv) < 2:
        usage()
        sys.exit(1)  # bail out here to avoid an IndexError on sys.argv[1] below
evm_name = sys.argv[1]
main(evm_name)
```
#### File: evm/scripts/merge.py
```python
import csv
import os
import time
import datetime
import shutil
INDIVIDUAL_EVM_RESULTS_CSV_PATH = "/evmraceresults/"
RESULT_CSV_OUTPUT_PATH = "/benchmark_results_data/"
EVMS = ["evmone", "parity", "geth", "cita-vm", "evmone384"]
RESULT_CSV_FILENAME = "evm_benchmarks.csv"
RESULT_FILE = os.path.join(RESULT_CSV_OUTPUT_PATH, RESULT_CSV_FILENAME)
# translate a string representing a numerical field like '1.8M' to '1800000'
def format_evmone_benchmark(csv_line):
fields = csv_line.split(',')
gas_used = fields[-1]
if gas_used[-1].lower() == 'k':
gas_used = str(int(float(gas_used[:-1]) * 1000))
elif gas_used[-1].lower() == 'm':
gas_used = str(int(float(gas_used[:-1]) * 1000000))
else:
return csv_line
    # splitlines() already stripped the newline, so no trailing character needs to be dropped here
    return ','.join(fields[:-1] + [gas_used])
# merge benchmarks from multiple engines into one csv output
def main():
merged_csv_contents = 'engine, test_name, total_time, gas_used\n'
evm_results = []
for evm in EVMS:
path = os.path.join(INDIVIDUAL_EVM_RESULTS_CSV_PATH, "evm_benchmarks_{}.csv".format(evm))
data_file = open(path, 'r')
data = data_file.read().splitlines()
data_file.close()
evm_results.append(data)
for e in range(0, len(EVMS)):
for i in range(0, len(evm_results[e])):
if EVMS[e] == 'evmone' or EVMS[e] == 'evmone384':
merged_csv_contents += format_evmone_benchmark(evm_results[e][i]) + '\n'
else:
merged_csv_contents += evm_results[e][i] + '\n'
# move existing csv file to backup-datetime-folder
ts = time.time()
date_str = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
ts_folder_name = "backup-{}-{}".format(date_str, round(ts))
dest_backup_path = os.path.join(RESULT_CSV_OUTPUT_PATH, ts_folder_name)
# back up existing result csv file
if os.path.isfile(RESULT_FILE):
os.makedirs(dest_backup_path)
shutil.move(RESULT_FILE, dest_backup_path)
print("existing {} moved to {}".format(RESULT_CSV_FILENAME, dest_backup_path))
with open(RESULT_FILE, 'w') as bench_result_file:
bench_result_file.write(merged_csv_contents)
print("saved evm results to:", RESULT_FILE)
if __name__ == "__main__":
main()
```
#### File: benchmarking/scout-engines/scout_bignum_bench.py
```python
import json, re
import subprocess
import nanodurationpy as durationpy
import csv
import time
import datetime
import os
import shutil
import shlex
import sys
import yaml
# docker run --privileged=true -v $(pwd)/../benchmark_results_data:/benchmark_results_data:cached --security-opt seccomp=unconfined -it scout-engines /bin/bash
RESULT_CSV_OUTPUT_DIR = "/benchmark_results_data"
RESULT_CSV_FILE_NAME = "scout_bignum_benchmarks.csv"
C_EWASM_DIR = "/scoutyamls/C_ewasm_contracts"
# the wabt branch reads input data from a file in the working directory `./test_block_data.hex`
# we'll create a new directory using bench_name + engine_name, and create test_block_data.hex in each one
WABT_BENCH_WORKING_DIR = "/engines/wabt-bench-dirs"
# because wabt reads input data from `./test_block_data.hex` in the working directory,
# we can execute `wabt_bin_path` from any directory.
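# Hedged sketch of how such a per-bench working dir could be prepared (names are assumptions;
# the actual setup code is outside this excerpt):
#
#   workdir = os.path.join(WABT_BENCH_WORKING_DIR, "{}-{}".format(bench_name, engine_name))
#   os.makedirs(workdir, exist_ok=True)
#   with open(os.path.join(workdir, "test_block_data.hex"), "w") as f:
#       f.write(block_data_hex)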
WASM3_BENCH_INFOS = [
{
'bench_name': 'bls12-wasmsnark-synth-loop',
'engine_name': 'wasm3-with-bignums',
'wasm3_bin_path': '/engines/fizzy-bls12-hostfuncs/build/bin/fizzy-bench',
'wasm3_bench_dir': '/engines/fizzy-bls12-hostfuncs/build/bin/bls12-synth-loop',
},
{
'bench_name': 'bls12-wasmsnark-two-pairings-standalone',
'engine_name': 'wasm3-with-bignums',
'wasm3_bin_path': '/engines/fizzy-bls12-hostfuncs/build/bin/fizzy-bench',
'wasm3_bench_dir': '/engines/fizzy-bls12-hostfuncs/build/bin/bls12-pairing',
}
]
WAMR_BENCH_INFOS = [
{
'bench_name': 'bls12-wasmsnark-synth-loop',
'engine_path': '/engines/scout_wamr.c/build/scout.exec',
'engine_name': 'wamr-with-bignums',
'scout_yml_path': '/engines/scout_wamr.c/bls12-synth-loop.yaml',
},
{
'bench_name': 'bls12-wasmsnark-two-pairings-standalone',
'engine_path': '/engines/scout_wamr.c/build/scout.exec',
'engine_name': 'wamr-with-bignums',
'scout_yml_path': '/engines/scout_wamr.c/bls12-two-pairings-standalone.yaml',
}
]
FIZZY_BENCH_INFOS = [
{
'bench_name': 'bls12-wasmsnark-synth-loop',
'engine_name': 'fizzy-with-bignums',
'fizzy_bin_path': '/engines/fizzy-bls12-hostfuncs/build/bin/fizzy-bench',
'fizzy_bench_dir': '/engines/fizzy-bls12-hostfuncs/build/bin/bls12-synth-loop',
},
{
'bench_name': 'bls12-wasmsnark-two-pairings-standalone',
'engine_name': 'fizzy-with-bignums',
'fizzy_bin_path': '/engines/fizzy-bls12-hostfuncs/build/bin/fizzy-bench',
'fizzy_bench_dir': '/engines/fizzy-bls12-hostfuncs/build/bin/bls12-pairing',
},
{
'bench_name': 'bls12-wasmsnark-two-pairings-standalone',
'engine_name': 'fizzy',
'fizzy_bin_path': '/engines/fizzy-bls12-hostfuncs/build/bin/fizzy-bench',
'fizzy_bench_dir': '/engines/fizzy-bls12-hostfuncs/build/bin/bls12-pairing-nohostfuncs',
}
]
# TODO: use scout wabt branch without bignums for the no-bignum runs?
WABT_BENCH_INFOS = [
{
'bench_name': 'ecrecover-eth1-txns-websnark-secp256k1-verify-72-sigs',
'engine_name': 'wabt-fasthost-bignums',
'wabt_bin_path': '/engines/wabt-secp/out/clang/Release/benchmark-interp',
'yaml_file_dir': '/scoutyamls/scout.ts-secp/',
'yaml_file_rel_path': 'secpsigverify.yaml'
},
{
'bench_name': 'ecrecover-eth1-txns-websnark-secp256k1-verify-72-sigs',
'engine_name': 'wabt-no-bignums',
'wabt_bin_path': '/engines/wabt-secp/out/clang/Release/benchmark-interp',
'yaml_file_dir': '/scoutyamls/scout.ts-secp/',
'yaml_file_rel_path': 'secpsigverify_nobignums.yaml'
},
# {
# 'bench_name': 'ecpairing-zkrollup-websnark-bn128-two-pairings',
# 'engine_name': 'wabt-with-bignums',
# 'wabt_bin_path': '/engines/wabt-bn128/out/clang/Release/benchmark-interp',
# 'yaml_file_dir': '/scoutyamls/scout.ts-bn128/',
# 'yaml_file_rel_path': 'bn128pairing_bignums.yaml'
# },
# {
# 'bench_name': 'ecpairing-zkrollup-websnark-bn128-two-pairings',
# 'engine_name': 'wabt-no-bignums',
# 'wabt_bin_path': '/engines/wabt-bn128/out/clang/Release/benchmark-interp',
# 'yaml_file_dir': '/scoutyamls/scout.ts-bn128/',
# 'yaml_file_rel_path': 'bn128pairing.yaml'
# },
{
'bench_name': 'biturbo-token-eth1-mainnet-stateless-block-hexary-trie-keccak256-multiproof',
'engine_name': 'wabt-superops',
'wabt_bin_path': '/engines/wabt-biturbo/out/clang/Release/benchmark-interp',
'yaml_file_dir': '/scoutyamls/biturbo/',
'yaml_file_rel_path': 'turbo-token-realistic.yaml'
},
{
'bench_name': 'biturbo-token-eth1-mainnet-stateless-block-hexary-trie-keccak256-multiproof',
'engine_name': 'wabt-baseline',
'wabt_bin_path': '/engines/wabt-biturbo-no-superops/out/clang/Release/benchmark-interp',
'yaml_file_dir': '/scoutyamls/biturbo/',
'yaml_file_rel_path': 'turbo-token-realistic.yaml'
},
{
'bench_name': 'bls12-wasmsnark-two-pairings',
'engine_name': 'wabt-fastmont-fasthost-superops',
'wabt_bin_path': '/engines/wabt-bls12-fastmont-fasthost-superops/out/clang/Release/benchmark-interp',
'yaml_file_dir': '/scoutyamls/scout.ts-bls12/',
'yaml_file_rel_path': 'bls12pairing.yaml'
},
{
'bench_name': 'bls12-wasmsnark-two-pairings',
'engine_name': 'wabt-fastmont-fasthost-superops',
'wabt_bin_path': '/engines/wabt-bls12-fastmont-fasthost-superops/out/clang/Release/benchmark-interp',
'yaml_file_dir': '/scoutyamls/scout.ts-bls12/',
'yaml_file_rel_path': 'bls12pairing.yaml'
},
{
'bench_name': 'bls12-wasmsnark-two-pairings',
'engine_name': 'wabt-fastmont-fasthost-f1m_mul',
'wabt_bin_path': '/engines/wabt-bls12-bignums-fasthost-fastmont-no-superops/out/clang/Release/benchmark-interp',
'yaml_file_dir': '/scoutyamls/scout.ts-bls12/',
'yaml_file_rel_path': 'bls12pairing-f1m_mul.yaml'
},
{
'bench_name': 'bls12-wasmsnark-two-pairings',
'engine_name': 'wabt-fastmont-fasthost-f1m_mul-f1m_add',
'wabt_bin_path': '/engines/wabt-bls12-bignums-fasthost-fastmont-no-superops/out/clang/Release/benchmark-interp',
'yaml_file_dir': '/scoutyamls/scout.ts-bls12/',
'yaml_file_rel_path': 'bls12pairing-f1m_mul-f1m_add.yaml'
},
{
'bench_name': 'bls12-wasmsnark-two-pairings',
'engine_name': 'wabt-fastmont-fasthost-f1m_mul-f1m_add-f1m_sub',
'wabt_bin_path': '/engines/wabt-bls12-bignums-fasthost-fastmont-no-superops/out/clang/Release/benchmark-interp',
'yaml_file_dir': '/scoutyamls/scout.ts-bls12/',
'yaml_file_rel_path': 'bls12pairing-f1m_mul-f1m_add-f1m_sub.yaml'
},
{
'bench_name': 'bls12-wasmsnark-two-pairings',
'engine_name': 'wabt-fastmont-fasthost-f1m_mul-f1m_add-f1m_sub-int_mul',
'wabt_bin_path': '/engines/wabt-bls12-bignums-fasthost-fastmont-no-superops/out/clang/Release/benchmark-interp',
'yaml_file_dir': '/scoutyamls/scout.ts-bls12/',
'yaml_file_rel_path': 'bls12pairing-f1m_mul-f1m_add-f1m_sub-int_mul.yaml'
},
{
'bench_name': 'bls12-wasmsnark-two-pairings',
'engine_name': 'wabt-fastmont-fasthost-f1m_mul-f1m_add-f1m_sub-int_mul-int_add',
'wabt_bin_path': '/engines/wabt-bls12-bignums-fasthost-fastmont-no-superops/out/clang/Release/benchmark-interp',
'yaml_file_dir': '/scoutyamls/scout.ts-bls12/',
'yaml_file_rel_path': 'bls12pairing-f1m_mul-f1m_add-f1m_sub-int_mul-int_add.yaml'
},
{
'bench_name': 'bls12-wasmsnark-two-pairings',
'engine_name': 'wabt-fastmont-fasthost-f1m_mul-f1m_add-f1m_sub-int_mul-int_add-int_sub',
'wabt_bin_path': '/engines/wabt-bls12-bignums-fasthost-fastmont-no-superops/out/clang/Release/benchmark-interp',
'yaml_file_dir': '/scoutyamls/scout.ts-bls12/',
'yaml_file_rel_path': 'bls12pairing-f1m_mul-f1m_add-f1m_sub-int_mul-int_add-int_sub.yaml'
},
{
'bench_name': 'bls12-wasmsnark-two-pairings',
'engine_name': 'wabt-fastmont-fasthost-f1m_mul-f1m_add-f1m_sub-int_mul-int_add-int_sub-int_div',
'wabt_bin_path': '/engines/wabt-bls12-bignums-fasthost-fastmont-no-superops/out/clang/Release/benchmark-interp',
'yaml_file_dir': '/scoutyamls/scout.ts-bls12/',
'yaml_file_rel_path': 'bls12pairing-f1m_mul-f1m_add-f1m_sub-int_mul-int_add-int_sub-int_div.yaml'
},
{
'bench_name': 'bls12-wasmsnark-two-pairings-standalone',
'engine_name': 'wabt-fastmont-fasthost-superops',
'wabt_bin_path': '/engines/wabt-bls12-fastmont-fasthost-superops/out/clang/Release/benchmark-interp',
'yaml_file_dir': '/scoutyamls/scout.ts-bls12-standalone-pairing/',
'yaml_file_rel_path': 'bls12pairing_standalone.yaml'
},
{
'bench_name': 'bls12-wasmsnark-synth-loop',
'engine_name': 'wabt-fastmont-fasthost-superops',
'wabt_bin_path': '/engines/wabt-bls12-fastmont-fasthost-superops/out/clang/Release/benchmark-interp',
'yaml_file_dir': '/scoutyamls/scout.ts-bls12-standalone-synth-loop/',
'yaml_file_rel_path': 'bls12_f6m_mul_loop.yaml'
}
]
# on bls12-wasmsnark-two-pairings wabt-no-bignums, we can use any wabt that doesn't have superops
# (some of the code in one of the big_int implementations of wasmsnark triggers a bug)
# use wabt/wasm-interp instead of wabt/benchmark-interp
# benchmark-interp loops over calls to main() without zeroing out memory or re-instantiating the wasm instance
# some wasm modules support repeated calls to main(). Daiquiri doesn't, so use wasm-interp instead of benchmark-interp
WABT_BENCH_MANUAL_INFOS = [
{
'bench_name': 'bls12-wasmsnark-two-pairings',
'engine_name': 'wabt-no-bignums',
'wabt_bin_path': '/engines/wabt-biturbo-no-superops/out/clang/Release/wasm-interp',
'yaml_file_dir': '/scoutyamls/scout.ts-bls12/',
'yaml_file_rel_path': 'bls12pairing_nobignums.yaml'
},
{
'bench_name': 'daiquiri-zkmixer-websnark-bn128-groth16-four-pairings-and-mimc',
'engine_name': 'wabt-fasthost-bignums',
'wabt_bin_path': '/engines/wabt-bn128/out/clang/Release/wasm-interp',
'yaml_file_dir': '/scoutyamls/daiquiri/',
'yaml_file_rel_path': 'tests/tests-withdraw-bn.yml'
},
{
'bench_name': 'daiquiri-zkmixer-websnark-bn128-groth16-four-pairings-and-mimc',
'engine_name': 'wabt-no-bignums',
'wabt_bin_path': '/engines/wabt-bn128/out/clang/Release/wasm-interp',
'yaml_file_dir': '/scoutyamls/daiquiri/',
'yaml_file_rel_path': 'tests/tests-withdraw.yml'
},
{
'bench_name': 'ecpairing-zkrollup-rust-wasm-bn128-two-pairings',
'engine_name': 'wabt-no-bignums',
'wabt_bin_path': '/engines/wabt-bn128-rolluprs-slowmont-slowhost/out/clang/Release/wasm-interp',
'yaml_file_dir': '/scoutyamls/rollup-rs-no-bignums/',
'yaml_file_rel_path': 'rolluprs.yaml'
},
{
'bench_name': 'ecpairing-zkrollup-rust-wasm-bn128-two-pairings',
'engine_name': 'wabt-bignums-slowhost-slowmont',
'wabt_bin_path': '/engines/wabt-bn128-rolluprs-slowmont-slowhost/out/clang/Release/wasm-interp',
'yaml_file_dir': '/scoutyamls/rollup-rs-with-bignums/',
'yaml_file_rel_path': 'rolluprs.yaml'
},
{
'bench_name': 'ecpairing-zkrollup-rust-wasm-bn128-two-pairings',
'engine_name': 'wabt-bignums-slowhost-slowmont-superops',
'wabt_bin_path': '/engines/wabt-bn128-rolluprs-slowmont-slowhost-superops/out/clang/Release/wasm-interp',
'yaml_file_dir': '/scoutyamls/rollup-rs-with-bignums/',
'yaml_file_rel_path': 'rolluprs.yaml'
},
{
'bench_name': 'ecpairing-zkrollup-rust-wasm-bn128-two-pairings',
'engine_name': 'wabt-bignums-fasthost-slowmont-superops',
'wabt_bin_path': '/engines/wabt-bn128-rolluprs-slowmont-fasthost-superops/out/clang/Release/wasm-interp',
'yaml_file_dir': '/scoutyamls/rollup-rs-with-bignums/',
'yaml_file_rel_path': 'rolluprs.yaml'
},
{
'bench_name': 'ecpairing-zkrollup-rust-wasm-bn128-two-pairings',
'engine_name': 'wabt-bignums-fasthost-fastmont-superops',
'wabt_bin_path': '/engines/wabt-bn128-rolluprs-fastmont-fasthost-superops/out/clang/Release/wasm-interp',
'yaml_file_dir': '/scoutyamls/rollup-rs-with-bignums/',
'yaml_file_rel_path': 'rolluprs.yaml'
},
{
'bench_name': 'ecpairing-zkrollup-websnark-bn128-two-pairings',
'engine_name': 'wabt-no-bignums',
'wabt_bin_path': '/engines/wabt-bn128-websnark-slowmont-slowhost/out/clang/Release/wasm-interp',
'yaml_file_dir': '/scoutyamls/scout.ts-bn128/',
'yaml_file_rel_path': 'bn128pairing.yaml'
},
{
'bench_name': 'ecpairing-zkrollup-websnark-bn128-two-pairings',
'engine_name': 'wabt-bignums-slowhost-slowmont',
'wabt_bin_path': '/engines/wabt-bn128-websnark-slowmont-slowhost/out/clang/Release/wasm-interp',
'yaml_file_dir': '/scoutyamls/scout.ts-bn128/',
'yaml_file_rel_path': 'bn128pairing_bignums.yaml'
},
{
'bench_name': 'ecpairing-zkrollup-websnark-bn128-two-pairings',
'engine_name': 'wabt-bignums-slowhost-slowmont-superops',
'wabt_bin_path': '/engines/wabt-bn128-websnark-slowmont-slowhost-superops/out/clang/Release/wasm-interp',
'yaml_file_dir': '/scoutyamls/scout.ts-bn128/',
'yaml_file_rel_path': 'bn128pairing_bignums.yaml'
},
{
'bench_name': 'ecpairing-zkrollup-websnark-bn128-two-pairings',
'engine_name': 'wabt-bignums-fasthost-slowmont-superops',
'wabt_bin_path': '/engines/wabt-bn128-websnark-slowmont-fasthost-superops/out/clang/Release/wasm-interp',
'yaml_file_dir': '/scoutyamls/scout.ts-bn128/',
'yaml_file_rel_path': 'bn128pairing_bignums.yaml'
},
{
'bench_name': 'ecpairing-zkrollup-websnark-bn128-two-pairings',
'engine_name': 'wabt-bignums-fasthost-fastmont-superops',
'wabt_bin_path': '/engines/wabt-bn128-websnark-fastmont-fasthost-superops/out/clang/Release/wasm-interp',
'yaml_file_dir': '/scoutyamls/scout.ts-bn128/',
'yaml_file_rel_path': 'bn128pairing_bignums.yaml'
},
{
'bench_name': 'ecpairing-zkrollup-websnark-bn128-two-pairings',
'engine_name': 'wabt-bignums-fasthost-fastmont',
'wabt_bin_path': '/engines/wabt-bn128-websnark-fastmont-fasthost/out/clang/Release/wasm-interp',
'yaml_file_dir': '/scoutyamls/scout.ts-bn128/',
'yaml_file_rel_path': 'bn128pairing_bignums.yaml'
}
]
SCOUTCPP_BENCH_INFOS = []
#SCOUTCPP_BENCH_INFOS = [
# {
# 'bench_name': 'ecrecover-eth1-txns-websnark-secp256k1-verify-72-sigs',
# 'engine_name': 'scoutcpp-wabt-with-bignums',
# 'scoutcpp_bin_path': '/engines/scoutcpp-secp/build/scout.exec',
# 'yaml_working_dir': '/scoutyamls/scout.ts-secp/',
# 'yaml_file_path': 'secpsigverify.yaml'
# },
# {
# 'bench_name': 'ecrecover-eth1-txns-websnark-secp256k1-verify-72-sigs',
# 'engine_name': 'scoutcpp-wabt-no-bignums',
# 'scoutcpp_bin_path': '/engines/scoutcpp-secp/build/scout.exec',
# 'yaml_working_dir': '/scoutyamls/scout.ts-secp/',
# 'yaml_file_path': 'secpsigverify_nobignums.yaml'
# },
# {
# 'bench_name': 'ecpairing-zkrollup-websnark-bn128-two-pairings',
# 'engine_name': 'scoutcpp-wabt-with-bignums',
# 'scoutcpp_bin_path': '/engines/scoutcpp-bn128/build/scout.exec',
# 'yaml_working_dir': '/scoutyamls/scout.ts-bn128/',
# 'yaml_file_path': 'bn128pairing_bignums.yaml'
# },
# {
# 'bench_name': 'ecpairing-zkrollup-websnark-bn128-two-pairings',
# 'engine_name': 'scoutcpp-wabt-no-bignums',
# 'scoutcpp_bin_path': '/engines/scoutcpp-bn128/build/scout.exec',
# 'yaml_working_dir': '/scoutyamls/scout.ts-bn128/',
# 'yaml_file_path': 'bn128pairing.yaml'
# },
# {
# 'bench_name': 'ecpairing-zkrollup-rust-wasm-bn128-two-pairings',
# 'engine_name': 'scoutcpp-wabt-with-bignums',
# 'scoutcpp_bin_path': '/engines/scoutcpp-bn128-rolluprs/build/scout.exec',
# 'yaml_working_dir': '/scoutyamls/rollup-rs-with-bignums/',
# 'yaml_file_path': 'rolluprs.yaml'
# },
# {
# 'bench_name': 'ecpairing-zkrollup-rust-wasm-bn128-two-pairings',
# 'engine_name': 'scoutcpp-wabt-no-bignums',
# 'scoutcpp_bin_path': '/engines/scoutcpp-bn128-rolluprs/build/scout.exec',
# 'yaml_working_dir': '/scoutyamls/rollup-rs-no-bignums/',
# 'yaml_file_path': 'rolluprs.yaml'
# },
# {
# 'bench_name': 'daiquiri-zkmixer-websnark-bn128-groth16-four-pairings-and-mimc',
# 'engine_name': 'scoutcpp-wabt-with-bignums',
# 'scoutcpp_bin_path': '/engines/scoutcpp-bn128/build/scout.exec',
# 'yaml_working_dir': '/scoutyamls/daiquiri/',
# 'yaml_file_path': 'tests/tests-withdraw-bn.yml'
# },
# {
# 'bench_name': 'daiquiri-zkmixer-websnark-bn128-groth16-four-pairings-and-mimc',
# 'engine_name': 'scoutcpp-wabt-no-bignums',
# 'scoutcpp_bin_path': '/engines/scoutcpp-bn128/build/scout.exec',
# 'yaml_working_dir': '/scoutyamls/daiquiri/',
# 'yaml_file_path': 'tests/tests-withdraw.yml'
# }
#]
# v8 is always benched with no bignums. EDIT: benched with bignums now too, to show the slowdown.
# bignums cause a slowdown due to v8's host function overhead
# in practice, wasm engines that don't support the bignum host functions
# could use "polyfill" implementations of the host functions, provided in wasm
# these v8 benchmarks demonstrate using websnark's implementations of bigints
# and modular arithmetic as the polyfill.
# TODO: add v8-interpreter
V8_BENCH_INFOS = [
{
'bench_name': 'ecrecover-eth1-txns-websnark-secp256k1-verify-72-sigs',
'engine_name': 'v8-turbofan',
'scoutts_cmd': 'npm run start:turbofan secpsigverify_nobignums.yaml',
        'scoutts_working_dir': '/scoutyamls/scout.ts-secp/'
},
{
'bench_name': 'ecrecover-eth1-txns-websnark-secp256k1-verify-72-sigs',
'engine_name': 'v8-liftoff',
'scoutts_cmd': 'npm run start:liftoff secpsigverify_nobignums.yaml',
'scoutts_working_dir': '/scoutyamls/scout.ts-secp/',
},
{
'bench_name': 'ecrecover-eth1-txns-websnark-secp256k1-verify-72-sigs',
'engine_name': 'v8-interpreter',
'scoutts_cmd': 'node --wasm-interpret-all ./dist/cli.js secpsigverify_nobignums.yaml',
'scoutts_working_dir': '/scoutyamls/scout.ts-secp/',
},
{
'bench_name': 'ecpairing-zkrollup-websnark-bn128-two-pairings',
'engine_name': 'v8-turbofan',
'scoutts_cmd': 'npm run start:turbofan bn128pairing.yaml',
'scoutts_working_dir': '/scoutyamls/scout.ts-bn128/'
},
{
'bench_name': 'ecpairing-zkrollup-websnark-bn128-two-pairings',
'engine_name': 'v8-liftoff',
'scoutts_cmd': 'npm run start:liftoff bn128pairing.yaml',
'scoutts_working_dir': '/scoutyamls/scout.ts-bn128/'
},
{
'bench_name': 'ecpairing-zkrollup-websnark-bn128-two-pairings',
'engine_name': 'v8-liftoff-and-turbofan',
'scoutts_cmd': 'node ./dist/cli.js bn128pairing.yaml',
'scoutts_working_dir': '/scoutyamls/scout.ts-bn128/'
},
{
'bench_name': 'ecpairing-zkrollup-websnark-bn128-two-pairings',
'engine_name': 'v8-interpreter',
'scoutts_cmd': 'node --wasm-interpret-all ./dist/cli.js bn128pairing.yaml',
'scoutts_working_dir': '/scoutyamls/scout.ts-bn128/'
},
{
'bench_name': 'ecpairing-zkrollup-websnark-bn128-two-pairings',
'engine_name': 'v8-turbofan-with-bignums',
'scoutts_cmd': 'npm run start:turbofan bn128pairing_bignums.yaml',
'scoutts_working_dir': '/scoutyamls/scout.ts-bn128/'
},
{
'bench_name': 'ecpairing-zkrollup-websnark-bn128-two-pairings',
'engine_name': 'v8-liftoff-with-bignums',
'scoutts_cmd': 'npm run start:liftoff bn128pairing_bignums.yaml',
'scoutts_working_dir': '/scoutyamls/scout.ts-bn128/'
},
{
'bench_name': 'ecpairing-zkrollup-websnark-bn128-two-pairings',
'engine_name': 'v8-interpreter-with-bignums',
'scoutts_cmd': 'node --wasm-interpret-all ./dist/cli.js bn128pairing_bignums.yaml',
'scoutts_working_dir': '/scoutyamls/scout.ts-bn128/'
},
{
'bench_name': 'ecpairing-zkrollup-rust-wasm-bn128-two-pairings',
'engine_name': 'v8-turbofan',
'scoutts_cmd': 'npm run start:turbofan rolluprs.yaml',
'scoutts_working_dir': '/scoutyamls/scout.ts-bn128/'
},
{
'bench_name': 'ecpairing-zkrollup-rust-wasm-bn128-two-pairings',
'engine_name': 'v8-liftoff',
'scoutts_cmd': 'npm run start:liftoff rolluprs.yaml',
'scoutts_working_dir': '/scoutyamls/scout.ts-bn128/'
},
{
'bench_name': 'ecpairing-zkrollup-rust-wasm-bn128-two-pairings',
'engine_name': 'v8-liftoff-and-turbofan',
'scoutts_cmd': 'node ./dist/cli.js rolluprs.yaml',
'scoutts_working_dir': '/scoutyamls/scout.ts-bn128/'
},
{
        'bench_name': 'ecpairing-zkrollup-rust-wasm-bn128-two-pairings',
'engine_name': 'v8-interpreter',
'scoutts_cmd': 'node --wasm-interpret-all ./dist/cli.js rolluprs.yaml',
'scoutts_working_dir': '/scoutyamls/scout.ts-bn128/'
},
{
'bench_name': 'daiquiri-zkmixer-websnark-bn128-groth16-four-pairings-and-mimc',
'engine_name': 'v8-turbofan',
'scoutts_cmd': 'npm run bench:turbofan tests/tests-withdraw.yml',
'scoutts_working_dir': '/scoutyamls/daiquiri/'
},
{
'bench_name': 'daiquiri-zkmixer-websnark-bn128-groth16-four-pairings-and-mimc',
'engine_name': 'v8-liftoff',
'scoutts_cmd': 'npm run bench:liftoff tests/tests-withdraw.yml',
'scoutts_working_dir': '/scoutyamls/daiquiri/'
},
{
'bench_name': 'daiquiri-zkmixer-websnark-bn128-groth16-four-pairings-and-mimc',
'engine_name': 'v8-interpreter',
'scoutts_cmd': 'npm run bench:interpreter tests/tests-withdraw.yml',
'scoutts_working_dir': '/scoutyamls/daiquiri/'
},
{
'bench_name': 'biturbo-token-eth1-mainnet-stateless-block-hexary-trie-keccak256-multiproof',
'engine_name': 'v8-turbofan',
'scoutts_cmd': 'node --no-liftoff node_modules/scout.ts/dist/cli.js turbo-token-realistic.yaml',
'scoutts_working_dir': '/scoutyamls/biturbo/'
},
{
'bench_name': 'biturbo-token-eth1-mainnet-stateless-block-hexary-trie-keccak256-multiproof',
'engine_name': 'v8-liftoff',
'scoutts_cmd': 'node --liftoff --no-wasm-tier-up node_modules/scout.ts/dist/cli.js turbo-token-realistic.yaml',
'scoutts_working_dir': '/scoutyamls/biturbo/'
},
{
'bench_name': 'biturbo-token-eth1-mainnet-stateless-block-hexary-trie-keccak256-multiproof',
'engine_name': 'v8-liftoff-and-turbofan',
'scoutts_cmd': 'node node_modules/scout.ts/dist/cli.js turbo-token-realistic.yaml',
'scoutts_working_dir': '/scoutyamls/biturbo/'
},
{
'bench_name': 'biturbo-token-eth1-mainnet-stateless-block-hexary-trie-keccak256-multiproof',
'engine_name': 'v8-interpreter',
'scoutts_cmd': 'node --wasm-interpret-all node_modules/scout.ts/dist/cli.js turbo-token-realistic.yaml',
'scoutts_working_dir': '/scoutyamls/biturbo/'
},
{
'bench_name': 'bls12-wasmsnark-two-pairings',
'engine_name': 'v8-liftoff',
'scoutts_cmd': 'node --liftoff --no-wasm-tier-up dist/cli.js bls12pairing_nobignums.yaml',
'scoutts_working_dir': '/scoutyamls/scout.ts-bls12/'
},
{
'bench_name': 'bls12-wasmsnark-two-pairings',
'engine_name': 'v8-turbofan',
'scoutts_cmd': 'npm run start:turbofan bls12pairing_nobignums.yaml',
'scoutts_working_dir': '/scoutyamls/scout.ts-bls12/'
},
]
RUST_NATIVE_BENCH_INFOS = [
{
'bench_name': 'ecpairing-zkrollup-rust-native-bn128-two-pairings',
'engine_name': 'rust-native',
'native_bin_path': '/scoutyamls/rollup-rs-native/target/release/rollup_rs'
},
{
'bench_name': 'bls12-eip1962-rust-native-two-pairings',
'engine_name': 'rust-native',
'native_bin_path': '/scoutyamls/eip1962-bls12-rs-native/target/release/eip1962-bench'
}
]
# scout yaml files look like this:
"""
beacon_state:
execution_scripts:
- assembly/secp-sig-verify/out/main_with_websnark_and_keccak.wasm
shard_pre_state:
exec_env_states:
- "0000000000000000000000000000000000000000000000000000000000000000"
shard_blocks:
- env: 0
data: "d894895b62b6dc6115fe23c931f9765041a078e1241881ff8015ed312c5863d1e3ff253e8c9077c460233f62bc73d69c5364e0f2de0f7cd064173d84e53ad0bb8bbbd2f48703c59697ca33bf9077524d9df154bc944f8f6516"
shard_post_state:
exec_env_states:
- "00000000000000000000000029120ac3527858f5637e698cdbf0548c6b59ec77"
#- "000000000000000000000000d683fd3465c996598dc972688b7ace676c89077b"
"""
def saveResults(benchmarks):
result_file = os.path.join(RESULT_CSV_OUTPUT_DIR, RESULT_CSV_FILE_NAME)
if os.path.isfile(result_file):
print("backing up existing {}".format(result_file))
# move existing files to old-datetime-folder
ts = time.time()
date_str = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
ts_folder_name = "{}-{}".format(date_str, round(ts))
dest_backup_path = os.path.join(RESULT_CSV_OUTPUT_DIR, ts_folder_name)
os.makedirs(dest_backup_path)
shutil.move(result_file, dest_backup_path)
print("existing csv files backed up to {}".format(dest_backup_path))
with open(result_file, 'w', newline='') as bench_result_file:
fieldnames = ['engine', 'bench_name', 'parse_time', 'exec_time']
writer = csv.DictWriter(bench_result_file, fieldnames=fieldnames)
writer.writeheader()
for row in benchmarks:
writer.writerow(row)
"""
/engines/wabt-bench-dirs/ecpairing-zkrollup-rust-bn128-two-pairings-wabt-with-bignums# /scoutyamls/rollup-rs-native/target/release/rollup_rs
pairing check time: 10.467ms
"""
"""
/scoutyamls/eip1962-bls12-rs-native/target/release/eip1962-bench
Time elapsed in bench() is: 5.178954ms
"""
def do_rust_native(native_bin_path):
print("running rust-native benchmark...\n{}".format(native_bin_path))
rust_cmd = shlex.split(native_bin_path)
stdoutlines = []
with subprocess.Popen(rust_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, universal_newlines=True) as p:
for line in p.stdout: # b'\n'-separated lines
print(line, end='')
stdoutlines.append(line) # pass bytes as is
p.wait()
exec_time_regex = "pairing check time: ([\.\w\d]+)"
exec_benchline = stdoutlines[-1]
exec_time_match = re.search(exec_time_regex, exec_benchline)
if exec_time_match is None:
exec_time_regex = "Time elapsed in bench\(\) is: ([\.\w\d]+)"
exec_time_match = re.search(exec_time_regex, exec_benchline)
exec_us_time = durationpy.from_str(exec_time_match.group(1))
return {'exec_time': exec_us_time.total_seconds()}
"""
$ npm run start:turbofan secpsigverify_nobignums.yaml
> [email protected] start:turbofan /Users/mbpro/dev_ewasm/scout.ts
> node --no-liftoff ./dist/cli.js "secpsigverify_nobignums.yaml"
benchmark startup took 0 seconds and 11780729 nanoseconds (11.780729ms)
benchmark execution took 0 seconds and 267736023 nanoseconds (267.736023ms)
"""
def do_v8_bench(scoutts_cmd, scoutts_working_dir):
print("running v8 benchmark...\n{}".format(scoutts_cmd))
scoutts_cmd = shlex.split(scoutts_cmd)
stdoutlines = []
with subprocess.Popen(scoutts_cmd, cwd=scoutts_working_dir, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, universal_newlines=True) as p:
for line in p.stdout: # b'\n'-separated lines
print(line, end='')
stdoutlines.append(line) # pass bytes as is
p.wait()
parse_time_regex = "benchmark startup took (\d+) seconds and \d+ nanoseconds \(([\d\w\.\s]+)\)"
exec_time_regex = "benchmark execution took (\d+) seconds and \d+ nanoseconds \(([\d\w\.\s]+)\)"
parse_time_line = stdoutlines[-2]
exec_time_line = stdoutlines[-1]
parse_time_match = re.search(parse_time_regex, parse_time_line)
parse_time_seconds = durationpy.from_str(parse_time_match.group(1) + "s")
parse_time_milliseconds = durationpy.from_str(parse_time_match.group(2).replace(" ", ""))
parse_time = parse_time_seconds + parse_time_milliseconds
exec_time_match = re.search(exec_time_regex, exec_time_line)
exec_time_seconds = durationpy.from_str(exec_time_match.group(1) + "s")
exec_time_milliseconds = durationpy.from_str(exec_time_match.group(2).replace(" ", ""))
exec_time = exec_time_seconds + exec_time_milliseconds
return { 'exec_time': exec_time.total_seconds(), 'parse_time': parse_time.total_seconds() }
def do_wamr_bench(wamr_bench_info):
wamr_cmd = [wamr_bench_info['engine_path'], wamr_bench_info['scout_yml_path']]
stdoutlines = []
wamr_working_dir = os.path.dirname(wamr_bench_info['scout_yml_path'])
with subprocess.Popen(wamr_cmd, cwd=wamr_working_dir, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, universal_newlines=True) as p:
for line in p.stdout: # b'\n'-separated lines
print(line, end='')
stdoutlines.append(line) # pass bytes as is
p.wait()
parse_time_regex = 'startup time: (.*)\n'
execution_time_regex = 'execution time: (.*)\n'
parse_time_match = re.search(parse_time_regex, stdoutlines[0])[1]
execution_time_match = re.search(execution_time_regex, stdoutlines[2])[1]
return {'engine': wamr_bench_info['engine_name'],
'bench_name': wamr_bench_info['bench_name'],
'parse_time': float(parse_time_match),
'exec_time': float(execution_time_match)}
"""
root@1<PASSWORD>:/engines/scout.ts-bn128# /engines/scout-cpp-bn128/build/scout.exec bn128pairing_bignums.yaml
opening assembly/bn128-pairing/out/main_with_websnark_bignum_hostfuncs.wasm
called host func blockDataCopy 512016 0 576
benchmark took 0.0419916 seconds.
"""
def do_scoutcpp_bench(scoutcpp_cmd, yaml_working_dir):
print("running scout.cpp benchmark...\n{}".format(scoutcpp_cmd))
scoutcpp_cmd = shlex.split(scoutcpp_cmd)
stdoutlines = []
with subprocess.Popen(scoutcpp_cmd, cwd=yaml_working_dir, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, universal_newlines=True) as p:
for line in p.stdout: # b'\n'-separated lines
print(line, end='')
stdoutlines.append(line) # pass bytes as is
p.wait()
time_line = stdoutlines[-1]
timeregex = "benchmark took ([\d\.e-]+) seconds."
time_match = re.search(timeregex, time_line)
time_string = ""
if "e-" in time_line:
time_string = "{:.8f}s".format(float(time_match.group(1)))
else:
time_string = time_match.group(1) + "s"
time_seconds = durationpy.from_str(time_string)
return { 'exec_time': time_seconds.total_seconds()}
"""
root@7<PASSWORD>:/benchscript# /engines/fizzy-bls12-hostfuncs/build/bin/fizzy-bench --benchmark_filter=fizzy/* --benchmark_color=false /engines/fizzy-bls12-hostfuncs/build/bin/bls12-synth-loop
2020-06-08 17:49:12
Running /engines/fizzy-bls12-hostfuncs/build/bin/fizzy-bench
Run on (4 X 2294.68 MHz CPU s)
CPU Caches:
L1 Data 32K (x2)
L1 Instruction 32K (x2)
L2 Unified 256K (x2)
L3 Unified 51200K (x2)
Load Average: 0.00, 0.00, 0.02
---------------------------------------------------------------------------------------------------------------------
Benchmark Time CPU Iterations UserCounters...
---------------------------------------------------------------------------------------------------------------------
fizzy/parse/main_with_websnark_bignum_hostfuncs 655 us 655 us 1044 rate=132.335M/s size=86.734k
fizzy/instantiate/main_with_websnark_bignum_hostfuncs 788 us 788 us 885
fizzy/execute/main_with_websnark_bignum_hostfuncs/-e synth 10374 us 10374 us 65
"""
def do_fizzy_bench(fizzy_cmd):
print("\nrunning fizzy benchmark...\n{}\n".format(fizzy_cmd))
fizzy_cmd = shlex.split(fizzy_cmd)
stdoutlines = []
with subprocess.Popen(fizzy_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, universal_newlines=True) as p:
for line in p.stdout: # b'\n'-separated lines
print(line, end='')
stdoutlines.append(line) # pass bytes as is
p.wait()
instantiate_time_regex = "fizzy/instantiate\D+(\d+) us"
instantiate_benchline = stdoutlines[-2]
instantiate_time_match = re.search(instantiate_time_regex, instantiate_benchline)
parse_us_time = durationpy.from_str("{}us".format(instantiate_time_match.group(1)))
exec_time_regex = "fizzy/execute\D+(\d+) us"
exec_benchline = stdoutlines[-1]
exec_time_match = re.search(exec_time_regex, exec_benchline)
exec_us_time = durationpy.from_str("{}us".format(exec_time_match.group(1)))
return {'parse_time': parse_us_time.total_seconds(), 'exec_time': exec_us_time.total_seconds()}
def do_wasm3_bench(wasm3_cmd):
print("\nrunning wasm3 benchmark...\n{}\n".format(wasm3_cmd))
wasm3_cmd = shlex.split(wasm3_cmd)
stdoutlines = []
with subprocess.Popen(wasm3_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, universal_newlines=True) as p:
for line in p.stdout: # b'\n'-separated lines
print(line, end='')
stdoutlines.append(line)
bench_result = json.loads("".join(stdoutlines))
# count parsing/instantiation in 'init_time'
init_time = bench_result['benchmarks'][0]['real_time'] + bench_result['benchmarks'][1]['real_time']
exec_time = bench_result['benchmarks'][2]['real_time']
p.wait()
return {'parse_time': init_time, 'exec_time': exec_time}
"""
/engines/wabt-bn128/out/clang/Release/wasm-interp /engines/wabt-bench-dirs/daiquiri-zkmixer-websnark-bn128-groth16-four-pairings-and-mimc-wabt-with-bignums/main_with_websnark_bignum_hostfuncs.wasm
ReadMemorySection time: 240us
eth2_savePostStateRoot: E0A30EF7356F67420D65613C3E5F718B12240227C90304CA00916B2618B5B300
parse time: 3204us
exec time: 345512us
"""
def do_wabt_bench_manual(isolated_bench_dir, wabt_cmd):
print("running wabt_manual benchmark...\n{}".format(wabt_cmd))
wabt_cmd = shlex.split(wabt_cmd)
stdoutlines = []
with subprocess.Popen(wabt_cmd, cwd=isolated_bench_dir, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, universal_newlines=True) as p:
for line in p.stdout: # b'\n'-separated lines
print(line, end='')
stdoutlines.append(line) # pass bytes as is
p.wait()
parse_time_regex = "parse time: (\d+)us"
exec_time_regex = "exec time: (\d+)us"
parse_benchline = stdoutlines[-2]
exec_benchline = stdoutlines[-1]
parse_time_match = re.search(parse_time_regex, parse_benchline)
parse_us_time = durationpy.from_str("{}us".format(parse_time_match.group(1)))
exec_time_match = re.search(exec_time_regex, exec_benchline)
exec_us_time = durationpy.from_str("{}us".format(exec_time_match.group(1)))
return {'exec_time': exec_us_time.total_seconds(), 'parse_time': parse_us_time.total_seconds()}
"""
/engines/wabt-secp/out/clang/Release/benchmark-interp /engines/wabt-bench-dirs/ecrecover-eth1-txns-websnark-secp256k1-verify-72-sigs-wabt-with-bignums/main_with_websnark_and_keccak.wasm
ReadMemorySection time: 47us
parse succeeded..
eth2_savePostStateRoot: 00000000000000000000000029120AC3527858F5637E698CDBF0548C6B59EC77
parse time: 2685us
exec time: 883336us
execution succeeded...
register benchmark...
run benchmark...
2020-03-10 23:45:46
Running /engines/wabt-secp/out/clang/Release/benchmark-interp
Run on (1 X 2900 MHz CPU )
CPU Caches:
L1 Data 32 KiB (x1)
L1 Instruction 32 KiB (x1)
L2 Unified 256 KiB (x1)
L3 Unified 12288 KiB (x1)
Load Average: 0.39, 0.12, 0.20
eth2_savePostStateRoot: 00000000000000000000000029120AC3527858F5637E698CDBF0548C6B59EC77
------------------------------------------------------
Benchmark Time CPU Iterations
------------------------------------------------------
wabt_interp 999345 us 806069 us 1
"""
def do_wabt_bench(isolated_bench_dir, wabt_cmd):
print("\nrunning wabt benchmark...\n{}\n".format(wabt_cmd))
wabt_cmd = shlex.split(wabt_cmd)
stdoutlines = []
with subprocess.Popen(wabt_cmd, cwd=isolated_bench_dir, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, universal_newlines=True) as p:
for line in p.stdout: # b'\n'-separated lines
print(line, end='')
stdoutlines.append(line) # pass bytes as is
p.wait()
parse_time_regex = "parse time: (\d+)us"
parse_benchline = stdoutlines[3]
parse_time_match = re.search(parse_time_regex, parse_benchline)
if parse_time_match is None:
# the parse time is on `stdoutlines[2]` if eth2_savePostStateRoot isn't called (one less line printed)
parse_benchline = stdoutlines[2]
parse_time_match = re.search(parse_time_regex, parse_benchline)
parse_us_time = durationpy.from_str("{}us".format(parse_time_match.group(1)))
exec_time_regex = "wabt_interp\s+(\d+) us"
# maybe --benchmark_format=json is better so dont have to parse "36.775k"
exec_benchline = stdoutlines[-1]
exec_time_match = re.search(exec_time_regex, exec_benchline)
exec_us_time = durationpy.from_str("{}us".format(exec_time_match.group(1)))
return {'parse_time': parse_us_time.total_seconds(), 'exec_time': exec_us_time.total_seconds()}
def run_yaml_file_in_wabt(isolated_bench_dir, wabt_bin_path, yaml_file_dir, yaml_file_rel_path, manual=False):
yaml_file_path = os.path.join(yaml_file_dir, yaml_file_rel_path)
with open(yaml_file_path, 'r') as stream:
print("running yaml file in wabt:", yaml_file_path)
yaml_file = yaml.safe_load(stream)
wasm_files = yaml_file['beacon_state']['execution_scripts']
if len(wasm_files) > 1:
print("ERROR: should only be one wasm file in the yaml file. the scout_bignum_bench.py script doesn't support more.")
sys.exit()
print("wasm file name:", yaml_file['beacon_state']['execution_scripts'])
wasm_file_relative_path = wasm_files[0]
input_data = yaml_file['shard_blocks'][0]['data']
print("input_data size (bytes):", len(input_data)//2)
# TODO: this test_block_data.hex is a workaround because this wabt branch doesn't support scout yaml files
isolated_bench_path = os.path.join(WABT_BENCH_WORKING_DIR, isolated_bench_dir)
os.makedirs(isolated_bench_path, exist_ok=True)
isolated_input_data_path = os.path.join(isolated_bench_path, "test_block_data.hex")
with open(isolated_input_data_path, 'w') as blockdata_file:
blockdata_file.write(input_data)
# copy the wasm file into isolated_bench_path
# we assume the path in the scout yaml file is a relative path to the directory where the yaml file resides
wasm_file_full_src_path = os.path.join(yaml_file_dir, wasm_file_relative_path)
wasm_filename = os.path.basename(wasm_file_full_src_path)
wasm_file_dst_path = os.path.join(isolated_bench_path, wasm_filename)
shutil.copyfile(wasm_file_full_src_path, wasm_file_dst_path)
# form the wabt command and execute
#cmd_str = "./benchmark-interp {}".format(wasmfile)
#cmd_str = "./wasm-interp {}".format(wasmfile)
wabt_cmd = "{} {}".format(wabt_bin_path, wasm_file_dst_path)
if manual:
wabt_result = do_wabt_bench_manual(isolated_bench_path, wabt_cmd)
else:
wabt_result = do_wabt_bench(isolated_bench_path, wabt_cmd)
return wabt_result
print("ERROR in scout_bignum_bench.py, run_yaml_file_in_wabt()!")
sys.exit()
return
def generate_single_test_yamls_from_multitest(yaml_file_dir, yaml_file_rel_path, output_file_dir):
yaml_file_path = os.path.join(yaml_file_dir, yaml_file_rel_path)
yaml_file_name = os.path.basename(yaml_file_path)
with open(yaml_file_path, 'r') as stream:
print("generating single test case yaml files from multi test case yaml file:", yaml_file_path)
yaml_file = yaml.safe_load(stream)
exec_script = yaml_file['beacon_state']['execution_scripts'][0]
block_data = yaml_file['shard_blocks'][0]['data']
prestate_exec_env_state = yaml_file['shard_pre_state']['exec_env_states'][0]
poststate_exec_env_state = yaml_file['shard_post_state']['exec_env_states'][0]
template_yaml = {}
template_yaml['beacon_state'] = {'execution_scripts': [exec_script]}
template_yaml['shard_pre_state'] = {'exec_env_states': [prestate_exec_env_state]}
template_yaml['shard_blocks'] = [{'env': 0, 'data': block_data}]
template_yaml['shard_post_state'] = {'exec_env_states': [poststate_exec_env_state]}
execution_scripts = yaml_file['beacon_state']['execution_scripts']
for script in execution_scripts:
new_yaml = template_yaml.copy()
new_yaml['beacon_state']['execution_scripts'][0] = script
script_without_extension = script[0:-5]
script_without_prefix = re.sub(r'(\w+_)', '', script_without_extension)
new_name_without_extension = yaml_file_name[0:-5]
new_file_name = new_name_without_extension + '-' + script_without_prefix + '.yaml'
new_file_path = os.path.join(output_file_dir, new_file_name)
with open(new_file_path, 'w') as outstream:
yaml.dump(new_yaml, outstream, default_flow_style=False)
print("wrote new yaml file:", new_file_name)
return
def generate_all_cewasm_yamls():
new_yamls_output_dir = os.path.join(C_EWASM_DIR, 'wasm')
c_ewasm_tests_path = os.path.join(C_EWASM_DIR, 'tests')
yamlfiles = [dI for dI in os.listdir(c_ewasm_tests_path)]
for c_yaml_file in yamlfiles:
generate_single_test_yamls_from_multitest(c_ewasm_tests_path, c_yaml_file, new_yamls_output_dir)
def do_all_cewasm_benchmarks():
yaml_files_path = os.path.join(C_EWASM_DIR, 'wasm')
yaml_file_names = [file for file in os.listdir(yaml_files_path) if file[-4:] == "yaml"]
# yaml_file_names look like "keccak256_256-keccak256_libkeccak-tiny-unrolled.yaml"
scoutcpp_bin_path = '/engines/scoutcpp-bn128/build/scout.exec'
c_ewasm_bench_runs = []
for yaml_file_name in yaml_file_names:
if yaml_file_name in ["keccak256_1024-ref.yaml", "keccak256_256-ref.yaml", "keccak256_64-ref.yaml"]:
continue
yaml_file_without_extension = yaml_file_name[:-5]
c_ewasm_bench_name = yaml_file_without_extension
#engine_name = "wabt-baseline"
scoutcpp_bin_path = '/engines/scoutcpp-bn128/build/scout.exec'
yaml_file_path = os.path.join(yaml_files_path, yaml_file_name)
yaml_working_dir = yaml_files_path
scoutcpp_cmd = "{} {}".format(scoutcpp_bin_path, yaml_file_path)
for i in range(0, 10):
print("doing scoutcpp c_ewasm bench i=", i)
scoutcpp_result = do_scoutcpp_bench(scoutcpp_cmd, yaml_working_dir)
scoutcpp_record = {}
scoutcpp_record['engine'] = "wabt-baseline"
scoutcpp_record['bench_name'] = c_ewasm_bench_name
scoutcpp_record['exec_time'] = scoutcpp_result['exec_time']
#bench_record['parse_time'] = scoutcpp_result['exec_time']
c_ewasm_bench_runs.append(scoutcpp_record)
print("doing wabt-with-superops c_ewasm bench i=", i)
wabt_with_superops_bin_path = "/engines/wabt-biturbo/out/clang/Release/wasm-interp"
isolated_bench_dir = "{}-{}".format(c_ewasm_bench_name, "wabt-with-superops")
wabt_superops_result = run_yaml_file_in_wabt(isolated_bench_dir, wabt_with_superops_bin_path, yaml_working_dir, yaml_file_name, manual=True)
wabt_record = {}
wabt_record['engine'] = "wabt-with-superops"
wabt_record['bench_name'] = c_ewasm_bench_name
wabt_record['exec_time'] = wabt_superops_result['exec_time']
wabt_record['parse_time'] = wabt_superops_result['parse_time']
#bench_record['parse_time'] = scoutcpp_result['exec_time']
c_ewasm_bench_runs.append(wabt_record)
return c_ewasm_bench_runs
def main():
scout_benchmarks = []
## do C_ewasm hash function benchmarks
# TODO: temporarily disabled until we replace scout.cpp
#generate_all_cewasm_yamls()
#c_ewasm_bench_runs = do_all_cewasm_benchmarks()
#scout_benchmarks.extend(c_ewasm_bench_runs)
# run 5 iterations of wasm3
for i in range(0, 5):
print("\ndoing wasm3 bench run i=",i)
for wasm3_bench in WASM3_BENCH_INFOS:
bench_name = wasm3_bench['bench_name']
wasm3_engine_name = wasm3_bench['engine_name']
wasm3_bin_path = wasm3_bench['wasm3_bin_path']
wasm3_bench_dir = wasm3_bench['wasm3_bench_dir']
print("wasm3 bench: ", wasm3_engine_name, bench_name)
wasm3_command = "{} --benchmark_format=json --benchmark_filter=wasm3/* --benchmark_color=false {}".format(wasm3_bin_path, wasm3_bench_dir)
wasm3_bench_result = do_wasm3_bench(wasm3_command)
wasm3_record = {}
wasm3_record['engine'] = wasm3_engine_name
wasm3_record['bench_name'] = bench_name
wasm3_record['exec_time'] = wasm3_bench_result['exec_time']
wasm3_record['parse_time'] = wasm3_bench_result['parse_time']
scout_benchmarks.append(wasm3_record)
print("got fizzy result:", wasm3_record)
# run 10 iterations of scout_wamr.c
for i in range(10):
for bench_to_run in WAMR_BENCH_INFOS:
scout_benchmarks.append(do_wamr_bench(bench_to_run))
# run 10 iterations of rust-native
for i in range(0, 10):
print("\ndoing rust-native bench run i=",i)
for rust_bench_torun in RUST_NATIVE_BENCH_INFOS:
bench_name = rust_bench_torun['bench_name']
rust_engine_name = rust_bench_torun['engine_name']
native_bin_path = rust_bench_torun['native_bin_path']
print("rust-native bench: ", rust_engine_name, bench_name)
rust_bench_result = do_rust_native(native_bin_path)
rust_record = {}
rust_record['engine'] = rust_engine_name
rust_record['bench_name'] = bench_name
rust_record['exec_time'] = rust_bench_result['exec_time']
#wabt_record['parse_time'] = 0
scout_benchmarks.append(rust_record)
print("got rust-native result:", rust_record)
# run 5 iterations of fizzy
for i in range(0, 5):
print("\ndoing fizzy bench run i=",i)
for fizzy_bench_torun in FIZZY_BENCH_INFOS:
bench_name = fizzy_bench_torun['bench_name']
fizzy_engine_name = fizzy_bench_torun['engine_name']
fizzy_bin_path = fizzy_bench_torun['fizzy_bin_path']
fizzy_bench_dir = fizzy_bench_torun['fizzy_bench_dir']
print("fizzy bench: ", fizzy_engine_name, bench_name)
fizzy_command = "{} --benchmark_filter=fizzy/* --benchmark_color=false {}".format(fizzy_bin_path, fizzy_bench_dir)
fizzy_bench_result = do_fizzy_bench(fizzy_command)
fizzy_record = {}
fizzy_record['engine'] = fizzy_engine_name
fizzy_record['bench_name'] = bench_name
fizzy_record['exec_time'] = fizzy_bench_result['exec_time']
fizzy_record['parse_time'] = fizzy_bench_result['parse_time']
scout_benchmarks.append(fizzy_record)
print("got fizzy result:", fizzy_record)
# run 10 iterations of wabt_manual (ran using wabt/wasm-interp)
for i in range(0, 10):
print("\ndoing wabt manual bench run i=",i)
for wabt_bench_torun in WABT_BENCH_MANUAL_INFOS:
bench_name = wabt_bench_torun['bench_name']
wabt_engine_name = wabt_bench_torun['engine_name']
wabt_bin_path = wabt_bench_torun['wabt_bin_path']
yaml_file_dir = wabt_bench_torun['yaml_file_dir']
yaml_file_rel_path = wabt_bench_torun['yaml_file_rel_path']
print("wabt bench: ", wabt_engine_name, bench_name)
isolated_bench_dir = "{}-{}".format(bench_name, wabt_engine_name)
wabt_bench_result = run_yaml_file_in_wabt(isolated_bench_dir, wabt_bin_path, yaml_file_dir, yaml_file_rel_path, manual=True)
wabt_record = {}
wabt_record['engine'] = wabt_engine_name
wabt_record['bench_name'] = bench_name
wabt_record['exec_time'] = wabt_bench_result['exec_time']
wabt_record['parse_time'] = wabt_bench_result['parse_time']
scout_benchmarks.append(wabt_record)
print("got wabt_manual result:", wabt_record)
# run only 5 iterations of wabt, since each run is an average already (ran using wabt/benchmark-interp)
for i in range(0, 5):
print("\ndoing wabt bench run i=",i)
for wabt_bench_torun in WABT_BENCH_INFOS:
bench_name = wabt_bench_torun['bench_name']
wabt_engine_name = wabt_bench_torun['engine_name']
wabt_bin_path = wabt_bench_torun['wabt_bin_path']
yaml_file_dir = wabt_bench_torun['yaml_file_dir']
yaml_file_rel_path = wabt_bench_torun['yaml_file_rel_path']
print("wabt bench: ", wabt_engine_name, bench_name)
isolated_bench_dir = "{}-{}".format(bench_name, wabt_engine_name)
wabt_bench_result = run_yaml_file_in_wabt(isolated_bench_dir, wabt_bin_path, yaml_file_dir, yaml_file_rel_path, manual=False)
wabt_record = {}
wabt_record['engine'] = wabt_engine_name
wabt_record['bench_name'] = bench_name
wabt_record['exec_time'] = wabt_bench_result['exec_time']
wabt_record['parse_time'] = wabt_bench_result['parse_time']
scout_benchmarks.append(wabt_record)
print("got wabt result:", wabt_record)
# run 10 iterations of v8
for i in range(0, 10):
print("\ndoing v8 bench run i=",i)
for v8_bench in V8_BENCH_INFOS:
v8_bench_name = v8_bench['bench_name']
v8_engine_name = v8_bench['engine_name']
scoutts_cmd = v8_bench['scoutts_cmd']
scoutts_working_dir = v8_bench['scoutts_working_dir']
v8_result = do_v8_bench(scoutts_cmd, scoutts_working_dir)
v8_record = {}
v8_record['engine'] = v8_engine_name
v8_record['bench_name'] = v8_bench_name
v8_record['parse_time'] = v8_result['parse_time']
v8_record['exec_time'] = v8_result['exec_time']
scout_benchmarks.append(v8_record)
print("got v8 result:", v8_record)
# run 10 iterations of scout.cpp
for i in range(0, 10):
print("\ndoing scout.cpp bench run i=",i)
for scoutcpp_bench in SCOUTCPP_BENCH_INFOS:
scoutcpp_bench_name = scoutcpp_bench['bench_name']
scoutcpp_engine_name = scoutcpp_bench['engine_name']
scoutcpp_bin_path = scoutcpp_bench['scoutcpp_bin_path']
yaml_file_path = scoutcpp_bench['yaml_file_path']
yaml_working_dir = scoutcpp_bench['yaml_working_dir']
scoutcpp_cmd = "{} {}".format(scoutcpp_bin_path, yaml_file_path)
scoutcpp_result = do_scoutcpp_bench(scoutcpp_cmd, yaml_working_dir)
scoutcpp_record = {}
scoutcpp_record['engine'] = scoutcpp_engine_name
scoutcpp_record['bench_name'] = scoutcpp_bench_name
scoutcpp_record['exec_time'] = scoutcpp_result['exec_time']
scout_benchmarks.append(scoutcpp_record)
print("got scout.cpp result:", scoutcpp_record)
print("got scout_benchmarks:")
print(json.dumps(scout_benchmarks))
saveResults(scout_benchmarks)
if __name__ == "__main__":
main()
``` |
{
"source": "jnordin20/conan-boost-scripts",
"score": 2
} |
#### File: jnordin20/conan-boost-scripts/conanfile.py
```python
from conans import ConanFile, tools
class BoostConan(ConanFile):
name = "boost"
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False],
"android_ndk": "ANY", "android_stl_type":["c++_static", "c++_shared"]}
default_options = "shared=False", "android_ndk=None"
description = "Boost is a set of libraries for the C++ programming language."
url = "https://github.com/RGPaul/conan-boost-scripts"
license = "Boost Software License"
def package(self):
self.copy("*", dst="include", src='conan/include')
self.copy("*.lib", dst="lib", src='conan/lib', keep_path=False)
self.copy("*.dll", dst="bin", src='conan/lib', keep_path=False)
self.copy("*.so", dst="lib", src='conan/lib', keep_path=False)
self.copy("*.dylib", dst="lib", src='conan/lib', keep_path=False)
self.copy("*.a", dst="lib", src='conan/lib', keep_path=False)
def package_id(self):
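# Collapse all ARM variants into a single package id on iOS; the prebuilt Boost libraries
# are presumably shipped as universal (fat) binaries covering every ARM architecture.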
if "arm" in self.settings.arch and self.settings.os == "iOS":
self.info.settings.arch = "AnyARM"
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
self.cpp_info.includedirs = ['include']
def config_options(self):
# remove android specific option for all other platforms
if self.settings.os != "Android":
del self.options.android_ndk
del self.options.android_stl_type
``` |
{
"source": "jnorthrup/neural-qa",
"score": 3
} |
#### File: jnorthrup/neural-qa/generator_test.py
```python
import operator
import generator
import generator_utils
def test_extract_variables():
query = 'select distinct(?x) ?y where { ?x a C . ?x a ?y}'
query2 = 'select distinct ?a where'
result = generator_utils.extract_variables(query)
result2 = generator_utils.extract_variables(query2)
assert result == ['x', 'y']
assert result2 == ['a']
def test_single_resource_sort():
matches = [{'usages': [17]}, {'usages': [0]}, {'usages': [3]}, {'usages': [2]}, {'usages': [1]}]
result = sorted(matches, key=generator.prioritize_usage)
assert list(map(operator.itemgetter(0), list(map(operator.itemgetter('usages'), result)))) == [17, 3, 2, 1, 0 ]
def test_couple_resource_sort():
matches = [{'usages': [17, 2]}, {'usages': [0, 0]}, {'usages': [3, 2]}, {'usages': [2, 2]}, {'usages': [1, 2]}]
result = sorted(matches, key=generator.prioritize_usage)
assert list(map(operator.itemgetter('usages'), result)) == [[17, 2], [3, 2], [2, 2], [1, 2], [0, 0]]
def test_encoding():
original = 'SELECT ?city WHERE { ?m skos:broader dbc:Cities_in_Germany . ?city dct:subject ?m . ?city dbo:areaTotal ?area . ?b dbo:artist dbr:John_Halsey_(musician) } order by asc (?area)'
expected_encoding = 'SELECT var_city WHERE brack_open var_m skos_broader dbc_Cities_in_Germany sep_dot var_city dct_subject var_m sep_dot var_city dbo_areaTotal var_area sep_dot var_b dbo_artist dbr_John_Halsey_ attr_open musician attr_close brack_close _oba_ var_area '
result = generator_utils.encode(original)
assert result == expected_encoding
assert str.strip(generator_utils.decode(result)) == original
def test_shorten_query():
shorten = generator_utils.shorten_query
assert shorten('ORDER BY var_area') == '_oba_ var_area'
assert shorten('order by asc par_open var_area par_close') == '_oba_ var_area'
assert shorten('order by desc attr_open var_area attr_close') == '_obd_ var_area'
def test_normalize_predicates():
alt = 'dbp_placeOfBirth'
assert generator_utils.normalize_predicates(alt) == 'dbo_birthPlace'
```
#### File: zheyuan/pipeline/eliminator.py
```python
import argparse
from tqdm import tqdm
def eliminator(location):
"""
Removes the templates that are considered less popular according to the
proposed ranking mechanism. The input file should be pre-processed so that
a TRUE or FALSE value is appended as the last column; rows whose last entry
is not TRUE are dropped, and the surviving rows are written to a file named
new_train.csv for further use.
"""
lines = open(location,'r').readlines()
print(len(lines))
accum = []
nspm_ready = open("new_train.csv",'w')
for line in tqdm(lines):
values = line.split(",")
if(len(values)<8):
print("Input file is of wrong format, please add the corect bolean values as the last column entry to use this function")
print(values[-1])
if(values[-1]=="TRUE\n"):
accum.append(";".join(values[:-2])+"\n")
nspm_ready.write(accum[-1])
nspm_ready.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
requiredNamed = parser.add_argument_group('Required Arguments')
requiredNamed.add_argument('--location', dest='location', metavar='location',
help='location of the file to be pruned.', required=True)
args = parser.parse_args()
location = args.location
eliminator(location)
pass
```
#### File: utility/benchmark/benchmark.py
```python
import argparse
import json
from tqdm import tqdm
from extract_questions import read_json, write_to_ask
from interpreter import interprete
from reconstruct_json import construct_json
from retrieve_answers import read_sparqls, retrieve
def benchmark(trained_model, test_set, answer_file="answers.json"):
# Deconstruct the questions and infos from test set
questions_info, questions = read_json(test_set)
# Write the questions to to_ask.txt
write_to_ask(questions)
# Use Interpreter to interprete them into decoded queries stored in output_decoded.txt
interprete(trained_model)
# Use the sparql endpoint (http://dbpedia.org/sparql) to retrieve answers of the queries
sparqls = read_sparqls()
answers = []
print("Retrieving answers of queries via SPARQL endpoint")
for query in tqdm(sparqls):
try:
answer_group = retrieve(query)
except:
answer_group = []
answers.append(answer_group)
json_file = construct_json(test_set.replace(".qald.json",""), questions_info, questions, sparqls, answers)
path = "../gsoc/zheyuan/utility/benchmark/"
with open(path+"answers-"+test_set, "w") as f:
# js = json.dumps(json_file, indent=4, separators=(',', ':'))
json.dump(json_file, f, indent=4, separators=(', ', ': '))
if __name__ == "__main__":
"""
Section to parse the command line arguments.
"""
parser = argparse.ArgumentParser()
requiredNamed = parser.add_argument_group('Required Arguments')
requiredNamed.add_argument('--model', dest='model', metavar='[modelId]',
help='the trained model', required=True)
requiredNamed.add_argument('--test', dest='test', metavar='[testset file name]',
help='the testing qald set file name', required=True)
requiredNamed.add_argument('--answer', dest='answer', metavar='[answer file name]',
help='the answers of qald dataset file name', required=False)
args = parser.parse_args()
trained_model = args.model
test_set = args.test
answer_file = args.answer
benchmark(trained_model,test_set, answer_file)
pass
``` |
{
"source": "jnorwood/incubator-mxnet",
"score": 2
} |
#### File: example/cnn_text_classification/data_helpers.py
```python
import itertools
import os
import re
from collections import Counter
import numpy as np
import word2vec
# from gensim.models import word2vec
def clean_str(string):
"""Tokenization/string cleaning for all datasets except for SST.
Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
"""
string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", r" \( ", string)
string = re.sub(r"\)", r" \) ", string)
string = re.sub(r"\?", r" \? ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip().lower()
def load_data_and_labels():
"""Loads MR polarity data from files, splits the data into words and generates labels.
Returns split sentences and labels.
"""
# Load data from files
pos_path = "./data/rt-polaritydata/rt-polarity.pos"
neg_path = "./data/rt-polaritydata/rt-polarity.neg"
if not os.path.exists(pos_path):
os.system("git clone https://github.com/dennybritz/cnn-text-classification-tf.git")
os.system('mv cnn-text-classification-tf/data .')
os.system('rm -rf cnn-text-classification-tf')
positive_examples = list(open(pos_path).readlines())
positive_examples = [s.strip() for s in positive_examples]
negative_examples = list(open(neg_path).readlines())
negative_examples = [s.strip() for s in negative_examples]
# Split by words
x_text = positive_examples + negative_examples
x_text = [clean_str(sent) for sent in x_text]
x_text = [s.split(" ") for s in x_text]
# Generate labels
positive_labels = [1 for _ in positive_examples]
negative_labels = [0 for _ in negative_examples]
y = np.concatenate([positive_labels, negative_labels], 0)
return [x_text, y]
def pad_sentences(sentences, padding_word="</s>"):
"""Pads all sentences to the same length. The length is defined by the longest sentence.
Returns padded sentences.
"""
sequence_length = max(len(x) for x in sentences)
padded_sentences = []
for i, sentence in enumerate(sentences):
num_padding = sequence_length - len(sentence)
new_sentence = sentence + [padding_word] * num_padding
padded_sentences.append(new_sentence)
return padded_sentences
def build_vocab(sentences):
"""Builds a vocabulary mapping from word to index based on the sentences.
Returns vocabulary mapping and inverse vocabulary mapping.
"""
# Build vocabulary
word_counts = Counter(itertools.chain(*sentences))
# Mapping from index to word
vocabulary_inv = [x[0] for x in word_counts.most_common()]
# Mapping from word to index
vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}
return [vocabulary, vocabulary_inv]
def build_input_data(sentences, labels, vocabulary):
"""Maps sentencs and labels to vectors based on a vocabulary."""
x = np.array([[vocabulary[word] for word in sentence] for sentence in sentences])
y = np.array(labels)
return [x, y]
def build_input_data_with_word2vec(sentences, labels, word2vec_list):
"""
Map sentences and labels to vectors based on a pretrained word2vec
"""
x_vec = []
for sent in sentences:
vec = []
for word in sent:
if word in word2vec_list:
vec.append(word2vec_list[word])
else:
vec.append(word2vec_list['</s>'])
x_vec.append(vec)
x_vec = np.array(x_vec)
y_vec = np.array(labels)
return [x_vec, y_vec]
def load_data_with_word2vec(word2vec_list):
"""Loads and preprocessed data for the MR dataset.
Returns input vectors, labels, vocabulary, and inverse vocabulary.
"""
# Load and preprocess data
sentences, labels = load_data_and_labels()
sentences_padded = pad_sentences(sentences)
# vocabulary, vocabulary_inv = build_vocab(sentences_padded)
return build_input_data_with_word2vec(sentences_padded, labels, word2vec_list)
def load_data():
"""Loads and preprocessed data for the MR dataset.
Returns input vectors, labels, vocabulary, and inverse vocabulary.
"""
# Load and preprocess data
sentences, labels = load_data_and_labels()
sentences_padded = pad_sentences(sentences)
vocabulary, vocabulary_inv = build_vocab(sentences_padded)
x, y = build_input_data(sentences_padded, labels, vocabulary)
return [x, y, vocabulary, vocabulary_inv]
def batch_iter(data, batch_size, num_epochs):
"""Generates a batch iterator for a dataset."""
data = np.array(data)
data_size = len(data)
num_batches_per_epoch = int(len(data)/batch_size) + 1
for epoch in range(num_epochs):
# Shuffle the data at each epoch
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indices]
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
yield shuffled_data[start_index:end_index]
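# Illustrative usage (names are placeholders): iterate with
# for batch in batch_iter(list(zip(x_train, y_train)), batch_size, num_epochs):
#     x_batch, y_batch = zip(*batch)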
def load_pretrained_word2vec(infile):
"""Load the pre-trained word2vec from file."""
if isinstance(infile, str):
infile = open(infile)
word2vec_list = {}
for idx, line in enumerate(infile):
if idx == 0:
vocab_size, dim = line.strip().split()
else:
tks = line.strip().split()
word2vec_list[tks[0]] = list(map(float, tks[1:]))
return word2vec_list
def load_google_word2vec(path):
model = word2vec.Word2Vec.load_word2vec_format(path, binary=True)
return model
``` |
{
"source": "jnorwood/keras-squeezenet",
"score": 3
} |
#### File: jnorwood/keras-squeezenet/squeezenet.py
```python
import keras.backend as K
from keras.models import Model
from keras.layers import Input, Flatten, Dropout, Concatenate, Activation
from keras.layers import Convolution2D, MaxPooling2D, AveragePooling2D
from keras.layers import GlobalMaxPooling2D, GlobalAveragePooling2D
from keras.applications.imagenet_utils import decode_predictions
from keras.applications.imagenet_utils import preprocess_input
#modified in keras 2.2.2 see https://stackoverflow.com/questions/49113140/importerror-cannot-import-name-obtain-input-shape-from-keras
from keras_applications.imagenet_utils import _obtain_input_shape
from keras.utils.data_utils import get_file
WEIGHTS_PATH = 'https://github.com/wohlert/keras-squeezenet/releases/download/v0.1/squeezenet_weights.h5'
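# A Fire module (SqueezeNet, Iandola et al. 2016) squeezes the input with 1x1 convolutions,
# then expands it with parallel 1x1 and 3x3 convolutions whose outputs are concatenated
# along the channel axis; `filters` holds the (squeeze, expand1x1, expand3x3) channel counts.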
def _fire(x, filters, name="fire"):
sq_filters, ex1_filters, ex2_filters = filters
squeeze = Convolution2D(sq_filters, (1, 1), activation='relu', padding='same', name=name + "/squeeze1x1")(x)
expand1 = Convolution2D(ex1_filters, (1, 1), activation='relu', padding='same', name=name + "/expand1x1")(squeeze)
expand2 = Convolution2D(ex2_filters, (3, 3), activation='relu', padding='same', name=name + "/expand3x3")(squeeze)
x = Concatenate(axis=-1, name=name)([expand1, expand2])
return x
def SqueezeNet(include_top=True, weights="imagenet", input_tensor=None, input_shape=None, pooling=None, classes=1000):
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as imagenet with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = _obtain_input_shape(input_shape,
default_size=224,
min_size=48,
data_format=K.image_data_format(),
require_flatten=include_top)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
x = Convolution2D(64, kernel_size=(3, 3), strides=(2, 2), padding="same", activation="relu", name='conv1')(img_input)
x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='maxpool1', padding="valid")(x)
x = _fire(x, (16, 64, 64), name="fire2")
x = _fire(x, (16, 64, 64), name="fire3")
x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='maxpool3', padding="valid")(x)
x = _fire(x, (32, 128, 128), name="fire4")
x = _fire(x, (32, 128, 128), name="fire5")
x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='maxpool5', padding="valid")(x)
x = _fire(x, (48, 192, 192), name="fire6")
x = _fire(x, (48, 192, 192), name="fire7")
x = _fire(x, (64, 256, 256), name="fire8")
x = _fire(x, (64, 256, 256), name="fire9")
if include_top:
x = Dropout(0.5, name='dropout9')(x)
x = Convolution2D(classes, (1, 1), padding='valid', name='conv10')(x)
x = AveragePooling2D(pool_size=(13, 13), name='avgpool10')(x)
x = Flatten(name='flatten10')(x)
x = Activation("softmax", name='softmax')(x)
else:
if pooling == "avg":
x = GlobalAveragePooling2D(name="avgpool10")(x)
else:
x = GlobalMaxPooling2D(name="maxpool10")(x)
model = Model(img_input, x, name="squeezenet")
if weights == 'imagenet':
weights_path = get_file('squeezenet_weights.h5',
WEIGHTS_PATH,
cache_subdir='models')
model.load_weights(weights_path)
return model
``` |
{
"source": "jnorwood/NNEF-Tools",
"score": 2
} |
#### File: nnef_converter/common/nnef_converter.py
```python
from __future__ import division
import collections
import math
import os
import nnef
import numpy as np
import networkx as nx
from .importer_exporter import ImporterExporter
from .nnef_graph import *
from . import nnef_node as node
class NNEFImporter(ImporterExporter):
@staticmethod
def list_to_string(list_input):
return "[" + ','.join(str(e) for e in list_input) + "]"
def __init__(self, input_model):
super(NNEFImporter, self).__init__()
self.graph_name = ""
self.input_model = input_model
self.nxgraph = nx.OrderedDiGraph()
self.node_pool = collections.OrderedDict()
self.with_weights = False
def get_input_nodes(self):
input_node_list = []
if 'input' in self.node_pool.keys():
input_node_list.append(self.node_pool['input'])
else:
i = 1
while 'input' + str(i) in self.node_pool.keys():
input_node_list.append(self.node_pool['input' + str(i)])
i += 1
return input_node_list
def get_output_nodes(self):
output_node_list = []
if 'output' in self.node_pool.keys():
output_node_list.append(self.node_pool['output'])
else:
i = 1
while 'output' + str(i) in self.node_pool.keys():
output_node_list.append(self.node_pool['output' + str(i)])
i += 1
return output_node_list
def get_node_from_pool(self, operation, param):
if isinstance(operation.inputs[param], str):
return self.node_pool[operation.inputs[param]]
else:
return operation.inputs[param]
def remove_node_from_pool(self, node_name):
self.node_pool.pop(node_name, None)
def define_elementwise_binary_output_shape(self, node_x, node_y):
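# For elementwise binary ops: a scalar operand adopts the tensor operand's shape; when both
# operands are tensors, the result takes the shape of the operand with the larger element
# count (a simple broadcast heuristic).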
if isinstance(node_x, Node) and not isinstance(node_y, Node):
return node_x.output_shape[:]
elif not isinstance(node_x, Node) and isinstance(node_y, Node):
return node_y.output_shape[:]
else:
y_size = x_size = 1
for i in node_x.output_shape:
x_size *= i
for i in node_y.output_shape:
y_size *= i
if x_size >= y_size:
output_shape = node_x.output_shape[:]
else:
output_shape = node_y.output_shape[:]
return output_shape
def run(self):
try:
self.with_weights = os.path.isdir(self.input_model)
if not self.with_weights:
print("Importing without weights (specify a directory as input-model to import with weights)")
parser_graph = nnef.load_graph(self.input_model)
nnef.infer_shapes(parser_graph)
except Exception as ex:
raise Exception('failed to open %s: %s' %
(self.input_model, ex))
self.graph_name = parser_graph.name
for operation in parser_graph.operations:
if hasattr(self, "import_" + operation.name):
func = getattr(self, "import_" + operation.name)
returned_node = func(operation, parser_graph.tensors)
self.node_pool[returned_node.name] = returned_node
else:
self.import_UNKNOWN(operation, parser_graph.tensors)
input_nodes = self.get_input_nodes()
output_nodes = self.get_output_nodes()
graph = NNEFGraph(os.path.basename(self.input_model).split('.')[0],
input_nodes,
output_nodes,
node_pool=self.node_pool)
return graph
def import_abs(self, operation, tensors):
node_name = operation.outputs['y']
node_x = self.get_node_from_pool(operation, 'x')
if isinstance(node_x, Node):
output_shape = node_x.output_shape
else:
output_shape = list(np.shape(node_x))
return node.Abs(x=node_x,
_uid=node_name,
_output_shape=output_shape)
def import_add(self, operation, tensors):
node_name = operation.outputs['z']
node_x = self.get_node_from_pool(operation, 'x')
node_y = self.get_node_from_pool(operation, 'y')
output_shape = self.define_elementwise_binary_output_shape(node_x, node_y)
return node.Add(x=node_x,
y=node_y,
_uid=node_name,
_output_shape=output_shape)
def import_add_n(self, operation, tensors):
node_name = operation.outputs['y']
node_x = []
for x_names in operation.inputs['x']:
node_x.append(self.node_pool[x_names])
output_shape = node_x[0].output_shape[:]
return node.AddN(x=node_x,
_uid=node_name,
_output_shape=output_shape)
def import_and(self, operation, tensors):
node_name = operation.outputs['z']
node_x = self.get_node_from_pool(operation, 'x')
node_y = self.get_node_from_pool(operation, 'y')
output_shape = self.define_elementwise_binary_output_shape(node_x, node_y)
return node.And(x=node_x,
y=node_y,
_uid=node_name,
_output_shape=output_shape)
def import_area_downsample(self, operation, tensors):
node_name = operation.outputs['output']
node_input = self.get_node_from_pool(operation, 'input')
factors = operation.attribs['factor']
output_shape = node_input.output_shape[:]
for i in range(len(factors)):
output_shape[i + 2] = int(output_shape[i + 2] / factors[i])
return node.AreaDownsample(input=node_input,
factor=factors,
_uid=node_name,
_output_shape=output_shape)
def import_avg_pool(self, operation, tensors):
node_name = operation.outputs['output']
node_input = self.get_node_from_pool(operation, 'input')
size = operation.attribs['size']
output_shape = node_input.output_shape[:]
if 'border' in operation.attribs:
border = operation.attribs['border']
else:
border = 'constant'
if 'padding' in operation.attribs:
padding = operation.attribs['padding']
else:
padding = []
if 'stride' in operation.attribs and operation.attribs['stride'] != []:
stride = operation.attribs['stride']
else:
stride = [1] * len(output_shape)
if 'dilation' in operation.attribs and operation.attribs['dilation'] != []:
dilation = operation.attribs['dilation']
else:
dilation = [1] * len(output_shape)
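# Output size per dimension follows the usual pooling arithmetic: with explicit padding,
# out = floor((in + pad_before + pad_after - ((size - 1) * dilation + 1)) / stride) + 1;
# with no padding given, out = ceil(in / stride) (same-padding semantics).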
for i in range(len(output_shape)):
if padding == []:
output_shape[i] = math.ceil(output_shape[i] / stride[i])
else:
fd = (size[i] - 1) * dilation[i] + 1
output_shape[i] = math.floor((output_shape[i] + padding[i][0] +
padding[i][1] - fd) / stride[i]) + 1
return node.AvgPool(input=node_input,
size=size,
border=border,
padding=padding,
stride=stride,
dilation=dilation,
_uid=node_name,
_output_shape=output_shape)
def import_batch_normalization(self, operation, tensors):
node_name = operation.outputs['output']
node_input = self.get_node_from_pool(operation, 'input')
node_mean = self.get_node_from_pool(operation, 'mean')
node_variance = self.get_node_from_pool(operation, 'variance')
node_offset = self.get_node_from_pool(operation, 'offset')
node_scale = self.get_node_from_pool(operation, 'scale')
node_epsilon = operation.attribs['epsilon']
output_shape = node_input.output_shape[:]
return node.BatchNormalization(input=node_input,
mean=node_mean,
variance=node_variance,
offset=node_offset,
scale=node_scale,
epsilon=node_epsilon,
_uid=node_name,
_output_shape=output_shape)
def import_ceil(self, operation, tensors):
node_name = operation.outputs['y']
node_x = self.get_node_from_pool(operation, 'x')
if isinstance(node_x, Node):
output_shape = node_x.output_shape
else:
output_shape = list(np.shape(node_x))
return node.Ceil(x=node_x,
_uid=node_name,
_output_shape=output_shape)
def import_concat(self, operation, tensors):
node_name = operation.outputs['value']
values = []
for val in operation.inputs['values']:
values.append(self.node_pool[val])
axis = operation.attribs['axis']
output_shape = values[0].output_shape[:]
for nnef_node in values[1:]:
output_shape[axis] += nnef_node.output_shape[axis]
return node.Concat(values=values,
axis=axis,
_uid=node_name,
_output_shape=output_shape)
def import_constant(self, operation, tensors):
node_name = operation.outputs['output']
shape = operation.attribs['shape']
value = operation.attribs['value']
return node.Constant(shape=shape,
value=value,
_uid=node_name,
_output_shape=shape)
def import_conv(self, operation, tensors):
node_name = operation.outputs['output']
node_input = self.get_node_from_pool(operation, 'input')
node_filter = self.get_node_from_pool(operation, 'filter')
output_shape = node_input.output_shape[:]
if 'bias' in operation.inputs:
bias = self.get_node_from_pool(operation, 'bias')
else:
bias = 0.0
if 'border' in operation.attribs:
border = operation.attribs['border']
else:
border = 'constant'
if 'padding' in operation.attribs:
padding = operation.attribs['padding']
else:
padding = []
if 'stride' in operation.attribs and operation.attribs['stride'] != []:
stride = operation.attribs['stride']
else:
stride = [1] * (len(output_shape) - 2)
if 'dilation' in operation.attribs and operation.attribs['dilation'] != []:
dilation = operation.attribs['dilation']
else:
dilation = [1] * (len(output_shape) - 2)
if 'groups' in operation.attribs:
groups = operation.attribs['groups']
else:
groups = 1
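# The output channel count comes from index 0 of the filter shape; each spatial dimension
# follows out = floor((in + pad_total - ((k - 1) * dilation + 1)) / stride) + 1, or
# ceil(in / stride) when no explicit padding is given.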
conv_filter = node_filter.output_shape
output_shape[1] = conv_filter[0]
for i in range(2, len(output_shape)):
if padding == []:
output_shape[i] = math.ceil(output_shape[i] / stride[i - 2])
else:
fd = (conv_filter[i] - 1) * dilation[i - 2] + 1
pad = padding[i - 2][0] + padding[i - 2][1]
output_shape[i] = math.floor((output_shape[i] + pad - fd) / stride[i - 2]) + 1
return node.Conv(input=node_input,
filter=node_filter,
bias=bias,
border=border,
padding=padding,
stride=stride,
dilation=dilation,
groups=groups,
_uid=node_name,
_output_shape=output_shape)
def import_deconv(self, operation, tensors):
node_name = operation.outputs['output']
node_input = self.get_node_from_pool(operation, 'input')
node_filter = self.get_node_from_pool(operation, 'filter')
output_shape = node_input.output_shape[:]
if 'bias' in operation.inputs:
bias = self.get_node_from_pool(operation, 'bias')
else:
bias = 0.0
if 'border' in operation.attribs:
border = operation.attribs['border']
else:
border = 'constant'
if 'padding' in operation.attribs:
padding = operation.attribs['padding']
else:
padding = []
if 'stride' in operation.attribs and operation.attribs['stride'] != []:
stride = operation.attribs['stride']
else:
stride = [1] * (len(output_shape) - 2)
if 'dilation' in operation.attribs and operation.attribs['dilation'] != []:
dilation = operation.attribs['dilation']
else:
dilation = [1] * (len(output_shape) - 2)
if 'groups' in operation.attribs:
groups = operation.attribs['groups']
else:
groups = 1
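# Deconvolution inverts the convolution arithmetic: the output channel count is taken from
# index 1 of the filter shape, and each spatial dimension follows
# out = (in - 1) * stride + ((k - 1) * dilation + 1) - pad_total.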
conv_filter = node_filter.output_shape
output_shape[1] = conv_filter[1]
for i in range(2, len(output_shape)):
fd = (conv_filter[i] - 1) * dilation[i - 2] + 1
if padding == []:
pad = 0
else:
pad = padding[i - 2][0] + padding[i - 2][1]
output_shape[i] = (output_shape[i] - 1) * stride[i - 2] + fd - pad
return node.Deconv(input=node_input,
filter=node_filter,
bias=bias,
border=border,
padding=padding,
stride=stride,
dilation=dilation,
groups=groups,
_uid=node_name,
_output_shape=output_shape)
def import_div(self, operation, tensors):
node_name = operation.outputs['z']
node_x = self.get_node_from_pool(operation, 'x')
node_y = self.get_node_from_pool(operation, 'y')
output_shape = self.define_elementwise_binary_output_shape(node_x, node_y)
return node.Div(x=node_x,
y=node_y,
_uid=node_name,
_output_shape=output_shape)
def import_elu(self, operation, tensors):
node_name = operation.outputs['y']
node_x = self.get_node_from_pool(operation, 'x')
if isinstance(node_x, Node):
output_shape = node_x.output_shape
else:
output_shape = list(np.shape(node_x))
return node.Elu(x=node_x,
_uid=node_name,
_output_shape=output_shape)
def import_eq(self, operation, tensors):
node_name = operation.outputs['z']
node_x = self.get_node_from_pool(operation, 'x')
node_y = self.get_node_from_pool(operation, 'y')
output_shape = self.define_elementwise_binary_output_shape(node_x, node_y)
return node.EQ(x=node_x,
y=node_y,
_uid=node_name,
_output_shape=output_shape)
def import_exp(self, operation, tensors):
node_name = operation.outputs['y']
node_x = self.get_node_from_pool(operation, 'x')
if isinstance(node_x, Node):
output_shape = node_x.output_shape
else:
output_shape = list(np.shape(node_x))
return node.Exp(x=node_x,
_uid=node_name,
_output_shape=output_shape)
def import_external(self, operation, tensors):
node_name = operation.outputs['output']
nnef_shape = operation.attribs['shape']
return node.External(shape=nnef_shape,
_uid=node_name,
_output_shape=nnef_shape)
def import_floor(self, operation, tensors):
node_name = operation.outputs['y']
node_x = self.get_node_from_pool(operation, 'x')
if isinstance(node_x, Node):
output_shape = node_x.output_shape
else:
output_shape = list(np.shape(node_x))
return node.Floor(x=node_x,
_uid=node_name,
_output_shape=output_shape)
def import_ge(self, operation, tensors):
node_name = operation.outputs['z']
node_x = self.get_node_from_pool(operation, 'x')
node_y = self.get_node_from_pool(operation, 'y')
output_shape = self.define_elementwise_binary_output_shape(node_x, node_y)
return node.GE(x=node_x,
y=node_y,
_uid=node_name,
_output_shape=output_shape)
def import_gt(self, operation, tensors):
node_name = operation.outputs['z']
node_x = self.get_node_from_pool(operation, 'x')
node_y = self.get_node_from_pool(operation, 'y')
output_shape = self.define_elementwise_binary_output_shape(node_x, node_y)
return node.GT(x=node_x,
y=node_y,
_uid=node_name,
_output_shape=output_shape)
def import_le(self, operation, tensors):
node_name = operation.outputs['z']
node_x = self.get_node_from_pool(operation, 'x')
node_y = self.get_node_from_pool(operation, 'y')
output_shape = self.define_elementwise_binary_output_shape(node_x, node_y)
return node.LE(x=node_x,
y=node_y,
_uid=node_name,
_output_shape=output_shape)
def import_linear(self, operation, tensors):
node_name = operation.outputs['output']
node_input = self.get_node_from_pool(operation, 'input')
node_filter = self.get_node_from_pool(operation, 'filter')
node_bias = self.get_node_from_pool(operation, 'bias')
output_shape = [node_input.output_shape[0], node_filter.output_shape[0]]
return node.Linear(input=node_input,
filter=node_filter,
bias=node_bias,
_uid=node_name,
_output_shape=output_shape)
def import_local_response_normalization(self, operation, tensors):
node_name = operation.outputs['output']
node_input = self.get_node_from_pool(operation, 'input')
size = operation.attribs['size']
if 'alpha' in operation.attribs:
alpha = operation.attribs['alpha']
else:
alpha = 1.0
if 'beta' in operation.attribs:
beta = operation.attribs['beta']
else:
beta = 0.5
if 'bias' in operation.attribs:
bias = operation.attribs['bias']
else:
bias = 1.0
output_shape = node_input.output_shape[:]
return node.LocalResponseNormalization(input=node_input,
size=size,
alpha=alpha,
beta=beta,
bias=bias,
_uid=node_name,
_output_shape=output_shape)
def import_log(self, operation, tensors):
node_name = operation.outputs['y']
node_x = self.get_node_from_pool(operation, 'x')
if isinstance(node_x, Node):
output_shape = node_x.output_shape
else:
output_shape = list(np.shape(node_x))
return node.Log(x=node_x,
_uid=node_name,
_output_shape=output_shape)
def import_lt(self, operation, tensors):
node_name = operation.outputs['z']
node_x = self.get_node_from_pool(operation, 'x')
node_y = self.get_node_from_pool(operation, 'y')
output_shape = self.define_elementwise_binary_output_shape(node_x, node_y)
return node.LT(x=node_x,
y=node_y,
_uid=node_name,
_output_shape=output_shape)
def import_l2_normalization(self, operation, tensors):
node_name = operation.outputs['output']
node_input = self.get_node_from_pool(operation, 'input')
axes = operation.attribs['axes']
if 'bias' in operation.attribs:
bias = operation.attribs['bias']
else:
bias = 0.0
output_shape = node_input.output_shape[:]
return node.L2Normalization(input=node_input,
axes=axes,
bias=bias,
_uid=node_name,
_output_shape=output_shape)
def import_matmul(self, operation, tensors):
node_name = operation.outputs['C']
node_A = self.get_node_from_pool(operation, 'A')
node_B = self.get_node_from_pool(operation, 'B')
trA = operation.attribs['transposeA']
trB = operation.attribs['transposeB']
output_shape = []
if trA:
output_shape.append(node_A.output_shape[-1])
else:
output_shape.append(node_A.output_shape[0])
if trB:
output_shape.append(node_B.output_shape[0])
else:
output_shape.append(node_B.output_shape[-1])
return node.Matmul(A=node_A,
B=node_B,
transposeA=trA,
transposeB=trB,
_uid=node_name,
_output_shape=output_shape)
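# Illustrative shape check for the matmul import above (values are assumptions, not
# taken from a real graph): with A of shape [4, 8] and B of shape [8, 3] and both
# transpose flags False, output_shape becomes [4, 3]; with transposeA=True and A of
# shape [8, 4], the leading output dimension is taken from A's last axis instead.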
def import_max(self, operation, tensors):
node_name = operation.outputs['z']
node_x = self.get_node_from_pool(operation, 'x')
node_y = self.get_node_from_pool(operation, 'y')
output_shape = self.define_elementwise_binary_output_shape(node_x, node_y)
return node.Max(x=node_x,
y=node_y,
_uid=node_name,
_output_shape=output_shape)
def import_max_pool(self, operation, tensors):
node_name = operation.outputs['output']
node_input = self.get_node_from_pool(operation, 'input')
size = operation.attribs['size']
output_shape = node_input.output_shape[:]
if 'border' in operation.attribs:
border = operation.attribs['border']
else:
border = 'constant'
if 'padding' in operation.attribs:
padding = operation.attribs['padding']
else:
padding = []
if 'stride' in operation.attribs and operation.attribs['stride'] != []:
stride = operation.attribs['stride']
else:
stride = [1] * len(output_shape)
if 'dilation' in operation.attribs and operation.attribs['dilation'] != []:
dilation = operation.attribs['dilation']
else:
dilation = [1] * len(output_shape)
for i in range(len(output_shape)):
if padding == []:
output_shape[i] = math.ceil(output_shape[i] / stride[i])
else:
fd = (size[i] - 1) * dilation[i] + 1
output_shape[i] = math.floor((output_shape[i] + padding[i][0] +
padding[i][1] - fd) / stride[i]) + 1
return node.MaxPool(input=node_input,
size=size,
border=border,
padding=padding,
stride=stride,
dilation=dilation,
_uid=node_name,
_output_shape=output_shape)
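# Worked example of the pooling arithmetic above (assumed values, for illustration only):
# for an input of shape [1, 1, 32, 32] with size [1, 1, 2, 2], stride [1, 1, 2, 2] and no
# explicit padding, every dimension becomes ceil(dim / stride), giving [1, 1, 16, 16].
# With explicit padding the output is floor((dim + pad_before + pad_after - fd) / stride) + 1,
# where fd = (size - 1) * dilation + 1 is the effective (dilated) window extent.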
def import_max_pool_with_index(self, operation, tensors):
node_name = operation.outputs['output'] + ', ' + operation.outputs['index']
node_input = self.get_node_from_pool(operation, 'input')
size = operation.attribs['size']
output_shape = node_input.output_shape[:]
if 'border' in operation.attribs:
border = operation.attribs['border']
else:
border = 'constant'
if 'padding' in operation.attribs:
padding = operation.attribs['padding']
else:
padding = []
if 'stride' in operation.attribs and operation.attribs['stride'] != []:
stride = operation.attribs['stride']
else:
stride = [1] * len(output_shape)
if 'dilation' in operation.attribs and operation.attribs['dilation'] != []:
dilation = operation.attribs['dilation']
else:
dilation = [1] * len(output_shape)
for i in range(len(output_shape)):
if padding == []:
output_shape[i] = math.ceil(output_shape[i] / stride[i])
else:
fd = (size[i] - 1) * dilation[i] + 1
output_shape[i] = math.floor((output_shape[i] + padding[i][0] +
padding[i][1] - fd) / stride[i]) + 1
base_node = node.MaxPoolWithIndex(input=node_input,
size=size,
border=border,
padding=padding,
stride=stride,
dilation=dilation,
_uid=node_name,
_output_shape=output_shape)
node_maxpool = node.OutputVal(base_node=base_node,
base_index=0,
_uid=operation.outputs['output'],
_output_shape=output_shape)
self.node_pool[node_maxpool.name] = node_maxpool
node_index = node.OutputVal(base_node=base_node,
base_index=1,
_uid=operation.outputs['index'],
_output_shape=output_shape)
self.node_pool[node_index.name] = node_index
return base_node
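# Multi-output NNEF operations (max_pool_with_index above, split further below) are
# represented as a base node plus one OutputVal wrapper per output: each wrapper stores
# its base_index and is registered in the node pool under the original NNEF output name,
# so later operations can reference the individual outputs directly.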
def import_max_reduce(self, operation, tensors):
node_name = operation.outputs['output']
node_input = self.get_node_from_pool(operation, 'input')
axes = operation.attribs['axes']
output_shape = []
for i in range(len(node_input.output_shape)):
if i not in axes:
output_shape.append(node_input.output_shape[i])
return node.MaxReduce(input=node_input,
axes=axes,
_uid=node_name,
_output_shape=output_shape)
def import_mean_reduce(self, operation, tensors):
node_name = operation.outputs['output']
node_input = self.get_node_from_pool(operation, 'input')
axes = operation.attribs['axes']
output_shape = []
for i in range(len(node_input.output_shape)):
if i not in axes:
output_shape.append(node_input.output_shape[i])
return node.MeanReduce(input=node_input,
axes=axes,
_uid=node_name,
_output_shape=output_shape)
def import_min(self, operation, tensors):
node_name = operation.outputs['z']
node_x = self.get_node_from_pool(operation, 'x')
node_y = self.get_node_from_pool(operation, 'y')
output_shape = self.define_elementwise_binary_output_shape(node_x, node_y)
return node.Min(x=node_x,
y=node_y,
_uid=node_name,
_output_shape=output_shape)
def import_mul(self, operation, tensors):
node_name = operation.outputs['z']
node_x = self.get_node_from_pool(operation, 'x')
node_y = self.get_node_from_pool(operation, 'y')
output_shape = self.define_elementwise_binary_output_shape(node_x, node_y)
return node.Mul(x=node_x,
y=node_y,
_uid=node_name,
_output_shape=output_shape)
def import_multilinear_upsample(self, operation, tensors):
node_name = operation.outputs['output']
node_input = self.get_node_from_pool(operation, 'input')
factors = operation.attribs['factor']
if 'method' in operation.attribs:
method = operation.attribs['method']
else:
method = 'symmetric'
if 'border' in operation.attribs:
border = operation.attribs['border']
else:
border = 'replicate'
output_shape = node_input.output_shape[:]
for i in range(len(factors)):
output_shape[i + 2] = int(output_shape[i + 2] * factors[i])
return node.MultilinearUpsample(input=node_input,
factor=factors,
method=method,
border=border,
_uid=node_name,
_output_shape=output_shape)
def import_ne(self, operation, tensors):
node_name = operation.outputs['z']
node_x = self.get_node_from_pool(operation, 'x')
node_y = self.get_node_from_pool(operation, 'y')
output_shape = self.define_elementwise_binary_output_shape(node_x, node_y)
return node.NE(x=node_x,
y=node_y,
_uid=node_name,
_output_shape=output_shape)
def import_nearest_downsample(self, operation, tensors):
node_name = operation.outputs['output']
node_input = self.get_node_from_pool(operation, 'input')
factors = operation.attribs['factor']
output_shape = node_input.output_shape[:]
for i in range(len(factors)):
output_shape[i + 2] = int(output_shape[i + 2] / factors[i])
return node.NearestDownsample(input=node_input,
factor=factors,
_uid=node_name,
_output_shape=output_shape)
def import_nearest_upsample(self, operation, tensors):
node_name = operation.outputs['output']
node_input = self.get_node_from_pool(operation, 'input')
factors = operation.attribs['factor']
output_shape = node_input.output_shape[:]
for i in range(len(factors)):
output_shape[i + 2] = int(output_shape[i + 2] * factors[i])
return node.NearestUpsample(input=node_input,
factor=factors,
_uid=node_name,
_output_shape=output_shape)
def import_neg(self, operation, tensors):
node_name = operation.outputs['y']
node_x = self.get_node_from_pool(operation, 'x')
if isinstance(node_x, Node):
output_shape = node_x.output_shape
else:
output_shape = list(np.shape(node_x))
return node.Neg(x=node_x,
_uid=node_name,
_output_shape=output_shape)
def import_not(self, operation, tensors):
node_name = operation.outputs['y']
node_x = self.get_node_from_pool(operation, 'x')
if isinstance(node_x, Node):
output_shape = node_x.output_shape
else:
output_shape = list(np.shape(node_x))
return node.Not(x=node_x,
_uid=node_name,
_output_shape=output_shape)
def import_or(self, operation, tensors):
node_name = operation.outputs['z']
node_x = self.get_node_from_pool(operation, 'x')
node_y = self.get_node_from_pool(operation, 'y')
output_shape = self.define_elementwise_binary_output_shape(node_x, node_y)
return node.Or(x=node_x,
y=node_y,
_uid=node_name,
_output_shape=output_shape)
def import_planewise_conv(self, operation, tensors):
node_name = operation.outputs['output']
node_input = self.get_node_from_pool(operation, 'input')
node_filter = self.get_node_from_pool(operation, 'filter')
output_shape = node_input.output_shape[:]
if 'bias' in operation.inputs:
bias = self.get_node_from_pool(operation, 'bias')
else:
bias = 0.0
if 'border' in operation.attribs:
border = operation.attribs['border']
else:
border = 'constant'
if 'padding' in operation.attribs:
padding = operation.attribs['padding']
else:
padding = []
if 'stride' in operation.attribs and operation.attribs['stride'] != []:
stride = operation.attribs['stride']
else:
stride = [1] * (len(output_shape) - 2)
if 'dilation' in operation.attribs and operation.attribs['dilation'] != []:
dilation = operation.attribs['dilation']
else:
dilation = [1] * (len(output_shape) - 2)
conv_filter = node_filter.output_shape
output_shape[1] = conv_filter[0]
for i in range(2, len(output_shape)):
if padding == []:
output_shape[i] = math.ceil(output_shape[i] / stride[i - 2])
else:
fd = (conv_filter[i] - 1) * dilation[i - 2] + 1
pad = padding[i - 2][0] + padding[i - 2][1]
output_shape[i] = math.floor((output_shape[i] + pad - fd) / stride[i - 2]) + 1
return node.PlanewiseConv(input=node_input,
filter=node_filter,
bias=bias,
border=border,
padding=padding,
stride=stride,
dilation=dilation,
_uid=node_name,
_output_shape=output_shape)
def import_pow(self, operation, tensors):
node_name = operation.outputs['z']
node_x = self.get_node_from_pool(operation, 'x')
node_y = self.get_node_from_pool(operation, 'y')
output_shape = node_x.output_shape[:]
return node.Pow(x=node_x,
y=node_y,
_uid=node_name,
_output_shape=output_shape)
def import_rcp(self, operation, tensors):
node_name = operation.outputs['y']
node_x = self.get_node_from_pool(operation, 'x')
if isinstance(node_x, Node):
output_shape = node_x.output_shape
else:
output_shape = list(np.shape(node_x))
return node.Rcp(x=node_x,
_uid=node_name,
_output_shape=output_shape)
def import_relu(self, operation, tensors):
node_name = operation.outputs['y']
node_x = self.get_node_from_pool(operation, 'x')
if isinstance(node_x, Node):
output_shape = node_x.output_shape
else:
output_shape = list(np.shape(node_x))
return node.Relu(x=node_x,
_uid=node_name,
_output_shape=output_shape)
def import_reshape(self, operation, tensors):
node_name = operation.outputs['output']
node_input = self.get_node_from_pool(operation, 'input')
shape = operation.attribs['shape']
return node.Reshape(input=node_input,
shape=shape,
_uid=node_name,
_output_shape=shape)
def import_round(self, operation, tensors):
node_name = operation.outputs['y']
node_x = self.get_node_from_pool(operation, 'x')
if isinstance(node_x, Node):
output_shape = node_x.output_shape
else:
output_shape = list(np.shape(node_x))
return node.Round(x=node_x,
_uid=node_name,
_output_shape=output_shape)
def import_rsqrt(self, operation, tensors):
node_name = operation.outputs['y']
node_x = self.get_node_from_pool(operation, 'x')
if isinstance(node_x, Node):
output_shape = node_x.output_shape
else:
output_shape = list(np.shape(node_x))
return node.Rsqrt(x=node_x,
_uid=node_name,
_output_shape=output_shape)
def import_select(self, operation, tensors):
node_name = operation.outputs['output']
node_cond = self.get_node_from_pool(operation, 'condition')
node_true = self.get_node_from_pool(operation, 'true_value')
node_false = self.get_node_from_pool(operation, 'false_value')
output_shape = self.define_elementwise_binary_output_shape(node_true, node_false)
return node.Select(condition=node_cond,
true_value=node_true,
false_value=node_false,
_uid=node_name,
_output_shape=output_shape)
def import_separable_conv(self, operation, tensors):
node_name = operation.outputs['output']
node_input = self.get_node_from_pool(operation, 'input')
node_plane_filter = self.get_node_from_pool(operation, 'plane_filter')
node_point_filter = self.get_node_from_pool(operation, 'point_filter')
output_shape = node_input.output_shape[:]
if 'bias' in operation.inputs:
bias = self.get_node_from_pool(operation, 'bias')
else:
bias = 0.0
if 'border' in operation.attribs:
border = operation.attribs['border']
else:
border = 'constant'
if 'padding' in operation.attribs:
padding = operation.attribs['padding']
else:
padding = []
if 'stride' in operation.attribs and operation.attribs['stride'] != []:
stride = operation.attribs['stride']
else:
stride = [1] * (len(output_shape) - 2)
if 'dilation' in operation.attribs and operation.attribs['dilation'] != []:
dilation = operation.attribs['dilation']
else:
dilation = [1] * (len(output_shape) - 2)
if 'groups' in operation.attribs:
groups = operation.attribs['groups']
else:
groups = 1
conv_filter = node_plane_filter.output_shape
output_shape[1] = conv_filter[0]
for i in range(2, len(output_shape)):
if padding == []:
output_shape[i] = math.ceil(output_shape[i] / stride[i - 2])
else:
fd = (conv_filter[i] - 1) * dilation[i - 2] + 1
pad = padding[i - 2][0] + padding[i - 2][1]
output_shape[i] = math.floor((output_shape[i] + pad - fd) / stride[i - 2]) + 1
conv_filter = node_point_filter.output_shape
output_shape[1] = conv_filter[0]
for i in range(2, len(output_shape)):
if padding == []:
output_shape[i] = math.ceil(output_shape[i] / stride[i - 2])
else:
fd = (conv_filter[i] - 1) * dilation[i - 2] + 1
pad = padding[i - 2][0] + padding[i - 2][1]
output_shape[i] = math.floor((output_shape[i] + pad - fd) / stride[i - 2]) + 1
return node.SeparableConv(input=node_input,
plane_filter=node_plane_filter,
point_filter=node_point_filter,
bias=bias,
border=border,
padding=padding,
stride=stride,
dilation=dilation,
groups=groups,
_uid=node_name,
_output_shape=output_shape)
def import_sigmoid(self, operation, tensors):
node_name = operation.outputs['y']
node_x = self.get_node_from_pool(operation, 'x')
if isinstance(node_x, Node):
output_shape = node_x.output_shape
else:
output_shape = list(np.shape(node_x))
return node.Sigmoid(x=node_x,
_uid=node_name,
_output_shape=output_shape)
def import_sign(self, operation, tensors):
node_name = operation.outputs['y']
node_x = self.get_node_from_pool(operation, 'x')
if isinstance(node_x, Node):
output_shape = node_x.output_shape
else:
output_shape = list(np.shape(node_x))
return node.Sign(x=node_x,
_uid=node_name,
_output_shape=output_shape)
def import_slice(self, operation, tensors):
node_name = operation.outputs['output']
node_input = self.get_node_from_pool(operation, 'input')
axes = operation.attribs['axes']
begin = operation.attribs['begin']
end = operation.attribs['end']
input_shape = node_input.output_shape
output_shape = input_shape[:]
for i, axis in enumerate(axes):
if begin[i] == -1 and end[i] == 0:
output_shape[axis] = 1
elif end[i] == 0:
output_shape[axis] = int(input_shape[axis] - begin[i])
else:
output_shape[axis] = int(end[i] - begin[i])
return node.Slice(input=node_input,
axes=axes,
begin=begin,
end=end,
_uid=node_name,
_output_shape=output_shape)
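# Illustrative example of the begin/end conventions handled above (assumed values): for an
# input of shape [1, 3, 224, 224] with axes=[2, 3], begin=[0, 10], end=[100, 0], axis 2
# becomes end - begin = 100 and axis 3 becomes input - begin = 214, since end == 0 means
# "up to the end"; begin == -1 together with end == 0 selects only the last element.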
def import_softmax(self, operation, tensors):
node_name = operation.outputs['y']
node_x = self.get_node_from_pool(operation, 'x')
if 'axes' in operation.attribs:
axes = operation.attribs['axes']
else:
axes = [1]
output_shape = node_x.output_shape[:]
return node.Softmax(x=node_x,
axes=axes,
_uid=node_name,
_output_shape=output_shape)
def import_softplus(self, operation, tensors):
node_name = operation.outputs['y']
node_x = self.get_node_from_pool(operation, 'x')
if isinstance(node_x, Node):
output_shape = node_x.output_shape
else:
output_shape = list(np.shape(node_x))
return node.Softplus(x=node_x,
_uid=node_name,
_output_shape=output_shape)
# Not in current NNEF Documentation
def import_softsign(self, operation, tensors):
node_name = operation.outputs['y']
node_x = self.get_node_from_pool(operation, 'x')
if isinstance(node_x, Node):
output_shape = node_x.output_shape
else:
output_shape = list(np.shape(node_x))
return node.Softsign(x=node_x,
_uid=node_name,
_output_shape=output_shape)
def import_split(self, operation, tensors):
node_name = '['
for val_name in operation.outputs['values']:
node_name += val_name + ', '
node_name = node_name[:-2] + ']'
node_value = self.get_node_from_pool(operation, 'value')
axis = operation.attribs['axis']
ratios = operation.attribs['ratios']
input_shape = node_value.output_shape[:]
total_ratio = sum(ratios)
mu = int(input_shape[axis] / total_ratio)
base_node = node.Split(value=node_value,
axis=axis,
ratios=ratios,
_uid=node_name,
_output_shape=input_shape)
for i in range(len(operation.outputs['values'])):
output_shape = input_shape[:]
output_shape[axis] = ratios[i] * mu
node_split = node.OutputVal(base_node=base_node,
base_index=i,
_uid=operation.outputs['values'][i],
_output_shape=output_shape)
self.node_pool[node_split.name] = node_split
return base_node
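# Illustrative ratio arithmetic (assumed values): splitting an axis of length 6 with
# ratios=[1, 2] gives total_ratio = 3 and mu = 2, so the two OutputVal wrappers get
# sizes 1 * mu = 2 and 2 * mu = 4 along the split axis.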
def import_sqr(self, operation, tensors):
node_name = operation.outputs['y']
node_x = self.get_node_from_pool(operation, 'x')
if isinstance(node_x, Node):
output_shape = node_x.output_shape
else:
output_shape = list(np.shape(node_x))
return node.Sqr(x=node_x,
_uid=node_name,
_output_shape=output_shape)
def import_sqrt(self, operation, tensors):
node_name = operation.outputs['y']
node_x = self.get_node_from_pool(operation, 'x')
if isinstance(node_x, Node):
output_shape = node_x.output_shape
else:
output_shape = list(np.shape(node_x))
return node.Sqrt(x=node_x,
_uid=node_name,
_output_shape=output_shape)
def import_stack(self, operation, tensors):
node_name = operation.outputs['value']
values = []
for val in operation.inputs['values']:
values.append(self.node_pool[val])
axis = operation.attribs['axis']
output_shape = values[0].output_shape[:]
output_shape.insert(axis, len(values))
return node.Stack(values=values,
axis=axis,
_uid=node_name,
_output_shape=output_shape)
def import_sub(self, operation, tensors):
node_name = operation.outputs['z']
node_x = self.get_node_from_pool(operation, 'x')
node_y = self.get_node_from_pool(operation, 'y')
output_shape = self.define_elementwise_binary_output_shape(node_x, node_y)
return node.Sub(x=node_x,
y=node_y,
_uid=node_name,
_output_shape=output_shape)
def import_sum_reduce(self, operation, tensors):
node_name = operation.outputs['output']
node_input = self.get_node_from_pool(operation, 'input')
axes = operation.attribs['axes']
if 'normalize' in operation.attribs:
normalize = operation.attribs['normalize']
else:
normalize = False
output_shape = []
for i in range(len(node_input.output_shape)):
if i not in axes:
output_shape.append(node_input.output_shape[i])
return node.SumReduce(input=node_input,
axes=axes,
normalize=normalize,
_uid=node_name,
_output_shape=output_shape)
def import_tanh(self, operation, tensors):
node_name = operation.outputs['y']
node_x = self.get_node_from_pool(operation, 'x')
if isinstance(node_x, Node):
output_shape = node_x.output_shape
else:
output_shape = list(np.shape(node_x))
return node.Tanh(x=node_x,
_uid=node_name,
_output_shape=output_shape)
def import_transpose(self, operation, tensors):
node_name = operation.outputs['output']
node_input = self.get_node_from_pool(operation, 'input')
perm = operation.attribs['axes']
output_shape = []
for i in range(len(node_input.output_shape)):
output_shape.append(node_input.output_shape[perm[i]])
return node.Transpose(input=node_input,
axes=perm,
_uid=node_name,
_output_shape=output_shape)
def import_update(self, operation, tensors):
node_name = operation.outputs['result']
node_variable = self.get_node_from_pool(operation, 'variable')
node_value = self.get_node_from_pool(operation, 'value')
return node.Update(variable=node_variable,
value=node_value,
_uid=node_name,
_output_shape=node_value.output_shape[:])
def import_variable(self, operation, tensors):
node_name = operation.outputs['output']
label = operation.attribs['label']
shape = operation.attribs['shape']
if self.with_weights:
tensor = tensors[node_name].data
assert tensor is not None
else:
nnef_dtype = tensors[node_name].dtype
if nnef_dtype == "integer":
tensor = np.random.randint(0, 255, size=shape, dtype=np.int32)
elif nnef_dtype == "logical":
tensor = np.random.rand(*shape).astype(np.float32) > 0.5
else:
assert nnef_dtype == "scalar", "Unknown nnef dtype: {}".format(nnef_dtype)
tensor = np.random.rand(*shape).astype(np.float32)
nnef_node = node.Variable(label=label,
shape=shape,
_np_dtype=tensor.dtype,
_np_tensor=tensor,
_uid=node_name,
_output_shape=shape)
return nnef_node
def import_UNKNOWN(self, operation, tensors):
assert False, "Missing implementation for node op: %s" % operation.name
class NNEFExporter(ImporterExporter):
def __init__(self, output_model):
super(NNEFExporter, self).__init__()
self.output_model = output_model
def run(self, nnef_graph):
self.generate_nnef(nnef_graph)
def generate_nnef(self, nnef_graph):
nnef_graph.save_to_file(self.output_model)
```
#### File: nnef_converter/common/nnef_graph.py
```python
from __future__ import division
import nnef
import networkx as nx
from .nnef_version import NNEFVersion
from .nnef_node import Node
import os
class NNEFGraph(object):
def __init__(self,
model_name=None,
input_list=None,
output_list=None,
pre_compile_callback=None,
post_compile_callback=None,
node_pool = None):
if model_name is None and \
input_list is None and \
output_list is None and \
pre_compile_callback is None and \
post_compile_callback is None and \
node_pool is None:
return
assert isinstance(input_list, list) or isinstance(input_list, Node), "Input list must be a single Node or list of Nodes"
assert isinstance(output_list, list) or isinstance(output_list, Node), "Output list must be a single Node or list of Nodes"
self.network_dir = os.getcwd()
input_list_ = []
output_list_ = []
if isinstance(input_list, Node):
input_list_.append(input_list)
else:
input_list_ = input_list
if isinstance(output_list, Node):
output_list_.append(output_list)
else:
output_list_ = output_list
if model_name == 'graph' or model_name in Node.type_nnef_primitive:
model_name += '_'
self.model_name = model_name
bottom_top_nodes = []
self.nx_graph = nx.OrderedDiGraph()
if node_pool is None:
'''
Add all nodes without connecting edges.
(by walking output list)
'''
def add_node_to_nx_graph(node):
bottom_top_nodes.append(node)
for key, param in node.parameters.items():
if isinstance(param, Node) and param.name not in self.nx_graph:
add_node_to_nx_graph(param)
for output_node in output_list_:
add_node_to_nx_graph(output_node)
for node in reversed(bottom_top_nodes):
type_node = 'node'
if node in input_list_:
type_node = 'input'
if node in output_list_:
type_node = 'output'
self.nx_graph.add_node(node.name, node=node, type_node=type_node)
else:
for _,node in node_pool.items():
type_node = 'node'
if node in input_list_:
type_node = 'input'
if node in output_list_:
type_node = 'output'
self.nx_graph.add_node(node.name, node=node, type_node=type_node)
'''
pre_compile_callback: callback that lets a specific converter interact with the nx_graph,
if required, BEFORE the edges are connected
'''
if pre_compile_callback is not None:
pre_compile_callback(self.nx_graph)
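# Minimal sketch of such a callback (illustrative only; the function name is an assumption):
#
#   def tag_externals_pre_compile(nx_graph):
#       # runs before any edges exist, so only node attributes are available
#       for name, data in nx_graph.nodes(data=True):
#           if data['node'].op == 'external':
#               data['type_node'] = 'input'
#
# post_compile_callback below uses the same single-argument signature.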
'''
"Compile" the nodes: creates edge connections, calls node specific callbacks (if defined)
'''
for node in self.nx_graph:
self.nx_graph.node[node]['node'].compile(self.nx_graph)
'''
post_compile_callback: callback that lets a specific converter interact with the nx_graph,
if required, AFTER the edges are connected
'''
if post_compile_callback is not None:
post_compile_callback(self.nx_graph)
self.trim_nx_graph(input_list_, output_list_)
#nx.write_yaml(self.nx_graph, 'graph.yaml')
'''
run() is to be used only in an environment where TensorFlow is the execution framework
(as opposed to working purely from protobuf definitions)
'''
def run(self, output_tensors, input_data, needs_enable_eager_execution=False):
assert isinstance(self.nx_graph, nx.OrderedDiGraph), "Invalid nx graph"
cwd = os.getcwd()
os.chdir(self.network_dir)
import tensorflow as tf
if needs_enable_eager_execution:
tf.enable_eager_execution()
assert tf.executing_eagerly(), "TF not executing eagerly"
output_results = {}
for node, data in self.nx_graph.nodes(data=True):
node = data['node']
if node.op == 'external':
assert node.name in input_data, "Input data missing for external node"
node.run(external_tensor=input_data[node.name])
elif node.name in output_tensors:
output_results[node.name] = node.run(nx_graph=self.nx_graph)
elif node.op == "variable":
output_results[node.name] = node.run(nx_graph=self.nx_graph)
else:
node.run(nx_graph=self.nx_graph)
os.chdir(cwd)
return output_results
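# Illustrative usage sketch for run() (all names below are assumptions):
#
#   graph = NNEFGraph('my_net', input_list=input_node, output_list=out_node)
#   results = graph.run(output_tensors=[out_node.name],
#                       input_data={input_node.name: np_input},
#                       needs_enable_eager_execution=True)
#
# The returned dict maps each requested output name (plus any 'variable' nodes that were
# evaluated) to the numpy value produced by the corresponding node's run() method.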
def trim_nx_graph(self, input_list, output_list):
ancestors = set()
for node in output_list:
ancestors = ancestors.union(nx.ancestors(self.nx_graph, node.name))
remove_nodes = list(self.nx_graph.nodes())
for ancestor in ancestors:
remove_nodes.remove(ancestor)
for node in output_list:
remove_nodes.remove(node.name)
for node in input_list:
assert node.name not in remove_nodes, "Input node %s is not required for the outputs" % (node.name)
for node in remove_nodes:
self.nx_graph.remove_node(node)
def set_nx_graph(self, nx_graph):
assert isinstance(nx_graph, nx.OrderedDiGraph), "nx_graph isn't a nx.OrderedDiGraph"
self.nx_graph = nx_graph.copy()
def get_nx_graph(self):
return self.nx_graph
def load_from_file(self, load_path):
assert False, "Unimplemented"
def save_to_file(self, save_file):
assert isinstance(save_file, str), "Output model path is required to be of type str."
network_dir, model_filename = os.path.split(save_file)
assert model_filename == "graph.nnef", "NNEF Format requires to write to a file named 'graph.nnef'"
cwd = os.getcwd()
self.network_dir = network_dir
if not os.path.exists(network_dir):
os.makedirs(network_dir)
os.chdir(network_dir)
with open(model_filename, 'w') as f:
f.write("\nversion %s;\n\n" % (NNEFVersion().version))
fragments = set()
inputs = ''
outputs = ''
for node, data in self.nx_graph.nodes(data=True):
if data['node'].op in ['gru']:
fragments.add(data['node'].op)
assert 'type_node' in data, "Node in graph is missing type information"
if data['type_node'] == 'input':
inputs += node + ', '
elif data['type_node'] == 'output':
outputs += node + ', '
inputs = inputs[:-2]
outputs = outputs[:-2]
if fragments:
f.write("extension KHR_enable_fragment_definitions, KHR_enable_operator_expressions;\n\n")
for frag in fragments:
if frag == 'gru':
gru = \
"fragment gru( \
\n\tinput: tensor<scalar>, \
\n\tchannels: integer, \
\n\tscope: string ) \
\n-> ( output: tensor<scalar> ) \
\n{ \
\n\tbatch = shape_of(input)[0]; \
\n\n\th = variable(shape = [batch,channels], label = scope + '/h'); \
\n\n\tm = concat([input, h], axis = 1); \
\n\n\tz = sigmoid(linear_layer(m, channels = channels, scope = scope + '/z')); \
\n\tr = sigmoid(linear_layer(m, channels = channels, scope = scope + '/r')); \
\n\ts = tanh(linear_layer(concat([input, r * h], axis = 1), channels = channels, scope = scope + '/s')); \
\n\n\toutput = update(h, z * s + (1.0 - z) * h); \
\n}\n\n"
f.write(gru)
f.write("graph %s( %s ) -> ( %s )\n" % (self.model_name.replace('/', '_'), inputs, outputs))
f.write("{\n")
for node, data in self.nx_graph.nodes(data=True):
if 'node' in data:
if data['node'].op == 'output_val':
continue
#print((data['node'].nnef_node_definition()))
f.write("\t")
f.write(data['node'].nnef_node_definition())
f.write(";\n")
assert data['node'] is not None, "Node doesn't have NNEF node!"
nnef_node = data['node']
if data['node'].op == 'variable':
#print("=> Node %s is saving tensor to disk(%s)" % (
# nnef_node.name, nnef_node.parameters['label']))
loc = nnef_node.parameters['label'].rfind('/')
if loc != -1:
if not os.path.isdir(nnef_node.parameters['label'][:loc]):
os.makedirs(nnef_node.parameters['label'][:loc])
dat_file = open(nnef_node.parameters['label'] + '.dat', 'wb')
nnef.write_tensor(dat_file, nnef_node.tensor)
dat_file.close()
else:
print("===> %s doesn't have node!?" % (node))
f.write("}")
os.chdir(cwd)
```
#### File: nnef_converter/common/nnef_node.py
```python
from __future__ import division
import numpy as np
class Node(object):
type_name = set([
"tensor",
"extent",
"scalar",
"logical",
"string"
])
type_nnef_primitive = set([
'tensor',
'variable',
'external',
'constant',
'concat',
'split',
'reshape',
'transpose',
'idn',
'add',
'sub',
'mul',
'div',
'pow',
'and',
'or',
'not',
'neg',
'abs',
'sign',
'exp',
'log',
'sqrt',
'rsqrt',
'sqr',
'floor',
'ceil',
'round',
'gt',
'ge',
'lt',
'le',
'eq',
'ne',
'min',
'max',
'select',
'update',
'matmul',
'sum_reduce',
'mean_reduce',
'max_reduce',
'sigmoid',
'tanh',
'elu',
'relu',
'leaky_relu',
'stack',
'shape_of',
'slice',
'softsign',
'softplus',
'softmax',
'conv',
'deconv',
'planewise_conv',
'max_pool',
'max_pool_with_index',
'avg_pool',
'local_response_normalization',
'batch_normalization',
'l2_normalization',
'linear',
'add_n',
'multilinear_upsample',
'nearest_upsample',
'nearest_downsample',
'area_downsample',
'gru',
'pad',
'output_val'])
array_type_spec = set([])
def __init__(self, nnef_primitive, **kwargs):
assert '_uid' in kwargs
assert '/' not in kwargs['_uid'], "Node uid shouldn't contain '/' characters"
assert ':' not in kwargs['_uid'], "Node uid shouldn't contain ':' characters"
self.unique_name = kwargs['_uid']
self.parameters_ = {}
self.output_shape = None
self.tensor = None
self.output_value_ = None
if not hasattr(self, 'defaults'):
self.defaults = {}
if nnef_primitive in self.type_nnef_primitive:
self.nnef_primitive_ = nnef_primitive
else:
print("Creating Node with unsupported primitive: %s"%nnef_primitive)
assert False
for key, value in kwargs.items():
if key not in ['_uid', '_output_shape', '_np_tensor']:
self.set_parameter(key, value)
elif key == '_output_shape':
self.output_shape = value
for key, value in self.defaults.items():
if key not in self.parameters:
self.parameters[key] = value
def list_to_string(self, list_):
return "[" + ','.join(str(e) for e in list_) + "]"
def nnef_node_definition(self):
param_list = ""
for key, val in self.parameters.items():
if key in self.defaults and val == self.defaults[key]:
continue
if key.startswith("_"):
continue
# Reformat as strings
if isinstance(val, Node):
val = val.name
if key == 'label':
val = "'" + val + "'"
elif key == 'shape' or \
key == 'size' or \
key == 'perm' or \
key == 'axis' or \
key == 'axes':
if isinstance(val, list):
val = self.list_to_string(val)
else:
val = str(val)
elif key == 'value':
if isinstance(self.parameters['value'], Node):
val = self.parameters['value'].name
else:
val = str(self.parameters['value'])
elif isinstance(self.parameters[key], list) and isinstance(self.parameters[key][0], Node):
new_val = '['
for value in val:
new_val += value.name + ', '
val = new_val[:-2] + ']'
elif key == 'scope':
val = "'" + val + "'"
if isinstance(val, bool):
val = 'true' if val else 'false'
if self.nnef_primitive_ == 'shape_of':
param_list += '{v}, '.format(v=val)
else:
param_list += '{k} = {v}, '.format(k=key, v=val)
if len(param_list) > 2:
param_list = param_list[:-2]
return '{n} = {self.nnef_primitive_}({params})'.format(self=self, n=self.unique_name.lower(), params=param_list)
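# Example of the textual form produced above (illustrative names): a MaxPool node named
# 'pool1' whose input is a node named 'conv1', with size [1,1,2,2] and every other
# parameter left at its default, would be emitted roughly as
#
#   pool1 = max_pool(input = conv1, size = [1,1,2,2])
#
# parameters equal to their defaults and keys starting with '_' are omitted.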
def get_value(self):
print("Trying to get value of unsupported op: %s"%(self.__class__.__name__))
input()
@property
def nnef_version(self):
return '1.0-provisional'
@property
def keyword(self):
return 'node'
@property
def op(self):
return self.nnef_primitive_
@property
def output_value(self):
return self.output_value_
@property
def name(self):
return self.unique_name
@property
def parameters(self):
return self.parameters_
def set_name(self, name):
self.unique_name = name
def print_parameters(self):
print("DBG: Printing from parameters_ dict")
for key, val in self.parameters_.items():
print(key, ":", val)
def set_parameter(self, key, param):
self.parameters_[key] = param
# This function gets called if no compile implementation is found for nodes
# Nodes with compile() implementations should check for their callback fct...
def compile(self, nx_graph):
if '_compile_callback' in self.parameters:
self.parameters['_compile_callback'](nx_graph, self)
return
def add_edges(self, nx_graph, param_inputs):
for param in param_inputs:
if isinstance(self.parameters[param], Node):
if self.parameters[param].__class__.__name__ == 'Idn':
self.parameters[param] = self.parameters[param].parameters['x']
nx_graph.add_edge(self.parameters[param].name, self.name)
elif isinstance(self.parameters[param], list):
for i in range(len(self.parameters[param])):
if isinstance(self.parameters[param][i], Node):
if self.parameters[param][i].__class__.__name__ == 'Idn':
self.parameters[param][i] = self.parameters[param][i].parameters['x']
nx_graph.add_edge(self.parameters[param][i].name, self.name)
'''
NNEF 1.0 provisional operations:
Framework-specific ('vendor specific') operations are implemented by subclassing Node
in the framework-specific converter file.
'''
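# Minimal sketch of adding a vendor-specific operation by subclassing Node (illustrative
# only; 'my_vendor_op' would also need to be added to type_nnef_primitive for the check
# in Node.__init__ to pass):
#
#   class MyVendorOp(Node):
#       def __init__(self, **kwargs):
#           assert 'x' in kwargs, "NNEF MyVendorOp node is missing 'x' value"
#           super(MyVendorOp, self).__init__('my_vendor_op', **kwargs)
#
#       def compile(self, nx_graph):
#           Node.compile(self, nx_graph)
#           self.add_edges(nx_graph, ['x'])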
class Abs(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value" % (self.__class__.__name__)
super(Abs, self).__init__('abs', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x'])
def run(self, nx_graph):
self.output_value_ = None
class Add(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value"%(self.__class__.__name__)
assert 'y' in kwargs, "NNEF %s node is missing 'y' value"%(self.__class__.__name__)
super(Add, self).__init__('add', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x', 'y'])
def run(self, **kwargs):
import tensorflow as tf
nx_graph = kwargs['nx_graph']
x = tf.constant(nx_graph.node[self.parameters['x'].name]['node'].output_value)
y = tf.constant(nx_graph.node[self.parameters['y'].name]['node'].output_value)
self.output_value_ = tf.add(x, y).numpy()
return self.output_value_
class AddN(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value"%(self.__class__.__name__)
super(AddN, self).__init__('add_n', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x'])
def run(self, nx_graph):
self.output_value_ = None
class And(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value"%(self.__class__.__name__)
assert 'y' in kwargs, "NNEF %s node is missing 'y' value"%(self.__class__.__name__)
super(And, self).__init__('and', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x', 'y'])
def run(self, nx_graph):
self.output_value_ = None
class AreaDownsample(Node):
def __init__(self, **kwargs):
assert 'input' in kwargs, "NNEF %s node is missing 'input' value"%(self.__class__.__name__)
assert 'factor' in kwargs, "NNEF %s node is missing 'factor' value"%(self.__class__.__name__)
super(AreaDownsample, self).__init__('area_downsample', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['input'])
def run(self, nx_graph):
self.output_value_ = None
class AvgPool(Node):
def __init__(self, **kwargs):
assert 'input' in kwargs, "NNEF %s node is missing 'input' value"%(self.__class__.__name__)
assert 'size' in kwargs, "NNEF %s node is missing 'size' value"%(self.__class__.__name__)
self.defaults = {'border': 'constant', 'padding': [],
'stride': [], 'dilation': []}
super(AvgPool, self).__init__('avg_pool', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['input'])
def run(self, nx_graph):
self.output_value_ = None
class BatchNormalization(Node):
def __init__(self, **kwargs):
assert 'input' in kwargs, "NNEF %s node is missing 'input' value"%(self.__class__.__name__)
assert 'mean' in kwargs, "NNEF %s node is missing 'mean' value"%(self.__class__.__name__)
assert 'variance' in kwargs, "NNEF %s node is missing 'variance' value"%(self.__class__.__name__)
assert 'offset' in kwargs, "NNEF %s node is missing 'offset' value"%(self.__class__.__name__)
assert 'scale' in kwargs, "NNEF %s node is missing 'scale' value"%(self.__class__.__name__)
assert 'epsilon' in kwargs, "NNEF %s node is missing 'epsilon' value"%(self.__class__.__name__)
super(BatchNormalization, self).__init__('batch_normalization', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['input', 'mean', 'variance', 'offset', 'scale', 'epsilon'])
def run(self, nx_graph):
self.output_value_ = None
class Ceil(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value" % (self.__class__.__name__)
super(Ceil, self).__init__('ceil', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x'])
def run(self, nx_graph):
self.output_value_ = None
class Concat(Node):
def __init__(self, **kwargs):
assert 'values' in kwargs, "NNEF %s node is missing 'values' value"%(self.__class__.__name__)
assert 'axis' in kwargs, "NNEF %s node is missing 'axis' value"%(self.__class__.__name__)
assert isinstance(kwargs['values'], list), "NNEF %s node, 'values' value is not of 'list' type"%(self.__class__.__name__)
super(Concat, self).__init__('concat', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['values'])
def run(self, **kwargs):
self.output_value_ = None
class Constant(Node):
def __init__(self, **kwargs):
assert 'value' in kwargs, "NNEF %s node is missing 'value' value"%(self.__class__.__name__)
assert 'shape' in kwargs, "NNEF %s node is missing 'shape' value"%(self.__class__.__name__)
assert isinstance(kwargs['shape'], list), "NNEF %s node, 'shape' value is not of 'list' type"%(self.__class__.__name__)
super(Constant, self).__init__('constant', **kwargs)
def run(self, **kwargs):
self.output_value_ = self.parameters['value']
return self.output_value_
def get_value(self):
if len(self.parameters['shape']) == 2 and self.parameters['shape'][0] == 1:
return np.reshape(np.asarray(self.parameters['value']), self.parameters['shape'][1:])
else:
return np.reshape(np.asarray(self.parameters['value']), self.parameters['shape'])
class Conv(Node):
def __init__(self, **kwargs):
assert 'input' in kwargs, "NNEF %s node is missing 'input' value"%(self.__class__.__name__)
assert 'filter' in kwargs, "NNEF %s node is missing 'filter' value"%(self.__class__.__name__)
self.defaults = {'bias': 0.0, 'border': 'constant', 'padding': [],
'stride': [], 'dilation': [], 'groups': 1}
super(Conv, self).__init__('conv', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
if 'bias' in self.parameters and isinstance(self.parameters['bias'], Node):
self.add_edges(nx_graph, ['input', 'filter', 'bias'])
else:
self.add_edges(nx_graph, ['input', 'filter'])
def run(self, nx_graph):
self.output_value_ = None
class Deconv(Node):
def __init__(self, **kwargs):
assert 'input' in kwargs, "NNEF %s node is missing 'input' value"%(self.__class__.__name__)
assert 'filter' in kwargs, "NNEF %s node is missing 'filter' value"%(self.__class__.__name__)
self.defaults = {'bias': 0.0, 'border': 'constant', 'padding': [],
'stride': [], 'dilation': [], 'groups': 1}
super(Deconv, self).__init__('deconv', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
if 'bias' in self.parameters and isinstance(self.parameters['bias'], Node):
self.add_edges(nx_graph, ['input', 'filter', 'bias'])
else:
self.add_edges(nx_graph, ['input', 'filter'])
def run(self, nx_graph):
self.output_value_ = None
class Div(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value"%(self.__class__.__name__)
assert 'y' in kwargs, "NNEF %s node is missing 'y' value"%(self.__class__.__name__)
super(Div, self).__init__('div', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x', 'y'])
def run(self, nx_graph):
self.output_value_ = None
class Elu(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value"%(self.__class__.__name__)
super(Elu, self).__init__('elu', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x'])
def run(self, nx_graph):
self.output_value_ = None
class EQ(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value"%(self.__class__.__name__)
assert 'y' in kwargs, "NNEF %s node is missing 'y' value"%(self.__class__.__name__)
super(EQ, self).__init__('eq', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x', 'y'])
def run(self, nx_graph):
self.output_value_ = None
class Exp(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value" % (self.__class__.__name__)
super(Exp, self).__init__('exp', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x'])
def run(self, nx_graph):
self.output_value_ = None
class External(Node):
def __init__(self, **kwargs):
assert 'shape' in kwargs, "NNEF External node is missing 'shape' value"
assert isinstance(kwargs['shape'], list), "NNEF External node, 'shape' must be a list"
for shape_item in kwargs['shape']:
assert shape_item > 0, "NNEF External node, 'shape' values must be positive"
super(External, self).__init__('external', **kwargs)
def run(self, **kwargs):
self.output_value_ = kwargs['external_tensor']
return self.output_value_
class Floor(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value" % (self.__class__.__name__)
super(Floor, self).__init__('floor', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x'])
def run(self, nx_graph):
self.output_value_ = None
class GE(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value"%(self.__class__.__name__)
assert 'y' in kwargs, "NNEF %s node is missing 'y' value"%(self.__class__.__name__)
super(GE, self).__init__('ge', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x', 'y'])
def run(self, nx_graph):
self.output_value_ = None
class Gru(Node):
def __init__(self, **kwargs):
assert 'input' in kwargs, "NNEF %s node is missing 'input' value"%(self.__class__.__name__)
assert 'channels' in kwargs, "NNEF %s node is missing 'channels' value"%(self.__class__.__name__)
assert 'scope' in kwargs, "NNEF %s node is missing 'scope' value"%(self.__class__.__name__)
super(Gru, self).__init__('gru', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['input'])
def run(self, nx_graph):
self.output_value_ = None
class GT(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value"%(self.__class__.__name__)
assert 'y' in kwargs, "NNEF %s node is missing 'y' value"%(self.__class__.__name__)
super(GT, self).__init__('gt', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x', 'y'])
def run(self, nx_graph):
self.output_value_ = None
class Idn(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value"%(self.__class__.__name__)
super(Idn, self).__init__('idn', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
def run(self, nx_graph):
self.output_value_ = None
class LE(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value"%(self.__class__.__name__)
assert 'y' in kwargs, "NNEF %s node is missing 'y' value"%(self.__class__.__name__)
super(LE, self).__init__('le', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x', 'y'])
def run(self, nx_graph):
self.output_value_ = None
class LeakyRelu(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value"%(self.__class__.__name__)
assert 'alpha' in kwargs, "NNEF %s node is missing 'alpha' value"%(self.__class__.__name__)
super(LeakyRelu, self).__init__('leaky_relu', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x'])
def run(self, nx_graph):
self.output_value_ = None
class Linear(Node):
def __init__(self, **kwargs):
assert 'input' in kwargs, "NNEF %s node is missing 'input' value"%(self.__class__.__name__)
assert 'filter' in kwargs, "NNEF %s node is missing 'filter' value"%(self.__class__.__name__)
self.defaults = {'bias': 0.0}
super(Linear, self).__init__('linear', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
if 'bias' in self.parameters and isinstance(self.parameters['bias'], Node):
self.add_edges(nx_graph, ['input', 'filter', 'bias'])
else:
self.add_edges(nx_graph, ['input', 'filter'])
def run(self, nx_graph):
self.output_value_ = None
class LocalResponseNormalization(Node):
def __init__(self, **kwargs):
assert 'input' in kwargs, "NNEF %s node is missing 'input' value"%(self.__class__.__name__)
assert 'size' in kwargs, "NNEF %s node is missing 'size' value"%(self.__class__.__name__)
self.defaults = {'alpha': 1.0, 'beta': 0.5, 'bias': 1.0}
super(LocalResponseNormalization, self).__init__('local_response_normalization', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['input', 'size', 'alpha', 'beta', 'bias'])
def run(self, nx_graph):
self.output_value_ = None
class Log(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value" % (self.__class__.__name__)
super(Log, self).__init__('log', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x'])
def run(self, nx_graph):
self.output_value_ = None
class LT(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value"%(self.__class__.__name__)
assert 'y' in kwargs, "NNEF %s node is missing 'y' value"%(self.__class__.__name__)
super(LT, self).__init__('lt', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x', 'y'])
def run(self, nx_graph):
self.output_value_ = None
class L2Normalization(Node):
def __init__(self, **kwargs):
assert 'input' in kwargs, "NNEF %s node is missing 'input' value"%(self.__class__.__name__)
assert 'axes' in kwargs, "NNEF %s node is missing 'axes' value"%(self.__class__.__name__)
self.defaults = {'bias': 0.0, 'epsilon': 0.0}
super(L2Normalization, self).__init__('l2_normalization', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
if 'bias' in self.parameters and isinstance(self.parameters['bias'], Node):
self.add_edges(nx_graph, ['input', 'bias'])
else:
self.add_edges(nx_graph, ['input'])
def run(self, nx_graph):
self.output_value_ = None
class Matmul(Node):
def __init__(self, **kwargs):
assert 'A' in kwargs, "NNEF %s node is missing 'A' value"%(self.__class__.__name__)
assert 'B' in kwargs, "NNEF %s node is missing 'B' value"%(self.__class__.__name__)
if 'transposeA' in kwargs:
assert isinstance(kwargs['transposeA'], bool)
if 'transposeB' in kwargs:
assert isinstance(kwargs['transposeB'], bool)
self.defaults = {'transposeA': False, 'transposeB': False}
super(Matmul, self).__init__('matmul', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['A', 'B'])
def run(self, nx_graph):
self.output_value_ = None
class Max(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value"%(self.__class__.__name__)
assert 'y' in kwargs, "NNEF %s node is missing 'y' value"%(self.__class__.__name__)
super(Max, self).__init__('max', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x', 'y'])
def run(self, nx_graph):
self.output_value_ = None
class MaxPool(Node):
def __init__(self, **kwargs):
assert 'input' in kwargs, "NNEF %s node is missing 'input' value"%(self.__class__.__name__)
assert 'size' in kwargs, "NNEF %s node is missing 'size' value"%(self.__class__.__name__)
self.defaults = {'border': 'constant', 'padding': [], 'stride': [], 'dilation': []}
super(MaxPool, self).__init__('max_pool', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['input'])
def run(self, nx_graph):
self.output_value_ = None
class MaxPoolWithIndex(Node):
def __init__(self, **kwargs):
assert 'input' in kwargs, "NNEF %s node is missing 'input' value"%(self.__class__.__name__)
assert 'size' in kwargs, "NNEF %s node is missing 'size' value"%(self.__class__.__name__)
self.defaults = {'border': 'constant', 'padding': [], 'stride': [], 'dilation': []}
super(MaxPoolWithIndex, self).__init__('max_pool_with_index', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['input'])
def run(self, nx_graph):
self.output_value_ = None
class MaxReduce(Node):
def __init__(self, **kwargs):
assert 'input' in kwargs, "NNEF %s node is missing 'input' value" % (self.__class__.__name__)
assert 'axes' in kwargs, "NNEF %s node is missing 'axes' value" % (self.__class__.__name__)
super(MaxReduce, self).__init__('max_reduce', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['input'])
def run(self, nx_graph):
self.output_value_ = None
class MeanReduce(Node):
def __init__(self, **kwargs):
assert 'input' in kwargs, "NNEF %s node is missing 'input' value" % (self.__class__.__name__)
assert 'axes' in kwargs, "NNEF %s node is missing 'axes' value" % (self.__class__.__name__)
super(MeanReduce, self).__init__('mean_reduce', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['input'])
def run(self, nx_graph):
self.output_value_ = None
class Min(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value"%(self.__class__.__name__)
assert 'y' in kwargs, "NNEF %s node is missing 'y' value"%(self.__class__.__name__)
super(Min, self).__init__('min', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x', 'y'])
def run(self, nx_graph):
self.output_value_ = None
class Mul(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value"%(self.__class__.__name__)
assert 'y' in kwargs, "NNEF %s node is missing 'y' value"%(self.__class__.__name__)
super(Mul, self).__init__('mul', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x', 'y'])
def run(self, nx_graph):
self.output_value_ = None
class MultilinearUpsample(Node):
def __init__(self, **kwargs):
assert 'input' in kwargs, "NNEF %s node is missing 'input' value"%(self.__class__.__name__)
assert 'factor' in kwargs, "NNEF %s node is missing 'factor' value"%(self.__class__.__name__)
self.defaults = {'method': 'symmetric', 'border': 'replicate'}
super(MultilinearUpsample, self).__init__('multilinear_upsample', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['input'])
def run(self, nx_graph):
self.output_value_ = None
class NE(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value"%(self.__class__.__name__)
assert 'y' in kwargs, "NNEF %s node is missing 'y' value"%(self.__class__.__name__)
super(NE, self).__init__('ne', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x', 'y'])
def run(self, nx_graph):
self.output_value_ = None
class NearestDownsample(Node):
def __init__(self, **kwargs):
assert 'input' in kwargs, "NNEF %s node is missing 'input' value"%(self.__class__.__name__)
assert 'factor' in kwargs, "NNEF %s node is missing 'factor' value"%(self.__class__.__name__)
super(NearestDownsample, self).__init__('nearest_downsample', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['input'])
def run(self, nx_graph):
self.output_value_ = None
class NearestUpsample(Node):
def __init__(self, **kwargs):
assert 'input' in kwargs, "NNEF %s node is missing 'input' value"%(self.__class__.__name__)
assert 'factor' in kwargs, "NNEF %s node is missing 'factor' value"%(self.__class__.__name__)
super(NearestUpsample, self).__init__('nearest_upsample', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['input'])
def run(self, nx_graph):
self.output_value_ = None
class Neg(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value" % (self.__class__.__name__)
super(Neg, self).__init__('neg', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x'])
def run(self, nx_graph):
self.output_value_ = None
class Not(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value" % (self.__class__.__name__)
super(Not, self).__init__('not', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x'])
def run(self, nx_graph):
self.output_value_ = None
class Or(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value"%(self.__class__.__name__)
assert 'y' in kwargs, "NNEF %s node is missing 'y' value"%(self.__class__.__name__)
super(Or, self).__init__('or', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x', 'y'])
def run(self, nx_graph):
self.output_value_ = None
class OutputVal(Node):
def __init__(self, **kwargs):
super(OutputVal, self).__init__('output_val', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['base_node'])
class Pad(Node):
def __init__(self, **kwargs):
super(Pad, self).__init__('pad', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
class PlanewiseConv(Node):
def __init__(self, **kwargs):
assert 'input' in kwargs, "NNEF %s node is missing 'input' value" % (self.__class__.__name__)
assert 'filter' in kwargs, "NNEF %s node is missing 'filter' value" % (self.__class__.__name__)
self.defaults = {'bias': 0.0, 'border': 'constant', 'padding': [], 'stride': [], 'dilation': []}
super(PlanewiseConv, self).__init__('planewise_conv', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
if 'bias' in self.parameters and isinstance(self.parameters['bias'], Node):
self.add_edges(nx_graph, ['input', 'filter', 'bias'])
else:
self.add_edges(nx_graph, ['input', 'filter'])
def run(self, nx_graph):
self.output_value_ = None
class Pow(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value"%(self.__class__.__name__)
assert 'y' in kwargs, "NNEF %s node is missing 'y' value"%(self.__class__.__name__)
super(Pow, self).__init__('pow', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x', 'y'])
def run(self, nx_graph):
self.output_value_ = None
class Rcp(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value" % (self.__class__.__name__)
super(Rcp, self).__init__('rcp', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x'])
def run(self, nx_graph):
self.output_value_ = None
class Relu(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value"%(self.__class__.__name__)
super(Relu, self).__init__('relu', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x'])
def run(self, nx_graph):
self.output_value_ = None
class Reshape(Node):
def __init__(self, **kwargs):
assert 'input' in kwargs, "NNEF %s node is missing 'input' value"%(self.__class__.__name__)
assert 'shape' in kwargs, "NNEF %s node is missing 'shape' value"%(self.__class__.__name__)
super(Reshape, self).__init__('reshape', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['input'])
def run(self, nx_graph):
self.output_value_ = None
class Round(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value" % (self.__class__.__name__)
super(Round, self).__init__('round', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x'])
def run(self, nx_graph):
self.output_value_ = None
class Rsqrt(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value"%(self.__class__.__name__)
super(Rsqrt, self).__init__('rsqrt', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x'])
def run(self, nx_graph):
self.output_value_ = None
class Select(Node):
def __init__(self, **kwargs):
assert 'condition' in kwargs, "NNEF %s node is missing 'condition' value"%(self.__class__.__name__)
assert 'true_value' in kwargs, "NNEF %s node is missing 'true_value' value"%(self.__class__.__name__)
assert 'false_value' in kwargs, "NNEF %s node is missing 'false_value' value"%(self.__class__.__name__)
super(Select, self).__init__('select', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['condition', 'true_value', 'false_value'])
def run(self, nx_graph):
self.output_value_ = None
class SeparableConv(Node):
def __init__(self, **kwargs):
assert 'input' in kwargs, "NNEF %s node is missing 'input' value"%(self.__class__.__name__)
assert 'plane_filter' in kwargs, "NNEF %s node is missing 'plane_filter' value"%(self.__class__.__name__)
assert 'point_filter' in kwargs, "NNEF %s node is missing 'point_filter' value"%(self.__class__.__name__)
self.defaults = {'bias': 0.0, 'border': 'constant', 'padding': [],
'stride': [], 'dilation': [], 'groups': 1}
super(SeparableConv, self).__init__('separable_conv', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
if 'bias' in self.parameters and isinstance(self.parameters['bias'], Node):
self.add_edges(nx_graph, ['input', 'plane_filter', 'point_filter', 'bias'])
else:
self.add_edges(nx_graph, ['input', 'plane_filter', 'point_filter'])
def run(self, nx_graph):
self.output_value_ = None
class ShapeOf(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value"%(self.__class__.__name__)
super(ShapeOf, self).__init__('shape_of', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x'])
def run(self, nx_graph):
self.output_value_ = None
def get_value(self):
return self.output_shape
class Sigmoid(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value"%(self.__class__.__name__)
super(Sigmoid, self).__init__('sigmoid', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x'])
def run(self, nx_graph):
self.output_value_ = None
class Sign(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value"%(self.__class__.__name__)
super(Sign, self).__init__('sign', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x'])
def run(self, nx_graph):
self.output_value_ = None
class Slice(Node):
def __init__(self, **kwargs):
assert 'input' in kwargs, "NNEF %s node is missing 'input' value"%(self.__class__.__name__)
assert 'axes' in kwargs, "NNEF %s node is missing 'axes' value"%(self.__class__.__name__)
assert 'begin' in kwargs, "NNEF %s node is missing 'begin' value"%(self.__class__.__name__)
assert 'end' in kwargs, "NNEF %s node is missing 'end' value"%(self.__class__.__name__)
super(Slice, self).__init__('slice', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['input'])
def run(self, nx_graph):
self.output_value_ = None
def get_value(self):
    # Slice the input value along the axes listed in 'axes', using the matching
    # 'begin'/'end' entries; all other axes are kept whole. The previous chained
    # [:] indexing only ever sliced axis 0, so per-axis slice objects are used instead.
    init_val = np.asarray(self.parameters['input'].get_value())
    axes = self.parameters['axes']
    if any(axis >= init_val.ndim for axis in axes):
        raise ValueError("Axis goes too high, currently not handled")
    slices = [slice(None)] * init_val.ndim
    for idx, axis in enumerate(axes):
        slices[axis] = slice(int(self.parameters['begin'][idx]),
                             int(self.parameters['end'][idx]))
    return init_val[tuple(slices)]
class Softmax(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value"%(self.__class__.__name__)
self.defaults = {'axes': [1]}
super(Softmax, self).__init__('softmax', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x'])
def run(self, nx_graph):
self.output_value_ = None
class Softplus(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value"%(self.__class__.__name__)
super(Softplus, self).__init__('softplus', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x'])
def run(self, nx_graph):
self.output_value_ = None
class Softsign(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value"%(self.__class__.__name__)
super(Softsign, self).__init__('softsign', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x'])
def run(self, nx_graph):
self.output_value_ = None
class Split(Node):
def __init__(self, **kwargs):
assert 'value' in kwargs, "NNEF %s node is missing 'value' value"%(self.__class__.__name__)
assert 'axis' in kwargs, "NNEF %s node is missing 'axis' value"%(self.__class__.__name__)
assert 'ratios' in kwargs, "NNEF %s node is missing 'ratios' value"%(self.__class__.__name__)
super(Split, self).__init__('split', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['value'])
def run(self, nx_graph):
self.output_value_ = None
class Sqr(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value"%(self.__class__.__name__)
super(Sqr, self).__init__('sqr', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x'])
def run(self, nx_graph):
self.output_value_ = None
class Sqrt(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value"%(self.__class__.__name__)
super(Sqrt, self).__init__('sqrt', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x'])
def run(self, nx_graph):
self.output_value_ = None
class Stack(Node):
def __init__(self, **kwargs):
assert 'values' in kwargs, "NNEF %s node is missing 'values' value"%(self.__class__.__name__)
assert 'axis' in kwargs, "NNEF %s node is missing 'axis' value"%(self.__class__.__name__)
super(Stack, self).__init__('stack', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['values'])
def run(self, nx_graph):
self.output_value_ = None
def get_value(self):
values = []
for i in range(len(self.parameters['values'])):
values.append(self.parameters['values'][i].get_value())
return np.stack(values, self.parameters['axis'])
class Sub(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value"%(self.__class__.__name__)
assert 'y' in kwargs, "NNEF %s node is missing 'y' value"%(self.__class__.__name__)
super(Sub, self).__init__('sub', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x', 'y'])
def run(self, nx_graph):
self.output_value_ = None
def get_value(self):
x_val = self.parameters['x'].get_value()
y_val = self.parameters['y'].get_value()
return np.subtract(x_val, y_val)
class SumReduce(Node):
def __init__(self, **kwargs):
assert 'input' in kwargs, "NNEF %s node is missing 'input' value" % (self.__class__.__name__)
assert 'axes' in kwargs, "NNEF %s node is missing 'axes' value" % (self.__class__.__name__)
self.defaults = {'normalize': False}
super(SumReduce, self).__init__('sum_reduce', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['input'])
def run(self, nx_graph):
self.output_value_ = None
class Tanh(Node):
def __init__(self, **kwargs):
assert 'x' in kwargs, "NNEF %s node is missing 'x' value"%(self.__class__.__name__)
super(Tanh, self).__init__('tanh', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['x'])
def run(self, nx_graph):
self.output_value_ = None
class Transpose(Node):
def __init__(self, **kwargs):
assert 'input' in kwargs, "NNEF %s node is missing 'input' value"%(self.__class__.__name__)
assert 'axes' in kwargs, "NNEF %s node is missing 'axes' value"%(self.__class__.__name__)
super(Transpose, self).__init__('transpose', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['input'])
def run(self, nx_graph):
self.output_value_ = None
class Update(Node):
def __init__(self, **kwargs):
assert 'variable' in kwargs, "NNEF %s node is missing 'variable' value"%(self.__class__.__name__)
assert 'value' in kwargs, "NNEF %s node is missing 'value' value"%(self.__class__.__name__)
super(Update, self).__init__('update', **kwargs)
def compile(self, nx_graph):
Node.compile(self, nx_graph)
self.add_edges(nx_graph, ['variable', 'value'])
def run(self, nx_graph):
self.output_value_ = None
class Variable(Node):
def __init__(self, **kwargs):
assert 'label' in kwargs, "NNEF %s node is missing 'label' value"%(self.__class__.__name__)
assert 'shape' in kwargs, "NNEF %s node is missing 'shape' value"%(self.__class__.__name__)
assert isinstance(kwargs['label'], str), "NNEF %s node, 'label' value is not of 'str' type"%(self.__class__.__name__)
assert isinstance(kwargs['shape'], list), "NNEF %s node, 'shape' value is not of 'list' type"%(self.__class__.__name__)
super(Variable, self).__init__('variable', **kwargs)
self.modified = False
if isinstance(kwargs['_np_tensor'], bytes):
self.tensor = np.frombuffer(kwargs['_np_tensor'], dtype = kwargs['_np_dtype'])
self.tensor = np.reshape(self.tensor, kwargs['shape'])
else:
self.tensor = kwargs['_np_tensor']
self.output_shape = kwargs['shape']
def run(self, **kwargs):
self.output_value_ = self.tensor
return self.output_value_
```
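Each of these node classes follows the same pattern: the constructor validates its keyword arguments, `compile` registers the node and its incoming edges in a `networkx` graph, and `run`/`get_value` are evaluated later against that graph. The sketch below is a minimal, self-contained illustration of that compile/edge-building idea; `ToyNode`, `ToyAdd`, and the `_output_name` parameter are simplified stand-ins invented for the example, not the actual NNEF base classes.

```python
# Minimal sketch of the compile/add_edges pattern used by the node classes above.
# ToyNode and ToyAdd are invented stand-ins, not the real NNEF classes.
import networkx as nx

class ToyNode(object):
    def __init__(self, op, **kwargs):
        self.op = op
        self.parameters = kwargs
        self.name = kwargs.get('_output_name', op)
        self.output_value_ = None

    def compile(self, nx_graph):
        # Register this node; subclasses add incoming edges per parameter.
        nx_graph.add_node(self.name, node=self)

    def add_edges(self, nx_graph, param_names):
        for p in param_names:
            value = self.parameters.get(p)
            if isinstance(value, ToyNode):
                nx_graph.add_edge(value.name, self.name)

class ToyAdd(ToyNode):
    def __init__(self, **kwargs):
        assert 'x' in kwargs and 'y' in kwargs, "ToyAdd needs 'x' and 'y'"
        super(ToyAdd, self).__init__('add', **kwargs)

    def compile(self, nx_graph):
        ToyNode.compile(self, nx_graph)
        self.add_edges(nx_graph, ['x', 'y'])

graph = nx.DiGraph()
x = ToyNode('external', _output_name='x')
y = ToyNode('external', _output_name='y')
z = ToyAdd(x=x, y=y, _output_name='z')
for node in (x, y, z):
    node.compile(graph)
print(sorted(graph.edges()))  # [('x', 'z'), ('y', 'z')]
```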
#### File: nnef_converters/common/dog_to_nnef.py
```python
from __future__ import division, print_function
from collections import OrderedDict
import nnef
from . import dog
from . import utils
from .types import *
from .utils import StringIO
from .nnef_dog_types import NnefGraph
def nnefdog_to_source(nnefdog, file_handle=None, custom_fragments=""):
if file_handle is None:
string_io = StringIO()
try:
# noinspection PyUnresolvedReferences, PyTypeChecker
return _nnefdog_to_source_impl(nnefdog, string_io,
custom_fragments=custom_fragments).getvalue()
finally:
string_io.close()
else:
return _nnefdog_to_source_impl(nnefdog, file_handle,
custom_fragments=custom_fragments)
def _nnefdog_to_source_impl(nnefdog, file_handle, custom_fragments=""):
# type: (NnefGraph, TextIO, str)->TextIO
f = file_handle
fix_str = utils.ensure_not_unicode_in_python2
indent = 4 * " "
print(nnef.format_version((1, 0)), file=f)
extensions = []
print(nnef.format_extensions(extensions), file=f)
print(file=f)
if custom_fragments:
print(custom_fragments, file=f)
print(file=f)
graph_name = fix_str(nnefdog.name)
graph_inputs = [fix_str(name) for name in nnefdog.input_dn_names]
graph_outputs = [fix_str(name) for name in nnefdog.output_dn_names]
print("graph {}({}) -> ({})".format(graph_name, ', '.join(graph_inputs), ', '.join(graph_outputs)), file=f)
print("{", file=f)
for op in nnefdog.ops:
dtype = op.result.dtype if op.name in ["external", "constant", "variable"] else None
invocation = nnef.format_invocation(name=fix_str(op.name),
attribs=_preprocess_args(op.args),
inputs=tuple(),
outputs=_results_to_result_names(op.results.values()),
dtype=dtype)
comments = utils.without_nones([op.extra.get(dog.EXTRA_COMMENT)]
+ [dn.extra.get(dog.EXTRA_COMMENT) for dn in op.get_result_nodes()])
comment = " # {}".format(", ".join(comments)) if comments else ""
print("{}{};{}".format(indent, invocation, comment), file=f)
print("}", file=f)
return file_handle
def _preprocess_args(args):
# type: (OrderedDict[str, Any])->OrderedDict[str, Any]
def transform(arg):
if isinstance(arg, dog.DataNode):
return nnef.Identifier(utils.ensure_not_unicode_in_python2(arg.name))
else:
return utils.ensure_not_unicode_in_python2(arg)
return utils.recursive_transform(args, transform)
def _results_to_result_names(results):
results2 = []
for r in results:
if isinstance(r, (list, tuple)):
results2.append(_results_to_result_names(r))
else:
results2.append(nnef.Identifier(utils.ensure_not_unicode_in_python2(r.name)))
if isinstance(results, tuple):
return tuple(results2)
else:
return results2
```
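To make the emitted format concrete, here is a small, hypothetical example built with the same `nnef` formatting helpers that `dog_to_nnef.py` imports; the graph name, tensor names, and the single `relu` invocation are invented, and the exact rendering is whatever the installed `nnef` package produces.

```python
# Illustrative only: assembles an NNEF document for a made-up one-op graph
# using the same nnef.format_* helpers as _nnefdog_to_source_impl above.
from collections import OrderedDict

import nnef

lines = [nnef.format_version((1, 0)), nnef.format_extensions([]), ""]
lines.append("graph toy_net(input) -> (output)")
lines.append("{")
invocation = nnef.format_invocation(name="relu",
                                    attribs=OrderedDict(x=nnef.Identifier("input")),
                                    inputs=tuple(),
                                    outputs=[nnef.Identifier("output")],
                                    dtype=None)
lines.append("    {};".format(invocation))
lines.append("}")
print("\n".join(lines))
```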
#### File: nnef_tools/conversion/conversion_info.py
```python
from __future__ import division, print_function, absolute_import
import json
import os
import typing
from nnef_tools.conversion.transforms import *
class _Empty(object):
pass
class _CustomEncoder(json.JSONEncoder):
def default(self, o):
d = dict(o.__dict__)
d["__class__"] = o.__class__.__name__
return d
class _CustomDecoder(object):
def __init__(self, class_by_name):
self._class_by_name = class_by_name
def __call__(self, d):
if "__class__" in d:
o = _Empty()
d2 = dict(d)
del d2["__class__"]
o.__dict__.update(d2)
o.__class__ = self._class_by_name[d["__class__"]]
return o
return d
def _json_dump(obj, file_name):
if not os.path.exists(os.path.dirname(file_name)):
os.makedirs(os.path.dirname(file_name))
with open(file_name, 'w') as f:
json.dump(obj, f, indent=4, sort_keys=True, cls=_CustomEncoder)
def _json_load(file_name, classes=None):
if classes is None:
classes = []
with open(file_name, 'r') as f:
return json.load(f, object_hook=_CustomDecoder({class_.__name__: class_ for class_ in classes}))
class TensorInfo(object):
def __init__(self, source_name, target_name, target_shape, target_dtype,
is_input=False, is_output=False, is_variable=False, transforms=None):
if transforms is None:
transforms = []
self.source_name = source_name
self.target_name = target_name
self.is_input = is_input
self.is_output = is_output
self.is_variable = is_variable
self.transforms = transforms # type: typing.List[Transform]
self.target_dtype = target_dtype
self.target_shape = target_shape
def copy(self):
return TensorInfo(source_name=self.source_name,
target_name=self.target_name,
is_input=self.is_input,
is_output=self.is_output,
is_variable=self.is_variable,
transforms=[t.copy() for t in self.transforms],
target_shape=self.target_shape,
target_dtype=self.target_dtype)
class ConversionInfo(object):
def __init__(self, tensors):
self.tensors = tensors # type: typing.List[TensorInfo]
def copy(self):
return ConversionInfo([t.copy() for t in self.tensors])
def dump(conversion_info, file_name):
_json_dump(conversion_info, file_name)
def load(file_name):
return _json_load(file_name, [ConversionInfo, TensorInfo, Transpose, Squeeze, Unsqueeze, Reshape])
def _compose(info1, info2):
# type: (ConversionInfo, ConversionInfo)->ConversionInfo
tensor_info1_by_target_name = {info.target_name: info for info in info1.tensors}
tensor_infos = []
for right in info2.tensors:
if right.source_name in tensor_info1_by_target_name:
left = tensor_info1_by_target_name[right.source_name] # type: TensorInfo
del tensor_info1_by_target_name[right.source_name]
result = right.copy()
result.source_name = left.source_name
result.transforms = left.transforms + right.transforms
tensor_infos.append(result)
return ConversionInfo(tensor_infos)
def compose(info1, info2, *more_infos):
infos = (info1, info2) + more_infos
infos = [info for info in infos if info is not None]
if not infos:
return None
left = infos[0]
for right in infos[1:]:
left = _compose(left, right)
return left
```
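The `_CustomEncoder`/`_CustomDecoder` pair above round-trips info objects through JSON by tagging each object with its class name. The following self-contained sketch shows the same trick with a stand-in `Point` class (invented for the example) and nothing but the standard `json` module.

```python
# Stand-alone sketch of the "__class__"-tagged JSON round-trip used by
# conversion_info above; Point is a made-up example class.
import json

class Point(object):
    def __init__(self, x=0, y=0):
        self.x, self.y = x, y

class TaggedEncoder(json.JSONEncoder):
    def default(self, o):
        # Serialize unknown objects as their __dict__ plus a class tag.
        d = dict(o.__dict__)
        d["__class__"] = o.__class__.__name__
        return d

def tagged_decoder(d):
    # Rebuild tagged dicts into Point instances, leave plain dicts alone.
    if d.get("__class__") == "Point":
        p = Point()
        p.__dict__.update({k: v for k, v in d.items() if k != "__class__"})
        return p
    return d

text = json.dumps(Point(1, 2), cls=TaggedEncoder)
restored = json.loads(text, object_hook=tagged_decoder)
assert (restored.x, restored.y) == (1, 2)
```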
#### File: conversion/tensorflow/nnef_to_tf.py
```python
from __future__ import division, print_function, absolute_import
import copy
import typing
from functools import partial
import numpy as np
from nnef_tools.conversion import converter
from nnef_tools.conversion import transforms
from nnef_tools.conversion.shape_utils import ShapeUtils
from nnef_tools.conversion.tensorflow import nnef_to_tf_trafos
from nnef_tools.core import utils
from nnef_tools.io.nnef.nnef_graph import NNEFGraph, NNEFOperation, NNEFTensor
from nnef_tools.io.nnef.parser_config import NNEFParserConfig
from nnef_tools.io.tensorflow.tf_graph import TFGraph, TFTensor, TFOperation
# NNEF must be parsed with this before calling nnef_to_tf.Converter on it
ParserConfig = NNEFParserConfig(expand=[
"rms_pool",
"softabs",
"log2",
"linear",
"separable_conv",
"separable_deconv",
"l1_normalization",
])
_tf_dtype_by_nnef_dtype = {
"scalar": "float32",
"integer": 'int64',
"logical": 'bool'
}
class Converter(converter.Converter[NNEFTensor, NNEFOperation, NNEFGraph,
TFTensor, TFOperation, TFGraph], ShapeUtils):
FORMAT_NHWC = "NHWC"
FORMAT_NCHW = "NCHW"
FORMAT_SPATIAL = "SPATIAL"
def __init__(self,
prefer_nhwc=True,
enable_default_conversion=False,
enable_imprecise_image_resize=False,
enable_imprecise_padding_border=False,
custom_converter_by_op_name=None):
default_op_converter = convert_default if enable_default_conversion else None
converters = {}
converters.update(_DefaultConverters)
if custom_converter_by_op_name is not None:
converters.update(custom_converter_by_op_name)
super(Converter, self).__init__(op_converter_by_name=converters, default_op_converter=default_op_converter)
self._preferred_format = self.FORMAT_NHWC if prefer_nhwc else self.FORMAT_NCHW
self._enable_imprecise_image_resize = enable_imprecise_image_resize
self._enable_imprecise_padding_border = enable_imprecise_padding_border
@property
def preferred_format(self):
return self._preferred_format
@property
def prefer_nhwc(self):
return self._preferred_format == self.FORMAT_NHWC
def create_graph(self, source_graph):
# type: (NNEFGraph)->TFGraph
return TFGraph(source_graph.name)
def convert_tensors(self, source_graph, target_graph):
# type: (NNEFGraph, TFGraph)->None
super(Converter, self).convert_tensors(source_graph, target_graph)
if any(tensor.quantization is not None for tensor in target_graph.tensors):
for tensor in target_graph.tensors:
if tensor.dtype not in ["int32", "uint8"]:
tensor.dtype = "uint8"
def convert_tensor(self, source_tensor, target_graph):
# type: (NNEFTensor, TFGraph)->TFTensor
quantization = None
dtype = self.tf_dtype(source_tensor.dtype)
if source_tensor.quantization:
assert source_tensor.quantization.name == 'tflite_quantize'
quantization = TFTensor.Quantization(min=source_tensor.quantization.attribs['min'],
max=source_tensor.quantization.attribs['max'],
scale=source_tensor.quantization.attribs['scale'],
zero_point=source_tensor.quantization.attribs['zero_point'])
dtype = "uint8" if source_tensor.quantization.attribs['bits'] == 8 else "int32"
return TFTensor(graph=target_graph,
label=source_tensor.label, # can be None
shape=list(source_tensor.shape),
dtype=dtype,
data=copy.copy(source_tensor.data),
quantization=quantization)
def convert_graph(self, source_graph):
# type: (NNEFGraph)->TFGraph
nnef_to_tf_trafos.pre_conversion_transform(source_graph)
target_graph = super(Converter, self).convert_graph(source_graph) # type: TFGraph
target_graph.generate_missing_names()
return target_graph
@property
def enable_imprecise_image_resize(self):
return self._enable_imprecise_image_resize
@property
def enable_imprecise_padding_border(self):
return self._enable_imprecise_padding_border
@staticmethod
def tf_dtype(nnef_dtype):
return _tf_dtype_by_nnef_dtype[nnef_dtype]
@staticmethod
def create_constant_tf_tensor(tf_graph, data, dtype="float32"):
if dtype.startswith("float"):
data = float(data)
return TFTensor(graph=tf_graph, shape=[], dtype=dtype, data=[data])
@staticmethod
def convert_format(shapelike, in_format, out_format, default_elem):
if in_format == out_format:
return list(shapelike)
if in_format == Converter.FORMAT_SPATIAL:
if out_format == Converter.FORMAT_NCHW:
return [default_elem, default_elem] + shapelike
elif out_format == Converter.FORMAT_NHWC:
return [default_elem] + shapelike + [default_elem]
else:
assert False
elif in_format == Converter.FORMAT_NCHW:
if out_format == Converter.FORMAT_SPATIAL:
return shapelike[2:]
elif out_format == Converter.FORMAT_NHWC:
return [shapelike[0]] + shapelike[2:] + [shapelike[1]]
else:
assert False
elif in_format == Converter.FORMAT_NHWC:
if out_format == Converter.FORMAT_SPATIAL:
return shapelike[1:-1]
elif out_format == Converter.FORMAT_NCHW:
return [shapelike[0], shapelike[-1]] + shapelike[1:-1]
else:
assert False
else:
assert False
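# Illustrative examples (not in the original file) of convert_format, which the
# converters below use to expand spatial attributes to a full layout:
#   convert_format([3, 3], FORMAT_SPATIAL, FORMAT_NCHW, default_elem=1) -> [1, 1, 3, 3]
#   convert_format([3, 3], FORMAT_SPATIAL, FORMAT_NHWC, default_elem=1) -> [1, 3, 3, 1]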
@staticmethod
def tf_data_format(data_format, rank):
if rank == 3:
return "NWC" if data_format == Converter.FORMAT_NHWC else "NCW"
elif rank == 4:
return "NHWC" if data_format == Converter.FORMAT_NHWC else "NCHW"
elif rank == 5:
return "NDHWC" if data_format == Converter.FORMAT_NHWC else "NCDHW"
else:
print("Warning: data format called for rank not in [3, 4, 5]")
return "NHWC" if data_format == Converter.FORMAT_NHWC else "NCHW"
@staticmethod
def tf_border_mode(nnef_border_mode, enable_imprecise_padding_border=False):
to_tf = {
'ignore': None,
'constant': 'CONSTANT',
'replicate': None,
'reflect': 'REFLECT',
'reflect-even': 'SYMMETRIC'
}
if to_tf[nnef_border_mode] is None:
if enable_imprecise_padding_border:
print("Warning: Border mode {} is not supported in TensorFlow, "
"using CONSTANT because enable_imprecise_padding_border was True.".format(nnef_border_mode))
else:
assert False, \
"Error: Border mode {} is not supported in TensorFlow, " \
"use enable_imprecise_padding_border to suppress this error.".format(nnef_border_mode)
return 'CONSTANT'
else:
return to_tf[nnef_border_mode]
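# Illustrative mapping (not in the original file): tf_border_mode('reflect')
# returns 'REFLECT' and tf_border_mode('constant') returns 'CONSTANT', while
# 'ignore' and 'replicate' either raise (default) or fall back to 'CONSTANT'
# with a warning when enable_imprecise_padding_border is True.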
@staticmethod
def nnef_reshaped_shape(input_shape, reshape_shape):
for i in range(len(reshape_shape)):
assert reshape_shape[i] != 0 or i < len(input_shape), "Invalid input_shape and reshape_shape combination"
reshape_shape = [input_shape[i] if reshape_shape[i] == 0 else reshape_shape[i] for i in
range(len(reshape_shape))]
if -1 in reshape_shape:
idx = reshape_shape.index(-1)
reshape_shape2 = list(reshape_shape)
reshape_shape2[idx] = 1
rem = int(np.prod(input_shape)) % int(np.prod(reshape_shape2))
assert rem == 0, "Invalid input_shape and reshape_shape combination"
div = int(int(np.prod(input_shape)) / int(np.prod(reshape_shape2)))
reshape_shape2[idx] = div
return reshape_shape2
return reshape_shape
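# Worked example (not in the original file): with input_shape=[2, 3, 4] and
# reshape_shape=[0, -1], the 0 keeps the leading dimension and the -1 absorbs
# the remaining 3*4 elements, so nnef_reshaped_shape returns [2, 12].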
@staticmethod
def nnef_flatten_shape(input_shape):
return [input_shape[0], int(np.prod(input_shape[1:]))]
@staticmethod
def has_gt_1(data):
return utils.has_gt_1(data)
@staticmethod
def add_nchw_to_nhwc_transpose(tf_graph, tf_tensor, nhwc_tensor=None):
assert isinstance(tf_graph, TFGraph)
assert isinstance(tf_tensor, TFTensor)
assert nhwc_tensor is None or isinstance(nhwc_tensor, TFTensor)
if nhwc_tensor is None:
nhwc_tensor = TFTensor(graph=tf_graph,
shape=Converter.shape_nchw_to_nhwc(tf_tensor.shape),
dtype=tf_tensor.dtype)
TFOperation(graph=tf_graph,
name="tf.transpose",
inputs=tf_tensor,
attribs=dict(perm=Converter.transpose_axes_nchw_to_nhwc(tf_tensor.rank)),
outputs=nhwc_tensor)
return nhwc_tensor
@staticmethod
def add_squeeze(tf_graph, tf_tensor, axes):
# type: (TFGraph, TFTensor, typing.List[int])->TFTensor
assert isinstance(tf_graph, TFGraph)
assert isinstance(tf_tensor, TFTensor)
return TFOperation(graph=tf_graph,
name="tf.squeeze",
inputs=tf_tensor,
attribs=dict(axis=axes),
outputs=TFTensor(graph=tf_graph,
shape=transforms.squeezed_shape(tf_tensor.shape, axes),
dtype=tf_tensor.dtype)).output
@staticmethod
def add_nchw_to_hwcn_transpose(tf_graph, tf_tensor, hwcn_tensor=None):
assert isinstance(tf_graph, TFGraph)
assert isinstance(tf_tensor, TFTensor)
assert hwcn_tensor is None or isinstance(hwcn_tensor, TFTensor)
if hwcn_tensor is None:
hwcn_tensor = TFTensor(graph=tf_graph,
shape=Converter.shape_nchw_to_hwcn(tf_tensor.shape),
dtype=tf_tensor.dtype)
TFOperation(graph=tf_graph,
name="tf.transpose",
inputs=tf_tensor,
attribs=dict(perm=Converter.transpose_axes_nchw_to_hwcn(tf_tensor.rank)),
outputs=hwcn_tensor)
return hwcn_tensor
@staticmethod
def add_hwcn_to_hwcm_reshape(tf_graph, tf_tensor, hwcm_tensor=None, in_channels=None):
assert isinstance(tf_graph, TFGraph)
assert isinstance(tf_tensor, TFTensor)
assert hwcm_tensor is None or isinstance(hwcm_tensor, TFTensor)
assert hwcm_tensor is not None or in_channels is not None
if hwcm_tensor is not None:
new_shape = hwcm_tensor.shape
else:
in_channels = int(in_channels)
assert in_channels % tf_tensor.shape[-2] == 0
groups = int(in_channels // tf_tensor.shape[-2])
assert tf_tensor.shape[-1] % groups == 0
new_shape = tf_tensor.shape[:-2] + [in_channels, int(tf_tensor.shape[-1] // groups)]
hwcm_tensor = TFTensor(graph=tf_graph,
shape=list(new_shape),
dtype=tf_tensor.dtype)
return TFOperation(graph=tf_graph,
name="tf.reshape",
inputs=tf_tensor,
attribs=dict(shape=list(new_shape)),
outputs=hwcm_tensor).output
@staticmethod
def create_nhwc_intermediate_for_nchw_output(tf_graph, tf_tensor):
assert isinstance(tf_graph, TFGraph)
assert isinstance(tf_tensor, TFTensor)
return TFTensor(graph=tf_graph,
shape=Converter.shape_nchw_to_nhwc(tf_tensor.shape),
dtype=tf_tensor.dtype,
data=tf_tensor.data)
@staticmethod
def add_nhwc_to_nchw_transpose(tf_graph, tf_tensor, nchw_tensor=None):
assert isinstance(tf_graph, TFGraph)
assert isinstance(tf_tensor, TFTensor)
assert nchw_tensor is None or isinstance(nchw_tensor, TFTensor)
if nchw_tensor is None:
nchw_tensor = TFTensor(graph=tf_graph,
shape=Converter.shape_nhwc_to_nchw(tf_tensor.shape),
dtype=tf_tensor.dtype)
return TFOperation(graph=tf_graph,
name="tf.transpose",
inputs=tf_tensor,
attribs=dict(perm=Converter.transpose_axes_nhwc_to_nchw(tf_tensor.rank)),
outputs=nchw_tensor).output
@staticmethod
def create_hwcn_intermediate_for_nchw_output(tf_graph, tf_tensor):
assert isinstance(tf_graph, TFGraph)
assert isinstance(tf_tensor, TFTensor)
return TFTensor(graph=tf_graph,
shape=Converter.shape_nchw_to_hwcn(tf_tensor.shape),
dtype=tf_tensor.dtype,
data=tf_tensor.data)
@staticmethod
def add_hwcn_to_nchw_transpose(tf_graph, tf_tensor, nchw_tensor=None):
assert isinstance(tf_graph, TFGraph)
assert isinstance(tf_tensor, TFTensor)
assert nchw_tensor is None or isinstance(nchw_tensor, TFTensor)
if nchw_tensor is None:
nchw_tensor = TFTensor(graph=tf_graph,
shape=Converter.shape_hwcn_to_nchw(tf_tensor.shape),
dtype=tf_tensor.dtype)
return TFOperation(graph=tf_graph,
name="tf.transpose",
inputs=tf_tensor,
attribs=dict(perm=Converter.transpose_axes_hwcn_to_nchw(tf_tensor.rank)),
outputs=nchw_tensor).output
@staticmethod
def create_hwcm_intermediate_for_nchw_output(tf_graph, tf_tensor, input_channels):
assert isinstance(tf_graph, TFGraph)
assert isinstance(tf_tensor, TFTensor)
return TFTensor(graph=tf_graph,
shape=Converter.shape_nchw_to_hwcm(tf_tensor.shape, input_channels),
dtype=tf_tensor.dtype,
data=tf_tensor.data)
@staticmethod
def add_hwcm_to_hwcn_reshape(tf_graph, tf_tensor):
assert isinstance(tf_graph, TFGraph)
assert isinstance(tf_tensor, TFTensor)
new_shape = tf_tensor.shape[:-2] + [1, tf_tensor.shape[-1] * tf_tensor.shape[-2]]
return TFOperation(graph=tf_graph,
name="tf.reshape",
inputs=tf_tensor,
attribs=dict(shape=list(new_shape)),
outputs=TFTensor(graph=tf_graph,
shape=list(new_shape),
dtype=tf_tensor.dtype)).output
@staticmethod
def add_unsqueeze(tf_graph, tf_tensor, axes):
assert isinstance(tf_graph, TFGraph)
assert isinstance(tf_tensor, TFTensor)
for axis in sorted(axes):
tf_tensor = TFOperation(graph=tf_graph,
name="tf.expand_dims",
inputs=tf_tensor,
attribs=dict(axis=axis),
outputs=TFTensor(graph=tf_graph,
shape=transforms.unsqueezed_shape(tf_tensor.shape, [axis]),
dtype=tf_tensor.dtype)).output
return tf_tensor
@staticmethod
def tf_zero_value(tf_dtype):
if "float" in tf_dtype:
return 0.0
elif "int" in tf_dtype:
return 0
elif "bool" in tf_dtype:
return False
assert False, "Unsupported TF dtype: {}".format(tf_dtype)
@staticmethod
def tf_addition_op(tf_dtype):
if "float" in tf_dtype:
return "tf.add"
elif "int" in tf_dtype:
return "tf.add"
elif "bool" in tf_dtype:
return "tf.logical_or"
assert False, "Unsupported TF dtype: {}".format(tf_dtype)
def convert_default(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
print("Warning: Converter of {} is not implemented, doing default conversion.".format(nnef_op.name))
TFOperation(graph=tf_graph,
name="nnef_" + nnef_op.name,
inputs=converter.converted_tensors(nnef_op.inputs),
attribs=utils.recursive_copy(nnef_op.attribs),
outputs=converter.converted_tensors(nnef_op.outputs))
def convert_update(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
var, val = converter.converted_tensors(nnef_op.inputs)
output = converter.converted_tensor(nnef_op.output)
TFOperation(graph=tf_graph, name="tf.assign", inputs=(var, val), outputs=output)
def convert_reshape(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
input, output = converter.converted_tensors((nnef_op.input, nnef_op.output))
new_shape = converter.nnef_reshaped_shape(nnef_op.input.shape, nnef_op.attribs["shape"])
if nnef_op.input.rank >= 2 and new_shape == converter.nnef_flatten_shape(nnef_op.input.shape):
TFOperation(graph=tf_graph, name="tf.layers.flatten", inputs=input, outputs=output)
else:
TFOperation(graph=tf_graph, name="tf.reshape", inputs=input, attribs=dict(shape=new_shape), outputs=output)
def convert_squeeze(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
input, output = converter.converted_tensors((nnef_op.input, nnef_op.output))
if nnef_op.attribs["axes"]:
TFOperation(graph=tf_graph,
name="tf.squeeze",
inputs=input,
attribs=dict(axis=list(nnef_op.attribs["axes"])),
outputs=output)
else: # axes=[] is special in tf
TFOperation(graph=tf_graph,
name="tf.identity",
inputs=input,
outputs=output)
def convert_unsqueeze(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
if len(nnef_op.attribs["axes"]) == 1:
partial_convert_unsqueeze_to_expand_dims(converter, nnef_op, tf_graph)
else:
partial_convert_unsqueeze_to_reshape(converter, nnef_op, tf_graph)
def partial_convert_unsqueeze_to_reshape(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
input, output = converter.converted_tensors((nnef_op.input, nnef_op.output))
TFOperation(graph=tf_graph,
name="tf.reshape",
inputs=input,
attribs=dict(shape=transforms.unsqueezed_shape(input.shape, nnef_op.attribs["axes"])),
outputs=output)
def partial_convert_unsqueeze_to_expand_dims(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
assert len(nnef_op.attribs["axes"]) == 1
input, output = converter.converted_tensors((nnef_op.input, nnef_op.output))
TFOperation(graph=tf_graph,
name="tf.expand_dims",
inputs=input,
attribs=dict(axis=nnef_op.attribs["axes"][0]),
outputs=output)
def convert_transpose(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
input, output = converter.converted_tensors((nnef_op.input, nnef_op.output))
axes = nnef_op.attribs["axes"]
if len(axes) < nnef_op.input.rank:
axes = axes + list(range(len(axes), nnef_op.input.rank))
TFOperation(graph=tf_graph,
name="tf.transpose",
inputs=input,
attribs=dict(perm=axes),
outputs=output)
def convert_concat(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
inputs = list(converter.converted_tensors(nnef_op.inputs))
output = converter.converted_tensor(nnef_op.output)
TFOperation(graph=tf_graph,
name="tf.concat",
inputs=inputs,
attribs=dict(axis=nnef_op.attribs["axis"]),
outputs=output)
def convert_split(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
input = converter.converted_tensor(nnef_op.input)
outputs = list(converter.converted_tensors(nnef_op.outputs))
nnef_axis = nnef_op.attribs["axis"]
nnef_ratios = nnef_op.attribs["ratios"]
if input.shape[nnef_axis] == 0:
if len(np.unique(nnef_ratios)) == 1:
tf_num_or_size_splits = len(nnef_ratios)
else:
assert False, "Split with different ratios on an axis ({}) with unspecified size.".format(nnef_axis)
else:
part_size = input.shape[nnef_axis] // int(np.sum(nnef_ratios))
tf_num_or_size_splits = [ratio * part_size for ratio in nnef_ratios]
TFOperation(graph=tf_graph,
name="tf.split",
inputs=input,
attribs=dict(num_or_size_splits=tf_num_or_size_splits,
axis=nnef_axis),
outputs=outputs)
def convert_bias_add(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
x, y = converter.converted_tensors(nnef_op.inputs)
output = converter.converted_tensor(nnef_op.output)
tf_op = TFOperation(graph=tf_graph,
name="tf.nn.bias_add",
inputs=(converter.add_nchw_to_nhwc_transpose(tf_graph, x) if converter.prefer_nhwc else x,
converter.add_squeeze(tf_graph, y, axes=[0])),
attribs=dict(data_format=converter.tf_data_format(converter.preferred_format, x.rank)),
outputs=(converter.create_nhwc_intermediate_for_nchw_output(tf_graph, output)
if converter.prefer_nhwc else output))
if converter.prefer_nhwc:
converter.add_nhwc_to_nchw_transpose(tf_graph, tf_op.output, nchw_tensor=output)
def generic_convert_binary(converter, nnef_op, tf_graph, target_name):
# type: (Converter, NNEFOperation, TFGraph, str)->None
x, y = converter.converted_tensors(nnef_op.inputs)
output = converter.converted_tensor(nnef_op.output)
TFOperation(graph=tf_graph,
name=target_name,
inputs=((converter.add_unsqueeze(tf_graph, x, list(range(y.rank - x.rank, y.rank)))
if 0 < x.rank < y.rank else x),
(converter.add_unsqueeze(tf_graph, y, list(range(x.rank - y.rank, x.rank)))
if 0 < y.rank < x.rank else y)),
outputs=output)
def generic_convert_unary(converter, nnef_op, tf_graph, target_name):
# type: (Converter, NNEFOperation, TFGraph, str)->None
TFOperation(graph=tf_graph,
name=target_name,
inputs=converter.converted_tensor(nnef_op.input),
outputs=converter.converted_tensor(nnef_op.output))
def convert_rsqr(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
input, output = converter.converted_tensors((nnef_op.input, nnef_op.output))
TFOperation(graph=tf_graph,
name="tf.pow",
inputs=(input, converter.create_constant_tf_tensor(tf_graph, -2.0)),
outputs=output)
def convert_select(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
# TensorFlow does not support broadcast in tf.where
condition, x, y = converter.converted_tensors(nnef_op.inputs)
output = converter.converted_tensor(nnef_op.output)
def fixed(tensor):
if 0 < tensor.rank < output.rank:
tensor = converter.add_unsqueeze(tf_graph, tensor, list(range(output.rank - tensor.rank, output.rank)))
if tensor.shape != output.shape:
tensor = TFOperation(graph=tf_graph,
name=converter.tf_addition_op(tensor.dtype),
inputs=(tensor, TFTensor(graph=tf_graph,
shape=list(output.shape),
dtype=tensor.dtype,
data=[converter.tf_zero_value(tensor.dtype)])),
outputs=TFTensor(graph=tf_graph,
shape=list(output.shape),
dtype=tensor.dtype)).output
return tensor
TFOperation(graph=tf_graph,
name="tf.where",
inputs=(fixed(condition), fixed(x), fixed(y)),
outputs=output)
def convert_clamp(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
x, a, b = converter.converted_tensors(nnef_op.inputs)
output = converter.converted_tensor(nnef_op.output)
TFOperation(graph=tf_graph,
name="tf.clip_by_value",
inputs=(x,
(converter.add_unsqueeze(tf_graph, a, list(range(x.rank - a.rank, x.rank)))
if 0 < a.rank < x.rank else a),
(converter.add_unsqueeze(tf_graph, b, list(range(x.rank - b.rank, x.rank)))
if 0 < b.rank < x.rank else b)),
outputs=output)
def convert_matmul(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
a, b = converter.converted_tensors(nnef_op.inputs)
output = converter.converted_tensor(nnef_op.output)
TFOperation(graph=tf_graph,
name="tf.matmul",
inputs=(a, b),
attribs=dict(transpose_a=nnef_op.attribs["transposeA"], transpose_b=nnef_op.attribs["transposeB"]),
outputs=output)
def convert_conv(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
groups = nnef_op.attribs["groups"]
input = nnef_op.inputs[0]
d = input.rank - 2
if groups == 1:
if d in [2, 3]:
partial_convert_conv_to_conv2d_or_conv3d(converter, nnef_op, tf_graph)
else:
partial_convert_conv_to_convolution(converter, nnef_op, tf_graph)
else:
if groups in [0, input.shape[1]] and d == 2:
partial_convert_conv_to_depthwise_conv2d(converter, nnef_op, tf_graph)
else:
assert False, "Grouped convolutions are only supported if they can be converted to tf.nn.depthwise_conv2d."
def partial_convert_conv_to_conv2d_or_conv3d(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
input, filter = converter.converted_tensors(nnef_op.inputs)
output = converter.converted_tensor(nnef_op.output)
d = input.rank - 2
assert d in [2, 3]
tf_op = TFOperation(
graph=tf_graph,
name=("tf.nn.conv2d" if d == 2 else "tf.nn.conv3d"),
inputs=(converter.add_nchw_to_nhwc_transpose(tf_graph, input) if converter.prefer_nhwc else input,
converter.add_nchw_to_hwcn_transpose(tf_graph, filter)),
attribs=dict(
data_format=converter.tf_data_format(converter.preferred_format, input.rank),
strides=converter.convert_format(
nnef_op.attribs["stride"] if nnef_op.attribs["stride"] else [1] * d,
in_format=converter.FORMAT_SPATIAL,
out_format=converter.preferred_format,
default_elem=1),
dilations=converter.convert_format(
nnef_op.attribs["dilation"] if nnef_op.attribs["dilation"] else [1] * d,
in_format=converter.FORMAT_SPATIAL,
out_format=converter.preferred_format,
default_elem=1),
padding=nnef_op.attribs["padding"]),
outputs=(converter.create_nhwc_intermediate_for_nchw_output(tf_graph, output)
if converter.prefer_nhwc else output))
if converter.prefer_nhwc:
converter.add_nhwc_to_nchw_transpose(tf_graph, tf_op.output, nchw_tensor=output)
def partial_convert_conv_to_convolution(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
input, filter = converter.converted_tensors(nnef_op.inputs)
output = converter.converted_tensor(nnef_op.output)
assert not (converter.has_gt_1(nnef_op.attribs["stride"]) and converter.has_gt_1(nnef_op.attribs["dilation"])), \
"Custom stride AND dilation is not supported by tf.nn.convolution."
tf_op = TFOperation(
graph=tf_graph,
name="tf.nn.convolution",
inputs=(converter.add_nchw_to_nhwc_transpose(tf_graph, input) if converter.prefer_nhwc else input,
converter.add_nchw_to_hwcn_transpose(tf_graph, filter)),
attribs=dict(data_format=converter.tf_data_format(converter.preferred_format, input.rank),
padding=nnef_op.attribs["padding"]),
outputs=(converter.create_nhwc_intermediate_for_nchw_output(tf_graph, output)
if converter.prefer_nhwc else output))
if converter.prefer_nhwc:
converter.add_nhwc_to_nchw_transpose(tf_graph, tf_op.output, nchw_tensor=output)
if utils.has_gt_1(nnef_op.attribs["stride"]):
tf_op.attribs["strides"] = list(nnef_op.attribs["stride"])
if utils.has_gt_1(nnef_op.attribs["dilation"]):
tf_op.attribs["dilation_rate"] = list(nnef_op.attribs["dilation"])
def partial_convert_conv_to_depthwise_conv2d(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
input, filter = converter.converted_tensors(nnef_op.inputs)
output = converter.converted_tensor(nnef_op.output)
in_channels = input.shape[1]
d = input.rank - 2
assert d == 2
assert not (utils.has_gt_1(nnef_op.attribs["stride"]) and utils.has_gt_1(nnef_op.attribs["dilation"])), \
"Custom stride AND dilation is not supported by tf.nn.depthwise_conv2d."
input = converter.add_nchw_to_nhwc_transpose(tf_graph, input) if converter.prefer_nhwc else input
filter = converter.add_nchw_to_hwcn_transpose(tf_graph, filter)
filter = converter.add_hwcn_to_hwcm_reshape(tf_graph, filter, in_channels=in_channels)
tf_op = TFOperation(
graph=tf_graph,
name="tf.nn.depthwise_conv2d",
inputs=(input, filter),
attribs=dict(
data_format=converter.tf_data_format(converter.preferred_format, input.rank),
padding=nnef_op.attribs["padding"],
strides=converter.convert_format(nnef_op.attribs["stride"] if nnef_op.attribs["stride"] else [1] * d,
in_format=converter.FORMAT_SPATIAL,
out_format=converter.preferred_format,
default_elem=1)),
outputs=(converter.create_nhwc_intermediate_for_nchw_output(tf_graph, output)
if converter.prefer_nhwc else output))
if converter.prefer_nhwc:
converter.add_nhwc_to_nchw_transpose(tf_graph, tf_op.output, nchw_tensor=output)
if utils.has_gt_1(nnef_op.attribs["dilation"]):
tf_op.attribs["rate"] = converter.convert_format(shapelike=nnef_op.attribs["dilation"],
in_format=converter.FORMAT_SPATIAL,
out_format=converter.preferred_format,
default_elem=1)
def convert_deconv(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
groups = nnef_op.attribs["groups"]
input = nnef_op.inputs[0]
d = input.rank - 2
is_dilated = utils.has_gt_1(nnef_op.attribs["dilation"])
if groups == 1:
if not is_dilated and d in [2, 3]:
partial_convert_deconv_to_conv2d_transpose_or_conv3d_transpose(converter, nnef_op, tf_graph)
elif is_dilated and d == 2:
partial_convert_deconv_to_atrous_conv2d_transpose(converter, nnef_op, tf_graph)
else:
assert False, \
"{} dimensional{} deconv is not supported by TensorFlow.".format(d, " dilated" if is_dilated else "")
else:
if groups in [0, input.shape[1]] and d == 2:
partial_convert_deconv_to_depthwise_conv2d_native_backprop_input(converter, nnef_op, tf_graph)
else:
assert False, \
"Grouped deconvolutions are only supported if they can be " \
"converted to tf.nn.depthwise_conv2d_native_backprop_input."
def partial_convert_deconv_to_conv2d_transpose_or_conv3d_transpose(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
input, filter = converter.converted_tensors(nnef_op.inputs)
output = converter.converted_tensor(nnef_op.output)
d = input.rank - 2
assert d in [2, 3]
target_format = converter.FORMAT_NHWC if d == 3 else converter.preferred_format
assert not nnef_op.attribs["output_shape"] or nnef_op.attribs["output_shape"] == nnef_op.output.shape
tf_op = TFOperation(
graph=tf_graph,
name=("tf.nn.conv2d_transpose" if d == 2 else "tf.nn.conv3d_transpose"),
inputs=((converter.add_nchw_to_nhwc_transpose(tf_graph, input)
if target_format == converter.FORMAT_NHWC else input),
converter.add_nchw_to_hwcn_transpose(tf_graph, filter)),
attribs=dict(
data_format=converter.tf_data_format(target_format, input.rank),
strides=converter.convert_format(nnef_op.attribs["stride"] if nnef_op.attribs["stride"] else [1] * d,
in_format=converter.FORMAT_SPATIAL,
out_format=target_format,
default_elem=1),
padding=nnef_op.attribs["padding"],
output_shape=(converter.shape_nchw_to_nhwc(output.shape)
if target_format == converter.FORMAT_NHWC else list(output.shape))),
outputs=(converter.create_nhwc_intermediate_for_nchw_output(tf_graph, output)
if target_format == converter.FORMAT_NHWC else output))
if target_format == converter.FORMAT_NHWC:
converter.add_nhwc_to_nchw_transpose(tf_graph, tf_op.output, nchw_tensor=output)
def partial_convert_deconv_to_atrous_conv2d_transpose(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
# Only NHWC is supported
input, filter = converter.converted_tensors(nnef_op.inputs)
output = converter.converted_tensor(nnef_op.output)
d = input.rank - 2
assert d == 2
assert len(utils.unique(nnef_op.attribs["dilation"])) <= 1, \
"Cannot specify different x and y dilation in tf.nn.atrous_conv2d_transpose."
assert not utils.has_gt_1(nnef_op.attribs["stride"]), \
"Cannot use stride>1 in tf.nn.atrous_conv2d_transpose."
assert not nnef_op.attribs["output_shape"] or nnef_op.attribs["output_shape"] == nnef_op.output.shape
tf_op = TFOperation(
graph=tf_graph,
name="tf.nn.atrous_conv2d_transpose",
inputs=(converter.add_nchw_to_nhwc_transpose(tf_graph, input),
converter.add_nchw_to_hwcn_transpose(tf_graph, filter)),
attribs=dict(rate=nnef_op.attribs["dilation"][0] if nnef_op.attribs["dilation"] else 1,
padding=nnef_op.attribs["padding"],
output_shape=converter.shape_nchw_to_nhwc(output.shape)),
outputs=converter.create_nhwc_intermediate_for_nchw_output(tf_graph, output))
converter.add_nhwc_to_nchw_transpose(tf_graph, tf_op.output, nchw_tensor=output)
def partial_convert_deconv_to_depthwise_conv2d_native_backprop_input(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
input, filter = converter.converted_tensors(nnef_op.inputs)
output = converter.converted_tensor(nnef_op.output)
in_channels = output.shape[1]
d = input.rank - 2
assert d == 2
assert not nnef_op.attribs["output_shape"] or nnef_op.attribs["output_shape"] == nnef_op.output.shape
assert not (utils.has_gt_1(nnef_op.attribs["stride"]) and utils.has_gt_1(nnef_op.attribs["dilation"])), \
"Custom stride AND dilation is not supported by tf.nn.atrous_conv2d_transpose."
input = converter.add_nchw_to_nhwc_transpose(tf_graph, input) if converter.prefer_nhwc else input
filter = converter.add_nchw_to_hwcn_transpose(tf_graph, filter)
filter = converter.add_hwcn_to_hwcm_reshape(tf_graph, filter, in_channels=in_channels)
tf_op = TFOperation(
graph=tf_graph,
name="tf.nn.depthwise_conv2d_native_backprop_input",
inputs=(filter, input),
attribs=dict(
data_format=converter.tf_data_format(converter.preferred_format, input.rank),
strides=converter.convert_format(nnef_op.attribs["stride"] if nnef_op.attribs["stride"] else [1] * d,
in_format=converter.FORMAT_SPATIAL,
out_format=converter.preferred_format,
default_elem=1),
padding=nnef_op.attribs["padding"],
input_sizes=converter.shape_nchw_to_nhwc(output.shape) if converter.prefer_nhwc else list(output.shape)),
outputs=(converter.create_nhwc_intermediate_for_nchw_output(tf_graph, output)
if converter.prefer_nhwc else output))
if converter.prefer_nhwc:
converter.add_nhwc_to_nchw_transpose(tf_graph, tf_op.output, nchw_tensor=output)
if utils.has_gt_1(nnef_op.attribs["dilation"]):
tf_op.attribs["dilations"] = converter.convert_format(
nnef_op.attribs["dilation"] if nnef_op.attribs["dilation"] else [1] * d,
in_format=converter.FORMAT_SPATIAL,
out_format=converter.preferred_format,
default_elem=1)
def generic_convert_pooling(converter, nnef_op, tf_graph, target_name):
# type: (Converter, NNEFOperation, TFGraph, str)->None
assert not utils.has_gt_1(nnef_op.attribs["dilation"]), "Dilated pool is not supported in TensorFlow"
input = converter.converted_tensor(nnef_op.input)
outputs = converter.converted_tensors(nnef_op.outputs)
size, stride, padding = utils.get_dict_items(nnef_op.attribs, "size", "stride", "padding")
tf_op = TFOperation(
graph=tf_graph,
name=target_name,
inputs=converter.add_nchw_to_nhwc_transpose(tf_graph, input) if converter.prefer_nhwc else input,
attribs=dict(
data_format=converter.tf_data_format(converter.preferred_format, input.rank),
ksize=converter.shape_nchw_to_nhwc(size) if converter.prefer_nhwc else list(size),
strides=converter.convert_format(stride if stride else [1] * input.rank,
in_format=converter.FORMAT_NCHW,
out_format=converter.preferred_format,
default_elem=1),
padding=padding),
outputs=tuple(converter.create_nhwc_intermediate_for_nchw_output(tf_graph, output)
if converter.prefer_nhwc else output
for output in outputs))
if converter.prefer_nhwc:
for op_output, output in zip(tf_op.outputs, outputs):
converter.add_nhwc_to_nchw_transpose(tf_graph, op_output, output)
def convert_max_pool_with_index(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
# always nhwc
assert not utils.has_gt_1(nnef_op.attribs["dilation"]), "Dilated pool is not supported in TensorFlow"
size, stride, padding = utils.get_dict_items(nnef_op.attribs, "size", "stride", "padding")
input = converter.converted_tensor(nnef_op.input)
outputs = converter.converted_tensors(nnef_op.outputs)
tf_op = TFOperation(
graph=tf_graph,
name="tf.nn.max_pool_with_argmax",
inputs=converter.add_nchw_to_nhwc_transpose(tf_graph, input),
attribs=dict(
ksize=converter.shape_nchw_to_nhwc(size),
strides=converter.shape_nchw_to_nhwc(stride) if stride else [1] * input.rank,
padding=padding),
outputs=tuple(converter.create_nhwc_intermediate_for_nchw_output(tf_graph, output) for output in outputs))
for op_output, output in zip(tf_op.outputs, outputs):
converter.add_nhwc_to_nchw_transpose(tf_graph, op_output, output)
def convert_argmax_pool(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
# always nhwc
assert not utils.has_gt_1(nnef_op.attribs["dilation"]), "Dilated pool is not supported in TensorFlow"
size, stride, padding = utils.get_dict_items(nnef_op.attribs, "size", "stride", "padding")
input = converter.converted_tensor(nnef_op.input)
index = converter.converted_tensor(nnef_op.output)
index_tmp = converter.create_nhwc_intermediate_for_nchw_output(tf_graph, index)
output_tmp = TFTensor(graph=tf_graph, shape=index_tmp.shape, dtype="float32")
tf_op = TFOperation(
graph=tf_graph,
name="tf.nn.max_pool_with_argmax",
inputs=converter.add_nchw_to_nhwc_transpose(tf_graph, input),
attribs=dict(
ksize=converter.shape_nchw_to_nhwc(size),
strides=converter.shape_nchw_to_nhwc(stride) if stride else [1] * input.rank,
padding=padding),
outputs=(output_tmp, index_tmp))
converter.add_nhwc_to_nchw_transpose(tf_graph, index_tmp, index)
def generic_convert_upsample_downsample(converter, nnef_op, tf_graph, target_name, is_downsample):
# type: (Converter, NNEFOperation, TFGraph, str, bool)->None
input, output = converter.converted_tensors((nnef_op.input, nnef_op.output))
factors = nnef_op.attribs["factor"]
# always nhwc
tf_op = TFOperation(
graph=tf_graph,
name=target_name,
inputs=converter.add_nchw_to_nhwc_transpose(tf_graph, input),
attribs=dict(size=[(int(i / f) if is_downsample else int(i * f))
for i, f in zip(input.shape[2:], factors)]),
outputs=converter.create_nhwc_intermediate_for_nchw_output(tf_graph, output))
converter.add_nhwc_to_nchw_transpose(tf_graph, tf_op.output, output)
if nnef_op.name == "multilinear_upsample":
if nnef_op.attribs["border"].lower() != "replicate":
if converter.enable_imprecise_image_resize:
print("Warning: border={} is unsupported in multilinear_upsample, "
"using replicate, because enable_imprecise_image_resize was True"
.format(nnef_op.attribs["border"]))
else:
assert False, "Error: border={} is unsupported in multilinear_upsample. " \
"Use enable_imprecise_image_resize=True, to suppress this error." \
.format(nnef_op.attribs["border"])
if nnef_op.attribs["method"].lower() not in ["aligned", "asymmetric"]:
if converter.enable_imprecise_image_resize:
print("Warning: method={} is unsupported in multilinear_upsample, "
"using align_corners=False, because enable_imprecise_image_resize was True"
.format(nnef_op.attribs["method"]))
else:
assert False, "Error: method={} is unsupported in multilinear_upsample. " \
"Use enable_imprecise_image_resize=True, to suppress this error." \
.format(nnef_op.attribs["method"])
tf_op.attribs["align_corners"] = (nnef_op.attribs["method"].lower() == 'aligned')
def generic_convert_reduce(converter, nnef_op, tf_graph, target_name, target_name_if_normalize=None):
# type: (Converter, NNEFOperation, TFGraph, str, typing.Optional[str])->None
input, output = converter.converted_tensors((nnef_op.input, nnef_op.output))
if nnef_op.attribs.get("normalize"):
assert target_name_if_normalize
target_name = target_name_if_normalize
TFOperation(graph=tf_graph,
name=target_name,
inputs=input,
attribs=dict(axis=list(nnef_op.attribs["axes"]),
keepdims=True),
outputs=output)
def generic_convert_argminmax_reduce(converter, nnef_op, tf_graph, target_name):
# type: (Converter, NNEFOperation, TFGraph, str)->None
assert len(nnef_op.attribs["axes"]) == 1, "{} is only supported for one axis".format(nnef_op.name)
axis = nnef_op.attribs["axes"][0]
input, output = converter.converted_tensors((nnef_op.input, nnef_op.output))
argminmax = TFOperation(graph=tf_graph,
name=target_name,
inputs=input,
attribs=dict(axis=axis),
outputs=TFTensor(graph=tf_graph,
shape=transforms.squeezed_shape(input.shape, [axis],
can_squeeze_not_one=True),
dtype=converter.tf_dtype("integer")))
TFOperation(graph=tf_graph,
name="tf.expand_dims",
inputs=argminmax.output,
attribs=dict(axis=axis),
outputs=output)
def convert_moments(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
input = converter.converted_tensor(nnef_op.input)
mean, variance = converter.converted_tensors(nnef_op.outputs)
TFOperation(graph=tf_graph,
name="tf.nn.moments",
inputs=input,
attribs=dict(axes=list(nnef_op.attribs["axes"]),
keep_dims=True),
outputs=(mean, variance))
def convert_softmax(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
assert len(nnef_op.attribs["axes"]) == 1, "{} is only supported for one axis".format(nnef_op.name)
axis = nnef_op.attribs["axes"][0]
input, output = converter.converted_tensors((nnef_op.input, nnef_op.output))
TFOperation(graph=tf_graph,
name="tf.nn.softmax",
inputs=input,
attribs=dict(axis=axis),
outputs=output)
def convert_prelu(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
input, alpha = converter.converted_tensors(nnef_op.inputs)
output = converter.converted_tensor(nnef_op.output)
TFOperation(graph=tf_graph,
name="tf.nn.leaky_relu",
inputs=(input, alpha),
outputs=output)
def convert_leaky_relu(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
input = converter.converted_tensor(nnef_op.input)
alpha = converter.create_constant_tf_tensor(tf_graph=tf_graph, data=nnef_op.attribs["alpha"])
output = converter.converted_tensor(nnef_op.output)
TFOperation(graph=tf_graph,
name="tf.nn.leaky_relu",
inputs=(input, alpha),
outputs=output)
def convert_local_response_normalization(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
# Probably only NHWC is supported
input, output = converter.converted_tensors((nnef_op.input, nnef_op.output))
nnefsize = converter.shape_nchw_to_nhwc(nnef_op.attribs["size"])
assert len(nnefsize) >= 2, "Argument 'size' of local_response_normalization must have at least 2 elements"
depth_size = nnefsize[-1]
nnefsize[-1] = 1
assert not utils.has_gt_1(nnefsize), \
"local_response_normalization only supported when only the last element of 'size' is > 1"
depth_radius = (depth_size - 1) / 2
alpha = nnef_op.attribs["alpha"] / depth_size
tf_op = TFOperation(graph=tf_graph,
name="tf.nn.lrn",
inputs=converter.add_nchw_to_nhwc_transpose(tf_graph, input),
attribs=dict(depth_radius=int(depth_radius),
bias=nnef_op.attribs["bias"],
alpha=alpha,
beta=nnef_op.attribs["beta"]),
outputs=converter.create_nhwc_intermediate_for_nchw_output(tf_graph, output))
converter.add_nhwc_to_nchw_transpose(tf_graph, tf_op.output, output)
def convert_l2_normalization(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
input, output = converter.converted_tensors((nnef_op.input, nnef_op.output))
# TODO epsilon and bias are not really the same, is this a problem?
TFOperation(graph=tf_graph,
name="tf.nn.l2_normalize",
inputs=input,
attribs=dict(axis=nnef_op.attribs["axes"],
epsilon=nnef_op.attribs["bias"]),
outputs=output)
def convert_batch_normalization(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
input, mean, variance, offset, scale = converter.converted_tensors(nnef_op.inputs)
output = converter.converted_tensor(nnef_op.output)
if converter.prefer_nhwc:
def add_squeeze_or_transpose(tf_tensor):
if tf_tensor.rank == 2 and tf_tensor.shape[0] == 1:
return converter.add_squeeze(tf_graph, tf_tensor, axes=[0])
else:
return converter.add_nchw_to_nhwc_transpose(tf_graph, tf_tensor)
tf_op = TFOperation(graph=tf_graph,
name="tf.nn.batch_normalization",
inputs=(converter.add_nchw_to_nhwc_transpose(tf_graph, input),
add_squeeze_or_transpose(mean),
add_squeeze_or_transpose(variance),
add_squeeze_or_transpose(offset),
add_squeeze_or_transpose(scale)),
attribs=dict(variance_epsilon=nnef_op.attribs["epsilon"]),
outputs=converter.create_nhwc_intermediate_for_nchw_output(tf_graph, output))
converter.add_nhwc_to_nchw_transpose(tf_graph, tf_op.output, output)
else:
def add_unsqueeze(tf_tensor):
return converter.add_unsqueeze(tf_graph, tf_tensor, list(range(input.rank - tf_tensor.rank, input.rank)))
TFOperation(graph=tf_graph,
name="tf.nn.batch_normalization",
inputs=(input,
add_unsqueeze(mean) if 0 < mean.rank < input.rank else mean,
add_unsqueeze(variance) if 0 < variance.rank < input.rank else variance,
add_unsqueeze(offset) if 0 < offset.rank < input.rank else offset,
add_unsqueeze(scale) if 0 < scale.rank < input.rank else scale),
attribs=dict(variance_epsilon=nnef_op.attribs["epsilon"]),
outputs=output)
def convert_copy_n(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
assert len(nnef_op.outputs) == nnef_op.attribs["times"], "copy_n must have 'times' outputs"
input = converter.converted_tensor(nnef_op.input)
outputs = converter.converted_tensors(nnef_op.outputs)
for output in outputs:
TFOperation(graph=tf_graph,
name="tf.identity",
inputs=input,
outputs=output)
def convert_add_n(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
inputs = list(converter.converted_tensors(nnef_op.inputs))
output = converter.converted_tensor(nnef_op.output)
TFOperation(graph=tf_graph, name="tf.add_n", inputs=inputs, outputs=output)
def convert_slice(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
input, output = converter.converted_tensors((nnef_op.input, nnef_op.output))
axes = nnef_op.attribs["axes"]
begin = nnef_op.attribs["begin"]
end = nnef_op.attribs["end"]
size = [
-1 if e == -1 else e - b
for b, e in zip(begin, end)
]
tfbegin, tfsize = utils.zip_inverse(2, [
(begin[axes.index(i)], size[axes.index(i)]) if i in axes else (0, -1)
for i in range(input.rank)
])
TFOperation(graph=tf_graph,
name="tf.slice",
inputs=input,
attribs=dict(begin=tfbegin,
size=tfsize),
outputs=output)
def convert_stack(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
inputs = list(converter.converted_tensors(nnef_op.inputs))
output = converter.converted_tensor(nnef_op.output)
TFOperation(graph=tf_graph,
name="tf.stack",
inputs=inputs,
attribs=dict(axis=nnef_op.attribs["axis"]),
outputs=output)
def convert_unstack(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
input = converter.converted_tensor(nnef_op.input)
outputs = converter.converted_tensors(nnef_op.outputs)
TFOperation(graph=tf_graph,
name="tf.unstack",
inputs=input,
attribs=dict(num=len(outputs),
axis=nnef_op.attribs["axis"]),
outputs=outputs)
def convert_box(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
input, output = converter.converted_tensors((nnef_op.input, nnef_op.output))
if (all(s == 1 for s in nnef_op.attribs["size"])
and all(s == 1 for s in nnef_op.attribs["stride"])
and all(d == 1 for d in nnef_op.attribs["dilation"])):
tf_op = TFOperation(graph=tf_graph,
name="tf.pad",
inputs=input,
outputs=output)
tf_border_mode = converter.tf_border_mode(nnef_op.attribs["border"],
converter.enable_imprecise_padding_border)
if tf_border_mode != 'CONSTANT':
tf_op.attribs["mode"] = tf_border_mode
else:
assert False, "Box is not yet fully supported, only for padding"
def convert_copy(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
input, output = converter.converted_tensors((nnef_op.input, nnef_op.output))
TFOperation(graph=tf_graph,
name="tf.identity",
inputs=input,
outputs=output)
def convert_box_to_pad(converter, nnef_op, tf_graph):
# type: (Converter, NNEFOperation, TFGraph)->None
input, output = converter.converted_tensors((nnef_op.input, nnef_op.output))
assert nnef_op.attribs["size"] == [1] * input.rank, "Only 1x1 box is supported (which is used to simulate padding)"
tf_op = TFOperation(graph=tf_graph,
name="tf.pad",
inputs=input,
attribs=dict(paddings=list(nnef_op.attribs["padding"]), mode='CONSTANT', constant_values=0),
outputs=output)
tf_pad_mode = converter.tf_border_mode(nnef_op.attribs["border"], converter.enable_imprecise_padding_border)
if tf_pad_mode != "CONSTANT":
tf_op.attribs["mode"] = tf_pad_mode
_DefaultConverters = {
"update": convert_update,
"reshape": convert_reshape,
"squeeze": convert_squeeze,
"unsqueeze": convert_unsqueeze,
"transpose": convert_transpose,
"concat": convert_concat,
"split": convert_split,
"add": partial(generic_convert_binary, target_name="tf.add"),
"sub": partial(generic_convert_binary, target_name="tf.subtract"),
"mul": partial(generic_convert_binary, target_name="tf.multiply"),
"div": partial(generic_convert_binary, target_name="tf.divide"),
"pow": partial(generic_convert_binary, target_name="tf.pow"),
"lt": partial(generic_convert_binary, target_name="tf.less"),
"gt": partial(generic_convert_binary, target_name="tf.greater"),
"le": partial(generic_convert_binary, target_name="tf.less_equal"),
"ge": partial(generic_convert_binary, target_name="tf.greater_equal"),
"eq": partial(generic_convert_binary, target_name="tf.equal"),
"ne": partial(generic_convert_binary, target_name="tf.not_equal"),
"and": partial(generic_convert_binary, target_name="tf.logical_and"),
"or": partial(generic_convert_binary, target_name="tf.logical_or"),
"min": partial(generic_convert_binary, target_name="tf.minimum"),
"max": partial(generic_convert_binary, target_name="tf.maximum"),
"exp": partial(generic_convert_unary, target_name="tf.exp"),
"log": partial(generic_convert_unary, target_name="tf.log"),
"abs": partial(generic_convert_unary, target_name="tf.abs"),
"sign": partial(generic_convert_unary, target_name="tf.sign"),
"rcp": partial(generic_convert_unary, target_name="tf.reciprocal"),
"neg": partial(generic_convert_unary, target_name="tf.negative"),
"floor": partial(generic_convert_unary, target_name="tf.floor"),
"ceil": partial(generic_convert_unary, target_name="tf.ceil"),
"round": partial(generic_convert_unary, target_name="tf.round"),
"sqr": partial(generic_convert_unary, target_name="tf.square"),
"sqrt": partial(generic_convert_unary, target_name="tf.sqrt"),
"rsqrt": partial(generic_convert_unary, target_name="tf.rsqrt"),
"not": partial(generic_convert_unary, target_name="tf.logical_not"),
"sigmoid": partial(generic_convert_unary, target_name="tf.nn.sigmoid"),
"tanh": partial(generic_convert_unary, target_name="tf.nn.tanh"),
"elu": partial(generic_convert_unary, target_name="tf.nn.elu"),
"relu": partial(generic_convert_unary, target_name="tf.nn.relu"),
"softplus": partial(generic_convert_unary, target_name="tf.nn.softplus"),
"rsqr": convert_rsqr,
"select": convert_select,
"clamp": convert_clamp,
"matmul": convert_matmul,
"conv": convert_conv,
"deconv": convert_deconv,
"argmax_pool": convert_argmax_pool,
"max_pool": partial(generic_convert_pooling, target_name="tf.nn.max_pool"),
"avg_pool": partial(generic_convert_pooling, target_name="tf.nn.avg_pool"),
"max_pool_with_index": convert_max_pool_with_index,
"nearest_downsample": partial(generic_convert_upsample_downsample,
target_name="tf.image.resize_nearest_neighbor",
is_downsample=True),
"area_downsample": partial(generic_convert_upsample_downsample,
target_name="tf.image.resize_area",
is_downsample=True),
"nearest_upsample": partial(generic_convert_upsample_downsample,
target_name="tf.image.resize_nearest_neighbor",
is_downsample=False),
"multilinear_upsample": partial(generic_convert_upsample_downsample,
target_name="tf.image.resize_bilinear",
is_downsample=False),
"sum_reduce": partial(generic_convert_reduce, target_name="tf.reduce_sum", target_name_if_normalize="tf.reduce_mean"),
"min_reduce": partial(generic_convert_reduce, target_name="tf.reduce_min"),
"max_reduce": partial(generic_convert_reduce, target_name="tf.reduce_max"),
"mean_reduce": partial(generic_convert_reduce, target_name="tf.reduce_mean"),
"any_reduce": partial(generic_convert_reduce, target_name="tf.reduce_any"),
"all_reduce": partial(generic_convert_reduce, target_name="tf.reduce_all"),
"argmax_reduce": partial(generic_convert_argminmax_reduce, target_name="tf.argmax"),
"argmin_reduce": partial(generic_convert_argminmax_reduce, target_name="tf.argmin"),
"moments": convert_moments,
"softmax": convert_softmax,
"leaky_relu": convert_leaky_relu,
"prelu": convert_prelu,
"local_response_normalization": convert_local_response_normalization,
"l2_normalization": convert_l2_normalization,
"batch_normalization": convert_batch_normalization,
"copy_n": convert_copy_n,
"add_n": convert_add_n,
"slice": convert_slice,
"stack": convert_stack,
"unstack": convert_unstack,
"copy": convert_copy,
"_bias_add": convert_bias_add,
"box": convert_box_to_pad,
# unsupported: debox, sample, desample, local_mean_normalization, local_variance_normalization,
# local_contrast_normalization, linear_quantize, logarithmic_quantize, binary_quantize, ternary_quantize
}
```
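The `_DefaultConverters` table above is a plain dispatch dictionary from NNEF operation names to converter callables. Below is a minimal sketch of how such a table is typically driven; the driver function and its argument names are illustrative assumptions, not the library's actual entry point.

```python
# Sketch only: driving the NNEF -> TF dispatch table above.
# 'converter', 'nnef_graph' and 'tf_graph' are assumed to be prepared by the
# surrounding Converter machinery; this loop is illustrative, not the real driver.
def convert_all_operations(converter, nnef_graph, tf_graph):
    for nnef_op in list(nnef_graph.operations):
        convert_fun = _DefaultConverters.get(nnef_op.name)
        assert convert_fun is not None, "No converter registered for '{}'".format(nnef_op.name)
        convert_fun(converter, nnef_op, tf_graph)
```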
#### File: conversion/tensorflow/tf_py_to_tf_pb.py
```python
from __future__ import division, print_function, absolute_import
import typing
from functools import partial
import numpy as np
import six
from nnef_tools.conversion.tensorflow import tf_pb_to_tf_py
from nnef_tools.core import utils
from nnef_tools.io.tensorflow.tf_graph import *
from nnef_tools.shape_inference import shape_inference as infer
# noinspection PyProtectedMember
_tf_py_dtype_to_tf_pb_dtype = utils.key_value_swapped(tf_pb_to_tf_py._tf_py_dtype_by_tf_pb_dtype)
def convert(tf_graph):
# type: (TFGraph)->None
for tensor in tf_graph.tensors:
if tensor.is_variable:
if tensor.data.dtype == np.int64:
tensor.data = tensor.data.astype(np.int32)
tensor.dtype = "int32"
if tensor.data.dtype == np.float64:
tensor.data = tensor.data.astype(np.float32)
tensor.dtype = "float32"
for op in list(tf_graph.operations):
if op.name == "tf.nn.softmax":
expand_softmax(tf_graph, op)
for op in list(tf_graph.operations):
assert op.name in _DefaultConverters, "No tf_py_to_tf_pb converter for {}".format(op.name)
_DefaultConverters[op.name](op)
for tensor in tf_graph.tensors:
tensor.dtype = _tf_py_dtype_to_tf_pb_dtype[tensor.dtype]
for op in tf_graph.operations:
if op.name not in ['LogicalAnd', 'LogicalNot', 'LogicalOr']:
if op.name in ['Select', 'Conv2DBackpropInput', 'Conv3DBackpropInputV2']:
op.attribs['T'] = op.inputs[1].dtype
else:
op.attribs['T'] = op.inputs[0].dtype
if op.name == 'MaxPoolWithArgmax':
op.attribs['Targmax'] = 'DT_INT64'
tf_graph.generate_missing_names()
def expand_softmax(tf_graph, tf_op):
assert tf_op.input.rank != 0
axis = tf_op.attribs.get('axis')
if axis is None:
axis = -1
if axis < 0:
axis += tf_op.input.rank
tf_op.attribs['axis'] = -1
if tf_op.input.rank == 2 and axis == 1:
return
if axis != tf_op.input.rank - 1:
perm = utils.without(range(tf_op.input.rank), axis) + [axis]
perm_inv = utils.inverse_permutation(perm)
transpose = TFOperation(graph=tf_graph,
name="tf.transpose",
inputs=tf_op.input,
attribs=dict(perm=perm),
outputs=TFTensor(graph=tf_graph,
name=None,
shape=infer.transpose(input=tf_op.input.shape, axes=perm),
dtype=tf_op.input.dtype))
tf_op.inputs = transpose.output
old_output = tf_op.output
tf_op.outputs = TFTensor(graph=tf_graph,
name=None,
shape=tf_op.input.shape,
dtype=tf_op.input.dtype)
TFOperation(graph=tf_graph,
name="tf.transpose",
inputs=tf_op.output,
attribs=dict(perm=perm_inv),
outputs=old_output)
if tf_op.input.rank != 2:
shape = [-1, tf_op.input.shape[-1]]
reshape = TFOperation(graph=tf_graph,
name="tf.reshape",
inputs=tf_op.input,
attribs=dict(shape=shape),
outputs=TFTensor(graph=tf_graph,
name=None,
shape=infer.reshape(input=tf_op.input.shape, shape=shape),
dtype=tf_op.input.dtype))
tf_op.inputs = reshape.output
old_output = tf_op.output
tf_op.outputs = TFTensor(graph=tf_graph, name=None, shape=list(tf_op.input.shape), dtype=tf_op.input.dtype)
TFOperation(graph=tf_graph,
name="tf.reshape",
inputs=tf_op.output,
attribs=dict(shape=old_output.shape),
outputs=old_output)
def create_constant_tensor(graph, value, np_dtype=None):
if np_dtype is not None:
arr = np.array(value, dtype=np_dtype)
else:
arr = np.array(value)
if arr.dtype == np.float64:
arr = arr.astype(np.float32)
elif arr.dtype == np.int64:
        arr = arr.astype(np.int32)  # Constants must be int32 in most places
return TFTensor(graph=graph, name=None, shape=list(arr.shape), dtype=str(arr.dtype), data=arr.flatten().tolist())
def generic_converter(op, # type: TFOperation
target_name, # type: str
revert_inputs=False, # type: bool
attrib_name_dict=None, # type: typing.Optional[typing.Dict[str, str]]
attrib_to_input_dict=None, # type: typing.Optional[typing.Dict[str, int]]
attribs_to_remove=None # type: typing.Optional[typing.List[str]]
):
op.name = target_name
if revert_inputs:
op.inputs = tuple(reversed(op.inputs))
if attrib_name_dict:
attribs = {}
for k, v in six.iteritems(op.attribs):
if k in attrib_name_dict:
attribs[attrib_name_dict[k]] = v
else:
attribs[k] = v
op.attribs = attribs
if attrib_to_input_dict:
inputs = list(op.inputs)
attribs_inputs = sorted([(a, i) for a, i in six.iteritems(attrib_to_input_dict)], key=lambda t: t[1])
for attrib_name, input_id in attribs_inputs:
if input_id < 0:
input_id += len(inputs) + 1
inputs.insert(input_id, create_constant_tensor(op.graph, op.attribs[attrib_name]))
del op.attribs[attrib_name]
op.inputs = inputs
if attribs_to_remove:
for attrib_name in attribs_to_remove:
del op.attribs[attrib_name]
def generate_back_converters(converters):
back_converters = {}
for target_name, converter in six.iteritems(converters):
if isinstance(converter, partial) and converter.func == tf_pb_to_tf_py.generic_converter:
from_name = converter.keywords['target_name']
revert_inputs = converter.keywords.get('revert_inputs', False)
attrib_name_dict = utils.key_value_swapped(converter.keywords.get('attrib_name_dict', {}))
attrib_to_input_dict = utils.key_value_swapped(converter.keywords.get('input_to_attrib_dict', {}))
attribs_to_remove = list(six.iterkeys(converter.keywords.get('new_attribs', {})))
back_converters[from_name] = partial(generic_converter,
target_name=target_name,
revert_inputs=revert_inputs,
attrib_name_dict=attrib_name_dict,
attrib_to_input_dict=attrib_to_input_dict,
attribs_to_remove=attribs_to_remove)
return back_converters
def convert_batch_normalization(op):
# type: (TFOperation)->None
op.name = "FusedBatchNorm"
input, mean, variance, offset, scale = op.inputs
is_nhwc = not (mean.rank >= 2 and mean.shape[1] > 1)
def make_1d(tensor):
# type: (TFTensor)->TFTensor
if tensor.rank == 1:
return tensor
return TFOperation(name='Reshape',
graph=tensor.graph,
inputs=(tensor, create_constant_tensor(graph=tensor.graph, value=[tensor.count])),
outputs=TFTensor(graph=tensor.graph, shape=[tensor.count], dtype=tensor.dtype)).output
op.inputs = (input, make_1d(scale), make_1d(offset), make_1d(mean), make_1d(variance))
op.attribs['is_training'] = False
op.attribs['epsilon'] = op.attribs['variance_epsilon']
op.attribs['data_format'] = 'NHWC' if is_nhwc else 'NCHW'
del op.attribs['variance_epsilon']
def convert_flatten(op):
# type: (TFOperation)->None
op.name = "Reshape"
op.inputs = tuple(op.inputs) + (create_constant_tensor(op.graph, list(op.output.shape)),)
def convert_split(op):  # TODO: handle num_or_size_splits containing -1; split conversion still needs fixing
# type: (TFOperation)->None
if isinstance(op.attribs['num_or_size_splits'], (list, tuple)):
op.name = 'SplitV'
op.inputs = (op.input,
create_constant_tensor(op.graph, op.attribs['num_or_size_splits'], np_dtype=np.int64),
create_constant_tensor(op.graph, op.attribs['axis']))
op.attribs['num_split'] = len(op.attribs['num_or_size_splits'])
del op.attribs['num_or_size_splits']
del op.attribs['axis']
else:
op.name = 'Split'
op.inputs = (create_constant_tensor(op.graph, op.attribs['axis']), op.input)
op.attribs['num_split'] = op.attribs['num_or_size_splits']
del op.attribs['num_or_size_splits']
del op.attribs['axis']
def postconvert_concat(op):
# type: (TFOperation)->None
op.attribs['N'] = len(op.inputs) - 1
def postconvert_slice(op):
# type: (TFOperation)->None
op.attribs['Index'] = 'DT_INT32'
def converter_sequence(fun1, fun2):
def f(op):
fun1(op)
fun2(op)
return f
# noinspection PyProtectedMember
_DefaultConverters = generate_back_converters(
tf_pb_to_tf_py._DefaultConverters
) # type: typing.Dict[str, typing.Callable[[TFOperation], None]]
_DefaultConverters.update({
"tf.nn.batch_normalization": convert_batch_normalization,
"tf.layers.flatten": convert_flatten,
'tf.concat': converter_sequence(_DefaultConverters['tf.concat'], postconvert_concat),
'tf.nn.depthwise_conv2d': _DefaultConverters['tf.nn.depthwise_conv2d_native'],
'tf.split': convert_split,
'tf.slice': converter_sequence(_DefaultConverters['tf.slice'], postconvert_slice)
})
```
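As a rough usage sketch of the module above (hedged: the graph is assumed to come from the tf_py reader elsewhere in the package; only the `convert` entry point defined above is exercised):

```python
# Sketch only: applying the in-place tf_py -> tf_pb conversion defined above.
from nnef_tools.conversion.tensorflow import tf_py_to_tf_pb

def to_tf_pb_style(tf_graph):
    # 'convert' mutates the graph: 64-bit variables are narrowed, softmax is
    # expanded to a 2D last-axis form, op names are rewritten to protobuf-style
    # names, and dtype/'T' attributes are filled in.
    tf_py_to_tf_pb.convert(tf_graph)
    return tf_graph
```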
#### File: nnef_tools/core/graph_utils.py
```python
from __future__ import division, print_function, absolute_import
import typing
from collections import deque, OrderedDict
from nnef_tools.core.graph import *
_OpOrOps = typing.Union[Operation, typing.Tuple[Operation, ...], typing.List[Operation]]
_TensorOrTensors = typing.Union[Tensor, typing.Tuple[Tensor, ...], typing.List[Tensor]]
def remove_subgraph(graph, operations):
# type: (Graph, typing.List[Operation])->None
"""
Deletes the given ops and the tensors that are enclosed in their sub-graph
"""
tensors = set()
for operation in operations:
for tensor in operation.inputs:
if (tensor not in tensors
and tensor.producers
and all(producer in operations for producer in tensor.producers)):
tensors.add(tensor)
for tensor in tensors:
for operation in tensor.consumers:
assert operation in operations
graph.remove_operations(operations, unlink=True)
graph.remove_tensors(tensors)
def remove_unreachable(graph):
# type: (Graph)->None
visited_ops = set() # type: typing.Set[Operation]
q = deque() # type: typing.Deque[Operation]
output_ops = set([producer
for t in graph.outputs
for producer in t.producers])
for op in output_ops:
visited_ops.add(op)
q.append(op)
while q:
op = q.popleft()
for tensor in op.inputs:
for producer in tensor.producers:
if producer not in visited_ops:
visited_ops.add(producer)
q.append(producer)
graph.remove_operations([op for op in graph.operations if op not in visited_ops], unlink=True)
def can_remove(g, t):
return len(t.producers) == 0 and len(t.consumers) == 0 and t not in g.outputs and t not in g.inputs
graph.remove_tensors([t for t in graph.tensors if can_remove(graph, t)])
def replace_tensor_in_inputs(graph, old_tensor, new_tensor, remove=False):
if graph.input_ids is not None:
graph.inputs = OrderedDict((name, new_tensor if t is old_tensor else t)
for name, t in zip(graph.input_ids, graph.inputs))
else:
graph.inputs = [new_tensor if t is old_tensor else t for t in graph.inputs]
if remove:
graph.remove_tensor(old_tensor)
def replace_tensor_in_outputs(graph, old_tensor, new_tensor, remove=False):
if graph.output_ids is not None:
graph.outputs = OrderedDict((name, new_tensor if t is old_tensor else t)
for name, t in zip(graph.output_ids, graph.outputs))
else:
graph.outputs = [new_tensor if t is old_tensor else t for t in graph.outputs]
if remove:
graph.remove_tensor(old_tensor)
def replace_tensor_in_consumers(graph, old_tensor, new_tensor, remove=False):
# type: (Graph, Tensor, Tensor, bool)->None
for consumer in list(old_tensor.consumers):
if isinstance(consumer.inputs, tuple):
consumer.inputs = tuple(new_tensor if t is old_tensor else t for t in consumer.inputs)
else:
consumer.inputs = [new_tensor if t is old_tensor else t for t in consumer.inputs]
replace_tensor_in_outputs(graph, old_tensor, new_tensor)
if remove:
graph.remove_tensor(old_tensor)
def remove_passthrough(g, op):
# type: (Graph, Operation)->None
assert len(op.outputs) == 1 and len(op.inputs) == 1
op_input = op.input
op_output = op.output
g.remove_operation(op, unlink=True)
replace_tensor_in_consumers(g, op_output, op_input, remove=True)
def remove_passthroughs(g, is_passthrough):
# type: (Graph, typing.Callable[[Operation], bool])->None
for op in list(g.operations):
if is_passthrough(op):
remove_passthrough(g, op)
```
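A short, hedged example of how these helpers compose into a clean-up pass (the op names used in the predicate are only illustrative):

```python
# Sketch only: a typical clean-up pass built from the helpers above.
# 'graph' is assumed to be an already-built Graph; the op names are examples.
def cleanup(graph):
    # Bypass pure pass-through ops (single input, single output) and rewire
    # their consumers to the original input tensor.
    remove_passthroughs(graph, is_passthrough=lambda op: op.name in ("copy", "tf.identity"))
    # Drop every operation and tensor that no longer reaches a graph output.
    remove_unreachable(graph)
```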
#### File: io/nnef/parser_config.py
```python
from __future__ import division, print_function, absolute_import
import importlib
import typing
import nnef
from nnef_tools.core import utils
class NNEFParserConfig(object):
STANDARD_CONFIG = None # It is loaded after the class definition
def __init__(self, source=None, shapes=None, expand=None):
if source is None:
source = ""
if shapes is None:
shapes = {}
if expand is None:
expand = list()
if not isinstance(expand, list):
expand = list(expand)
self._source = source
self._shapes = shapes
self._expand = expand
def parse_string(self, graph_str, quant_str=None):
return nnef.parse_string(graph_str=graph_str,
quant_str=quant_str,
stdlib=self._source,
lowered=self._expand)
def load_graph(self, path):
return nnef.load_graph(path=path,
stdlib=self._source,
lowered=self._expand)
def infer_shapes(self, graph):
nnef.infer_shapes(graph=graph, custom_shapes=self._shapes)
return graph
@property
def empty(self):
return not self._source and not self._shapes and not self._expand
@staticmethod
def load_config(module_name):
# type: (str)->NNEFParserConfig
"""
:param module_name: "package.module"
:return: NNEFParserConfig
"""
module = importlib.import_module(module_name)
custom_fragments = ""
if hasattr(module, "NNEF_OP_DEFINITIONS"):
custom_fragments = module.NNEF_OP_DEFINITIONS
custom_expands = []
if hasattr(module, "NNEF_LOWERED_OPS"):
custom_expands = module.NNEF_LOWERED_OPS
custom_shapes = {}
if hasattr(module, "NNEF_SHAPE_PROPAGATORS"):
custom_shapes = module.NNEF_SHAPE_PROPAGATORS
return NNEFParserConfig(source=custom_fragments, shapes=custom_shapes, expand=custom_expands)
@staticmethod
def load_configs(module_names="", load_standard=True):
# type: (typing.Union[str, typing.List[str], None], bool)->typing.List[NNEFParserConfig]
"""
:param module_names: "package.module" or "p1.m1,p2.m2", or ["p1.m1", "p2.m2"]
:param load_standard: Load the standard NNEF op definitions as well
:return: parser configs
"""
if module_names is None:
module_names = []
if utils.is_anystr(module_names):
module_names = [name.strip() for name in module_names.split(',')] if module_names.strip() else []
configs = [NNEFParserConfig.STANDARD_CONFIG] if load_standard else []
for module_name in module_names:
config = NNEFParserConfig.load_config(module_name)
if not config.empty:
configs.append(config)
return configs
@staticmethod
def combine_configs(configs):
assert all(isinstance(config, NNEFParserConfig) for config in configs)
shapes = {}
for config in configs:
shapes.update(config._shapes)
expand = []
for config in configs:
expand += config._expand
expand = utils.unique(expand)
return NNEFParserConfig(source='\n\n'.join(config._source for config in configs),
shapes=shapes,
expand=expand)
NNEFParserConfig.STANDARD_CONFIG = NNEFParserConfig.load_config("nnef_tools.io.nnef.parser_config_std")
```
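A brief usage sketch of the config API above (the custom module name and the model path are placeholders, not real files):

```python
# Sketch only: loading parser configs and using them to parse an NNEF model.
# "my_package.my_custom_ops" and the model path are placeholders.
configs = NNEFParserConfig.load_configs("my_package.my_custom_ops", load_standard=True)
config = NNEFParserConfig.combine_configs(configs)

graph = config.load_graph("path/to/model.nnef")  # placeholder path
config.infer_shapes(graph)                       # applies the custom shape propagators as well
```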
#### File: tensorflow/tf_py/tf_py_definitions.py
```python
from __future__ import division, print_function, absolute_import
import typing
from collections import OrderedDict
import six
import tensorflow as tf
from nnef_tools.core import utils
from nnef_tools.io.tensorflow.tf_graph import *
def _getattr(module, *names):
for name in names:
if hasattr(module, name):
return getattr(module, name)
_name = "_" + name
if hasattr(module, _name):
return getattr(module, _name)
return None
class ArgProto(object):
def __init__(self, arg_names, is_tensor, is_array, is_optional):
self.arg_names = arg_names
self.is_tensor = is_tensor
self.is_array = is_array
self.is_optional = is_optional
@property
def primary_arg_name(self):
return self.arg_names[0]
def __repr__(self):
return "ArgProto({})".format(self.arg_names)
class OpProto(object):
def __init__(self, op_name, arg_protos):
self.op_name = op_name
self.arg_protos = arg_protos # type: typing.List[ArgProto]
def list_tensor_arg_protos(self):
return [a for a in self.arg_protos if a.is_tensor]
def list_nontensor_arg_protos(self):
return [a for a in self.arg_protos if not a.is_tensor]
def __repr__(self):
return "OpProto({})".format(self.op_name)
def parse_arg_proto(s):
is_tensor = False
is_optional = False
is_array = False
if s.endswith('?'):
is_optional = True
s = s[:-1]
if s.endswith('[]'):
is_array = True
s = s[:-2]
if ':' in s:
s, t = s.split(':', 1)
t = t.strip()
assert t == "T"
is_tensor = True
arg_names = list(t.strip() for t in s.split('/'))
assert all(utils.is_identifier(n) for n in arg_names), "{}".format(arg_names)
return ArgProto(arg_names=arg_names, is_tensor=is_tensor, is_array=is_array, is_optional=is_optional)
def parse_op_proto(s):
assert s and s[-1] == ')'
s = s[:-1]
op_name, args = s.split('(', 1)
arg_protos = []
for arg in args.split(','):
arg = arg.strip()
assert arg
arg_protos.append(parse_arg_proto(arg))
assert utils.is_identifier(op_name.replace('.', '_'))
return OpProto(op_name=op_name, arg_protos=arg_protos)
def args_from_tfop(op, op_proto, allow_missing=False):
# type: (TFOperation, OpProto, bool)->OrderedDict[str, typing.Any]
args = OrderedDict()
i_tensor = 0
for arg_proto in op_proto.arg_protos:
if arg_proto.is_tensor and arg_proto.is_array:
args[arg_proto.primary_arg_name] = []
while i_tensor < len(op.inputs):
args[arg_proto.primary_arg_name].append(op.inputs[i_tensor])
i_tensor += 1
elif arg_proto.is_tensor and not arg_proto.is_array:
assert i_tensor < len(op.inputs) or arg_proto.is_optional
if i_tensor < len(op.inputs):
args[arg_proto.primary_arg_name] = op.inputs[i_tensor]
i_tensor += 1
else:
if not ((allow_missing or arg_proto.is_optional) and arg_proto.primary_arg_name not in op.attribs):
args[arg_proto.primary_arg_name] = op.attribs[arg_proto.primary_arg_name]
return args
class TraceableFunction(object):
def __init__(self, proto, fun):
# type: (typing.Union[str, OpProto], typing.Union[typing.Callable, typing.List[typing.Callable]])->None
funs = list(fun) if isinstance(fun, (list, tuple)) else [fun]
funs = [f for f in funs if f is not None]
self.op_proto = parse_op_proto(proto) if not isinstance(proto, OpProto) else proto
self.functions = funs
def __repr__(self):
return "TraceableFunction({})".format(self.op_proto.op_name)
class _Functions(object):
def __init__(self):
self.sinh = _getattr(tf, "sinh")
self.cosh = _getattr(tf, "cosh")
self.leaky_relu = _getattr(tf.nn, "leaky_relu")
class _InternalFunctions(object):
def __init__(self):
from tensorflow.python.ops import gen_array_ops as tf_gen_array_ops
from tensorflow.python.ops import array_grad as tf_array_grad
from tensorflow.python.ops import gen_math_ops as tf_gen_math_ops
from tensorflow.python.ops import math_grad as tf_math_grad
from tensorflow.python.ops import gen_nn_ops as tf_gen_nn_ops
from tensorflow.python.ops import gen_image_ops as tf_gen_image_ops
from tensorflow.python.ops import variables as tf_variables
self.RefVariable = _getattr(tf_variables, "RefVariable")
self.add = _getattr(tf_gen_math_ops, "add")
self.div = _getattr(tf_gen_math_ops, "div")
self.pow = _getattr(tf_gen_math_ops, "pow")
self.logical_and = _getattr(tf_gen_math_ops, "logical_and")
self.logical_or = _getattr(tf_gen_math_ops, "logical_or")
self.reciprocal = _getattr(tf_gen_math_ops, "reciprocal")
self.logical_not = _getattr(tf_gen_math_ops, "logical_not")
self.abs = _getattr(tf_gen_math_ops, "abs")
self.sign = _getattr(tf_gen_math_ops, "sign")
self.exp = _getattr(tf_gen_math_ops, "exp")
self.log = _getattr(tf_gen_math_ops, "log")
self.square = _getattr(tf_gen_math_ops, "square")
self.floor = _getattr(tf_gen_math_ops, "floor")
self.ceil = _getattr(tf_gen_math_ops, "ceil")
self.round = _getattr(tf_gen_math_ops, "round")
self.greater = _getattr(tf_gen_math_ops, "greater")
self.greater_equal = _getattr(tf_gen_math_ops, "greater_equal")
self.less = _getattr(tf_gen_math_ops, "less")
self.less_equal = _getattr(tf_gen_math_ops, "less_equal")
self.equal = _getattr(tf_gen_math_ops, "equal")
self.not_equal = _getattr(tf_gen_math_ops, "not_equal")
self.sqrt = _getattr(tf_gen_math_ops, "sqrt")
self.rsqrt = _getattr(tf_gen_math_ops, "rsqrt")
self.range = _getattr(tf_gen_math_ops, "range")
self.rank = _getattr(tf_gen_array_ops, "rank")
self.conv3d_backprop_input_v2 = _getattr(tf_gen_nn_ops, "conv3d_backprop_input_v2")
self.fused_batch_norm = _getattr(tf_gen_nn_ops, "fused_batch_norm")
self.transpose = _getattr(tf_gen_array_ops, "transpose")
self.strided_slice_grad = _getattr(tf_gen_array_ops, "strided_slice_grad")
self.bias_add_grad = _getattr(tf_gen_nn_ops, "bias_add_grad")
self.fused_batch_norm_grad = _getattr(tf_gen_nn_ops, "fused_batch_norm_grad")
self.resize_nearest_neighbor_grad = _getattr(tf_gen_image_ops, "resize_nearest_neighbor_grad")
self.resize_bilinear_grad = _getattr(tf_gen_image_ops, "resize_bilinear_grad")
self.resize_bicubic_grad = _getattr(tf_gen_image_ops, "resize_bicubic_grad")
self.TransposeGrad = _getattr(tf_array_grad, "TransposeGrad")
self.MinOrMaxGrad = _getattr(tf_math_grad, "MinOrMaxGrad")
self.fused_batch_norm_grad_v2 = _getattr(tf_gen_nn_ops, "fused_batch_norm_grad_v2")
self.sub = _getattr(tf_gen_math_ops, "sub")
self.mul = _getattr(tf_gen_math_ops, "mul")
self.real_div = _getattr(tf_gen_math_ops, "real_div")
self.neg = _getattr(tf_gen_math_ops, "neg")
self.mat_mul = _getattr(tf_gen_math_ops, "mat_mul")
self.softmax = _getattr(tf_gen_nn_ops, "softmax")
self.concat_offset = _getattr(tf_gen_array_ops, "concat_offset")
self.fused_batch_norm_v2 = _getattr(tf_gen_nn_ops, "fused_batch_norm_v2")
self.max_pool_grad = _getattr(tf_gen_nn_ops, "max_pool_grad")
self.max_pool_grad_with_argmax = _getattr(tf_gen_nn_ops, "max_pool_grad_with_argmax")
self.avg_pool_grad = _getattr(tf_gen_nn_ops, "avg_pool_grad")
self.sqrt_grad = _getattr(tf_gen_math_ops, "sqrt_grad")
self.elu_grad = _getattr(tf_gen_nn_ops, "elu_grad")
self.relu_grad = _getattr(tf_gen_nn_ops, "relu_grad")
self.relu6_grad = _getattr(tf_gen_nn_ops, "relu6_grad")
self.softplus_grad = _getattr(tf_gen_nn_ops, "softplus_grad")
self.rsqrt_grad = _getattr(tf_gen_math_ops, "rsqrt_grad")
self.sigmoid_grad = _getattr(tf_gen_math_ops, "sigmoid_grad")
self.tanh_grad = _getattr(tf_gen_math_ops, "tanh_grad")
self.reciprocal_grad = _getattr(tf_gen_math_ops, "reciprocal_grad")
self.lrn_grad = _getattr(tf_gen_nn_ops, "lrn_grad")
self.mirror_pad_grad = _getattr(tf_gen_array_ops, "mirror_pad_grad")
self.broadcast_gradient_args = _getattr(tf_gen_array_ops, "broadcast_gradient_args")
tf_functions = _Functions()
tf_internal = _InternalFunctions()
DefaultTraceableFunctions = [
TraceableFunction("tf.gradients(xs:T[], ys:T[])", [tf.gradients]),
TraceableFunction("tf.constant(value, dtype, shape, name)", [tf.constant]),
TraceableFunction("tf.placeholder(shape, dtype, name)", [tf.placeholder]),
TraceableFunction("tf.get_variable(shape, dtype, name)", [tf.get_variable]),
TraceableFunction("tf.Variable(initial_value, dtype, name)", [tf.Variable, tf_internal.RefVariable]),
TraceableFunction("tf.assign(ref:T, value:T)", [tf.assign]),
TraceableFunction("tf.concat(values:T[], axis)", [tf.concat]),
TraceableFunction("tf.split(value:T, num_or_size_splits, axis)", [tf.split]),
TraceableFunction("tf.reshape(tensor:T, shape)", [tf.reshape]),
TraceableFunction("tf.squeeze(input:T, axis/squeeze_dims[])", [tf.squeeze]),
TraceableFunction("tf.expand_dims(input:T, axis/dim)", [tf.expand_dims]),
TraceableFunction("tf.transpose(a/x:T, perm)", [tf.transpose, tf_internal.transpose]),
TraceableFunction("tf.pad(tensor:T, paddings, mode, constant_values)", [tf.pad]),
TraceableFunction("tf.add(x:T, y:T)", [tf.add, tf_internal.add]),
TraceableFunction("tf.subtract(x:T, y:T)", [tf.subtract, tf_internal.sub]),
TraceableFunction("tf.multiply(x:T, y:T)", [tf.multiply, tf_internal.mul]),
TraceableFunction("tf.divide(x:T, y:T)", [tf.divide, tf_internal.div, tf_internal.real_div]),
TraceableFunction("tf.floor_div(x:T, y:T)", [tf.floor_div]),
TraceableFunction("tf.mod(x:T, y:T)", [tf.mod]),
TraceableFunction("tf.pow(x:T, y:T)", [tf.pow, tf_internal.pow]),
TraceableFunction("tf.logical_and(x:T, y:T)", [tf.logical_and, tf_internal.logical_and]),
TraceableFunction("tf.logical_or(x:T, y:T)", [tf.logical_or, tf_internal.logical_or]),
TraceableFunction("tf.greater(x:T, y:T)", [tf.greater, tf_internal.greater]),
TraceableFunction("tf.greater_equal(x:T, y:T)", [tf.greater_equal, tf_internal.greater_equal]),
TraceableFunction("tf.less(x:T, y:T)", [tf.less, tf_internal.less]),
TraceableFunction("tf.less_equal(x:T, y:T)", [tf.less_equal, tf_internal.less_equal]),
TraceableFunction("tf.equal(x:T, y:T)", [tf.equal, tf_internal.equal]),
TraceableFunction("tf.not_equal(x:T, y:T)", [tf.not_equal, tf_internal.not_equal]),
TraceableFunction("tf.squared_difference(x:T, y:T)", [tf.squared_difference]),
TraceableFunction("tf.minimum(x:T, y:T)", [tf.minimum]),
TraceableFunction("tf.maximum(x:T, y:T)", [tf.maximum]),
TraceableFunction("tf.reciprocal(x:T)", [tf.reciprocal, tf_internal.reciprocal]),
TraceableFunction("tf.negative(x:T)", [tf.negative, tf_internal.neg]),
TraceableFunction("tf.logical_not(x:T)", [tf.logical_not, tf_internal.logical_not]),
TraceableFunction("tf.abs(x:T)", [tf.abs, tf_internal.abs]),
TraceableFunction("tf.sign(x:T)", [tf.sign, tf_internal.sign]),
TraceableFunction("tf.exp(x:T)", [tf.exp, tf_internal.exp]),
TraceableFunction("tf.log(x:T)", [tf.log, tf_internal.log]),
TraceableFunction("tf.sqrt(x:T)", [tf.sqrt, tf_internal.sqrt]),
TraceableFunction("tf.rsqrt(x:T)", [tf.rsqrt, tf_internal.rsqrt]),
TraceableFunction("tf.square(x:T)", [tf.square, tf_internal.square]),
TraceableFunction("tf.floor(x:T)", [tf.floor, tf_internal.floor]),
TraceableFunction("tf.ceil(x:T)", [tf.ceil, tf_internal.ceil]),
TraceableFunction("tf.round(x:T)", [tf.round, tf_internal.round]),
# TraceableFunction("tf.sin(x:T)", [tf.sin]),
# TraceableFunction("tf.cos(x:T)", [tf.cos]),
TraceableFunction("tf.sinh(x:T)", [tf_functions.sinh]),
TraceableFunction("tf.cosh(x:T)", [tf_functions.cosh]),
TraceableFunction("tf.nn.sigmoid(x:T)", [tf.sigmoid, tf.nn.sigmoid]),
TraceableFunction("tf.nn.tanh(x:T)", [tf.tanh, tf.nn.tanh]),
TraceableFunction("tf.where(condition:T, x:T, y:T)", [tf.where]),
TraceableFunction("tf.reduce_sum(input_tensor:T, axis/reduction_indices[], keepdims/keep_dims)", [tf.reduce_sum]),
TraceableFunction("tf.reduce_mean(input_tensor:T, axis/reduction_indices[], keepdims/keep_dims)", [tf.reduce_mean]),
TraceableFunction("tf.reduce_max(input_tensor:T, axis/reduction_indices[], keepdims/keep_dims)", [tf.reduce_max]),
TraceableFunction("tf.reduce_min(input_tensor:T, axis/reduction_indices[], keepdims/keep_dims)", [tf.reduce_min]),
TraceableFunction("tf.reduce_any(input_tensor:T, axis/reduction_indices[], keepdims/keep_dims)", [tf.reduce_any]),
TraceableFunction("tf.reduce_all(input_tensor:T, axis/reduction_indices[], keepdims/keep_dims)", [tf.reduce_all]),
TraceableFunction("tf.argmax(input:T, axis/dimension)", [tf.argmax]),
TraceableFunction("tf.argmin(input:T, axis/dimension)", [tf.argmin]),
TraceableFunction("tf.matmul(a:T, b:T, transpose_a, transpose_b, adjoint_a?, adjoint_b?)",
[tf.matmul, tf_internal.mat_mul]),
TraceableFunction("tf.add_n(inputs:T[])", [tf.add_n]),
TraceableFunction("tf.nn.elu(features:T)", [tf.nn.elu]),
TraceableFunction("tf.nn.relu(features:T)", [tf.nn.relu]),
TraceableFunction("tf.nn.relu6(features:T)", [tf.nn.relu6]),
TraceableFunction("tf.nn.softsign(features:T)", [tf.nn.softsign]),
TraceableFunction("tf.nn.softplus(features:T)", [tf.nn.softplus]),
TraceableFunction("tf.nn.leaky_relu(features:T, alpha:T)", [tf_functions.leaky_relu]),
TraceableFunction("tf.nn.conv1d(value:T, filters:T, stride[], padding, data_format?)", [tf.nn.conv1d]),
TraceableFunction("tf.nn.conv2d(input:T, filter:T, strides, padding, data_format?, dilations?)", [tf.nn.conv2d]),
TraceableFunction("tf.nn.conv3d(input:T, filter:T, strides, padding, data_format?, dilations?)", [tf.nn.conv3d]),
TraceableFunction("tf.nn.convolution(input:T, filter:T, padding, strides, dilation_rate, data_format?)",
[tf.nn.convolution]),
TraceableFunction("tf.nn.atrous_conv2d(value:T, filters:T, rate, padding)", [tf.nn.atrous_conv2d]),
TraceableFunction("tf.nn.conv2d_transpose(value:T, filter:T, output_shape, strides, padding, data_format?)",
[tf.nn.conv2d_transpose]),
TraceableFunction("tf.nn.conv3d_transpose(value:T, filter:T, output_shape, strides, padding, data_format?)",
[tf.nn.conv3d_transpose]),
TraceableFunction("tf.nn.atrous_conv2d_transpose(value:T, filters:T, output_shape, rate, padding)",
[tf.nn.atrous_conv2d_transpose]),
TraceableFunction("tf.nn.depthwise_conv2d(input:T, filter:T, strides, padding, rate, data_format?)",
[tf.nn.depthwise_conv2d]),
TraceableFunction("tf.nn.depthwise_conv2d_native(input:T, filter:T, strides, padding, data_format?, dilations?)",
[tf.nn.depthwise_conv2d_native]),
TraceableFunction("tf.nn.separable_conv2d(input:T, depthwise_filter:T, pointwise_filter:T, strides, padding, "
"rate, data_format?)", [tf.nn.separable_conv2d]),
TraceableFunction("tf.nn.conv2d_backprop_input(input_sizes, filter:T, out_backprop:T, strides, padding, "
"data_format?, dilations?)", [tf.nn.conv2d_backprop_input]),
TraceableFunction("tf.nn.depthwise_conv2d_native_backprop_input(input_sizes, filter:T, out_backprop:T, strides, "
"padding, data_format?, dilations?)", [tf.nn.depthwise_conv2d_native_backprop_input]),
TraceableFunction("tf.nn.conv2d_backprop_filter(input:T, filter_sizes, out_backprop:T, strides, padding, "
"data_format?, dilations?)", [tf.nn.conv2d_backprop_filter]),
TraceableFunction("tf.nn.conv3d_backprop_filter_v2(input:T, filter_sizes, out_backprop:T, strides, padding, "
"data_format?, dilations?)", [tf.nn.conv3d_backprop_filter_v2]),
TraceableFunction("tf.nn.depthwise_conv2d_native_backprop_filter(input:T, filter_sizes, out_backprop:T, strides, "
"padding, data_format?, dilations?)", [tf.nn.depthwise_conv2d_native_backprop_filter]),
TraceableFunction("tf.nn.max_pool(value:T, ksize, strides, padding, data_format?)", [tf.nn.max_pool]),
TraceableFunction("tf.nn.avg_pool(value:T, ksize, strides, padding, data_format?)", [tf.nn.avg_pool]),
TraceableFunction("tf.nn.max_pool_with_argmax(input:T, ksize, strides, padding, data_format?)",
[tf.nn.max_pool_with_argmax]),
TraceableFunction("tf.nn.bias_add(value:T, bias:T, data_format?)", [tf.nn.bias_add]),
TraceableFunction("tf.nn.lrn(input:T, depth_radius, bias, alpha, beta)", [tf.nn.lrn]),
TraceableFunction("tf.nn.batch_normalization(x:T, mean:T, variance:T, offset:T, scale:T, variance_epsilon)",
[tf.nn.batch_normalization]),
TraceableFunction("tf.nn.fused_batch_norm(x:T, scale:T, offset:T, mean:T?, variance:T?, epsilon, data_format?, "
"is_training)",
[tf.nn.fused_batch_norm, tf_internal.fused_batch_norm, tf_internal.fused_batch_norm_v2]),
TraceableFunction("tf.nn.l2_normalize(x:T, axis/dim[], epsilon)", [tf.nn.l2_normalize]),
TraceableFunction("tf.nn.softmax(logits:T, axis/dim?)",
[tf.nn.softmax, tf.contrib.layers.softmax, tf_internal.softmax]),
TraceableFunction("tf.nn.moments(x:T, axes[], keep_dims)", [tf.nn.moments]),
TraceableFunction("tf.image.resize_images(images:T, size, method, align_corners)", [tf.image.resize_images]),
TraceableFunction("tf.image.resize_bilinear(images:T, size, align_corners)", [tf.image.resize_bilinear]),
TraceableFunction("tf.image.resize_nearest_neighbor(images:T, size, align_corners)",
[tf.image.resize_nearest_neighbor]),
TraceableFunction("tf.image.resize_bicubic(images:T, size, align_corners)", [tf.image.resize_bicubic]),
TraceableFunction("tf.image.resize_area(images:T, size, align_corners)", [tf.image.resize_area]),
TraceableFunction("tf.layers.flatten(inputs:T)", [tf.layers.flatten, tf.contrib.layers.flatten]),
TraceableFunction("tf.clip_by_value(t:T, clip_value_min:T, clip_value_max:T)", [tf.clip_by_value]),
TraceableFunction("tf.slice(input_:T, begin, size)", [tf.slice]),
TraceableFunction("tf.strided_slice(input_:T, begin, end, strides, begin_mask, end_mask, "
"ellipsis_mask, new_axis_mask, shrink_axis_mask, var)", [tf.strided_slice]),
TraceableFunction("tf.stack(values:T[], axis)", [tf.stack]),
TraceableFunction("tf.unstack(value:T, num, axis)", [tf.unstack]),
TraceableFunction("tf.identity(input:T)", [tf.identity]),
TraceableFunction("tf.stop_gradient(input:T)", [tf.stop_gradient]),
TraceableFunction("tf.cast(x:T, dtype)", [tf.cast]),
TraceableFunction("tf.nn.dropout(x:T, keep_prob)", [tf.nn.dropout]),
TraceableFunction("tf.space_to_batch(input:T, paddings, block_size)", [tf.space_to_batch]),
TraceableFunction("tf.space_to_batch_nd(input:T, block_shape, paddings)", [tf.space_to_batch_nd]),
TraceableFunction("tf.batch_to_space(input:T, crops, block_size)", [tf.batch_to_space]),
TraceableFunction("tf.batch_to_space_nd(input:T, block_shape, crops)", [tf.batch_to_space_nd]),
TraceableFunction("tf.zeros(shape, dtype)", [tf.zeros]),
TraceableFunction("tf.ones(shape, dtype)", [tf.ones]),
TraceableFunction("tf.zeros_like(tensor:T, dtype)", [tf.zeros_like]),
TraceableFunction("tf.ones_like(tensor:T, dtype)", [tf.ones_like]),
TraceableFunction("tf.tile(input:T, multiples)", [tf.tile]),
TraceableFunction("tf.dynamic_stitch(indices, data)", [tf.dynamic_stitch]),
TraceableFunction("tf.range(start, limit, delta, dtype)", [tf.range, tf_internal.range]),
TraceableFunction("tf.rank(input:T)", [tf.rank, tf_internal.rank]),
TraceableFunction("tf.shape(input:T)", [tf.shape]),
TraceableFunction("tf.shape_n(input:T[])", [tf.shape_n]),
TraceableFunction("tf.invert_permutation(x:T)", [tf.invert_permutation]),
TraceableFunction("tf.fill(dims, value)", [tf.fill]),
TraceableFunction("tf.random_uniform(shape, minval, maxval, dtype, seed)", [tf.random_uniform]),
TraceableFunction("_tf.conv3d_backprop_input_v2(input_sizes, filter:T, out_backprop:T, strides, padding, "
"data_format?, dilations?)", [tf_internal.conv3d_backprop_input_v2]),
TraceableFunction("_tf.concat_offset(concat_dim, shape)", [tf_internal.concat_offset]),
TraceableFunction("_tf.broadcast_gradient_args(s0, s1)", [tf_internal.broadcast_gradient_args]),
TraceableFunction("_tf.sqrt_grad(y:T, dy:T)", [tf_internal.sqrt_grad]),
TraceableFunction("_tf.rsqrt_grad(y:T, dy:T)", [tf_internal.rsqrt_grad]),
TraceableFunction("_tf.sigmoid_grad(y:T, dy:T)", [tf_internal.sigmoid_grad]),
TraceableFunction("_tf.tanh_grad(y:T, dy:T)", [tf_internal.tanh_grad]),
TraceableFunction("_tf.reciprocal_grad(y:T, dy:T)", [tf_internal.reciprocal_grad]),
TraceableFunction("_tf.strided_slice_grad(shape, begin, end, strides, dy:T, begin_mask, end_mask, "
"ellipsis_mask, new_axis_mask, shrink_axis_mask)", [tf_internal.strided_slice_grad]),
TraceableFunction("_tf.bias_add_grad(out_backprop:T, data_format?)", [tf_internal.bias_add_grad]),
TraceableFunction("_tf.fused_batch_norm_grad(y_backprop:T, x:T, scale:T, reserve_space_1:T?, reserve_space_2:T?, "
"epsilon, data_format?, is_training)",
[tf_internal.fused_batch_norm_grad, tf_internal.fused_batch_norm_grad_v2]),
TraceableFunction("_tf.max_pool_grad(orig_input:T, orig_output:T, grad:T, ksize, strides, padding, data_format?)",
[tf_internal.max_pool_grad]),
TraceableFunction("_tf.max_pool_grad_with_argmax(input:T, grad:T, argmax:T, ksize, strides, padding)",
[tf_internal.max_pool_grad_with_argmax]),
TraceableFunction("_tf.avg_pool_grad(orig_input_shape, grad:T, ksize, strides, padding, data_format?)",
[tf_internal.avg_pool_grad]),
TraceableFunction("_tf.elu_grad(gradients:T, outputs:T)", [tf_internal.elu_grad]),
TraceableFunction("_tf.relu_grad(gradients:T, features:T)", [tf_internal.relu_grad]),
TraceableFunction("_tf.relu6_grad(gradients:T, features:T)", [tf_internal.relu6_grad]),
TraceableFunction("_tf.softplus_grad(gradients:T, features:T)", [tf_internal.softplus_grad]),
TraceableFunction("_tf.lrn_grad(input_grads:T, input_image:T, output_image:T, depth_radius, bias, alpha, beta)",
[tf_internal.lrn_grad]),
TraceableFunction("_tf.TransposeGrad(orig_input/_orig_input_0:T, orig_perm/_orig_input_1, "
"orig_output/_orig_output_0:T, grad:T)", [tf_internal.TransposeGrad]),
TraceableFunction("_tf.MinOrMaxGrad(orig_input/_orig_input_0:T, orig_axis/_orig_input_1[], "
"orig_output/_orig_output_0:T, grad:T)", [tf_internal.MinOrMaxGrad]),
TraceableFunction("_tf.resize_nearest_neighbor_grad(grads:T, size, align_corners)",
[tf_internal.resize_nearest_neighbor_grad]),
TraceableFunction("_tf.resize_bilinear_grad(grads:T, original_image:T, align_corners)",
[tf_internal.resize_bilinear_grad]),
TraceableFunction("_tf.resize_bicubic_grad(grads:T, original_image:T, align_corners)",
[tf_internal.resize_bicubic_grad]),
TraceableFunction("_tf.mirror_pad_grad(input:T, paddings, mode)", [tf_internal.mirror_pad_grad]),
]
```
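To illustrate how the proto strings above are interpreted, a small hedged example of `parse_op_proto`:

```python
# Sketch only: what a proto string from DefaultTraceableFunctions parses into.
proto = parse_op_proto("tf.nn.bias_add(value:T, bias:T, data_format?)")

print(proto.op_name)                                                      # tf.nn.bias_add
print([a.primary_arg_name for a in proto.list_tensor_arg_protos()])       # ['value', 'bias']
print([(a.primary_arg_name, a.is_optional) for a in proto.list_nontensor_arg_protos()])
# [('data_format', True)]
```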
#### File: optimization/tensorflow/tf_data_format_optimizer.py
```python
from __future__ import division, print_function, absolute_import
import typing
from nnef_tools.conversion.conversion_info import ConversionInfo
from nnef_tools.io.tensorflow.tf_graph import TFGraph, TFOperation, TFTensor
from nnef_tools.optimization.data_format_optimizer import *
class TFDataFormatOptimizationDriver(DataFormatOptimizationDriver):
@property
def graph_type(self):
return TFGraph
@property
def tensor_type(self):
return TFTensor
@property
def op_type(self):
return TFOperation
@property
def conv_grad_filter_op_names(self):
return ["tf.nn.conv2d_backprop_filter",
"tf.nn.conv3d_backprop_filter_v2",
"tf.nn.depthwise_conv2d_native_backprop_filter"]
@property
def squeeze_op_name(self):
return "tf.squeeze"
@property
def unsqueeze_op_name(self):
return "tf.expand_dims"
@property
def transpose_op_name(self):
return "tf.transpose"
@property
def reshape_op_name(self):
return "tf.reshape"
@property
def copy_op_name(self):
return "tf.identity"
def get_axes_from_squeeze(self, squeeze):
assert isinstance(squeeze.attribs["axis"], list)
return squeeze.attribs["axis"]
def set_axes_on_squeeze(self, squeeze, axes):
squeeze.attribs["axis"] = axes
def get_axes_from_unsqueeze(self, unsqueeze):
assert isinstance(unsqueeze.attribs["axis"], int)
return [unsqueeze.attribs["axis"]]
def get_axes_from_transpose(self, transpose):
assert isinstance(transpose.attribs["perm"], list)
return transpose.attribs["perm"]
def set_axes_on_transpose(self, transpose, axes):
transpose.attribs["perm"] = axes
def get_shape_from_reshape(self, reshape):
assert isinstance(reshape.attribs["shape"], list)
return reshape.attribs["shape"]
def create_tensor(self, graph, name, shape, dtype):
return TFTensor(graph=graph, name=name, shape=shape, dtype=dtype)
def create_transpose_op(self, graph, input, output, axes):
return TFOperation(graph=graph, name="tf.transpose", inputs=input, attribs=dict(perm=axes), outputs=output)
def create_copy_op(self, graph, input, output):
return TFOperation(graph=graph, name="tf.identity", inputs=input, outputs=output)
def generate_missing_names(self, graph):
graph.generate_missing_names()
def get_input_of_transform(self, transform):
return transform.input
def transpose_operation_default(transposer, graph, op, perm):
# type: (Transposer, TFGraph, TFOperation, typing.List[int])->None
if "begin" in op.attribs:
op.attribs["begin"] = transposer.apply_permutation(op.attribs["begin"], perm)
if "size" in op.attribs:
op.attribs["size"] = transposer.apply_permutation(op.attribs["size"], perm)
if "paddings" in op.attribs:
op.attribs["paddings"] = transposer.apply_permutation(op.attribs["paddings"], perm)
if "axis" in op.attribs:
if isinstance(op.attribs["axis"], (list, tuple)):
op.attribs["axis"] = transposer.apply_permutation_to_axes(op.attribs["axis"], perm)
else:
op.attribs["axis"] = transposer.apply_permutation_to_axis(op.attribs["axis"], perm)
if "axes" in op.attribs:
op.attribs["axes"] = transposer.apply_permutation_to_axes(op.attribs["axes"], perm)
def _get_default_transposable_ops():
unary_ops = ['tf.identity', 'tf.negative', 'tf.reciprocal', 'tf.exp', 'tf.log', 'tf.abs', 'tf.sign',
'tf.logical_not', 'tf.floor', 'tf.ceil', 'tf.round', 'tf.square', 'tf.sqrt', 'tf.rsqrt']
binary_ops = ['tf.add', 'tf.subtract', 'tf.multiply', 'tf.divide', 'tf.pow', 'tf.less', 'tf.greater',
'tf.less_equal', 'tf.greater_equal', 'tf.equal', 'tf.not_equal', 'tf.logical_and', 'tf.logical_or',
'tf.minimum', 'tf.maximum']
activation_ops = ['tf.nn.sigmoid', 'tf.nn.relu', 'tf.nn.relu6', 'tf.nn.leaky_relu', 'tf.nn.elu', 'tf.nn.tanh',
'tf.nn.softplus', 'tf.nn.softsign']
reduce_ops = ['tf.reduce_sum', 'tf.reduce_mean', 'tf.reduce_max', 'tf.reduce_min', 'tf.reduce_any', 'tf.reduce_all']
skippable_norm_ops = ['tf.nn.l2_normalize']
skippable_other_ops = ['tf.where', 'tf.concat', 'tf.stack', 'tf.unstack', 'tf.add_n', 'tf.nn.moments',
'tf.pad', '_tf.mirror_pad_grad', 'tf.clip_by_value', 'tf.split', 'tf.nn.softmax', "tf.slice"]
op_names = (unary_ops + binary_ops + activation_ops + reduce_ops + skippable_norm_ops
+ skippable_other_ops)
ops = [TransposableOperation(name=name, dg_transpose=transpose_operation_default) for name in op_names]
return ops
_DefaultTransposableOps = _get_default_transposable_ops() # type: typing.List[TransposableOperation]
def optimize(g, # type: TFGraph
remove_unneeded_copies=False, # type: bool
remove_inverse_transposes=False, # type: bool
merge_transforms_into_variables=False, # type: bool
merge_transforms_into_constants=False, # type: bool
custom_transposable_ops=None, # type: typing.Optional[typing.List[TransposableOperation]]
io_transform=None, # type:typing.Optional[TrafoOrTrafoDictType]
verbose=False, # type: bool
rename_tensors=False, # type: bool
):
transposable_ops = _DefaultTransposableOps + (custom_transposable_ops if custom_transposable_ops else [])
return optimize_impl(g=g,
driver=TFDataFormatOptimizationDriver(),
remove_unneeded_copies=remove_unneeded_copies,
remove_inverse_transposes=remove_inverse_transposes,
merge_transforms_into_variables=merge_transforms_into_variables,
merge_transforms_into_constants=merge_transforms_into_constants,
transposable_ops=transposable_ops,
io_transform=io_transform,
verbose=verbose,
rename_tensors=rename_tensors)
class Optimizer(object):
def __init__(self,
io_transform=None, # type:typing.Optional[TrafoOrTrafoDictType]
custom_transposable_ops=None, # type: typing.Optional[typing.List[TransposableOperation]]
merge_transforms_into_variables=False, # type: bool
):
# type: (...)->None
self._io_transform = io_transform
self._custom_transposable_ops = custom_transposable_ops
self._merge_transforms_into_variables = merge_transforms_into_variables
def __call__(self, g):
# type: (TFGraph)->ConversionInfo
transposable_ops = (_DefaultTransposableOps +
(self._custom_transposable_ops if self._custom_transposable_ops else []))
return optimize_impl(g=g,
driver=TFDataFormatOptimizationDriver(),
remove_unneeded_copies=True,
remove_inverse_transposes=True,
merge_transforms_into_variables=self._merge_transforms_into_variables,
merge_transforms_into_constants=True,
transposable_ops=transposable_ops,
io_transform=self._io_transform,
verbose=False,
rename_tensors=True)
```
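A hedged usage sketch of the optimizer defined above; `tf_graph` is assumed to be a `TFGraph` produced by one of the converters in this package:

```python
# Sketch only: running the TF data-format optimizer on a converted graph.
# 'tf_graph' is assumed to be a TFGraph produced elsewhere in the package.
optimizer = Optimizer(io_transform=None, merge_transforms_into_variables=True)
conversion_info = optimizer(tf_graph)  # removes inverse transposes, merges transforms, renames tensors

# Alternatively, the functional entry point exposes the individual switches:
optimize(tf_graph,
         remove_unneeded_copies=True,
         remove_inverse_transposes=True,
         rename_tensors=True)
```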
#### File: tests/activation/tflite_layer_test_cases.py
```python
from __future__ import division, print_function, absolute_import
import os
import shutil
import unittest
import tensorflow as tf
from tests.activation.tflite_test_runner import TFLiteTestRunner
if not os.path.exists('nnef_tools') and os.path.exists('../../nnef_tools'):
os.chdir('../..')
class TFLiteLayerTestCases(TFLiteTestRunner):
@classmethod
def setUpClass(cls):
if os.path.exists("out"):
shutil.rmtree("out")
@staticmethod
def to_tflite(fun):
tf.reset_default_graph()
input, output = fun()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
converter = tf.contrib.lite.TFLiteConverter.from_session(sess, [input], [output])
tflite_model = converter.convert()
filename = "out/orig/{}.tflite".format(fun.__name__)
dir = os.path.dirname(filename)
if not os.path.exists(dir):
os.makedirs(dir)
with open(filename, "wb") as f:
f.write(tflite_model)
return filename
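    # Note: to_tflite builds the given network, converts the live session to a
    # .tflite flatbuffer via TFLiteConverter.from_session, writes it under
    # out/orig/<network_name>.tflite and returns the path for _test_model.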
@staticmethod
def conv_network():
input_ = tf.placeholder(tf.float32, shape=[1, 64, 64, 3], name="input")
filter_ = tf.get_variable(dtype=tf.float32, shape=[4, 4, 3, 16], name="filter")
return input_, tf.nn.conv2d(input_, filter_, [1, 1, 1, 1], 'VALID')
def test_conv(self):
self._test_model(self.to_tflite(self.conv_network))
@staticmethod
def conv_relu_network():
input_ = tf.placeholder(tf.float32, shape=[1, 64, 64, 3], name="input")
filter_ = tf.get_variable(dtype=tf.float32, shape=[4, 4, 3, 16], name="filter")
conv = tf.nn.conv2d(input_, filter_, [1, 1, 1, 1], 'VALID')
relu = tf.nn.relu(conv)
return input_, relu
def test_conv_relu(self):
self._test_model(self.to_tflite(self.conv_relu_network))
@staticmethod
def depthwise_conv_relu_network():
input_ = tf.placeholder(tf.float32, shape=[1, 64, 64, 3], name="input")
filter_ = tf.get_variable(dtype=tf.float32, shape=[4, 4, 3, 2], name="filter")
conv = tf.nn.depthwise_conv2d(input_, filter_, [1, 1, 1, 1], 'VALID')
relu = tf.nn.relu(conv)
return input_, relu
def test_depthwise_conv_relu(self):
self._test_model(self.to_tflite(self.depthwise_conv_relu_network))
@staticmethod
def max_pool_network():
input_ = tf.placeholder(tf.float32, shape=[1, 64, 64, 3], name="input")
return input_, tf.nn.relu(tf.nn.max_pool(input_, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID'))
def test_max_pool(self):
self._test_model(self.to_tflite(self.max_pool_network))
@staticmethod
def avg_pool_network():
input_ = tf.placeholder(tf.float32, shape=[1, 64, 64, 3], name="input")
return input_, tf.nn.relu(tf.nn.avg_pool(input_, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID'))
def test_avg_pool(self):
self._test_model(self.to_tflite(self.avg_pool_network))
@staticmethod
def concat_network():
input_ = tf.placeholder(tf.float32, shape=[1, 64, 64, 3], name="input")
return input_, tf.concat([input_, input_ + input_], axis=2)
def test_concat(self):
self._test_model(self.to_tflite(self.concat_network))
@staticmethod
def softmax_network():
input_ = tf.placeholder(tf.float32, shape=[1, 64, 64, 3], name="input")
return input_, tf.nn.softmax(input_)
def test_softmax(self):
self._test_model(self.to_tflite(self.softmax_network))
@staticmethod
def network1():
input_ = tf.placeholder(tf.float32, shape=[1, 64, 64, 3], name="input")
filter_ = tf.get_variable(dtype=tf.float32, shape=[4, 4, 3, 16], name="filter")
conv = tf.nn.conv2d(input_, filter_, [1, 1, 1, 1], 'VALID')
bias = tf.constant(dtype=tf.float32, shape=[16], value=4.5, name="bias")
bias_add = tf.nn.bias_add(conv, bias)
relu = tf.nn.relu(bias_add)
max = tf.nn.max_pool(relu, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID')
filter2_ = tf.get_variable(dtype=tf.float32, shape=[4, 4, 16, 16], name="filter2")
conv2 = tf.nn.conv2d(max, filter2_, [1, 1, 1, 1], 'VALID')
bias_add2 = tf.nn.bias_add(conv2, bias)
relu2 = tf.nn.relu(bias_add2)
return input_, relu2
def test_network1(self):
self._test_model(self.to_tflite(self.network1))
@staticmethod
def abs_network():
input_ = tf.placeholder(tf.float32, shape=[1, 64, 64, 3], name="input")
return input_, tf.abs(input_)
@unittest.skip('Not yet supported in tflite')
def test_abs(self):
self._test_model(self.to_tflite(self.abs_network))
@staticmethod
def add_network():
input_ = tf.placeholder(tf.float32, shape=[1, 64, 64, 3], name="input")
return input_, tf.nn.relu(tf.add(input_, input_))
def test_add(self):
self._test_model(self.to_tflite(self.add_network))
@staticmethod
def subtract_network():
input_ = tf.placeholder(tf.float32, shape=[1, 64, 64, 3], name="input")
return input_, tf.nn.relu(tf.subtract(input_, input_))
def test_subtract(self):
self._test_model(self.to_tflite(self.subtract_network))
@staticmethod
def multiply_network():
input_ = tf.placeholder(tf.float32, shape=[1, 64, 64, 3], name="input")
return input_, tf.nn.relu(tf.multiply(input_, input_))
def test_multiply(self):
self._test_model(self.to_tflite(self.multiply_network))
@staticmethod
def add_n_network():
input_ = tf.placeholder(tf.float32, shape=[1, 64, 64, 3], name="input")
return input_, tf.add_n([input_, input_, input_])
@unittest.skip('Not yet supported in tflite')
def test_add_n(self):
self._test_model(self.to_tflite(self.add_n_network))
@staticmethod
def argmax_network():
input_ = tf.placeholder(tf.float32, shape=[1, 64, 64, 3], name="input")
output_ = tf.argmax(input_, axis=3)
return input_, output_
def test_argmax(self):
self._test_model(self.to_tflite(self.argmax_network))
@staticmethod
def argmin_network():
input_ = tf.placeholder(tf.float32, shape=[1, 64, 64, 3], name="input")
return input_, tf.argmin(input_, axis=3)
def test_argmin(self):
self._test_model(self.to_tflite(self.argmin_network))
@staticmethod
def deconv_network():
input_ = tf.placeholder(tf.float32, shape=[1, 64, 64, 16], name="input")
filter_ = tf.get_variable(dtype=tf.float32, shape=[4, 4, 3, 16], name="filter")
return input_, tf.nn.relu(tf.nn.conv2d_transpose(value=input_,
filter=filter_,
output_shape=[1, 64, 64, 3],
strides=[1, 1, 1, 1],
padding='SAME'))
def test_deconv(self):
self._test_model(self.to_tflite(self.deconv_network))
@staticmethod
def elu_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.nn.elu(input_)
@unittest.skip('Not yet supported in tflite')
def test_elu(self):
self._test_model(self.to_tflite(self.elu_network))
@staticmethod
def equal_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.equal(input_, input_)
def test_equal(self):
self._test_model(self.to_tflite(self.equal_network))
@staticmethod
def exp_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.exp(input_)
def test_exp(self):
self._test_model(self.to_tflite(self.exp_network), max_val=1.0)
@staticmethod
def fill_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.fill([1, 2, 2, 3], 0.6) + input_
def test_fill(self):
self._test_model(self.to_tflite(self.fill_network))
@staticmethod
def floor_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.floor(input_)
def test_floor(self):
self._test_model(self.to_tflite(self.floor_network))
@staticmethod
def ceil_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.ceil(input_)
@unittest.skip('Not yet supported in tflite')
def test_ceil(self):
self._test_model(self.to_tflite(self.ceil_network))
@staticmethod
def greater_equal_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.greater_equal(input_, input_)
def test_greater_equal(self):
self._test_model(self.to_tflite(self.greater_equal_network))
@staticmethod
def greater_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.greater(input_, input_)
def test_greater(self):
self._test_model(self.to_tflite(self.greater_network))
@staticmethod
def l2_normalize_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.nn.relu(tf.nn.l2_normalize(input_, axis=-1))
def test_l2_normalize(self):
self._test_model(self.to_tflite(self.l2_normalize_network))
@staticmethod
def matmul_network():
input_ = tf.placeholder(tf.float32, shape=[2, 5], name="input")
filter_ = tf.get_variable(dtype=tf.float32, shape=[5, 6], name="filter")
return input_, tf.matmul(input_, filter_) + tf.nn.relu(tf.matmul(input_, filter_))
def test_matmul(self):
self._test_model(self.to_tflite(self.matmul_network))
@staticmethod
def leaky_relu_network():
input_ = tf.placeholder(tf.float32, shape=[2, 5], name="input")
return input_, tf.nn.leaky_relu(input_, alpha=0.3)
def test_leaky_relu(self):
self._test_model(self.to_tflite(self.leaky_relu_network))
@staticmethod
def less_equal_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.less_equal(input_, input_)
def test_less_equal(self):
self._test_model(self.to_tflite(self.less_equal_network))
@staticmethod
def less_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.less(input_, input_)
def test_less(self):
self._test_model(self.to_tflite(self.less_network))
@staticmethod
def lrn_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.nn.lrn(input_)
def test_lrn(self):
self._test_model(self.to_tflite(self.lrn_network))
@staticmethod
def logical_or_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.logical_or(input_ < 128, input_ > 200)
def test_logical_or(self):
self._test_model(self.to_tflite(self.logical_or_network))
@staticmethod
def logical_and_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.logical_and(input_ < 128, input_ > 200)
def test_logical_and(self):
self._test_model(self.to_tflite(self.logical_and_network))
@staticmethod
def logical_not_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.logical_not(input_ < 128)
def test_logical_not(self):
self._test_model(self.to_tflite(self.logical_not_network))
@staticmethod
def log_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.log(input_)
def test_log(self):
self._test_model(self.to_tflite(self.log_network), max_val=1.0)
@staticmethod
def negative_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.negative(input_)
def test_negative(self):
self._test_model(self.to_tflite(self.negative_network), max_val=1.0)
@staticmethod
def maximum_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.maximum(input_, input_)
def test_maximum(self):
self._test_model(self.to_tflite(self.maximum_network))
@staticmethod
def minimum_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.minimum(input_, input_)
def test_minimum(self):
self._test_model(self.to_tflite(self.minimum_network))
@staticmethod
def divide_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.nn.relu(tf.divide(input_, input_))
def test_divide(self):
self._test_model(self.to_tflite(self.divide_network))
@staticmethod
def expand_dims_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.expand_dims(input_, axis=0)
def test_expand_dims(self):
self._test_model(self.to_tflite(self.expand_dims_network))
@staticmethod
def not_equal_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.not_equal(input_, input_)
def test_not_equal(self):
self._test_model(self.to_tflite(self.not_equal_network))
@staticmethod
def stack_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2], name="input")
return input_, tf.stack([input_, input_], axis=1)
def test_stack(self):
self._test_model(self.to_tflite(self.stack_network))
@staticmethod
def unstack_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2], name="input")
return input_, tf.unstack(input_, axis=1)[0]
def test_unstack(self):
self._test_model(self.to_tflite(self.unstack_network))
@staticmethod
def pow_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.pow(input_, input_)
def test_pow(self):
self._test_model(self.to_tflite(self.pow_network), max_val=1.0)
@staticmethod
def prelu_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.nn.leaky_relu(input_, input_)
def test_prelu(self):
self._test_model(self.to_tflite(self.prelu_network), max_val=1.0)
@staticmethod
def mean_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.reduce_mean(input_, axis=1)
def test_mean(self):
self._test_model(self.to_tflite(self.mean_network))
@staticmethod
def sum_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.reduce_sum(input_, axis=1)
def test_sum(self):
self._test_model(self.to_tflite(self.sum_network))
@staticmethod
def max_reduce_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.reduce_max(input_, axis=1)
def test_max_reduce(self):
self._test_model(self.to_tflite(self.max_reduce_network))
@staticmethod
def min_reduce_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.reduce_min(input_, axis=1)
def test_min_reduce(self):
self._test_model(self.to_tflite(self.min_reduce_network))
@staticmethod
def relu6_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.nn.relu6(input_)
def test_relu6(self):
self._test_model(self.to_tflite(self.relu6_network))
@staticmethod
def resize_nearest_neighbor_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.image.resize_nearest_neighbor(input_, (4, 4), align_corners=False)
@unittest.skip('Not yet supported in tflite')
def test_resize_nearest_neighbor(self):
self._test_model(self.to_tflite(self.resize_nearest_neighbor_network))
@staticmethod
def where_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.where(input_ < 128, input_, -input_)
def test_where(self):
self._test_model(self.to_tflite(self.where_network))
@staticmethod
def slice_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.slice(input_, [0, 1, 1, 0], [1, 1, 1, 2])
def test_slice(self):
self._test_model(self.to_tflite(self.slice_network))
@staticmethod
def split_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 10, 3], name="input")
return input_, tf.split(input_, 5, 2)[2]
def test_split(self):
self._test_model(self.to_tflite(self.split_network))
@staticmethod
def split_v_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 10, 3], name="input")
return input_, tf.split(input_, [5, 3, 1, 1], 2)[2]
@unittest.skip('Not yet supported in tflite')
def test_split_v(self):
self._test_model(self.to_tflite(self.split_v_network))
@staticmethod
def sqrt_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.sqrt(input_)
def test_sqrt(self):
self._test_model(self.to_tflite(self.sqrt_network))
@staticmethod
def square_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.square(input_)
def test_square(self):
self._test_model(self.to_tflite(self.square_network))
@staticmethod
def transpose_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.transpose(input_, perm=[0, 3, 1, 2])
def test_transpose(self):
self._test_model(self.to_tflite(self.transpose_network))
@staticmethod
def zeros_like_network():
input_ = tf.placeholder(tf.float32, shape=[1, 2, 2, 3], name="input")
return input_, tf.zeros_like(input_) + input_
@unittest.skip('Not yet supported in tflite')
def test_zeros_like(self):
self._test_model(self.to_tflite(self.zeros_like_network))
if __name__ == '__main__':
unittest.main()
```
#### File: tests/activation/tf_pb_network_test_cases.py
```python
from __future__ import division, print_function, absolute_import
import os
import shutil
import unittest
import numpy as np
import tensorflow as tf
from nnef_tools import convert
from tests.activation.file_downloader import download_and_untar_once
if not os.path.exists('nnef_tools') and os.path.exists('../../nnef_tools'):
os.chdir('../..')
# This test only tests the outputs of the networks
def load_graph_from_pb(frozen_graph_filename):
import tensorflow as tf
with tf.gfile.GFile(frozen_graph_filename, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
def get_placeholders():
return [tensor
for op in tf.get_default_graph().get_operations()
if 'Placeholder' in op.node_def.op
for tensor in op.values()]
def get_tensors_with_no_consumers():
return [tensor
for op in tf.get_default_graph().get_operations()
if not any(tensor.consumers() for tensor in op.values())
for tensor in op.values()]
# From: https://www.tensorflow.org/lite/guide/hosted_models
class TFPbNetworkTestCases(unittest.TestCase):
@classmethod
def setUpClass(cls):
if 'TF_CPP_MIN_LOG_LEVEL' not in os.environ:
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
np.random.seed(0)
tf.set_random_seed(0)
if os.path.exists('out'):
shutil.rmtree('out')
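# _test_network round-trips a frozen graph: convert the .pb to NNEF, convert
# that back to a TensorFlow protobuf, then run both graphs on the same random
# input and require np.allclose on every output tensor.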
def _test_network(self, path, size):
network = os.path.basename(path.rsplit('.', 1)[0])
input_shape = "(float32, [2, {size}, {size}, 3])".format(size=size)
command = """
./nnef_tools/convert.py --input-framework=tensorflow-pb \\
--input-model={} \\
--input-shape="{}" \\
--output-framework=nnef \\
--output-directory=out/nnef/{} \\
--compress""".format(path, input_shape, network)
print(command)
convert.convert_using_command(command)
command = """
./nnef_tools/convert.py --input-framework=nnef \\
--input-model=out/nnef/{}/model.nnef.tgz \\
--output-framework=tensorflow-pb \\
--output-directory=out/tensorflow-pb/{}""".format(network, network)
print(command)
convert.convert_using_command(command)
tf.reset_default_graph()
load_graph_from_pb(path)
[input] = get_placeholders()
outputs = get_tensors_with_no_consumers()
feed = np.random.random([2, size, size, 3])
with tf.Session() as sess:
activations = sess.run(outputs, feed_dict={input: feed})
tf.reset_default_graph()
load_graph_from_pb('out/tensorflow-pb/{}/model.pb'.format(network))
[input] = get_placeholders()
outputs = get_tensors_with_no_consumers()
with tf.Session() as sess:
activations2 = sess.run(outputs, feed_dict={input: feed})
for a1, a2 in zip(activations, activations2):
self.assertTrue(np.allclose(a1, a2))
def test_densenet(self):
path = download_and_untar_once(
url="http://download.tensorflow.org/models/tflite/model_zoo/upload_20180427/densenet_2018_04_27.tgz",
member="*.pb",
path="_models/tensorflow-pb/densenet_2018_04_27.pb")
self._test_network(path, 224)
def test_squeezenet(self):
path = download_and_untar_once(
url="http://download.tensorflow.org/models/tflite/model_zoo/upload_20180427/squeezenet_2018_04_27.tgz",
member="*.pb",
path="_models/tensorflow-pb/squeezenet_2018_04_27.pb")
self._test_network(path, 224)
def test_nasnet_mobile(self):
path = download_and_untar_once(
url="http://download.tensorflow.org/models/tflite/model_zoo/upload_20180427/nasnet_mobile_2018_04_27.tgz",
member="*.pb",
path="_models/tensorflow-pb/nasnet_mobile_2018_04_27.pb")
self._test_network(path, 224)
def test_nasnet_large(self):
path = download_and_untar_once(
url="http://download.tensorflow.org/models/tflite/model_zoo/upload_20180427/nasnet_large_2018_04_27.tgz",
member="*.pb",
path="_models/tensorflow-pb/nasnet_large_2018_04_27.pb")
self._test_network(path, 331)
def test_inception_v3(self):
path = download_and_untar_once(
url="http://download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v3_2018_04_27.tgz",
member="*.pb",
path="_models/tensorflow-pb/inception_v3_2018_04_27.pb")
self._test_network(path, 299)
def test_inception_v4(self):
path = download_and_untar_once(
url="http://download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v4_2018_04_27.tgz",
member="*.pb",
path="_models/tensorflow-pb/inception_v4_2018_04_27.pb")
self._test_network(path, 299)
def test_inception_resnet_v2(self):
path = download_and_untar_once(
url="http://download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_resnet_v2_2018_04_27.tgz",
member="*.pb",
path="_models/tensorflow-pb/inception_resnet_v2_2018_04_27.pb")
self._test_network(path, 299)
def test_mobilenet_v1_0_25_128(self):
path = download_and_untar_once(
url="http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_128.tgz",
member="*.pb",
path="_models/tensorflow-pb/mobilenet_v1_0.25_128.pb")
self._test_network(path, 128)
def test_mobilenet_v1_1_0_128(self):
path = download_and_untar_once(
url="http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_128.tgz",
member="*.pb",
path="_models/tensorflow-pb/mobilenet_v1_1.0_128.pb")
self._test_network(path, 128)
def test_mobilenet_v2_1_0_224(self):
path = download_and_untar_once(
url="http://download.tensorflow.org/models/tflite_11_05_08/mobilenet_v2_1.0_224.tgz",
member="*.pb",
path="_models/tensorflow-pb/mobilenet_v2_1.0_224.pb")
self._test_network(path, 224)
if __name__ == '__main__':
unittest.main()
```
#### File: tests/activation/tf_pb_test_runner.py
```python
from __future__ import division, print_function, absolute_import
import os
import shutil
from nnef_tools.activation_export import activation_test
from nnef_tools.conversion import conversion_info
from nnef_tools.conversion.tensorflow import tf_pb_all_in_one
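# test_tf_pb performs a double round trip: TF pb -> NNEF (exporting
# activations), NNEF -> TF pb, and the resulting pb -> NNEF again; the two
# sets of exported activations are then compared through the composed
# conversion-info mappings.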
def test_tf_pb(filename, source_shapes, feed_dict, prefer_nhwc, network_name, export_io_only, delete_after_each):
dir = os.path.join('out', network_name + ('_nhwc' if prefer_nhwc else '_nchw'))
if os.path.exists(dir):
shutil.rmtree(dir)
tf_pb_all_in_one.convert_tf_pb_to_nnef(
file_name=filename,
output_directory=os.path.join(dir, 'nnef'),
network_name=network_name,
optimization_level=tf_pb_all_in_one.OptimizationLevel.FULL,
io_transform=tf_pb_all_in_one.IOTransform.SMART_TF_NHWC_TO_NCHW,
source_shapes=source_shapes,
activation_export_feed_dict=feed_dict,
activation_export_io_only=export_io_only)
conv_info1 = conversion_info.load(os.path.join(dir, 'nnef', 'conversion.json'))
tf_pb_all_in_one.convert_nnef_to_tf_pb(
nnef_tgz_or_dir_path=os.path.join(dir, 'nnef', network_name + '_nnef'),
output_directory=os.path.join(dir, 'tf_pb'),
optimization_level=tf_pb_all_in_one.OptimizationLevel.FULL,
io_transform=(tf_pb_all_in_one.IOTransform.SMART_NCHW_TO_TF_NHWC
if prefer_nhwc else tf_pb_all_in_one.IOTransform.SMART_NCHW_TO_TF_NCHW),
prefer_nhwc=prefer_nhwc)
conv_info2 = conversion_info.load(os.path.join(dir, 'tf_pb', 'conversion.json'))
feed_dict2 = (activation_test.transform_feed_dict(feed_dict=feed_dict,
conv_info=conversion_info.compose(conv_info1, conv_info2))
if feed_dict is not None else None)
tf_pb_all_in_one.convert_tf_pb_to_nnef(
file_name=os.path.join(dir, 'tf_pb', 'graph.pb'),
output_directory=os.path.join(dir, 'nnef2'),
network_name=network_name,
optimization_level=tf_pb_all_in_one.OptimizationLevel.FULL,
io_transform=(tf_pb_all_in_one.IOTransform.SMART_TF_NHWC_TO_NCHW
if prefer_nhwc else tf_pb_all_in_one.IOTransform.SMART_TF_NCHW_TO_NCHW),
activation_export_feed_dict=feed_dict2,
activation_export_io_only=export_io_only)
conv_info3 = conversion_info.load(os.path.join(dir, 'nnef2', 'conversion.json'))
if feed_dict is not None:
activation_test.compare_activation_dirs(
os.path.join(dir, 'nnef', 'activations'),
os.path.join(dir, 'nnef2', 'activations'),
conv_info=conversion_info.compose(conv_info2, conv_info3),
verbose=False,
allowed_bad_pixel_ratio=0.01 if not prefer_nhwc and not export_io_only else 0.0)
if delete_after_each:
shutil.rmtree(dir)
```
#### File: tests/functional/shape_inference_test.py
```python
from __future__ import division, print_function, absolute_import
from nnef_tools.shape_inference import shape_inference as infer
import unittest
class TestShapePropagation(unittest.TestCase):
def test_singleton(self):
self.assertEqual(infer.singleton(0), [])
self.assertEqual(infer.singleton(1), [1])
self.assertEqual(infer.singleton(4), [1, 1, 1, 1])
def test_copy(self):
a = [1, 2, 3]
b = infer.copy(a)
self.assertEqual(a, b)
self.assertIsNot(a, b)
def test_elementwise(self):
a = [1, 2, 3]
b = [1, 2, 3]
c = infer.elementwise([a, b], infer.Broadcast.NONE)
self.assertEqual(c, a)
self.assertIsNot(c, a)
self.assertIsNot(c, b)
a = [1, 2, 3]
b = [3]
ab = [a, b]
c = infer.elementwise([a, b], infer.Broadcast.FROM_RIGHT)
self.assertEqual([1, 2, 3], c)
self.assertEqual([1, 2, 3], a)
self.assertEqual([3], b)
self.assertEqual([a, b], ab)
with self.assertRaises(AssertionError):
infer.elementwise([[1, 2], [1]], infer.Broadcast.NONE)
with self.assertRaises(AssertionError):
infer.elementwise([[1, 2], [1, 3]], infer.Broadcast.NONE)
self.assertEqual([2, 4, 3], infer.elementwise([[1, 4, 1], [2, 1, 3]], infer.Broadcast.SAME_RANK))
with self.assertRaises(AssertionError):
infer.elementwise([[1, 2], [1]], infer.Broadcast.SAME_RANK)
self.assertEqual([4, 2, 3], infer.elementwise([[1, 2, 3], [4, 2]], infer.Broadcast.FROM_LEFT))
with self.assertRaises(AssertionError):
infer.elementwise([[2, 3], [3]], infer.Broadcast.FROM_LEFT)
self.assertEqual([1, 2, 3], infer.elementwise([[1, 1, 3], [2, 3]], infer.Broadcast.FROM_RIGHT))
with self.assertRaises(AssertionError):
infer.elementwise([[2, 3], [2]], infer.Broadcast.FROM_RIGHT)
def test_sliding_window(self):
with self.assertRaises(AssertionError):
infer.sliding_window(input=[1], filter=[], padding=[(1, 1)], stride=[1], dilation=[1])
with self.assertRaises(AssertionError):
infer.sliding_window(input=[1], filter=[1], padding=[], stride=[1], dilation=[1])
with self.assertRaises(AssertionError):
infer.sliding_window(input=[1], filter=[1], padding=[(1, 1)], stride=[], dilation=[1])
with self.assertRaises(AssertionError):
infer.sliding_window(input=[1], filter=[1], padding=[(1, 1)], stride=[1], dilation=[])
with self.assertRaises(AssertionError):
infer.sliding_window(input=[], filter=[1], padding=[(1, 1)], stride=[1], dilation=[1])
self.assertEqual([10, 30, 30, 3], infer.sliding_window(input=[10, 32, 32, 3],
filter=[1, 3, 3, 1],
padding=[(0, 0)] * 4,
stride=[1, 1, 1, 1],
dilation=[1, 1, 1, 1]))
self.assertEqual([10, 32, 32, 3], infer.sliding_window(input=[10, 30, 30, 3],
filter=[1, 3, 3, 1],
padding=[(0, 0)] * 4,
stride=[1, 1, 1, 1],
dilation=[1, 1, 1, 1],
upscale=True))
self.assertEqual([10, 28, 26, 3], infer.sliding_window(input=[10, 32, 32, 3],
filter=[1, 3, 3, 1],
padding=[(0, 0)] * 4,
stride=[1, 1, 1, 1],
dilation=[1, 2, 3, 1]))
self.assertEqual([10, 15, 32, 3], infer.sliding_window(input=[10, 32, 32, 3],
filter=[1, 3, 1, 1],
padding=[(0, 0)] * 4,
stride=[1, 2, 1, 1],
dilation=[1, 1, 1, 1]))
self.assertEqual([10, 16, 32, 3], infer.sliding_window(input=[10, 32, 32, 3],
filter=[1, 3, 1, 1],
padding=[(0, 0)] * 4,
stride=[1, 2, 1, 1],
dilation=[1, 1, 1, 1],
ceil=True))
def test_valid_padding(self):
self.assertEqual([], infer.valid_padding(0))
self.assertEqual([(0, 0)], infer.valid_padding(1))
self.assertEqual([(0, 0), (0, 0), (0, 0), (0, 0)], infer.valid_padding(4))
def test_same_padding(self):
self.assertEqual([(0, 1)], infer.same_padding(upscaled_input=[32], filter=[3], stride=[2], dilation=[1]))
self.assertEqual([(1, 0)], infer.same_padding(upscaled_input=[32],
filter=[3],
stride=[2],
dilation=[1],
left_bigger=True))
self.assertEqual([(2, 3)], infer.same_padding(upscaled_input=[32], filter=[3], stride=[2], dilation=[3]))
def test_concat(self):
self.assertEqual([3, 2, 3, 4], infer.concat([[1, 2, 3, 4], [2, 2, 3, 4]], 0))
self.assertEqual([3, 2, 3, 4], infer.concat([[1, 2, 3, 4], [2, 2, 3, 4]], -4))
self.assertEqual([1, 12, 3, 4], infer.concat([[1, 2, 3, 4], [1, 10, 3, 4]], 1))
self.assertEqual([1, 2, 3, 9], infer.concat([[1, 2, 3, 4], [1, 2, 3, 5]], -1))
def test_split(self):
self.assertEqual([[1, 2, 2], [1, 2, 2], [1, 2, 2]], infer.split([1, 2, 6], -1, num=3))
self.assertEqual([[1, 2, 2], [1, 2, 4]], infer.split([1, 2, 6], -1, ratios=[1, 2]))
self.assertEqual([[1, 2, 2], [1, 2, 4]], infer.split([1, 2, 6], -1, sizes=[2, 4]))
def test_conv(self):
self.assertEqual([10, 30, 30, 16], infer.conv(input=[10, 32, 32, 3],
filter=[3, 3],
padding=infer.Padding.VALID,
stride=[1, 1],
dilation=[1, 1],
groups=1,
spatial_begin=infer.spatial_begin(infer.Format.NHWC),
channel_axis=infer.channel_axis(infer.Format.NHWC),
output_channels=16))
self.assertEqual([10, 16, 30, 30], infer.conv(input=[10, 3, 32, 32],
filter=[3, 3],
padding=[(0, 0), (0, 0)],
stride=[1, 1],
dilation=[1, 1],
groups=1,
spatial_begin=infer.spatial_begin(infer.Format.NCHW),
channel_axis=infer.channel_axis(infer.Format.NCHW),
output_channels=16))
self.assertEqual([10, 3, 32, 32], infer.conv(input=[10, 16, 30, 30],
filter=[3, 3],
padding=[(0, 0), (0, 0)],
stride=[1, 1],
dilation=[1, 1],
groups=1,
format=infer.Format.NCHW,
output_channels=3,
deconv=True))
self.assertEqual([10, 3, 32, 32], infer.conv(input=[10, 16, 32, 32],
filter=[3, 3],
padding=infer.Padding.SAME_UPPER,
stride=[1, 1],
dilation=[1, 1],
groups=1,
format=infer.Format.NCHW,
output_channels=3))
self.assertEqual([10, 6, 32, 32], infer.conv(input=[10, 3, 32, 32],
filter=[3, 3],
padding=infer.Padding.SAME_LOWER,
stride=[1, 1],
dilation=[1, 1],
groups=0,
format=infer.Format.NCHW,
output_channels=6))
self.assertEqual([10, 16, 32, 32], infer.conv(input=[10, 3, 32, 32],
filter=[3, 3],
padding=infer.Padding.SAME_UPPER,
stride=[1, 1],
dilation=[1, 1],
groups=1,
format=infer.Format.NCHW,
output_channels=16,
deconv=True))
self.assertEqual([10, 16, 64, 64], infer.conv(input=[10, 3, 32, 32],
filter=[3, 3],
padding=infer.Padding.SAME_UPPER,
stride=[2, 2],
dilation=[1, 1],
groups=1,
format=infer.Format.NCHW,
output_channels=16,
deconv=True))
self.assertEqual([10, 16, 65, 65], infer.conv(input=[10, 3, 32, 32],
filter=[3, 3],
padding=infer.Padding.SAME_UPPER,
stride=[2, 2],
dilation=[1, 1],
groups=1,
format=infer.Format.NCHW,
output_channels=16,
output_padding=[(0, 1), (0, 1)],
deconv=True))
def test_squeeze(self):
self.assertEqual([3, 2], infer.squeeze([3, 2, 1], [-1]))
self.assertEqual([2, 3], infer.squeeze([1, 2, 3], [0]))
self.assertEqual([2, 2], infer.squeeze([2, 1, 2, 1], [1, -1]))
self.assertEqual([], infer.squeeze([1, 1], [0, -1]))
self.assertEqual([3, 2], infer.squeeze([1, 3, 1, 1, 2, 1, 1]))
def test_unsqueeze(self):
self.assertEqual([1, 2, 3], infer.unsqueeze([2, 3], [0]))
self.assertEqual([2, 3, 1], infer.unsqueeze([2, 3], [-1]))
self.assertEqual([2, 1, 3], infer.unsqueeze([2, 3], [-2]))
self.assertEqual([1, 2, 3], infer.unsqueeze([2, 3], [-3]))
self.assertEqual([1, 2, 1], infer.unsqueeze([1, 2], [2]))
self.assertEqual([2, 1, 3], infer.unsqueeze([2, 3], [1]))
self.assertEqual([1, 1, 1, 2, 1, 1], infer.unsqueeze([1, 2], [0, 2, 4, 5]))
self.assertEqual([1], infer.unsqueeze([], [0]))
def test_matmul(self):
self.assertEqual([3, 5], infer.matmul([3, 4], [4, 5]))
self.assertEqual([3, 5], infer.matmul([4, 3], [4, 5], transpose_a=True))
self.assertEqual([3, 5], infer.matmul([3, 4], [5, 4], transpose_b=True))
self.assertEqual([10, 20, 3, 5], infer.matmul([10, 20, 4, 3],
[10, 20, 5, 4],
transpose_a=True,
transpose_b=True))
def test_reduce(self):
self.assertEqual([1, 1, 1, 1, 5], infer.reduce([1, 2, 3, 4, 5], [1, 2, 3]))
self.assertEqual([1, 5], infer.reduce([1, 2, 3, 4, 5], [1, 2, 3], squeeze=True))
self.assertEqual([], infer.reduce([5], [-1], squeeze=True))
self.assertEqual([1, 6], infer.reduce([5, 6], [-2]))
def test_stack(self):
self.assertEqual([1, 2, 3], infer.stack([[1, 2], [1, 2], [1, 2]], 2))
self.assertEqual([1, 2, 3], infer.stack([[1, 2], [1, 2], [1, 2]], -1))
self.assertEqual([3, 1, 2], infer.stack([[1, 2], [1, 2], [1, 2]], 0))
self.assertEqual([1, 3, 2], infer.stack([[1, 2], [1, 2], [1, 2]], 1))
def test_unstack(self):
self.assertEqual([[1, 2], [1, 2], [1, 2]], infer.unstack([1, 2, 3], 2))
self.assertEqual([[1, 2], [1, 2], [1, 2]], infer.unstack([1, 2, 3], -1))
self.assertEqual([[2, 3]], infer.unstack([1, 2, 3], 0))
self.assertEqual([[1, 3], [1, 3]], infer.unstack([1, 2, 3], 1))
s1, s2 = infer.unstack([1, 2, 3], 1)
self.assertIsNot(s1, s2)
def test_pad(self):
self.assertEqual([2, 7, 12], infer.pad([1, 2, 3], [(0, 1), (2, 3), (4, 5)]))
def test_reshape(self):
self.assertEqual([4, 6], infer.reshape([4, 2, 3], [4, 6]))
self.assertEqual([4, 6], infer.reshape([4, 2, 3], [4, -1]))
self.assertEqual([24], infer.reshape([4, 2, 3], [24]))
self.assertEqual([24], infer.reshape([4, 2, 3], [-1]))
self.assertEqual([4, 2, 1, 3], infer.reshape([4, 2, 3], [4, 2, -1, 3]))
self.assertEqual([4, 6, 1], infer.reshape([4, 2, 3], [4, -1, 1]))
self.assertEqual([4, 6, 1], infer.reshape([4, 2, 3], [4, -1, 1]))
self.assertEqual([1], infer.reshape([], [1]))
self.assertEqual([1], infer.reshape([], [-1]))
self.assertEqual([], infer.reshape([1], []))
self.assertEqual([], infer.reshape([1, 1, 1], []))
self.assertEqual([0, 1], infer.reshape([0], [0, 1]))
with self.assertRaises(AssertionError):
infer.reshape([0], [0, -1])
self.assertEqual([1, 2, 1, 3], infer.reshape([1, 2, 3], [0, 0, 1, -1], zero_means_same=True))
self.assertEqual([1], infer.reshape([1], [0], zero_means_same=True))
with self.assertRaises(AssertionError):
infer.reshape([], [0], zero_means_same=True)
with self.assertRaises(AssertionError):
infer.reshape([1], [1, 0], zero_means_same=True)
def test_flatten(self):
self.assertEqual([1, 1], infer.flatten([]))
self.assertEqual([2, 1], infer.flatten([2]))
self.assertEqual([2, 3], infer.flatten([2, 3]))
self.assertEqual([2, 12], infer.flatten([2, 3, 4]))
self.assertEqual([0, 1], infer.flatten([0]))
self.assertEqual([0, 3], infer.flatten([0, 3]))
self.assertEqual([0, 12], infer.flatten([0, 3, 4]))
self.assertEqual([0, 0], infer.flatten([0, 3, 4, 0]))
self.assertEqual([1, 0], infer.flatten([1, 3, 4, 0]))
def test_resize(self):
self.assertEqual([10, 16, 64, 3], infer.resize([10, 32, 32, 3], [16, 64],
spatial_begin=infer.spatial_begin(infer.Format.NHWC)))
self.assertEqual([10, 16, 64], infer.resize([10, 32, 32], [16, 64], format=infer.Format.NHWC))
self.assertEqual([10, 3, 16, 64], infer.resize([10, 3, 32, 32], [16, 64], format=infer.Format.NCHW))
def test_downsample(self):
self.assertEqual([10, 16, 16, 3],
infer.downsample([10, 32, 32, 3], [2, 2], spatial_begin=infer.spatial_begin(infer.Format.NHWC)))
self.assertEqual([10, 16, 8], infer.downsample([10, 32, 32], [2, 4], format=infer.Format.NHWC))
self.assertEqual([10, 3, 8, 16], infer.downsample([10, 3, 32, 32], [4, 2], format=infer.Format.NCHW))
def test_upsample(self):
self.assertEqual([10, 64, 64, 3], infer.upsample([10, 32, 32, 3], [2, 2],
spatial_begin=infer.spatial_begin(infer.Format.NHWC)))
self.assertEqual([10, 64, 128], infer.upsample([10, 32, 32], [2, 4], format=infer.Format.NHWC))
self.assertEqual([10, 3, 128, 64], infer.upsample([10, 3, 32, 32], [4, 2], format=infer.Format.NCHW))
def test_transpose(self):
self.assertEqual([], infer.transpose([]))
self.assertEqual([1, 2, 3], infer.transpose([3, 2, 1]))
self.assertEqual([10, 3, 32, 16], infer.transpose([10, 32, 16, 3], [0, 3, 1, 2]))
def test_slice(self):
self.assertEqual([1, 1, 1, 2], infer.slice(input=[1, 2, 3, 4], begin=[0, 1, 2, 2], size=[1, 1, 1, 2]))
self.assertEqual([1, 1, 1, 2], infer.slice(input=[1, 2, 3, 4], begin=[0, 1, 2, 2], size=[-1, -1, -1, -1]))
self.assertEqual([1, 1, 1, 2], infer.slice(input=[1, 2, 3, 4],
begin=[0, 1, 2, 2],
size=[0, 0, 0, 0],
zero_means_all=True))
self.assertEqual([0, 0, 0, 0], infer.slice([1, 2, 3, 4], begin=[0, 1, 2, 2], size=[0, 0, 0, 0]))
self.assertEqual([2, 4, 6, 36], infer.slice(input=[10, 20, 30, 40], begin=[1, 2, 3, 4], size=[2, 4, 6, -1]))
self.assertEqual([1, 1, 1, 2], infer.slice(input=[1, 2, 3, 4], begin=[0, 1, 2, 2], end=[1, 2, 3, 4]))
self.assertEqual([1, 1, 1, 2], infer.slice(input=[1, 2, 3, 4],
begin=[0, 1, 2, 2],
end=[0, 0, 0, 0],
zero_means_all=True))
self.assertEqual([0, 0, 0, 0], infer.slice([1, 2, 3, 4], begin=[0, 1, 2, 2], end=[0, 1, 2, 2]))
self.assertEqual([2, 4, 6, 36], infer.slice(input=[10, 20, 30, 40],
begin=[1, 2, 3, 4],
end=[3, 6, 9, 0],
zero_means_all=True))
self.assertEqual([10, 32, 32, 1], infer.slice(input=[10, 32, 32, 3], axes=[3], begin=[1], end=[2]))
self.assertEqual([10, 32, 32, 0], infer.slice(input=[10, 32, 32, 3], axes=[3], begin=[1], end=[1]))
self.assertEqual([10, 32, 32, 2], infer.slice(input=[10, 32, 32, 3],
axes=[3],
begin=[1],
end=[0],
zero_means_all=True))
self.assertEqual([10, 32, 32, 1], infer.slice(input=[10, 32, 32, 3], axes=[3], begin=[1], size=[1]))
self.assertEqual([10, 32, 32, 2], infer.slice(input=[10, 32, 32, 3], axes=[3], begin=[1], size=[-1]))
self.assertEqual([10, 32, 32, 0], infer.slice(input=[10, 32, 32, 3], axes=[-1], begin=[1], size=[0]))
self.assertEqual([10, 32, 32, 2], infer.slice(input=[10, 32, 32, 3],
axes=[-1],
begin=[1],
size=[0],
zero_means_all=True))
self.assertEqual([1, 2, 1, 2], infer.slice(input=[10, 32, 32, 3],
axes=[-1, 2, -3, 0],
begin=[1, 2, 3, 0],
end=[3, 3, 5, 1]))
self.assertEqual([1, 2, 1, 2], infer.slice(input=[10, 32, 32, 3],
axes=[-1, 2, -3, 0],
begin=[1, 2, 3, 0],
size=[-1, 1, 2, 1]))
self.assertEqual([10, 5, 3, 2], infer.slice(input=[10, 20, 30, 40],
begin=[0, 0, 0, 0],
size=[10, 10, 10, 10],
stride=[1, 2, 3, 4]))
def test_bit_mask_to_array(self):
self.assertEqual([0, 1, 1, 0], infer.bit_mask_to_array(6, 4))
self.assertEqual([1, 1, 1, 0, 0], infer.bit_mask_to_array(7, 5))
self.assertEqual([1], infer.bit_mask_to_array(1, 1))
self.assertEqual([0], infer.bit_mask_to_array(0, 1))
self.assertEqual([], infer.bit_mask_to_array(0, 0))
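# decompose_strided_slice is expected to yield the plain slice parameters
# (begin, end, stride, sliced shape) together with a final reshape shape that
# accounts for the new-axis and shrink-axis masks, as checked below.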
def test_decompose_strided_slice(self):
anything = 0xdeadbeef
ssl_begin, ssl_end, ssl_stride, ssl_shape, reshape_shape = \
infer.decompose_strided_slice(input=[10, 32, 32, 3],
begin=[1, anything, 2, 2, 0],
end=[3, anything, 5, 3, 3],
stride=[1, anything, 1, 1, 1],
new_axis_mask=[0, 1, 0, 0, 0],
shrink_axis_mask=[0, 0, 0, 1, 0],
begin_mask=[0, 0, 0, 0, 0],
end_mask=[0, 0, 0, 0, 0],
ellipsis_mask=[0, 0, 0, 0, 0])
self.assertEqual([1, 2, 2, 0], ssl_begin)
self.assertEqual([3, 5, 3, 3], ssl_end)
self.assertEqual([1, 1, 1, 1], ssl_stride)
self.assertEqual([2, 3, 1, 3], ssl_shape)
self.assertEqual([2, 1, 3, 3], reshape_shape)
ssl_begin, ssl_end, ssl_stride, ssl_shape, reshape_shape = \
infer.decompose_strided_slice(input=[10, 32, 32, 3],
begin=[anything, anything, anything, anything],
end=[anything, anything, anything, anything],
stride=[1, -1, 3, -2],
new_axis_mask=[0, 0, 0, 0],
shrink_axis_mask=[0, 0, 0, 0],
begin_mask=[1, 1, 1, 1],
end_mask=[1, 1, 1, 1],
ellipsis_mask=[0, 0, 0, 0])
self.assertEqual([0, 0, 0, 0], ssl_begin)
self.assertEqual([10, 32, 32, 3], ssl_end)
self.assertEqual([1, -1, 3, -2], ssl_stride)
self.assertEqual([10, 32, 10, 1], ssl_shape)
self.assertEqual([10, 32, 10, 1], reshape_shape)
self.assertIsNot(ssl_shape, reshape_shape)
decomposed = infer.decompose_strided_slice(input=[10, 32, 32, 3],
begin=[0, anything, 0],
end=[1, anything, 1],
stride=[1, anything, 1],
new_axis_mask=[0, 0, 0],
shrink_axis_mask=[0, 0, 0],
begin_mask=[0, 0, 0],
end_mask=[0, 0, 0],
ellipsis_mask=[0, 1, 0])
self.assertEqual([0, 0, 0, 0], decomposed.ssl_begin)
self.assertEqual([1, 32, 32, 1], decomposed.ssl_end)
self.assertEqual([1, 1, 1, 1], decomposed.ssl_stride)
self.assertEqual([1, 32, 32, 1], decomposed.ssl_shape)
self.assertEqual([1, 32, 32, 1], decomposed.reshape_shape)
self.assertIsNot(decomposed.ssl_shape, decomposed.reshape_shape)
def test_strided_slice(self):
anything = 0xcafecafe
self.assertEqual([2, 1, 3, 3], infer.strided_slice(input=[10, 32, 32, 3],
begin=[1, anything, 2, 2, 0],
end=[3, anything, 5, 3, 3],
stride=[1, anything, 1, 1, 1],
new_axis_mask=[0, 1, 0, 0, 0],
shrink_axis_mask=[0, 0, 0, 1, 0],
begin_mask=[0, 0, 0, 0, 0],
end_mask=[0, 0, 0, 0, 0],
ellipsis_mask=[0, 0, 0, 0, 0]))
self.assertEqual([10, 32, 10, 1], infer.strided_slice(input=[10, 32, 32, 3],
begin=[anything, anything, anything, anything],
end=[anything, anything, anything, anything],
stride=[1, -1, 3, -2],
new_axis_mask=[0, 0, 0, 0],
shrink_axis_mask=[0, 0, 0, 0],
begin_mask=[1, 1, 1, 1],
end_mask=[1, 1, 1, 1],
ellipsis_mask=[0, 0, 0, 0]))
self.assertEqual([1, 32, 32, 1], infer.strided_slice(input=[10, 32, 32, 3],
begin=[0, anything, 0],
end=[1, anything, 1],
stride=[1, anything, 1],
new_axis_mask=[0, 0, 0],
shrink_axis_mask=[0, 0, 0],
begin_mask=[0, 0, 0],
end_mask=[0, 0, 0],
ellipsis_mask=[0, 1, 0]))
self.assertEqual([1, 32, 32, 1], infer.strided_slice(input=[10, 32, 32, 3],
begin=[0, anything, 0],
end=[1, anything, 1],
stride=[1, anything, 1],
new_axis_mask=0,
shrink_axis_mask=0,
begin_mask=0,
end_mask=0,
ellipsis_mask=2))
def test_tile(self):
self.assertEqual([4, 6, 6, 4], infer.tile(input=[1, 2, 3, 4], repeat=[4, 3, 2, 1]))
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jnory/easy_lddc",
"score": 3
} |
#### File: easy_lddc/easy_ldcc/loader.py
```python
import tarfile
from io import BytesIO
import os
import requests
import re
ARCHIVE_URL = "https://www.rondhuit.com/download/ldcc-20140209.tar.gz"
MIRROR_URL = "https://github.com/jnory/datasets/raw/master/ldcc/ldcc-20140209.tar.gz"
CACHE_DIR = ".cache"
CACHE_PATH = "{}/ldcc-20140209.tar".format(CACHE_DIR)
CORPUS_PAT = re.compile(r"^text/(.*?)/.*\d\.txt$")
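# Archive members are expected to look like text/<category>/<name><digit>.txt;
# group(1) of the pattern captures the category directory name.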
def _get_data(use_mirror=True):
if not os.path.exists(CACHE_DIR):
os.mkdir(CACHE_DIR)
if not os.path.exists(CACHE_PATH):
url = MIRROR_URL if use_mirror else ARCHIVE_URL
resp = requests.get(url)
with open(CACHE_PATH, "wb") as fp:
fp.write(resp.content)
content = resp.content
else:
with open(CACHE_PATH, "rb") as fp:
content = fp.read()
return content
class Downloader(object):
def __init__(self, use_mirror=True):
tar = tarfile.open(fileobj=BytesIO(_get_data(use_mirror)))
self.categories = []
self.texts = []
for member in tar.getmembers():
name = member.name
if not member.isfile():
continue
m = CORPUS_PAT.match(name)
if m is None:
continue
category = m.group(1)
obj = tar.extractfile(member)
text = obj.read().decode("utf-8")
self.categories.append(category)
self.texts.append(text)
```
#### File: easy_lddc/easy_ldcc/parser.py
```python
import re
import MeCab
from dateutil.parser import parse
DATETIME_PAT = re.compile(r"^\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\d.*$")
def remove_meta(sentences):
data = []
timestamp = None
for sentence in sentences:
sentence = sentence.strip()
if sentence == "":
continue
if sentence.startswith("http"):
continue
if DATETIME_PAT.match(sentence):
timestamp = parse(sentence)
continue
data.append(sentence)
return data, timestamp
class Parser(object):
def __init__(self):
self.tagger = MeCab.Tagger("-Owakati")
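# Parsing an empty string right after construction is a common workaround for
# the MeCab Python binding occasionally misbehaving on the first parse call.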
self.tagger.parse("")
def parse(self, text):
sentences = text.split("\n")
sentences, timestamp = remove_meta(sentences)
assert timestamp is not None
doc = []
for sentence in sentences:
tokenized = self.tagger.parse(sentence).strip()
words = tokenized.split()
doc.append(["<s>"] + words + ["</s>"])
return doc, timestamp
``` |
{
"source": "jnosal/py-sls-audit",
"score": 2
} |
#### File: py-sls-audit/py_sls_audit/cli.py
```python
import json
import sys
import boto3
import botocore.exceptions
import click
from .common import AWSBridge
_COMMANDS = ["metrics", "conf", "exit"]
class CLIAwsProxy(AWSBridge):
def fetch_and_list_functions(self):
data = self.fetch_functions()
for index, name in enumerate(data):
click.echo(f"{index + 1}. {name}")
def fetch_and_list_metrics(self, index):
data = self.fetch_metrics(index)
for index, name in enumerate(data):
click.echo(f"{index + 1}. {name}")
def fetch_and_list_metric_statistics(self, function_index, metric_index):
data = self.fetch_metric_statistics(function_index, metric_index)
click.echo(json.dumps(data, indent=2))
def command_exit(self, *args):
sys.exit(1)
def command_conf(self, index):
function = self.functions[index]
client = self.session.client("lambda")
response = client.get_function_configuration(FunctionName=function)
click.echo(json.dumps(response, indent=2))
def command_metrics(self, index):
self.fetch_and_list_metrics(index)
if len(self.metrics) < 1:
return
metric_index = prompt_for_index("metric", len(self.metrics))
self.fetch_and_list_metric_statistics(index, metric_index)
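# Menu entries are shown 1-based; prompt_for_index validates the choice and
# converts it back to a 0-based list index.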
def prompt_for_index(name, total):
index = click.prompt(f"Please enter {name} index", type=int)
if index < 1 or index > total:
click.echo(f"Index must be between 1 and {total}")
return prompt_for_index(name, total)
return index - 1
@click.command()
@click.option("-p", "--profile", help="AWS profile to use.", required=True)
@click.option("-r", "--region", help="AWS region.", default="eu-west-1")
def run(profile, region):
try:
session = boto3.Session(profile_name=profile, region_name=region)
except botocore.exceptions.ProfileNotFound:
click.echo(f"Could not load profile {profile}")
sys.exit(1)
proxy = CLIAwsProxy(session=session)
proxy.fetch_and_list_functions()
function_index = prompt_for_index("function", len(proxy.functions))
click.clear()
while True:
choice = click.prompt(
"Please enter command to run", type=click.Choice(_COMMANDS)
)
click.clear()
getattr(proxy, f"command_{choice}")(function_index)
click.echo("")
``` |
{
"source": "jnosal/tormon",
"score": 2
} |
#### File: tormon/api/views.py
```python
import tornado.web
import tornado.gen
from . import utils
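# JSON API handlers: BaseApiHandler serializes payloads with utils.json_dumps
# and, for non-200 responses, also embeds the status code in the body.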
class BaseApiHandler(tornado.web.RequestHandler):
def response(self, data, code=200):
if code != 200:
# if something went wrong, we include returned HTTP code in the
# JSON response
data[u'status'] = code
self.write(utils.json_dumps(data) + u"\n")
self.set_status(code)
def error(self, message, code=500):
self.response({u'message': message}, code)
def options(self, *args, **kwargs):
pass
class StatsHandler(BaseApiHandler):
@tornado.gen.coroutine
def get(self, *args, **kwargs):
stats = yield self.application.monitor.get_stats()
self.response({u'stats': stats}, 200)
class UrlsListHandler(BaseApiHandler):
@tornado.gen.coroutine
def get(self, *args, **kwargs):
data = yield self.application.monitor.iter_resources()
objects = [
dict(url=url, response=response)
for url, response
in data
]
self.response({u'objects': objects}, 200)
class UrlDetailsHandler(BaseApiHandler):
@tornado.gen.coroutine
def get(self, *args, **kwargs):
url = self.get_argument('url', None)
try:
info = yield self.application.monitor.get_info(url)
self.response({u'response': info}, 200)
except KeyError:
error = (
u"Url: {} is not monitored. Perhaps slash is missing"
u" or is redundant."
).format(url)
self.response({u'error': error}, 400)
```
#### File: tormon/core/utils.py
```python
import logging
import importlib
import signal
import time
from tormon import applications
from tormon.monitor import client
from tornado.ioloop import IOLoop
def load_app(name, default=None):
default = default or applications.MonitorWebApplication
return load_component(name, default)
def load_monitor(name, default=None):
default = default or client.HealthCheckMonitor
return load_component(name, default)
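# load_component resolves a dotted path such as "package.module.ClassName"; if
# the final attribute cannot be found it logs the error and falls back to the
# supplied default.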
def load_component(name, default):
loaded = default
if name:
try:
components = name.split(u'.')
module_name = u".".join(components[:-1])
module = importlib.import_module(module_name)
loaded = getattr(module, components[-1])
except AttributeError:
logging.error(u"Error loading: {0}. Switching to: {1}.".format(
name, default.__name__
))
logging.info(u"Loaded component: {0}".format(loaded.__name__))
return loaded
def shutdown(server_instance, *args):
ioloop_instance = IOLoop.instance()
logging.info(u'Shutting down gracefully.')
server_instance.stop()
for component in args:
component.stop()
def finalize():
ioloop_instance.stop()
logging.info(u'Shut down.')
ioloop_instance.add_timeout(time.time() + 1.5, finalize)
def register_shutdown_handlers(server, *args):
shutdown_handler = lambda sig, frame: shutdown(server, *args)
signal.signal(signal.SIGINT, shutdown_handler)
signal.signal(signal.SIGTERM, shutdown_handler)
```
#### File: tormon/monitor/handlers.py
```python
import abc
import logging
DEFAULT_ALGORITHM = u''
class IBaseHandler(object):
__metaclass__ = abc.ABCMeta
def __init__(self, *args, **kwargs):
logging.info(u'Initializing handler: {0}'.format(
self.__class__.__name__
))
self.algorithm = kwargs.get(u'algorithm', DEFAULT_ALGORITHM)
@abc.abstractmethod
def notify(self, *args, **kwargs):
pass
class SmtpHandler(IBaseHandler):
def notify(self, *args, **kwargs):
pass
class LogHandler(IBaseHandler):
def notify(self, *args, **kwargs):
pass
class ApiCallHandler(IBaseHandler):
def notify(self, *args, **kwargs):
pass
```
#### File: tormon/tests/base.py
```python
import tornado.testing
class BaseTestCase(tornado.testing.AsyncTestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
def tearDown(self):
super(BaseTestCase, self).tearDown()
``` |
{
"source": "JNotelddim/python-snake",
"score": 4
} |
#### File: python-snake/src/board.py
```python
import copy
from typing import Tuple, List
from src.coordinate import Coordinate
from src.snake import Snake
class Board:
"""Track the cooardinates for all snakes and food in the game."""
def __init__(self, data):
self._data = data
self._snakes = None
self._foods = None
@property
def snakes(self) -> List[Snake]:
"""Retreive the list of snakes from the board data."""
if self._snakes is None:
snakes = [Snake(snake_data) for snake_data in self._data['snakes']]
self._snakes = snakes
return self._snakes
@property
def foods(self) -> List[Coordinate]:
"""Retreive the list of food from the board data."""
if self._foods is None:
self._foods = [Coordinate(food_data) for food_data in self._data['food']]
return self._foods
@property
def width(self) -> int:
"""Get width of the board -- note: it's a square."""
return self._data['width']
def is_coordinate_in_bounds(self, coordinate) -> bool:
"""Check whether or not the Coordinate is within the bounds of the Board."""
is_wall = (coordinate.x == -1 or coordinate.x == self.width
or coordinate.y == -1 or coordinate.y == self.width)
return not is_wall
def get_other_snakes(self, exclude_id) -> List[Snake]:
"""Get the List of Snakes whose IDs don't match the given ID."""
return [snake for snake in self.snakes if snake.id != exclude_id]
def advance_snake_along_path(self, snake_id: str, path: List[Coordinate]):
"""Return a new board with our snake advanced along given path."""
new_board = copy.deepcopy(self)
return new_board.__help_advance_snake_along_path(snake_id, path)
def __help_advance_snake_along_path(self, snake_id: str, path: List[Coordinate]):
"""Do the actual advancement of the snake along the path."""
me = next((snake for snake in self.snakes if snake.id == snake_id), None)
if not me:
raise ValueError("No snake for given id!")
me.coordinates += path
me.coordinates = me.coordinates[len(path):]
me.coordinates.reverse()
me.coordinates.append(me.coordinates[-1])
print("new coords:")
for coord in me.coordinates:
print(coord)
return self
``` |
{
"source": "jnothman/datatemplate",
"score": 2
} |
#### File: datatemplate/template_libs/withjson.py
```python
import cjson
from django.template import Library, Node, Context, TemplateSyntaxError
from django.template.defaultfilters import stringfilter
register = Library()
@register.filter
def json(value):
return cjson.decode(value)
json = stringfilter(json)
@register.filter
def item(value, arg):
return value[unicode(arg)]
class WithJsonNode(Node):
def __init__(self, var, name, nodelist):
self.var = var
self.name = name
self.nodelist = nodelist
def __repr__(self):
return "<WithJsonNode>"
def render(self, context):
val = self.var
context.push()
context[self.name] = val
output = self.nodelist.render(context)
context.pop()
return output
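# Template usage (illustrative): {% withjson '{"a": 1}' as obj %} ... {% endwith %}
# Note the JSON value is decoded once when the template is compiled, not per render.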
def do_withjson(parser, token):
bits = list(token.split_contents())
if len(bits) != 4 or bits[2] != "as":
raise TemplateSyntaxError("%r expected format is 'value as name'" %
bits[0])
obj = cjson.decode(parser.compile_filter(bits[1]).resolve(Context()))
name = bits[3]
nodelist = parser.parse(('endwith',))
parser.delete_first_token()
return WithJsonNode(obj, name, nodelist)
do_withjson = register.tag('withjson', do_withjson)
``` |
{
"source": "jnothman/fastcore",
"score": 2
} |
#### File: fastcore/fastcore/docments.py
```python
__all__ = ['docments']
# Cell
from tokenize import tokenize,COMMENT
from ast import parse,FunctionDef
from io import BytesIO
from textwrap import dedent
from types import SimpleNamespace
from inspect import getsource,isfunction,isclass,signature,Parameter
from .basics import *
import re
# Cell
def _parses(s):
"Parse Python code in string or function object `s`"
return parse(dedent(getsource(s) if isfunction(s) else s))
def _tokens(s):
"Tokenize Python code in string or function object `s`"
if isfunction(s): s = getsource(s)
return tokenize(BytesIO(s.encode('utf-8')).readline)
_clean_re = re.compile(r'^\s*#(.*)\s*$')
def _clean_comment(s):
res = _clean_re.findall(s)
return res[0] if res else None
def _param_locs(s, returns=True):
"`dict` of parameter line numbers to names"
body = _parses(s).body
if len(body)!=1 or not isinstance(body[0], FunctionDef): return None
defn = body[0]
res = {arg.lineno:arg.arg for arg in defn.args.args}
if returns and defn.returns: res[defn.returns.lineno] = 'return'
return res
# Cell
def _get_comment(line, arg, comments, parms):
if line in comments: return comments[line].strip()
line -= 1
res = []
while line and line in comments and line not in parms:
res.append(comments[line])
line -= 1
return dedent('\n'.join(reversed(res))) if res else None
def _get_full(anno, name, default, docs):
if anno==Parameter.empty and default!=Parameter.empty: anno = type(default)
return AttrDict(docment=docs.get(name), anno=anno, default=default)
# Cell
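# The "docment" style parsed by this function attaches a short comment to each
# parameter, e.g. (illustrative):
#     def add(a:int,  # the first operand
#             b=0,    # the second operand
#            )->int:  # the sum
#         ...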
def docments(s, full=False, returns=True):
"`dict` of parameter names to 'docment-style' comments in function or string `s`"
if isclass(s): s = s.__init__ # Constructor for a class
comments = {o.start[0]:_clean_comment(o.string) for o in _tokens(s) if o.type==COMMENT}
parms = _param_locs(s, returns=returns)
docs = {arg:_get_comment(line, arg, comments, parms) for line,arg in parms.items()}
if not full: return docs
if isinstance(s,str): s = eval(s)
sig = signature(s)
res = {arg:_get_full(p.annotation, p.name, p.default, docs) for arg,p in sig.parameters.items()}
if returns: res['return'] = _get_full(sig.return_annotation, 'return', Parameter.empty, docs)
return res
``` |
{
"source": "jnothman/joblib",
"score": 3
} |
#### File: joblib/test/test_parallel.py
```python
import time
import sys
import io
import os
try:
import cPickle as pickle
PickleError = TypeError
except:
import pickle
PickleError = pickle.PicklingError
if sys.version_info[0] == 3:
PickleError = pickle.PicklingError
try:
# Python 2/Python 3 compat
unicode('str')
except NameError:
unicode = lambda s: s
from ..parallel import Parallel, delayed, SafeFunction, WorkerInterrupt, \
multiprocessing, cpu_count
from ..my_exceptions import JoblibException
import nose
###############################################################################
def division(x, y):
return x / y
def square(x):
return x ** 2
def exception_raiser(x):
if x == 7:
raise ValueError
return x
def interrupt_raiser(x):
time.sleep(.05)
raise KeyboardInterrupt
def f(x, y=0, z=0):
""" A module-level function so that it can be spawn with
multiprocessing.
"""
return x ** 2 + y + z
###############################################################################
def test_cpu_count():
assert cpu_count() > 0
###############################################################################
# Test parallel
def test_simple_parallel():
X = range(5)
for n_jobs in (1, 2, -1, -2):
yield (nose.tools.assert_equal, [square(x) for x in X],
Parallel(n_jobs=n_jobs)(
delayed(square)(x) for x in X))
try:
# To smoke-test verbosity, we capture stdout
orig_stdout = sys.stdout
orig_stderr = sys.stderr
if sys.version_info[0] == 3:
sys.stdout = io.StringIO()
sys.stderr = io.StringIO()
else:
sys.stdout = io.BytesIO()
sys.stderr = io.BytesIO()
for verbose in (2, 11, 100):
Parallel(n_jobs=-1, verbose=verbose)(
delayed(square)(x) for x in X)
Parallel(n_jobs=1, verbose=verbose)(
delayed(square)(x) for x in X)
Parallel(n_jobs=2, verbose=verbose, pre_dispatch=2)(
delayed(square)(x) for x in X)
except Exception as e:
my_stdout = sys.stdout
my_stderr = sys.stderr
sys.stdout = orig_stdout
sys.stderr = orig_stderr
print(unicode(my_stdout.getvalue()))
print(unicode(my_stderr.getvalue()))
raise e
finally:
sys.stdout = orig_stdout
sys.stderr = orig_stderr
def nested_loop():
Parallel(n_jobs=2)(delayed(square)(.01) for _ in range(2))
def test_nested_loop():
Parallel(n_jobs=2)(delayed(nested_loop)() for _ in range(2))
def test_parallel_kwargs():
""" Check the keyword argument processing of pmap.
"""
lst = range(10)
for n_jobs in (1, 4):
yield (nose.tools.assert_equal,
[f(x, y=1) for x in lst],
Parallel(n_jobs=n_jobs)(delayed(f)(x, y=1) for x in lst)
)
def test_parallel_pickling():
""" Check that pmap captures the errors when it is passed an object
that cannot be pickled.
"""
def g(x):
return x ** 2
nose.tools.assert_raises(PickleError,
Parallel(),
(delayed(g)(x) for x in range(10))
)
def test_error_capture():
# Check that error are captured, and that correct exceptions
# are raised.
if multiprocessing is not None:
# A JoblibException will be raised only if there is indeed
# multiprocessing
nose.tools.assert_raises(JoblibException,
Parallel(n_jobs=2),
[delayed(division)(x, y) for x, y in zip((0, 1), (1, 0))],
)
nose.tools.assert_raises(WorkerInterrupt,
Parallel(n_jobs=2),
[delayed(interrupt_raiser)(x) for x in (1, 0)],
)
else:
nose.tools.assert_raises(KeyboardInterrupt,
Parallel(n_jobs=2),
[delayed(interrupt_raiser)(x) for x in (1, 0)],
)
nose.tools.assert_raises(ZeroDivisionError,
Parallel(n_jobs=2),
[delayed(division)(x, y) for x, y in zip((0, 1), (1, 0))],
)
try:
ex = JoblibException
Parallel(n_jobs=1)(
delayed(division)(x, y) for x, y in zip((0, 1), (1, 0)))
except Exception:
# Cannot use 'except as' to maintain Python 2.5 compatibility
ex = sys.exc_info()[1]
nose.tools.assert_false(isinstance(ex, JoblibException))
class Counter(object):
def __init__(self, list1, list2):
self.list1 = list1
self.list2 = list2
def __call__(self, i):
self.list1.append(i)
nose.tools.assert_equal(len(self.list1), len(self.list2))
def consumer(queue, item):
queue.append('Consumed %s' % item)
def test_dispatch_one_job():
""" Test that with only one job, Parallel does act as a iterator.
"""
queue = list()
def producer():
for i in range(6):
queue.append('Produced %i' % i)
yield i
Parallel(n_jobs=1)(delayed(consumer)(queue, x) for x in producer())
nose.tools.assert_equal(queue,
['Produced 0', 'Consumed 0',
'Produced 1', 'Consumed 1',
'Produced 2', 'Consumed 2',
'Produced 3', 'Consumed 3',
'Produced 4', 'Consumed 4',
'Produced 5', 'Consumed 5']
)
nose.tools.assert_equal(len(queue), 12)
def test_dispatch_multiprocessing():
""" Check that using pre_dispatch Parallel does indeed dispatch items
lazily.
"""
if multiprocessing is None:
return
manager = multiprocessing.Manager()
queue = manager.list()
def producer():
for i in range(6):
queue.append('Produced %i' % i)
yield i
Parallel(n_jobs=2, pre_dispatch=3)(delayed(consumer)(queue, i)
for i in producer())
nose.tools.assert_equal(list(queue)[:4],
['Produced 0', 'Produced 1', 'Produced 2',
'Consumed 0', ])
nose.tools.assert_equal(len(queue), 12)
def test_exception_dispatch():
"Make sure that exception raised during dispatch are indeed captured"
nose.tools.assert_raises(
ValueError,
Parallel(n_jobs=6, pre_dispatch=16, verbose=0),
(delayed(exception_raiser)(i) for i in range(30)),
)
def _reload_joblib():
# Retrieve the path of the parallel module in a robust way
joblib_path = Parallel.__module__.split(os.sep)
joblib_path = joblib_path[:1]
joblib_path.append('parallel.py')
joblib_path = '/'.join(joblib_path)
module = __import__(joblib_path)
# Reload the module. This should trigger a fail
reload(module)
def test_multiple_spawning():
# Test that attempting to launch a new Python after spawned
# subprocesses will raise an error, to avoid infinite loops on
# systems that do not support fork
if not int(os.environ.get('JOBLIB_MULTIPROCESSING', 1)):
raise nose.SkipTest()
nose.tools.assert_raises(ImportError, Parallel(n_jobs=2),
[delayed(_reload_joblib)() for i in range(10)])
###############################################################################
# Test helpers
def test_joblib_exception():
# Smoke-test the custom exception
e = JoblibException('foobar')
# Test the repr
repr(e)
# Test the pickle
pickle.dumps(e)
def test_safe_function():
safe_division = SafeFunction(division)
nose.tools.assert_raises(JoblibException, safe_division, 1, 0)
``` |
{
"source": "jnothman/liac-arff",
"score": 3
} |
#### File: liac-arff/tests/test_decode_attribute.py
```python
import unittest
import arff
class TestDecodeAttribute(unittest.TestCase):
def get_decoder(self):
decoder = arff.ArffDecoder()
return decoder
def test_padding(self):
'''Attributes with spaces between attribute declaration and name, and
between attribute name and type.'''
decoder = self.get_decoder()
fixture = u'@ATTRIBUTE attribute-name NUMERIC'
result = decoder._decode_attribute(fixture)
expected = u'attribute-name'
self.assertEqual(len(result), 2)
self.assertEqual(result[0], expected)
def test_quoted(self):
'''Attributes with quoted name but without space.'''
decoder = self.get_decoder()
# Double Quote
fixture = u'@ATTRIBUTE "attribute-name" NUMERIC'
result = decoder._decode_attribute(fixture)
expected = u'attribute-name'
self.assertEqual(len(result), 2)
self.assertEqual(result[0], expected)
# Simple Quote
fixture = u"@ATTRIBUTE 'attribute-name' NUMERIC"
result = decoder._decode_attribute(fixture)
expected = u'attribute-name'
self.assertEqual(len(result), 2)
self.assertEqual(result[0], expected)
def test_numeric_name(self):
'''Attributes with numeric names.'''
decoder = self.get_decoder()
# Double Quote
fixture = u'@ATTRIBUTE 0 NUMERIC'
result = decoder._decode_attribute(fixture)
expected = u'0'
self.assertEqual(len(result), 2)
self.assertEqual(result[0], expected)
def test_quoted_special(self):
'''Attributes with quoted names containing special characters.'''
decoder = self.get_decoder()
# Double Quote
fixture = u'@ATTRIBUTE "%attribute-name" NUMERIC'
result = decoder._decode_attribute(fixture)
expected = u'%attribute-name'
self.assertEqual(len(result), 2)
self.assertEqual(result[0], expected)
# Simple Quote
fixture = u"@ATTRIBUTE ',attribute {} name' NUMERIC"
result = decoder._decode_attribute(fixture)
expected = u',attribute {} name'
self.assertEqual(len(result), 2)
self.assertEqual(result[0], expected)
def test_quoted_space(self):
'''Attributes with quoted name and space.'''
decoder = self.get_decoder()
# Double Quote
fixture = u'@ATTRIBUTE "attribute name" NUMERIC'
result = decoder._decode_attribute(fixture)
expected = u'attribute name'
self.assertEqual(len(result), 2)
self.assertEqual(result[0], expected)
# Simple Quote
fixture = u"@ATTRIBUTE 'attribute name' NUMERIC"
result = decoder._decode_attribute(fixture)
expected = u'attribute name'
self.assertEqual(len(result), 2)
self.assertEqual(result[0], expected)
def test_invalid_format(self):
'''Attributes with bad format.'''
decoder = self.get_decoder()
fixture = u'@ATTRIBUTE badformat'
self.assertRaises(
arff.BadAttributeFormat,
decoder._decode_attribute,
fixture
)
fixture = u'@ATTRIBUTE NUMERIC'
self.assertRaises(
arff.BadAttributeFormat,
decoder._decode_attribute,
fixture
)
def test_invalid_characters(self):
'''Attributes with invalid characters in the name.'''
decoder = self.get_decoder()
fixture = u'@ATTRIBUTE %badformat'
self.assertRaises(
arff.BadAttributeFormat,
decoder._decode_attribute,
fixture
)
fixture = u'@ATTRIBUTE na,me NUMERIC'
self.assertRaises(
arff.BadAttributeFormat,
decoder._decode_attribute,
fixture
)
fixture = u'@ATTRIBUTE {name NUMERIC'
self.assertRaises(
arff.BadAttributeFormat,
decoder._decode_attribute,
fixture
)
``` |
{
"source": "jnothman/pandas",
"score": 2
} |
#### File: pandas/tests/test_indexing.py
```python
import unittest
import nose
import itertools
from numpy import random, nan
from numpy.random import randn
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pan
import pandas.core.common as com
from pandas.core.api import (DataFrame, Index, Series, Panel, notnull, isnull,
MultiIndex, DatetimeIndex, Timestamp)
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
assert_frame_equal, assert_panel_equal)
from pandas.util import py3compat
import pandas.util.testing as tm
import pandas.lib as lib
from pandas import date_range
from numpy.testing.decorators import slow
_verbose = False
#-------------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [ range(len(a)) for a in axes ]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
    # check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
#v = f
#for a in reversed(i):
# v = v.__getitem__(a)
#return v
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
    # use an artificial conversion to map the key as integers to the labels
    # so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj,key,axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
if axis is not None:
axes = [ slice(None) ] * obj.ndim
axes[axis] = key
return tuple(axes)
    return key
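# Added note (illustrative, not part of the original test module): _axify expands a
# per-axis key into a full indexer tuple. For a 2-D frame, _axify(obj, [0, 1], 1)
# yields (slice(None, None, None), [0, 1]), i.e. "all rows, columns 0 and 1", which
# _get_result then feeds to .ix/.loc/.iloc via __getitem__.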
class TestIndexing(unittest.TestCase):
_multiprocess_can_split_ = True
_objs = set(['series','frame','panel'])
_typs = set(['ints','labels','mixed','ts','floats','empty'])
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.series_ints = Series(np.random.rand(4), index=range(0,8,2))
self.frame_ints = DataFrame(np.random.randn(4, 4), index=range(0, 8, 2), columns=range(0,12,3))
self.panel_ints = Panel(np.random.rand(4,4,4), items=range(0,8,2),major_axis=range(0,12,3),minor_axis=range(0,16,4))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4), index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4,4,4), items=list('abcd'), major_axis=list('ABCD'), minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4), index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4,4,4), items=[2,4,'null',8])
self.series_ts = Series(np.random.randn(4), index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4), index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4), items=date_range('20130101', periods=4))
#self.series_floats = Series(np.random.randn(4), index=[1.00, 2.00, 3.00, 4.00])
#self.frame_floats = DataFrame(np.random.randn(4, 4), columns=[1.00, 2.00, 3.00, 4.00])
#self.panel_floats = Panel(np.random.rand(4,4,4), items = [1.00,2.00,3.00,4.00])
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self,'%s_%s' % (o,t),None)
setattr(self,o,d)
def check_values(self, f, func, values = False):
if f is None: return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f,func)[i]
            # check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs = None, objs = None, axes = None, fails = None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim-1:
return
def _print(result, error = None):
if error is not None:
error = str(error)
v = "%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" % (name,result,t,o,method1,method2,a,error or '')
if _verbose:
print(v)
try:
### good debug location ###
#if name == 'bool' and t == 'empty' and o == 'series' and method1 == 'loc':
# import pdb; pdb.set_trace()
rs = getattr(obj, method1).__getitem__(_axify(obj,k1,a))
try:
xp = _get_result(obj,method2,k2,a)
except:
result = 'no comp'
_print(result)
return
try:
if np.isscalar(rs) and np.isscalar(xp):
self.assert_(rs == xp)
elif xp.ndim == 1:
assert_series_equal(rs,xp)
elif xp.ndim == 2:
assert_frame_equal(rs,xp)
elif xp.ndim == 3:
assert_panel_equal(rs,xp)
result = 'ok'
except (AssertionError):
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
if not result.startswith('ok'):
raise AssertionError(_print(result))
_print(result)
except (AssertionError):
raise
except (TypeError):
raise AssertionError(_print('type error'))
except (Exception), detail:
                # if we are in fails, then ok, otherwise raise it
if fails is not None:
if fails == type(detail):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error = detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes,(tuple,list)):
axes = [ axes ]
else:
axes = list(axes)
else:
axes = [ 0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self,o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_at_and_iat_get(self):
def _check(f, func, values = False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f,func)[i]
expected = _get_value(f,i,values)
assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self,o)
# iat
_check(d['ints'],'iat', values=True)
for f in [d['labels'],d['ts'],d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
_check(d['ints'], 'at')
_check(d['labels'],'at')
_check(d['ts'], 'at')
_check(d['floats'],'at')
def test_at_and_iat_set(self):
def _check(f, func, values = False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f,func)[i] = 1
expected = _get_value(f,i,values)
assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self,t)
_check(d['ints'],'iat',values=True)
for f in [d['labels'],d['ts'],d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
_check(d['ints'], 'at')
_check(d['labels'],'at')
_check(d['ts'], 'at')
_check(d['floats'],'at')
def test_at_timestamp(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assert_(result == xp)
def test_iat_invalid_args(self):
pass
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix', { 0 : 4, 1: 6, 2: 8 }, typs = ['ints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2, typs = ['labels','mixed','ts','floats','empty'], fails = IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix', { 0 : 6, 1: 9, 2: 12 }, typs = ['ints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1, typs = ['labels','mixed','ts','floats','empty'], fails = IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0,1,2], 'ix', { 0 : [0,2,4], 1 : [0,3,6], 2: [0,4,8] }, typs = ['ints'])
self.check_result('list int', 'iloc', [0,1,2], 'indexer', [0,1,2], typs = ['labels','mixed','ts','floats','empty'], fails = IndexError)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0,1,1,3], 'ix', { 0 : [0,2,2,6], 1 : [0,3,3,9] }, objs = ['series','frame'], typs = ['ints'])
def test_iloc_getitem_array(self):
# array like
s = Series(index=range(1,4))
self.check_result('array like', 'iloc', s.index, 'ix', { 0 : [2,4,6], 1 : [3,6,9], 2: [4,8,12] }, typs = ['ints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True,False,True,False,]
self.check_result('bool', 'iloc', b, 'ix', b, typs = ['ints'])
self.check_result('bool', 'iloc', b, 'ix', b, typs = ['labels','mixed','ts','floats','empty'], fails = IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1,3), 'ix', { 0 : [2,4], 1: [3,6], 2: [4,8] }, typs = ['ints'])
self.check_result('slice', 'iloc', slice(1,3), 'indexer', slice(1,3), typs = ['labels','mixed','ts','floats','empty'], fails = IndexError)
def test_iloc_getitem_out_of_bounds(self):
# out-of-bounds slice
self.assertRaises(IndexError, self.frame_ints.iloc.__getitem__, tuple([slice(None),slice(1,5,None)]))
self.assertRaises(IndexError, self.frame_ints.iloc.__getitem__, tuple([slice(None),slice(-5,3,None)]))
self.assertRaises(IndexError, self.frame_ints.iloc.__getitem__, tuple([slice(1,5,None)]))
self.assertRaises(IndexError, self.frame_ints.iloc.__getitem__, tuple([slice(-5,3,None)]))
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1,1] = 1
result = df.iloc[1,1]
self.assert_(result == 1)
df.iloc[:,2:3] = 0
expected = df.iloc[:,2:3]
result = df.iloc[:,2:3]
assert_frame_equal(result, expected)
    def test_iloc_getitem_multiindex(self):
df = DataFrame(np.random.randn(3, 3),
columns=[[2,2,4],[6,8,10]],
index=[[4,4,8],[8,10,12]])
rs = df.iloc[2]
xp = df.irow(2)
assert_series_equal(rs, xp)
rs = df.iloc[:,2]
xp = df.icol(2)
assert_series_equal(rs, xp)
rs = df.iloc[2,2]
xp = df.values[2,2]
self.assert_(rs == xp)
def test_loc_getitem_int(self):
# int label
self.check_result('int label', 'loc', 2, 'ix', 2, typs = ['ints'], axes = 0)
self.check_result('int label', 'loc', 3, 'ix', 3, typs = ['ints'], axes = 1)
self.check_result('int label', 'loc', 4, 'ix', 4, typs = ['ints'], axes = 2)
self.check_result('int label', 'loc', 2, 'ix', 2, typs = ['label'], fails = KeyError)
def test_loc_getitem_label(self):
# label
self.check_result('label', 'loc', 'c', 'ix', 'c', typs = ['labels'], axes=0)
self.check_result('label', 'loc', 'null', 'ix', 'null', typs = ['mixed'] , axes=0)
self.check_result('label', 'loc', 8, 'ix', 8, typs = ['mixed'] , axes=0)
self.check_result('label', 'loc', Timestamp('20130102'), 'ix', 1, typs = ['ts'], axes=0)
self.check_result('label', 'loc', 'c', 'ix', 'c', typs = ['empty'], fails = KeyError)
def test_loc_getitem_label_out_of_range(self):
# out of range label
self.check_result('label range', 'loc', 'f', 'ix', 'f', typs = ['ints','labels','mixed','ts','floats'], fails=KeyError)
def test_loc_getitem_label_list(self):
# list of labels
self.check_result('list lbl', 'loc', [0,2,4], 'ix', [0,2,4], typs = ['ints'], axes=0)
self.check_result('list lbl', 'loc', [3,6,9], 'ix', [3,6,9], typs = ['ints'], axes=1)
self.check_result('list lbl', 'loc', [4,8,12], 'ix', [4,8,12], typs = ['ints'], axes=2)
self.check_result('list lbl', 'loc', ['a','b','d'], 'ix', ['a','b','d'], typs = ['labels'], axes=0)
self.check_result('list lbl', 'loc', ['A','B','C'], 'ix', ['A','B','C'], typs = ['labels'], axes=1)
self.check_result('list lbl', 'loc', ['Z','Y','W'], 'ix', ['Z','Y','W'], typs = ['labels'], axes=2)
self.check_result('list lbl', 'loc', [2,8,'null'], 'ix', [2,8,'null'], typs = ['mixed'], axes=0)
self.check_result('list lbl', 'loc', [Timestamp('20130102'),Timestamp('20130103')], 'ix',
[Timestamp('20130102'),Timestamp('20130103')], typs = ['ts'], axes=0)
# fails
self.check_result('list lbl', 'loc', [0,1,2], 'indexer', [0,1,2], typs = ['empty'], fails = KeyError)
self.check_result('list lbl', 'loc', [0,2,3], 'ix', [0,2,3], typs = ['ints'], axes=0, fails = KeyError)
self.check_result('list lbl', 'loc', [3,6,7], 'ix', [3,6,9], typs = ['ints'], axes=1, fails = KeyError)
self.check_result('list lbl', 'loc', [4,8,10], 'ix', [4,8,12], typs = ['ints'], axes=2, fails = KeyError)
# array like
self.check_result('array like', 'loc', Series(index=[0,2,4]).index, 'ix', [0,2,4], typs = ['ints'], axes=0)
self.check_result('array like', 'loc', Series(index=[3,6,9]).index, 'ix', [3,6,9], typs = ['ints'], axes=1)
self.check_result('array like', 'loc', Series(index=[4,8,12]).index, 'ix', [4,8,12], typs = ['ints'], axes=2)
def test_loc_getitem_bool(self):
# boolean indexers
b = [True,False,True,False]
self.check_result('bool', 'loc', b, 'ix', b, typs = ['ints','labels','mixed','ts','floats'])
self.check_result('bool', 'loc', b, 'ix', b, typs = ['empty'], fails = KeyError)
def test_loc_getitem_int_slice(self):
# int slices in int
self.check_result('int slice1', 'loc', slice(2,4), 'ix', { 0 : [2,4], 1: [3,6], 2: [4,8] }, typs = ['ints'], fails=KeyError)
# ok
self.check_result('int slice2', 'loc', slice(2,4), 'ix', [2,4], typs = ['ints'], axes = 0)
self.check_result('int slice2', 'loc', slice(3,6), 'ix', [3,6], typs = ['ints'], axes = 1)
self.check_result('int slice2', 'loc', slice(4,8), 'ix', [4,8], typs = ['ints'], axes = 2)
# GH 3053
# loc should treat integer slices like label slices
from itertools import product
index = MultiIndex.from_tuples([t for t in product([6,7,8], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[6:8,:]
expected = df.ix[6:8,:]
assert_frame_equal(result,expected)
index = MultiIndex.from_tuples([t for t in product([10, 20, 30], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[20:30,:]
expected = df.ix[20:30,:]
assert_frame_equal(result,expected)
# doc examples
result = df.loc[10,:]
expected = df.ix[10,:]
assert_frame_equal(result,expected)
result = df.loc[:,10]
#expected = df.ix[:,10] (this fails)
expected = df[10]
assert_frame_equal(result,expected)
def test_loc_to_fail(self):
# GH3449
df = DataFrame(np.random.random((3, 3)),
index=['a', 'b', 'c'],
columns=['e', 'f', 'g'])
# raise a KeyError?
self.assertRaises(KeyError, df.loc.__getitem__, tuple([[1, 2], [1, 2]]))
def test_loc_getitem_label_slice(self):
# label slices (with ints)
self.check_result('lab slice', 'loc', slice(1,3), 'ix', slice(1,3), typs = ['labels','mixed','ts','floats','empty'], fails=KeyError)
# real label slices
self.check_result('lab slice', 'loc', slice('a','c'), 'ix', slice('a','c'), typs = ['labels'], axes=0)
self.check_result('lab slice', 'loc', slice('A','C'), 'ix', slice('A','C'), typs = ['labels'], axes=1)
self.check_result('lab slice', 'loc', slice('W','Z'), 'ix', slice('W','Z'), typs = ['labels'], axes=2)
self.check_result('ts slice', 'loc', slice('20130102','20130104'), 'ix', slice('20130102','20130104'), typs = ['ts'], axes=0)
self.check_result('ts slice', 'loc', slice('20130102','20130104'), 'ix', slice('20130102','20130104'), typs = ['ts'], axes=1, fails=KeyError)
self.check_result('ts slice', 'loc', slice('20130102','20130104'), 'ix', slice('20130102','20130104'), typs = ['ts'], axes=2, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2,8), 'ix', slice(2,8), typs = ['mixed'], axes=0, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2,8), 'ix', slice(2,8), typs = ['mixed'], axes=1, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2,8), 'ix', slice(2,8), typs = ['mixed'], axes=2, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2,4,2), 'ix', slice(2,4,2), typs = ['mixed'], axes=0)
def test_loc_general(self):
        # GH 2922 (these are expected to fail)
df = DataFrame(np.random.rand(4,4),columns=['A','B','C','D'])
self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(0,2),slice(0,2)]))
df = DataFrame(np.random.rand(4,4),columns=['A','B','C','D'], index=['A','B','C','D'])
self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(0,2),df.columns[0:2]]))
# want this to work
result = df.loc[:,"A":"B"].iloc[0:2,:]
self.assert_((result.columns == ['A','B']).all() == True)
self.assert_((result.index == ['A','B']).all() == True)
def test_loc_setitem_frame(self):
df = self.frame_labels
result = df.iloc[0,0]
df.loc['a','A'] = 1
result = df.loc['a','A']
self.assert_(result == 1)
result = df.iloc[0,0]
self.assert_(result == 1)
df.loc[:,'B':'D'] = 0
expected = df.loc[:,'B':'D']
result = df.ix[:,1:]
assert_frame_equal(result, expected)
def test_iloc_getitem_frame(self):
""" originally from test_frame.py"""
df = DataFrame(np.random.randn(10, 4), index=range(0, 20, 2), columns=range(0,8,2))
result = df.iloc[2]
exp = df.ix[4]
assert_series_equal(result, exp)
result = df.iloc[2,2]
exp = df.ix[4,4]
self.assert_(result == exp)
# slice
result = df.iloc[4:8]
expected = df.ix[8:14]
assert_frame_equal(result, expected)
result = df.iloc[:,2:3]
expected = df.ix[:,4:5]
assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[0,1,3]]
expected = df.ix[[0,2,6]]
assert_frame_equal(result, expected)
result = df.iloc[[0,1,3],[0,1]]
expected = df.ix[[0,2,6],[0,2]]
assert_frame_equal(result, expected)
        # neg indices
result = df.iloc[[-1,1,3],[-1,1]]
expected = df.ix[[18,2,6],[6,2]]
assert_frame_equal(result, expected)
        # duplicate indices
result = df.iloc[[-1,-1,1,3],[-1,1]]
expected = df.ix[[18,18,2,6],[6,2]]
assert_frame_equal(result, expected)
# with index-like
s = Series(index=range(1,5))
result = df.iloc[s.index]
expected = df.ix[[2,4,6,8]]
assert_frame_equal(result, expected)
# out-of-bounds slice
self.assertRaises(IndexError, df.iloc.__getitem__, tuple([slice(None),slice(1,5,None)]))
self.assertRaises(IndexError, df.iloc.__getitem__, tuple([slice(None),slice(-5,3,None)]))
self.assertRaises(IndexError, df.iloc.__getitem__, tuple([slice(1,11,None)]))
self.assertRaises(IndexError, df.iloc.__getitem__, tuple([slice(-11,3,None)]))
# try with labelled frame
df = DataFrame(np.random.randn(10, 4), index=list('abcdefghij'), columns=list('ABCD'))
result = df.iloc[1,1]
exp = df.ix['b','B']
self.assert_(result == exp)
result = df.iloc[:,2:3]
expected = df.ix[:,['C']]
assert_frame_equal(result, expected)
# negative indexing
result = df.iloc[-1,-1]
exp = df.ix['j','D']
self.assert_(result == exp)
# out-of-bounds exception
self.assertRaises(IndexError, df.iloc.__getitem__, tuple([10,5]))
# trying to use a label
self.assertRaises(ValueError, df.iloc.__getitem__, tuple(['j','D']))
    def test_iloc_setitem_frame(self):
""" originally from test_series.py """
df = DataFrame(np.random.randn(10, 4), index=list('abcdefghij'), columns=list('ABCD'))
df.iloc[1,1] = 1
result = df.iloc[1,1]
self.assert_(result == 1)
df.iloc[:,2:3] = 0
expected = df.iloc[:,2:3]
result = df.iloc[:,2:3]
assert_frame_equal(result, expected)
def test_iloc_setitem_series(self):
s = Series(np.random.randn(10), index=range(0,20,2))
s.iloc[1] = 1
result = s.iloc[1]
self.assert_(result == 1)
s.iloc[:4] = 0
expected = s.iloc[:4]
result = s.iloc[:4]
assert_series_equal(result, expected)
def test_iloc_multiindex(self):
mi_labels = DataFrame(np.random.randn(4, 3), columns=[['i', 'i', 'j'],
['A', 'A', 'B']],
index=[['i', 'i', 'j', 'k'], ['X', 'X', 'Y','Y']])
mi_int = DataFrame(np.random.randn(3, 3),
columns=[[2,2,4],[6,8,10]],
index=[[4,4,8],[8,10,12]])
# the first row
rs = mi_int.iloc[0]
xp = mi_int.ix[4].ix[8]
assert_series_equal(rs, xp)
# 2nd (last) columns
rs = mi_int.iloc[:,2]
xp = mi_int.ix[:,2]
assert_series_equal(rs, xp)
# corner column
rs = mi_int.iloc[2,2]
xp = mi_int.ix[:,2].ix[2]
self.assert_(rs == xp)
# this is basically regular indexing
rs = mi_labels.iloc[2,2]
xp = mi_labels.ix['j'].ix[:,'j'].ix[0,0]
self.assert_(rs == xp)
def test_loc_multiindex(self):
mi_labels = DataFrame(np.random.randn(3, 3), columns=[['i', 'i', 'j'],
['A', 'A', 'B']],
index=[['i', 'i', 'j'], ['X', 'X', 'Y']])
mi_int = DataFrame(np.random.randn(3, 3),
columns=[[2,2,4],[6,8,10]],
index=[[4,4,8],[8,10,12]])
# the first row
rs = mi_labels.loc['i']
xp = mi_labels.ix['i']
assert_frame_equal(rs, xp)
# 2nd (last) columns
rs = mi_labels.loc[:,'j']
xp = mi_labels.ix[:,'j']
assert_frame_equal(rs, xp)
# corner column
rs = mi_labels.loc['j'].loc[:,'j']
xp = mi_labels.ix['j'].ix[:,'j']
assert_frame_equal(rs,xp)
# with a tuple
rs = mi_labels.loc[('i','X')]
xp = mi_labels.ix[('i','X')]
assert_frame_equal(rs,xp)
rs = mi_int.loc[4]
xp = mi_int.ix[4]
assert_frame_equal(rs,xp)
def test_ix_general(self):
# ix general issues
# GH 2817
data={'amount': {0: 700, 1: 600, 2: 222, 3: 333, 4: 444},
'col': {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0},
'year': {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012}}
df = DataFrame(data).set_index(keys=['col','year'])
# this should raise correct error
self.assertRaises(KeyError, df.ix.__getitem__, tuple([4.0,2012]))
# this is ok
df.sortlevel(inplace=True)
df.ix[(4.0,2012)]
def test_xs_multiindex(self):
# GH2903
columns = MultiIndex.from_tuples([('a', 'foo'), ('a', 'bar'), ('b', 'hello'), ('b', 'world')], names=['lvl0', 'lvl1'])
df = DataFrame(np.random.randn(4, 4), columns=columns)
df.sortlevel(axis=1,inplace=True)
result = df.xs('a', level='lvl0', axis=1)
expected = df.iloc[:,0:2].loc[:,'a']
assert_frame_equal(result,expected)
result = df.xs('foo', level='lvl1', axis=1)
expected = df.iloc[:, 1:2].copy()
expected.columns = expected.columns.droplevel('lvl1')
assert_frame_equal(result, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df['c'] = np.nan
self.assert_(df['c'].dtype == np.float64)
df.ix[0,'c'] = 'foo'
expected = DataFrame([{"a": 1, "c" : 'foo'}, {"a": 3, "b": 2, "c" : np.nan}])
assert_frame_equal(df,expected)
def test_setitem_iloc(self):
# setitem with an iloc list
df = DataFrame(np.arange(9).reshape((3, 3)), index=["A", "B", "C"], columns=["A", "B", "C"])
df.iloc[[0,1],[1,2]]
df.iloc[[0,1],[1,2]] += 100
expected = DataFrame(np.array([0,101,102,3,104,105,6,7,8]).reshape((3, 3)), index=["A", "B", "C"], columns=["A", "B", "C"])
assert_frame_equal(df,expected)
def test_dups_fancy_indexing(self):
# GH 3455
from pandas.util.testing import makeCustomDataframe as mkdf
df= mkdf(10, 3)
df.columns = ['a','a','b']
cols = ['b','a']
result = df[['b','a']].columns
expected = Index(['b','a','a'])
self.assert_(result.equals(expected))
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
``` |
{
"source": "jnothman/pickleback",
"score": 2
} |
#### File: jnothman/pickleback/setup.py
```python
import os
import sys
from setuptools import setup, find_packages
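# Added note: setup_package() below temporarily chdirs into the directory containing this
# setup.py so that find_packages() and setup.cfg resolve relative to the project root even
# when the script is invoked from another working directory; the original cwd is restored
# in the finally block.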
def setup_package():
src_path = os.path.dirname(os.path.abspath(sys.argv[0]))
old_path = os.getcwd()
os.chdir(src_path)
sys.path.insert(0, src_path)
try:
# See setup.cfg
setup(name='pickleback',
version='0.1a1',
packages=find_packages(),
setup_requires=['pytest-runner'],
tests_require=['pytest>=2.7', 'pytest-cov~=2.4', 'matplotlib'],
)
# We have considered installing a console script, but avoid it to
# avoid environment mismatches that would break pickles.
finally:
del sys.path[0]
os.chdir(old_path)
return
if __name__ == '__main__':
setup_package()
``` |
{
"source": "jnothman/sphinx-gallery",
"score": 2
} |
#### File: sphinxgallery/tests/test_backreferences.py
```python
from __future__ import division, absolute_import, print_function
import sphinxgallery.backreferences as sg
from nose.tools import assert_equal
def test_thumbnail_div():
"""Test if the thumbnail div generates the correct string"""
html_div = sg._thumbnail_div('fake_dir', 'test_file.py', 'test formating')
reference = """
.. raw:: html
<div class="sphx-glr-thumbContainer" tooltip="test formating">
.. figure:: /fake_dir/images/thumb/sphx_glr_test_file_thumb.png
:ref:`sphx_glr_fake_dir_test_file.py`
.. raw:: html
</div>
"""
assert_equal(html_div, reference)
``` |
{
"source": "Jnotyogi/NN",
"score": 4
} |
#### File: Jnotyogi/NN/NNJ 3c &change.py
```python
import numpy as np
J = 6 ; M = 6 # numbers of nodes in the hidden layers 1 and 2 (set M to zero for just one hidden layer)
def sigmoid(f): # but other activation functions can be tried
return 1.0/(1+ np.exp(-f))
def sigmderiv(z): # derivative of above function but note that z is the value sigmoid(f) not the sigmoid argument f
return z * (1.0 - z)
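# Added illustration (not part of the original script): sigmderiv expects the already
# activated value z = sigmoid(f), because d/df sigmoid(f) = sigmoid(f) * (1 - sigmoid(f)).
# Quick numerical sanity check against a central finite difference:
_f, _h = 0.5, 1e-6
assert abs((sigmoid(_f + _h) - sigmoid(_f - _h)) / (2 * _h) - sigmderiv(sigmoid(_f))) < 1e-6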
class NeuralNetwork:
def __init__(self, x, y):
if M > 0: # ie if there is a second hidden layer
self.z3ea = x # input
self.w32am = np.random.rand(A,M) * 0.1 # initial weights for layer 3 to layer 2 (hidden)
self.w21mj = np.random.rand(M,J) * 0.1 # initial weights for layer 2 to layer 1 (hidden)
else:
            self.z2em = x # input (here index m plays the role of a, since M = 0)
self.w21mj = np.random.rand(A,J) * 0.1 # initial weights for layer 2 to layer 1 (hidden)
self.w10jp = np.random.rand(J,P) * 0.1 # initial weights for layer 1 to layer 0 (output)
self.t0ep = y # Target training results of e training cases for layer 0 (output): p values each
def feedforward(self):
if M > 0:
self.z2em = sigmoid(np.dot(self.z3ea, self.w32am)) # = sigmoid(f2em)
self.z1ej = sigmoid(np.dot(self.z2em, self.w21mj)) # = sigmoid(f1ej)
self.z0ep = sigmoid(np.dot(self.z1ej, self.w10jp)) # = sigmoid(f0ep)
def backprop(self):
dEp = (self.t0ep - self.z0ep) * sigmderiv(self.z0ep) # = -dE/df0ep
self.w10jp += np.dot(self.z1ej.T, dEp)
dEj = np.dot(dEp, self.w10jp.T) * sigmderiv(self.z1ej) # = -de/df1ej
self.w21mj += np.dot(self.z2em.T, dEj)
if M > 0:
dEm = np.dot(dEj,self.w21mj.T)*sigmderiv(self.z2em) # = -dE/df2em
self.w32am += np.dot(self.z3ea.T, dEm)
def singlerun(self,zInput):
if M > 0:
z2m = sigmoid(np.dot(zInput, self.w32am))
z1j = sigmoid(np.dot(z2m, self.w21mj))
else:
z1j = sigmoid(np.dot(zInput, self.w21mj))
return sigmoid(np.dot(z1j, self.w10jp))
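# Added note on the naming convention (inferred from the code above): the trailing letters
# name array axes. e indexes training examples, a the input nodes, m the layer-2 nodes,
# j the layer-1 nodes and p the output nodes; z<layer><axes> holds activations and
# w<from><to><axes> holds the weight matrix between two layers.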
# main
# input defined by E rows (training examples) down and A columns (input-layer nodes) across
TrainingInput_ea = np.array([
[0,1,1,1,0,0],
[0,1,1,1,0,1],
[0,1,1,1,1,0],
[1,1,1,1,0,0],
[0,1,1,0,1,1],
[0,0,1,1,1,0],
[0,0,0,1,0,0],
[0,1,0,0,0,0],
[0,0,1,0,0,0],
[1,0,0,0,0,0]])
TrainingOutput_ep = np.array([[1,0],[1,0],[1,0],[1,0],[1,0],[1,0], [0,1],[0,1],[0,1],[0,1]])
E = TrainingInput_ea.shape[0]
A = TrainingInput_ea.shape[1]
P = TrainingOutput_ep.shape[1]
nn = NeuralNetwork(TrainingInput_ea,TrainingOutput_ep) # define class and its initial setup with random weights
T = 9000 # train for this many passes, each using all E example/target pairs
for ts in range(T):
nn.feedforward() # get output result for this set, starting with weights from previous set
nn.backprop() # modify the weighting factors following this set
print("A M J P T : ", A, M, J, P, T)
print("From training:")
print(nn.z0ep)
# Single runs each with their own input data: print target then result
print("New cases:")
print(1,0,nn.singlerun(np.array([0,1,1,1,0,0]))) # same as training case
print(0,1,nn.singlerun(np.array([0,0,1,0,0,0]))) # same as training case
print("u",nn.singlerun(np.array([0,0,0,0,0,1]))) # unspecified as not one of the training cases
print()
``` |
{
"source": "JNovaM/AutoForward",
"score": 3
} |
#### File: JNovaM/AutoForward/animesdb.py
```python
from sqlalchemy import Column, String, BigInteger, Integer
from sqlalchemy.orm import sessionmaker, Session
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
Base = declarative_base()
class Anime(Base):
__tablename__ = 'anime'
id = Column(Integer, primary_key=True, autoincrement=True)
att = Column(String)
class ChannelFrom(Base):
__tablename__ = 'channel_from'
id = Column(Integer, primary_key=True, autoincrement=True)
att = Column(BigInteger)
class ChannelTo(Base):
__tablename__ = 'channel_to'
id = Column(Integer, primary_key=True, autoincrement=True)
att = Column(BigInteger)
tables = ('anime', 'channel_from', 'channel_to')
def asign(table: str):
if table == tables[0]:
obj = Anime
elif table == tables[1]:
obj = ChannelFrom
else:
obj = ChannelTo
return obj
class DBHelper:
def __init__(self, dbname: str):
if dbname.startswith('postgres://'):
dbname = dbname.replace('postgres://', 'postgresql://', 1)
self.engine = create_engine(dbname)
Base.metadata.bind = self.engine
Base.metadata.create_all(checkfirst=True)
def get_items(self, table: str):
session: Session = sessionmaker(self.engine)()
try:
db_item = session.query(asign(table)).all()
session.close()
return db_item
except Exception as e:
session.close()
print(f'An error occurred retrieving items. Item was\n{id}')
raise e
def add_items(self, table: str, attributes: list):
session: Session = sessionmaker(self.engine)()
try:
obj = asign(table)
for element in attributes:
if not session.query(obj).filter_by(att=element).first():
session.add(obj(att=element))
session.commit()
session.close()
return True
except Exception as e:
session.close()
            print(f'An error occurred in insertion. The item to insert was\n{id}')
print(e)
return False
def del_items(self, table: str, list_id: list):
session: Session = sessionmaker(self.engine)()
try:
for element in list_id:
db_item = session.query(asign(table)).filter_by(id=element).first()
if db_item:
session.delete(db_item)
session.commit()
session.close()
return True
except Exception as e:
session.close()
print(f'An error occurred updating. The item to update was\n{id}')
return False
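# Usage sketch (added illustration; the SQLite URL is a stand-in, not from this project):
#   db = DBHelper('sqlite:///:memory:')
#   db.add_items('anime', ['One Piece'])
#   rows = db.get_items('anime')
#   db.del_items('anime', [rows[0].id])
# Note that __init__ also rewrites Heroku-style 'postgres://' URLs to 'postgresql://',
# which newer SQLAlchemy releases require.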
``` |
{
"source": "jnovikov/ctfscoreboard",
"score": 2
} |
#### File: scoreboard/attachments/testing.py
```python
import hashlib
import flask
import StringIO
from scoreboard import main
app = main.get_app()
files = {}
def send(attachment):
"""Send the attachment to the client."""
return flask.send_file(files[attachment.aid],
attachment_filename="testing.txt",
as_attachment=True)
def delete(attachment):
"""Delete the attachment from disk."""
del files[attachment.aid]
def upload(fp):
"""Upload the file attachment to the storage medium."""
md = hashlib.sha256()
ret = StringIO.StringIO()
while True:
blk = fp.read(2**16)
if not blk:
break
md.update(blk)
ret.write(blk)
aid = md.hexdigest()
ret.seek(0)
files[aid] = ret
return aid, ('test://%s' % aid)
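# Added note: upload() hashes the stream in 64 KiB chunks, so the attachment id is simply
# the hex SHA-256 of the file contents; identical uploads collapse to the same aid, and the
# test:// URL is fully deterministic, which the in-memory `files` dict and the tests rely on.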
```
#### File: ctfscoreboard/scoreboard/mail.py
```python
from email.mime import text
import email.utils
import smtplib
from scoreboard import main
app = main.get_app()
class MailFailure(Exception):
"""Inability to send mail."""
pass
def send(message, subject, to, to_name=None, sender=None, sender_name=None):
sender = sender or app.config.get('MAIL_FROM')
sender_name = sender_name or app.config.get('MAIL_FROM_NAME')
host = app.config.get('MAIL_HOST')
try:
server = smtplib.SMTP(host)
except smtplib.SMTPConnectError as ex:
app.logger.error('Unable to send mail: %s', str(ex))
raise MailFailure()
msg = text.MIMEText(message)
msg['Subject'] = subject
msg['To'] = email.utils.formataddr((to_name, to))
msg['From'] = email.utils.formataddr((sender_name, sender))
try:
if app.debug:
server.set_debuglevel(True)
server.sendmail(sender, [to], msg.as_string())
except smtplib.SMTPException as ex:
app.logger.error('Unable to send mail: %s', str(ex))
raise MailFailure()
finally:
try:
server.quit()
except smtplib.SMTPException:
pass
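# Usage sketch (added illustration; the address and subject are placeholders, not taken
# from the project):
#   send('Scores are frozen for the last hour.', 'Scoreboard notice',
#        'player@example.com', to_name='Player')
# MAIL_FROM, MAIL_FROM_NAME and MAIL_HOST all come from the Flask app config; failures are
# surfaced as MailFailure so callers can report them to the user.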
```
#### File: scoreboard/tests/rest_test.py
```python
import flask
import json
import StringIO
from scoreboard.tests import base
from scoreboard.tests import data
from scoreboard import models
from scoreboard import rest
from scoreboard import utils
from scoreboard import views
# Needed imports
_ = (rest, views)
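# Added note: importing scoreboard.rest and scoreboard.views registers their routes on the
# Flask app as an import side effect; assigning them to _ above keeps linters from flagging
# the imports as unused.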
def makeTestUser():
u = models.User.create('<EMAIL>', 'Nick', 'hunter2')
models.db.session.commit()
return u
def makeTestTeam(user):
t = models.Team.create('Test')
user.team = t
models.db.session.commit()
return t
def makeTestChallenges():
cats = data.make_categories()
tags = data.make_tags()
challs = data.make_challenges(cats, tags)
models.db.session.commit()
return challs, cats
class ConfigzTest(base.RestTestCase):
PATH = '/api/configz'
def testGetFails(self):
with self.queryLimit(0):
response = self.client.get(self.PATH)
self.assert403(response)
testGetFailsNonAdmin = base.authenticated_test(testGetFails)
@base.admin_test
def testAdmin(self):
with self.queryLimit(0):
response = self.client.get(self.PATH)
self.assert200(response)
class PageTest(base.RestTestCase):
PATH = '/api/page/home'
PATH_NEW = '/api/page/new'
PATH_404 = '/api/page/404'
def setUp(self):
super(PageTest, self).setUp()
page = models.Page()
page.path = 'home'
page.title = 'Home'
page.contents = 'Home Page'
models.db.session.add(page)
models.db.session.commit()
self.page = page
def testGetAnonymous(self):
with self.queryLimit(1):
response = self.client.get(self.PATH)
self.assert200(response)
self.assertEqual(self.page.title, response.json['title'])
self.assertEqual(self.page.contents, response.json['contents'])
def testGetNonExistent(self):
with self.queryLimit(1):
self.assert404(self.client.get(self.PATH_404))
@base.admin_test
def testDeletePage(self):
with self.client as c:
self.assert200(c.get(self.PATH))
with self.queryLimit(1):
self.assert200(c.delete(self.PATH))
self.assert404(c.get(self.PATH))
@base.admin_test
def testCreatePage(self):
page_data = dict(
title='Test',
contents='Test Page Contents',
)
with self.queryLimit(3):
resp = self.postJSON(self.PATH_NEW, page_data)
self.assert200(resp)
self.assertEqual(page_data['title'], resp.json['title'])
self.assertEqual(page_data['contents'], resp.json['contents'])
@base.authenticated_test
def testCreatePageNonAdmin(self):
page_data = dict(
title='Test',
contents='Test Page Contents',
)
with self.queryLimit(0):
resp = self.postJSON(self.PATH_NEW, page_data)
self.assert403(resp)
@base.admin_test
def testUpdatePage(self):
page_data = dict(
title='Test',
contents='Test Page Contents',
)
with self.queryLimit(3):
resp = self.postJSON(self.PATH, page_data)
self.assert200(resp)
self.assertEqual(page_data['title'], resp.json['title'])
self.assertEqual(page_data['contents'], resp.json['contents'])
class UpdateTeam(base.RestTestCase):
PATH = '/api/teams/change'
_state = None
def createTeam(self, teamname):
team = models.Team.create(teamname)
models.db.session.commit()
return team
def changeTeam(self, tid, code):
return self.putJSON(self.PATH, {
'uid': self.authenticated_client.uid,
'team_tid': tid,
'code': code
})
def patchState(self, time='BEFORE'):
self._state = utils.GameTime.state
utils.GameTime.state = staticmethod(lambda: time)
def restoreState(self):
if self._state:
utils.GameTime.state = self._state
@base.authenticated_test
def testChangeTeam(self):
self.patchState()
test_team = self.createTeam('test')
resp = self.changeTeam(test_team.tid, test_team.code)
self.assert200(resp)
self.restoreState()
@base.authenticated_test
def testTeamChangeWorked(self):
self.patchState()
test_team = self.createTeam('test2')
self.changeTeam(test_team.tid, test_team.code)
tid = self.authenticated_client.user.team.tid
self.assertEqual(tid, test_team.tid)
self.restoreState()
@base.authenticated_test
def testEmptyTeamIsDeleted(self):
self.patchState()
test_team_first = self.createTeam('first')
test_team_second = self.createTeam('second')
self.changeTeam(test_team_first.tid, test_team_first.code)
self.changeTeam(test_team_second.tid, test_team_second.code)
tid = self.authenticated_client.user.team.tid
self.assertEqual(tid, test_team_second.tid)
self.assertIsNone(models.Team.query.get(test_team_first.tid))
self.restoreState()
@base.authenticated_test
def testTeamWithSolvesNotDeleted(self):
self.patchState()
test_team_first = self.createTeam('first')
test_team_second = self.createTeam('second')
self.changeTeam(test_team_first.tid, test_team_first.code)
chall = models.Challenge.create('Foo', 'Foo', 1, 'Foo', 'foo')
models.Answer.create(chall, test_team_first, 'Foo')
self.changeTeam(test_team_second.tid, test_team_second.code)
tid = self.authenticated_client.user.team.tid
self.assertEqual(tid, test_team_second.tid)
self.assertIsNotNone(models.Team.query.get(test_team_first.tid))
self.restoreState()
@base.authenticated_test
def testCantSwitchAfterStart(self):
self.patchState('DURING')
test_team = self.createTeam('test')
resp = self.changeTeam(test_team.tid, test_team.code)
self.assert403(resp)
self.restoreState()
class AttachmentTest(base.RestTestCase):
PATH = '/api/attachments/%s'
ATTACHMENT_FIELDS = ('aid', 'filename', 'challenges')
text = "This is a test"
name = "test.txt"
def uploadFile(self, filename, text):
with self.admin_client as c:
string = StringIO.StringIO()
string.write(text)
string.seek(0)
return c.post('/api/attachments', data={
'file': (string, filename)
})
def fetchFile(self, aid):
with self.admin_client as c:
return c.get(self.PATH % aid)
def testUploadFile(self):
resp = self.uploadFile("test.txt", "This is a test")
self.assert200(resp)
# Calculated using an external sha256 tool
self.assertEquals(
resp.json['aid'],
"c7be1ed902fb8dd4d48997c6452f5d7e"
"509fbcdbe2808b16bcf4edce4c07d14e")
def testQueryFile(self):
postresp = self.uploadFile(self.name, self.text)
getresp = self.fetchFile(postresp.json['aid'])
self.assert200(getresp)
def testFileQueryAID(self):
postresp = self.uploadFile(self.name, self.text)
getresp = self.fetchFile(postresp.json['aid'])
self.assertEqual(getresp.json['aid'], postresp.json['aid'])
def testFileQueryName(self):
postresp = self.uploadFile(self.name, self.text)
getresp = self.fetchFile(postresp.json['aid'])
self.assertEqual(getresp.json['filename'], self.name)
def testFileChallengesEmpty(self):
postresp = self.uploadFile(self.name, self.text)
getresp = self.fetchFile(postresp.json['aid'])
self.assertEqual(len(getresp.json['challenges']), 0)
def testRetrieveFile(self):
postresp = self.uploadFile(self.name, self.text)
with self.admin_client as c:
getresp = c.get('/attachments/%s' % postresp.json['aid'])
self.assert200(getresp)
def testFileRetrievalValue(self):
postresp = self.uploadFile(self.name, self.text)
with self.admin_client as c:
getresp = c.get('/attachment/%s' % postresp.json['aid'])
self.assertEqual(getresp.get_data(), self.text)
def testFileDelete(self):
postresp = self.uploadFile(self.name, self.text)
with self.admin_client as c:
delresp = c.delete('/api/attachments/%s' % postresp.json['aid'])
self.assert200(delresp)
def testDeletionRemovesFile(self):
postresp = self.uploadFile(self.name, self.text)
with self.admin_client as c:
c.delete('/api/attachments/%s' % postresp.json['aid'])
with self.admin_client as c:
getresp = c.get('/api/attachments/%s' % postresp.json['aid'])
self.assert404(getresp)
def testFileUpdate(self):
new_name = "file.png"
postresp = self.uploadFile(self.name, self.text)
with self.admin_client as c:
putresp = c.put(
'/api/attachments/%s' % postresp.json['aid'],
data=json.dumps({
'filename': new_name,
'aid': postresp.json['aid'],
'challenges': [],
}), content_type="application/json")
self.assert200(putresp)
def testUpdateChangesName(self):
new_name = "file.png"
postresp = self.uploadFile(self.name, self.text)
with self.admin_client as c:
c.put('/api/attachments/%s' % postresp.json['aid'],
data=json.dumps({
'filename': new_name,
'aid': postresp.json['aid'],
'challenges': [],
}), content_type="application/json")
getresp = c.get('/api/attachments/%s' % postresp.json['aid'])
self.assertEqual(getresp.json['filename'], new_name)
class UserTest(base.RestTestCase):
PATH = '/api/users/%d'
USER_FIELDS = ('admin', 'nick', 'email', 'team_tid', 'uid')
def testGetAnonymous(self):
path = self.PATH % makeTestUser().uid
with self.queryLimit(0):
self.assert403(self.client.get(path))
def testGetNonExistent(self):
path = self.PATH % 999
with self.queryLimit(0):
resp = self.client.get(path)
self.assert403(resp)
testGetNonExistentAuth = base.authenticated_test(
testGetNonExistent)
@base.admin_test
def testGetNonExistentAdmin(self):
path = self.PATH % 999
with self.queryLimit(1):
resp = self.client.get(path)
self.assert404(resp)
@base.authenticated_test
def testGetSelf(self):
user = self.authenticated_client.user
with self.queryLimit(1):
resp = self.client.get(self.PATH % user.uid)
self.assert200(resp)
self.assertEqual(user.email, resp.json['email'])
self.assertEqual(user.nick, resp.json['nick'])
self.assertEqual(user.admin, resp.json['admin'])
@base.authenticated_test
def testUpdateUser(self):
user = self.authenticated_client.user
data = {'password': '<PASSWORD>'} # for security
with self.queryLimit(2):
self.assert200(self.putJSON(
self.PATH % user.uid,
data))
@base.authenticated_test
def testUpdateUserNoAccess(self):
uid = self.admin_client.user.uid
data = {'password': '<PASSWORD>'}
with self.queryLimit(0):
resp = self.putJSON(self.PATH % uid, data)
self.assert403(resp)
@base.admin_test
def testUpdateUserAdmin(self):
uid = self.authenticated_client.user.uid
data = {'nick': 'Lame'}
with self.queryLimit(2):
resp = self.putJSON(self.PATH % uid, data)
self.assert200(resp)
self.assertEqual('Lame', resp.json['nick'])
self.assertNotEqual(
'Lame',
self.authenticated_client.user.team.name)
@base.admin_test
def testUpdateUsersNoTeams(self):
uid = self.authenticated_client.user.uid
self.app.config['TEAMS'] = False
data = {'nick': 'Lame'}
with self.queryLimit(3):
resp = self.putJSON(self.PATH % uid, data)
self.assert200(resp)
self.assertEqual('Lame', resp.json['nick'])
self.assertEqual('Lame', self.authenticated_client.user.team.name)
@base.admin_test
def testUpdateUserPromote(self):
user = self.authenticated_client.user
data = {'nick': user.nick, 'admin': True}
# yes, this is a lot, but promoting is infrequent
with self.queryLimit(7):
resp = self.putJSON(self.PATH % user.uid, data)
self.assert200(resp)
self.assertTrue(resp.json['admin'])
@base.admin_test
def testUpdateUserDemote(self):
user = self.admin_client.user
data = {'nick': user.nick, 'admin': False}
with self.queryLimit(3):
resp = self.putJSON(self.PATH % user.uid, data)
self.assert200(resp)
self.assertFalse(resp.json['admin'])
@base.authenticated_test
def testUpdateUserNoSelfPromotion(self):
uid = self.authenticated_client.user.uid
data = {'admin': True}
with self.queryLimit(1):
resp = self.putJSON(self.PATH % uid, data)
self.assert200(resp)
self.assertFalse(resp.json['admin'])
@base.admin_test
def testUpdateUserNoAnswers(self):
user = self.authenticated_client.user
team = self.authenticated_client.user.team
chall = models.Challenge.create('Foo', 'Foo', 1, 'Foo', 'foo')
models.Answer.create(chall, team, 'Foo')
models.db.session.commit()
data = {'nick': user.nick, 'admin': True}
with self.queryLimit(3):
resp = self.putJSON(self.PATH % user.uid, data)
self.assert400(resp)
user = models.User.query.get(user.uid)
self.assertFalse(user.admin)
@base.authenticated_test
def testGetUsersNoAccess(self):
with self.queryLimit(0):
resp = self.client.get('/api/users')
self.assert403(resp)
@base.admin_test
def testGetUsers(self):
self.admin_client.user
with self.queryLimit(1):
resp = self.client.get('/api/users')
self.assert200(resp)
self.assertIsInstance(resp.json, dict)
self.assertIn('users', resp.json)
self.assertIsInstance(resp.json['users'], list)
users = resp.json['users']
self.assertEqual(2, len(users))
for u in users:
self.assertItemsEqual(self.USER_FIELDS, u.keys())
@staticmethod
def default_data():
return {
'email': '<EMAIL>',
'nick': 'test3',
'password': '<PASSWORD>',
'team_id': 'new',
'team_name': 'New Team',
'team_code': None,
}
def testRegisterUserNewTeam(self):
data = self.default_data()
# TODO: maybe optimize?
with self.client:
with self.queryLimit(9):
resp = self.postJSON('/api/users', data)
self.assert200(resp)
self.assertItemsEqual(self.USER_FIELDS, resp.json.keys())
self.assertEqual(resp.json['uid'], flask.session['user'])
self.assertEqual(resp.json['admin'], flask.session['admin'])
self.assertEqual(resp.json['team_tid'], flask.session['team'])
def testRegisterUserExistingTeam(self):
team = self.authenticated_client.team
data = self.default_data()
data.update({
'team_id': team.tid,
'team_name': None,
'team_code': team.code,
})
with self.client:
with self.queryLimit(8):
resp = self.postJSON('/api/users', data)
self.assert200(resp)
self.assertItemsEqual(self.USER_FIELDS, resp.json.keys())
self.assertEqual(resp.json['uid'], flask.session['user'])
self.assertEqual(resp.json['admin'], flask.session['admin'])
self.assertEqual(resp.json['team_tid'], flask.session['team'])
self.assertEqual(team.tid, resp.json['team_tid'])
def testRegisterUserTeamWrongCode(self):
team = self.authenticated_client.team
data = self.default_data()
data.update({
'team_id': team.tid,
'team_name': None,
'team_code': 'xxx',
})
with self.client:
with self.queryLimit(1):
resp = self.postJSON('/api/users', data)
self.assert400(resp)
@base.authenticated_test
def testRegisterUserLoggedInFails(self):
data = self.default_data()
with self.queryLimit(0):
resp = self.postJSON('/api/users', data)
self.assert400(resp)
def testRegisterUserNoNick(self):
data = self.default_data()
del data['nick']
with self.queryLimit(0):
self.assert400(self.postJSON('/api/users', data))
def testRegisterUserNoTeam(self):
data = self.default_data()
del data['team_name']
del data['team_id']
with self.queryLimit(0):
self.assert400(self.postJSON('/api/users', data))
def testRegisterUserInviteKey(self):
self.app.config['INVITE_KEY'] = 'foobar'
data = self.default_data()
data['invite_key'] = self.app.config['INVITE_KEY']
with self.client:
resp = self.postJSON('/api/users', data)
self.assert200(resp)
def testRegisterUserNoInviteKey(self):
self.app.config['INVITE_KEY'] = 'foobar'
data = self.default_data()
with self.client:
with self.queryLimit(0):
resp = self.postJSON('/api/users', data)
self.assert400(resp)
def testRegisterUserWrongInviteKey(self):
self.app.config['INVITE_KEY'] = 'foobar'
data = self.default_data()
data['invite_key'] = 'notright'
with self.client:
with self.queryLimit(0):
resp = self.postJSON('/api/users', data)
self.assert400(resp)
class TeamTest(base.RestTestCase):
LIST_URL = '/api/teams'
def setUp(self):
super(TeamTest, self).setUp()
self.user = makeTestUser()
self.team = makeTestTeam(self.user)
self.team_path = '/api/teams/%d' % self.team.tid
@base.authenticated_test
def testGetTeam(self):
with self.queryLimit(4):
resp = self.client.get(self.team_path)
self.assert200(resp)
self.assertEqual(0, len(resp.json['players']))
self.assertEqual(self.team.name, resp.json['name'])
# TODO: check other fields
def testGetTeamAnonymous(self):
with self.queryLimit(0):
self.assert403(self.client.get(self.team_path))
@base.admin_test
def testGetTeamAdmin(self):
with self.queryLimit(4):
resp = self.client.get(self.team_path)
self.assert200(resp)
self.assertEqual(1, len(resp.json['players']))
@base.admin_test
def testUpdateTeamAdmin(self):
data = {'name': 'Updated'}
with self.queryLimit(6):
resp = self.putJSON(self.team_path, data)
self.assert200(resp)
self.assertEqual('Updated', resp.json['name'])
team = models.Team.query.get(self.team.tid)
self.assertEqual('Updated', team.name)
def testGetTeamList(self):
with self.client as c:
with self.queryLimit(3) as ctr:
resp = c.get(self.LIST_URL)
n_queries = ctr.query_count
self.assert200(resp)
models.Team.create('Test 2')
models.Team.create('Test 3')
models.Team.create('Test 4')
models.db.session.commit()
with self.queryLimit(n_queries):
resp = c.get(self.LIST_URL)
self.assert200(resp)
class SessionTest(base.RestTestCase):
PATH = '/api/session'
def testGetSessionAnonymous(self):
self.assert403(self.client.get(self.PATH))
@base.authenticated_test
def testGetSessionAuthenticated(self):
with self.queryLimit(1):
resp = self.client.get(self.PATH)
self.assert200(resp)
self.assertEqual(
self.authenticated_client.user.nick,
resp.json['user']['nick'])
self.assertEqual(
self.authenticated_client.team.name,
resp.json['team']['name'])
@base.admin_test
def testGetSessionAdmin(self):
with self.queryLimit(1):
resp = self.client.get(self.PATH)
self.assert200(resp)
self.assertEqual(
self.admin_client.user.nick,
resp.json['user']['nick'])
self.assertTrue(resp.json['user']['admin'])
self.assertItemsEqual(
{'tid': 0, 'score': 0, 'name': None,
'code': None},
resp.json['team'])
def testSessionLoginSucceeds(self):
data = {
'email': self.authenticated_client.user.email,
'password': <PASSWORD>,
}
with self.client:
with self.queryLimit(4):
resp = self.postJSON(self.PATH, data)
self.assert200(resp)
self.assertEqual(
flask.session['user'],
self.authenticated_client.user.uid)
self.assertEqual(
flask.session['team'],
self.authenticated_client.team.tid)
self.assertFalse(flask.session['admin'])
self.assertEqual(
flask.g.user.email,
self.authenticated_client.user.email)
def testSessionLoginFailsBadPassword(self):
data = {
'email': self.authenticated_client.user.email,
'password': '<PASSWORD>',
}
with self.client:
with self.queryLimit(1):
resp = self.postJSON(self.PATH, data)
self.assert403(resp)
self.assertIsNone(flask.session.get('user'))
self.assertIsNone(flask.session.get('team'))
self.assertIsNone(flask.session.get('admin'))
def testSessionLoginFailsBadUser(self):
data = {
'email': '<EMAIL>',
'password': '<PASSWORD>',
}
with self.client:
with self.queryLimit(1):
resp = self.postJSON(self.PATH, data)
self.assert403(resp)
self.assertIsNone(flask.session.get('user'))
self.assertIsNone(flask.session.get('team'))
self.assertIsNone(flask.session.get('admin'))
@base.admin_test
def testSessionLoginAlreadyLoggedIn(self):
data = {
'email': self.authenticated_client.user.email,
'password': <PASSWORD>,
}
# This makes sure admin->non-admin downgrades properly
with self.client:
with self.queryLimit(4):
resp = self.postJSON(self.PATH, data)
self.assert200(resp)
self.assertEqual(
flask.session['user'],
self.authenticated_client.user.uid)
self.assertEqual(
flask.session['team'],
self.authenticated_client.team.tid)
self.assertFalse(flask.session['admin'])
self.assertEqual(
flask.g.user.email,
self.authenticated_client.user.email)
@base.authenticated_test
def testSessionLogout(self):
with self.client as c:
with self.queryLimit(1):
resp = c.delete(self.PATH)
self.assert200(resp)
self.assertIsNone(flask.session.get('user'))
self.assertIsNone(flask.session.get('team'))
self.assertIsNone(flask.session.get('admin'))
def testSessionLogoutAnonymous(self):
with self.client as c:
with self.queryLimit(0):
resp = c.delete(self.PATH)
self.assert200(resp)
self.assertIsNone(flask.session.get('user'))
self.assertIsNone(flask.session.get('team'))
self.assertIsNone(flask.session.get('admin'))
class ChallengeTest(base.RestTestCase):
PATH_LIST = '/api/challenges'
PATH_SINGLE = '/api/challenges/%d'
def setUp(self):
super(ChallengeTest, self).setUp()
self.challs, _ = makeTestChallenges()
self.chall = self.challs[0]
self.PATH_SINGLE %= self.chall.cid
def testGetListAnonymous(self):
with self.queryLimit(0):
self.assert403(self.client.get(self.PATH_LIST))
testGetListAuthenticated = base.authenticated_test(
testGetListAnonymous)
@base.admin_test
def testGetListAdmin(self):
# TODO: fix to not be O(n)
with self.queryLimit(None):
resp = self.client.get(self.PATH_LIST)
self.assert200(resp)
self.assertEqual(len(self.challs), len(resp.json['challenges']))
def newChallengeData(self):
return {
'name': 'Chall 1',
'description': 'Challenge 1',
'points': 200,
'answer': 'abc',
'cat_slug': self.chall.cat_slug,
'unlocked': True,
'validator': 'static_pbkdf2',
}
def testCreateChallengeAnonymous(self):
data = self.newChallengeData()
with self.queryLimit(0):
self.assert403(self.postJSON(
self.PATH_LIST, data))
testCreateChallengeAuthenticated = base.authenticated_test(
testCreateChallengeAnonymous)
@base.admin_test
def testCreateChallenge(self):
# TODO: variants
data = self.newChallengeData()
# TODO: optimize count
with self.queryLimit(8):
resp = self.postJSON(self.PATH_LIST, data)
self.assert200(resp)
for field in ('name', 'description', 'points', 'unlocked'):
self.assertEqual(data[field], resp.json[field])
def getUpdateData(self):
return {
'name': 'Renamed',
'points': 1,
'weight': 12,
'unlocked': False
}
@base.admin_test
def testUpdateChallenge(self):
data = self.getUpdateData()
with self.queryLimit(6):
resp = self.putJSON(self.PATH_SINGLE, data)
self.assert200(resp)
for k in data.keys():
self.assertEqual(data[k], resp.json[k])
def testUpdateChallengeAnonymous(self):
data = self.getUpdateData()
with self.queryLimit(0):
self.assert403(self.putJSON(self.PATH_SINGLE, data))
testUpdateChallengeAuthenticated = base.authenticated_test(
testUpdateChallengeAnonymous)
@base.admin_test
def testGetSingleton(self):
with self.queryLimit(6):
resp = self.client.get(self.PATH_SINGLE)
self.assert200(resp)
for field in ('name', 'points', 'description', 'unlocked'):
self.assertEqual(getattr(self.chall, field), resp.json[field])
def testGetSingletonAnonymous(self):
with self.queryLimit(0):
self.assert403(self.client.get(self.PATH_SINGLE))
testGetSingletonAuthenticated = base.authenticated_test(
testGetSingletonAnonymous)
@base.admin_test
def testDeleteChallenge(self):
with self.queryLimit(5):
self.assert200(self.client.delete(self.PATH_SINGLE))
def testDeleteChallengeAnonymous(self):
with self.queryLimit(0):
self.assert403(self.client.delete(self.PATH_SINGLE))
testDeleteChallengeAuthenticated = base.authenticated_test(
testDeleteChallengeAnonymous)
class ScoreboardTest(base.RestTestCase):
PATH = '/api/scoreboard'
def setUp(self):
super(ScoreboardTest, self).setUp()
data.create_all() # Make a bunch of data for scoreboard
def testGetScoreboard(self):
resp = self.client.get(self.PATH)
self.assert200(resp)
# TODO: check contents
class CategoryTest(base.RestTestCase):
PATH_LIST = '/api/categories'
PATH_SINGLE = '/api/categories/%s'
def setUp(self):
super(CategoryTest, self).setUp()
self.challs, self.cats = makeTestChallenges()
self.cat = self.cats[0]
self.PATH_SINGLE %= self.cat.slug
def _testGetList(self):
with self.queryLimit(3):
resp = self.client.get(self.PATH_LIST)
self.assert200(resp)
self.assertEqual(len(self.cats), len(resp.json['categories']))
# TODO: check that the expected fields are visible and that others are
# not.
testGetListAuthenticated = base.authenticated_test(_testGetList)
testGetListAdmin = base.admin_test(_testGetList)
def testGetListAnonymous(self):
with self.queryLimit(0):
resp = self.client.get(self.PATH_LIST)
self.assert403(resp)
def testCreateCategoryFails(self):
data = {
'name': 'New',
'description': 'New Category',
}
with self.queryLimit(0):
resp = self.postJSON(self.PATH_LIST, data)
self.assert403(resp)
self.assertEqual(len(self.cats), models.Category.query.count())
testCreateCategoryFailsAuthenticated = base.authenticated_test(
testCreateCategoryFails)
@base.admin_test
def testCreateCategory(self):
data = {
'name': 'New',
'description': 'New Category',
}
with self.queryLimit(6):
resp = self.postJSON(self.PATH_LIST, data)
self.assert200(resp)
for f in ('name', 'description'):
self.assertEqual(data[f], resp.json[f])
cat = models.Category.query.get(resp.json['slug'])
for f in ('name', 'description'):
self.assertEqual(data[f], getattr(cat, f))
def testDeleteCategoryFails(self):
with self.queryLimit(0):
resp = self.client.delete(self.PATH_SINGLE)
self.assert403(resp)
self.assertIsNotNone(models.Category.query.get(self.cat.slug))
testDeleteCategoryFailsAuthenticated = base.authenticated_test(
testDeleteCategoryFails)
@base.admin_test
def testDeleteNonEmptyCategory(self):
with self.queryLimit(3):
resp = self.client.delete(self.PATH_SINGLE)
self.assert400(resp)
self.assertIsNotNone(models.Category.query.get(self.cat.slug))
@base.admin_test
def testDeleteEmptyCategory(self):
for i in self.cat.challenges:
models.db.session.delete(i)
models.db.session.commit()
with self.queryLimit(3):
resp = self.client.delete(self.PATH_SINGLE)
self.assert200(resp)
self.assertIsNone(models.Category.query.get(self.cat.slug))
def testGetCategoryAnonymous(self):
with self.queryLimit(0):
self.assert403(self.client.get(self.PATH_SINGLE))
def _testGetCategory(self):
with self.queryLimit(None):
resp = self.client.get(self.PATH_SINGLE)
self.assert200(resp)
testGetCategoryAuthenticated = base.authenticated_test(
_testGetCategory)
testGetCategoryAdmin = base.admin_test(_testGetCategory)
def testUpdateCategoryAnonymous(self):
with self.queryLimit(0):
resp = self.putJSON(self.PATH_SINGLE, {
'name': 'new name'})
self.assert403(resp)
testUpdateCategoryAuthenticated = base.authenticated_test(
testUpdateCategoryAnonymous)
@base.admin_test
def testUpdateCategoryAdmin(self):
data = {'name': 'new name'}
with self.queryLimit(None):
resp = self.putJSON(self.PATH_SINGLE, data)
self.assert200(resp)
self.assertEqual(data['name'], resp.json['name'])
cat = models.Category.query.get(self.cat.slug)
self.assertEqual(data['name'], cat.name)
class AnswerTest(base.RestTestCase):
PATH = '/api/answers'
def setUp(self):
super(AnswerTest, self).setUp()
cat = models.Category.create('test', 'test')
self.answer = 'foobar'
self.points = 100
self.chall = models.Challenge.create(
'test', 'test', self.points,
self.answer, cat.slug, unlocked=True)
self.cid = self.chall.cid
models.db.session.commit()
def testSubmitAnonymous(self):
with self.queryLimit(0):
self.assert403(self.postJSON(self.PATH, {
'cid': self.cid,
'answer': self.answer,
}))
@base.admin_test
def testSubmitAdmin_Regular(self):
with self.queryLimit(0):
self.assert400(self.postJSON(self.PATH, {
'cid': self.cid,
'answer': self.answer,
}))
@base.admin_test
def testSubmitAdmin_Override(self):
team = models.Team.create('crash_override')
models.db.session.commit()
with self.queryLimit(7):
resp = self.postJSON(self.PATH, {
'cid': self.cid,
'tid': team.tid,
})
self.assert200(resp)
self.assertEqual(self.points, resp.json['points'])
@base.authenticated_test
def testSubmitCorrect(self):
with self.queryLimit(6):
resp = self.postJSON(self.PATH, {
'cid': self.cid,
'answer': self.answer,
})
self.assert200(resp)
self.assertEqual(self.points, resp.json['points'])
@base.authenticated_test
def testSubmitIncorrect(self):
old_score = self.client.team.score
with self.queryLimit(2):
resp = self.postJSON(self.PATH, {
'cid': self.cid,
'answer': 'incorrect',
})
self.assert403(resp)
team = models.Team.query.get(self.client.team.tid)
self.assertEqual(old_score, team.score)
@base.authenticated_test
def testSubmitDouble(self):
models.Answer.create(self.chall, self.client.team, '')
old_score = self.client.team.score
with self.queryLimit(4):
resp = self.postJSON(self.PATH, {
'cid': self.cid,
'answer': self.answer,
})
self.assert403(resp)
team = models.Team.query.get(self.client.team.tid)
self.assertEqual(old_score, team.score)
@base.authenticated_test
def testSubmit_ProofOfWork(self):
test_nbits = 12
def MockValidate(val, key, nbits):
self.assertEqual(val, self.answer)
self.assertEqual(key, 'foo')
self.assertEqual(nbits, test_nbits)
return True
self.app.config['PROOF_OF_WORK_BITS'] = test_nbits
# TODO: switch to mock framework
old_pow_test = utils.validate_proof_of_work
utils.validate_proof_of_work = MockValidate
with self.queryLimit(6):
resp = self.postJSON(self.PATH, {
'cid': self.cid,
'answer': self.answer,
'token': 'foo'
})
self.assert200(resp)
self.assertEqual(self.points, resp.json['points'])
utils.validate_proof_of_work = old_pow_test
@base.authenticated_test
def testSubmit_ProofOfWorkFails(self):
test_nbits = 12
def MockValidate(val, key, nbits):
self.assertEqual(val, self.answer)
self.assertEqual(key, 'foo')
self.assertEqual(nbits, test_nbits)
return False
self.app.config['PROOF_OF_WORK_BITS'] = test_nbits
# TODO: switch to mock framework
old_pow_test = utils.validate_proof_of_work
utils.validate_proof_of_work = MockValidate
old_score = self.client.team.score
with self.queryLimit(2):
resp = self.postJSON(self.PATH, {
'cid': self.cid,
'answer': self.answer,
'token': '<PASSWORD>'
})
self.assert403(resp)
team = models.Team.query.get(self.client.team.tid)
self.assertEqual(old_score, team.score)
utils.validate_proof_of_work = old_pow_test
class ConfigTest(base.RestTestCase):
PATH = '/api/config'
def makeTestGetConfig(extra_keys=None):
extra_keys = set(extra_keys or [])
def testGetConfig(self):
with self.queryLimit(0):
resp = self.client.get(self.PATH)
self.assert200(resp)
expected_keys = set((
'teams',
'sbname',
'news_mechanism',
'news_poll_interval',
'csrf_token',
'rules',
'game_start',
'game_end',
'login_url',
'register_url',
'login_method',
'scoring',
'validators',
'proof_of_work_bits',
'invite_only',
'tags_only',
))
expected_keys |= extra_keys
self.assertEqual(expected_keys, set(resp.json.keys()))
self.assertIsInstance(resp.json['invite_only'], bool)
return testGetConfig
testGetConfig = makeTestGetConfig()
testGetConfigAuthenticated = base.authenticated_test(
makeTestGetConfig())
testGetConfigAdmin = base.admin_test(
makeTestGetConfig())
class NewsTest(base.RestTestCase):
PATH = '/api/news'
def setUp(self):
super(NewsTest, self).setUp()
models.News.broadcast('test', 'Test message.')
models.News.unicast(
self.authenticated_client.team.tid,
'test', 'Test team message.')
models.commit()
def testGetNews(self):
with self.queryLimit(2):
resp = self.client.get(self.PATH)
self.assert200(resp)
self.assertEqual(1, len(resp.json))
testGetNewsAdmin = base.admin_test(testGetNews)
@base.authenticated_test
def testGetNewsAuthenticated(self):
with self.queryLimit(2):
resp = self.client.get(self.PATH)
self.assert200(resp)
self.assertEqual(2, len(resp.json))
def testCreateNews(self):
with self.queryLimit(0):
resp = self.postJSON(self.PATH, {
'message': 'some message',
})
self.assert403(resp)
testCreateNewsAuthenticated = base.authenticated_test(testCreateNews)
@base.admin_test
def testCreateNewsAdmin(self):
msg = 'some message'
with self.queryLimit(3):
resp = self.postJSON(self.PATH, {
'message': msg,
})
self.assert200(resp)
self.assertEqual(self.client.user.nick, resp.json['author'])
self.assertEqual(msg, resp.json['message'])
@base.admin_test
def testCreateTeamNewsAdmin(self):
msg = 'some message'
tid = self.authenticated_client.team.tid
with self.queryLimit(3):
resp = self.postJSON(self.PATH, {
'message': msg,
'tid': tid,
})
self.assert200(resp)
self.assertEqual(self.client.user.nick, resp.json['author'])
self.assertEqual(msg, resp.json['message'])
self.assertEqual('Unicast', resp.json['news_type'])
news = models.News.query.get(resp.json['nid'])
self.assertEqual(tid, news.audience_team_tid)
class CTFTimeTest(base.RestTestCase):
PATH = '/api/ctftime/scoreboard'
def testGetScoreboard(self):
with self.queryLimit(1):
resp = self.client.get(self.PATH)
self.assert200(resp)
self.assertIn('standings', resp.json)
standings = resp.json['standings']
required = set(('pos', 'team', 'score'))
for i in standings:
self.assertEqual(required, required & set(i.keys()))
```
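For reference, the scoreboard test above only pins down the shape of the CTFTime `standings` payload. A response satisfying the asserted fields might look like the following sketch (values are illustrative, not taken from the project):

```python
# Illustrative payload for testGetScoreboard; only the 'pos'/'team'/'score'
# keys are required by the test, the values here are made up.
scoreboard = {
    "standings": [
        {"pos": 1, "team": "example-team-a", "score": 1500},
        {"pos": 2, "team": "example-team-b", "score": 900},
    ]
}
required = set(("pos", "team", "score"))
for entry in scoreboard["standings"]:
    assert required == required & set(entry.keys())
```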
#### File: scoreboard/validators/nonce.py
```python
import base64
import hashlib
import hmac
import struct
from scoreboard import main
from scoreboard import utils
from scoreboard import models
from . import base
app = main.get_app()
class BaseNonceValidator(base.BaseValidator):
# Bits to use for each of the nonce and authenticator
NONCE_BITS = 0
AUTHENTICATOR_BITS = 0
HASH = hashlib.sha256
def __init__(self, *args, **kwargs):
super(BaseNonceValidator, self).__init__(*args, **kwargs)
if not self.NONCE_BITS or self.NONCE_BITS % 8:
raise ValueError('NONCE_BITS must be non-0 and a multiple of 8.')
if not self.AUTHENTICATOR_BITS or self.AUTHENTICATOR_BITS % 8:
raise ValueError(
'AUTHENTICATOR_BITS must be non-0 and a multiple of 8.')
@staticmethod
def _decode(buf):
raise NotImplementedError('Must implement decode.')
@staticmethod
def _encode(buf):
raise NotImplementedError('Must implement encode.')
def validate_answer(self, answer, team):
"""Validate the nonce-based flag."""
try:
decoded_answer = self._decode(answer)
except TypeError:
app.logger.error('Invalid padding for answer.')
return False
if len(decoded_answer) != (
self.NONCE_BITS + self.AUTHENTICATOR_BITS) / 8:
app.logger.error('Invalid length of decoded answer in %s',
type(self).__name__)
return False
nonce = decoded_answer[:self.NONCE_BITS/8]
authenticator = decoded_answer[self.NONCE_BITS/8:]
if not utils.compare_digest(authenticator,
self.compute_authenticator(nonce)):
app.logger.error('Invalid nonce flag: %s', answer)
return False
# At this point, it's a valid flag, but need to check for reuse.
        # We do this by inserting a row so that the primary key check will
        # fail in the commit phase if the flag was already used.
if team:
models.NonceFlagUsed.create(
self.challenge, self.unpack_nonce(nonce),
team)
return True
def compute_authenticator(self, nonce):
"""Compute the authenticator part for a nonce."""
mac = hmac.new(
self.challenge.answer_hash.encode('utf-8'),
nonce,
digestmod=self.HASH).digest()
return mac[:self.AUTHENTICATOR_BITS/8]
def make_answer(self, nonce):
"""Compute the whole answer for a nonce."""
if isinstance(nonce, int):
nonce = struct.pack('>Q', nonce)
nonce = nonce[8 - (self.NONCE_BITS / 8):]
if len(nonce) != self.NONCE_BITS / 8:
raise ValueError('nonce is wrong length!')
return self._encode(nonce + self.compute_authenticator(nonce))
@classmethod
def unpack_nonce(cls, nonce):
pad = '\x00' * (8 - cls.NONCE_BITS / 8)
return struct.unpack('>Q', pad + nonce)[0]
class Base32Validator(BaseNonceValidator):
def __init__(self, *args, **kwargs):
if (self.NONCE_BITS + self.AUTHENTICATOR_BITS) % 5 != 0:
            raise ValueError('Length must be a multiple of 5 bits.')
super(Base32Validator, self).__init__(*args, **kwargs)
@staticmethod
def _encode(buf):
return base64.b32encode(buf)
@staticmethod
def _decode(buf):
if isinstance(buf, unicode):
buf = buf.encode('utf-8')
return base64.b32decode(buf, casefold=True, map01='I')
class Nonce_16_64_Base32_Validator(Base32Validator):
name = 'Nonce: 16 bits, 64 bit validator, Base32 encoded'
NONCE_BITS = 16
AUTHENTICATOR_BITS = 64
class Nonce_24_56_Base32_Validator(Base32Validator):
name = 'Nonce: 24 bits, 56 bit validator, Base32 encoded'
NONCE_BITS = 24
AUTHENTICATOR_BITS = 56
class Nonce_32_88_Base32_Validator(Base32Validator):
name = 'Nonce: 32 bits, 88 bit validator, Base32 encoded'
NONCE_BITS = 32
AUTHENTICATOR_BITS = 88
```
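The validators above encode each flag as `base32(nonce || truncate(HMAC(secret, nonce)))`. A self-contained sketch of that scheme, using only the standard library (the secret is a placeholder standing in for the challenge's answer hash, and the bit sizes mirror `Nonce_16_64_Base32_Validator`):

```python
import base64
import hashlib
import hmac
import struct

NONCE_BITS = 16                      # placeholder sizes, as in Nonce_16_64_Base32_Validator
AUTH_BITS = 64
SECRET = b"per-challenge secret"     # stands in for challenge.answer_hash

def make_flag(counter):
    # keep only the low NONCE_BITS of the counter, big-endian
    nonce = struct.pack(">Q", counter)[8 - NONCE_BITS // 8:]
    mac = hmac.new(SECRET, nonce, hashlib.sha256).digest()[:AUTH_BITS // 8]
    return base64.b32encode(nonce + mac).decode()

def check_flag(flag):
    raw = base64.b32decode(flag, casefold=True, map01="I")
    nonce, mac = raw[:NONCE_BITS // 8], raw[NONCE_BITS // 8:]
    expected = hmac.new(SECRET, nonce, hashlib.sha256).digest()[:AUTH_BITS // 8]
    return hmac.compare_digest(mac, expected)

print(check_flag(make_flag(42)))   # True
```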
#### File: ctfscoreboard/scoreboard/views.py
```python
import flask
import os
from werkzeug import exceptions
from scoreboard import attachments
from scoreboard import main
from scoreboard import models
from scoreboard import utils
app = main.get_app()
_VIEW_CACHE = {}
@app.errorhandler(404)
def handle_404(ex):
"""Handle 404s, sending index.html for unhandled paths."""
path = flask.request.path[1:]
try:
return app.send_static_file(path)
except (exceptions.NotFound, UnicodeEncodeError):
if '.' not in path and not path.startswith('api/'):
app.logger.info('%s -> index.html', path)
return render_index()
return '404 Not Found', 404
# Needed because emails with a "." in them prevent 404 handler from working
@app.route('/pwreset/<path:unused>')
def render_pwreset(unused):
return render_index()
@app.route('/')
@app.route('/index.html')
def render_index():
"""Render index.
Do not include any user-controlled content to avoid XSS!
"""
try:
tmpl = _VIEW_CACHE['index']
except KeyError:
minify = not app.debug and os.path.exists(
os.path.join(app.static_folder, 'js/app.min.js'))
tmpl = flask.render_template('index.html', minify=minify)
_VIEW_CACHE['index'] = tmpl
return flask.make_response(tmpl, 200)
@app.route('/attachment/<filename>')
@utils.login_required
def download(filename):
"""Download an attachment."""
attachment = models.Attachment.query.get_or_404(filename)
valid = models.User.current().admin
for ch in attachment.challenges:
if ch.unlocked:
valid = True
break
if not valid:
flask.abort(404)
app.logger.info('Download of %s by %r.', attachment, models.User.current())
return attachments.backend.send(attachment)
@app.route('/createdb')
def createdb():
"""Create database schema without CLI access.
Useful for AppEngine and other container environments.
    Should be safe to expose, as the operation is idempotent and does not
clear any data.
"""
try:
models.db.create_all()
return 'Tables created.'
except Exception as ex:
app.logger.exception('Failed creating tables: %s', str(ex))
return 'Failed creating tables: see log.'
``` |
{
"source": "jnovikov/training-05-10-19",
"score": 3
} |
#### File: checkers/bot5words/bot_lib.py
```python
from bs4 import BeautifulSoup
from checklib import *
import secrets
import requests
import json
import base64
PORT = 5002
web_links = {
'https://yandex.ru': "Яндекс",
"https://shadowservants.ru": "Ддосишь",
}
class CheckMachine:
@property
def url(self):
return f'http://{self.host}:{self.port}'
def __init__(self, host, port):
self.host = host
self.port = port
def register_in_service(self, who='MC'):
register_url = self.url + '/register'
login = secrets.choice(('Sunlover', 'Pomo', 'Johnny', 'alagunto', 'Kekov'))
login = login + '_' + rnd_username()
password = <PASSWORD>()
type = '2'
if who != 'MC':
type = '1'
data = dict(
type=type,
login=login,
password=password
)
r = requests.post(url=register_url, data=data, allow_redirects=False)
assert_eq(r.status_code, 303, "Can't register in service")
return data
def login_in_service(self, data):
login_url = self.url + '/login'
s = requests.Session()
r = s.get(url=login_url, allow_redirects=True)
assert_eq(r.status_code, 200, "Can't login in service")
r = s.post(url=login_url, data=data, allow_redirects=True)
assert_eq(r.status_code, 200, "Can't login in service")
return s
def put_string(self, session, s):
name = rnd_string(10)
create_url = self.url + '/create'
r = session.post(url=create_url, data=dict(name=name, text=s))
assert_eq(r.status_code, 200, "Can't create text")
def get_searchable_link(self, s, link):
resp = s.get(self.url + link)
html = resp.text
soup = BeautifulSoup(html, 'html.parser')
links = soup.find_all('a')
last_link = links.pop()
return last_link.get("href", "/not_found_lol")
def find_all_links(self, session):
list_url = self.url + '/list'
r = session.get(url=list_url)
html = r.text
soup = BeautifulSoup(html, 'html.parser')
links = soup.find_all('a')
result_links = []
for link in links:
link = link.get("href", "/not_found_lol")
if "text" in link:
result_links.append(link)
result_links = [self.get_searchable_link(session, x) for x in result_links]
return dict(links=result_links)
def find_all_strings_by_links(self, session, links):
s = []
for link in links:
find_url = self.url + link
r = session.get(find_url)
html = r.text
soup = BeautifulSoup(html, 'html.parser')
areas = soup.find_all('textarea')
if areas:
s += [x.string for x in areas]
return s
def create_text_by_url(self, session):
create_url = self.url + '/create_url'
name = rnd_string(10)
text_url_pair = secrets.choice(list(web_links.items()))
text_url = text_url_pair[0]
r = session.post(url=create_url, data=dict(name=name, url=text_url))
assert_eq(r.status_code, 200, "Can't create text by link")
list_url = self.url + '/list'
r = session.get(url=list_url)
html = r.text
soup = BeautifulSoup(html, 'html.parser')
links = soup.find_all('a')
last = links.pop().get("href", "/bad_href")
r = session.get(url=self.url + last)
assert_in(text_url_pair[1], r.text, "Can't create text by link")
def check_print_session_works(self, username, s):
print_url = self.url + '/print'
r = s.get(print_url)
assert_eq(r.status_code, 200, "Can't print banned user data")
try:
data = json.loads(base64.b64decode(r.text))
assert_in(username, data.values(), "Can't print banned user data")
except Exception:
cquit(Status.MUMBLE, "Can't print banned user data")
``` |
{
"source": "jnovinger/django-batch-requests",
"score": 2
} |
#### File: django-batch-requests/batch_requests/utils.py
```python
from __future__ import absolute_import, unicode_literals
import six
from django.test.client import RequestFactory, FakePayload
from batch_requests.settings import br_settings as _settings
# Standard WSGI supported headers
WSGI_HEADERS = {
"CONTENT_LENGTH", "CONTENT_TYPE", "QUERY_STRING", "REMOTE_ADDR",
"REMOTE_HOST", "REMOTE_USER", "REQUEST_METHOD", "SERVER_NAME",
"SERVER_PORT",
}
class BatchRequestFactory(RequestFactory):
'''
Extend the RequestFactory and update the environment variables for WSGI.
'''
def _base_environ(self, **request):
'''
Override the default values for the wsgi environment variables.
'''
# This is a minimal valid WSGI environ dictionary, plus:
# - HTTP_COOKIE: for cookie support,
# - REMOTE_ADDR: often useful, see #8551.
# See http://www.python.org/dev/peps/pep-3333/#environ-variables
environ = {
'HTTP_COOKIE': self.cookies.output(header='', sep='; '),
'PATH_INFO': str('/'),
'REMOTE_ADDR': str('127.0.0.1'),
'REQUEST_METHOD': str('GET'),
'SCRIPT_NAME': str(''),
'SERVER_NAME': str('localhost'),
'SERVER_PORT': str('8000'),
'SERVER_PROTOCOL': str('HTTP/1.1'),
'wsgi.version': (1, 0),
'wsgi.url_scheme': str('http'),
'wsgi.input': FakePayload(b''),
'wsgi.errors': self.errors,
'wsgi.multiprocess': True,
'wsgi.multithread': True,
'wsgi.run_once': False,
}
environ.update(self.defaults)
environ.update(request)
return environ
def get_wsgi_request_object(
curr_request, method, url, headers, body,
REQUEST_FACTORY=BatchRequestFactory() # purposefully using a shared instance
):
'''
Based on the given request parameters, constructs and returns the WSGI request object.
'''
def transform_header(header, _wsgi_headers=WSGI_HEADERS):
"""Transform headers, if necessary
For every header, replace - to _, prepend http_ if necessary and
convert to upper case.
"""
header = header.replace("-", "_").upper()
if header not in _wsgi_headers:
header = "HTTP_{header}".format(header=header)
return header
t_headers = {"CONTENT_TYPE": _settings.DEFAULT_CONTENT_TYPE}
t_headers.update({
transform_header(h): v for h, v in six.iteritems(headers)
})
# Override existing batch requests headers with the new headers passed for this request.
x_headers = {
h: v for h, v in six.iteritems(curr_request.META)
if h in _settings.HEADERS_TO_INCLUDE
}
x_headers.update(t_headers)
return getattr(REQUEST_FACTORY, method.lower())(
url,
data=body,
secure=_settings.USE_HTTPS,
content_type=x_headers.get("CONTENT_TYPE", _settings.DEFAULT_CONTENT_TYPE),
**x_headers
)
```
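A quick way to see what the nested `transform_header` helper does to incoming header names is to restate the rule standalone (same `WSGI_HEADERS` set as above): standard WSGI names pass through unchanged, everything else is upper-cased, dashes become underscores, and an `HTTP_` prefix is added.

```python
# Standalone restatement of the header-mapping rule used above.
WSGI_HEADERS = {
    "CONTENT_LENGTH", "CONTENT_TYPE", "QUERY_STRING", "REMOTE_ADDR",
    "REMOTE_HOST", "REMOTE_USER", "REQUEST_METHOD", "SERVER_NAME",
    "SERVER_PORT",
}

def transform_header(header):
    header = header.replace("-", "_").upper()
    if header not in WSGI_HEADERS:
        header = "HTTP_{header}".format(header=header)
    return header

print(transform_header("Content-Type"))       # CONTENT_TYPE
print(transform_header("X-Requested-With"))   # HTTP_X_REQUESTED_WITH
```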
#### File: django-batch-requests/tests/test_bad_batch_request.py
```python
import json
from django.test import TestCase
from tests import ensure_text_content
class TestBadBatchRequest(TestCase):
'''
Check the behavior of bad batch request.
'''
def _batch_request(self, method, path, data, headers={}):
'''
Prepares a batch request.
'''
return {"url": path, "method": method, "headers": headers, "body": data}
def test_invalid_http_method(self):
'''
Make a batch request with invalid HTTP method.
'''
resp = ensure_text_content(
self.client.post(
"/api/v1/batch/",
json.dumps([self._batch_request("select", "/views", "", {})]),
content_type="application/json"
)
)
self.assertEqual(resp.status_code, 400, "Method validation is broken!")
self.assertEqual(resp.text.lower(), "invalid request method.", "Method validation is broken!")
def test_missing_http_method(self):
'''
Make a batch request without HTTP method.
'''
resp = ensure_text_content(
self.client.post(
"/api/v1/batch/",
json.dumps([{"body": "/views"}]),
content_type="application/json"
)
)
self.assertEqual(resp.status_code, 400, "Method & URL validation is broken!")
self.assertEqual(resp.text.lower(), "request definition should have url, method defined.", "Method validation is broken!")
def test_missing_url(self):
'''
Make a batch request without the URL.
'''
resp = ensure_text_content(
self.client.post(
"/api/v1/batch/",
json.dumps([{"method": "get"}]),
content_type="application/json"
)
)
self.assertEqual(resp.status_code, 400, "Method & URL validation is broken!")
self.assertEqual(resp.text.lower(), "request definition should have url, method defined.",
"Method validation is broken!")
def test_invalid_batch_request(self):
'''
Make a batch request without wrapping in the list.
'''
resp = ensure_text_content(
self.client.post(
"/api/v1/batch/",
json.dumps({"method": "get", "url": "/views/"}),
content_type="application/json"
)
)
self.assertEqual(resp.status_code, 400, "Batch requests should always be in list.")
self.assertEqual(resp.text.lower(), "the body of batch request should always be list!",
"List validation is broken!")
def test_view_that_raises_exception(self):
'''
Make a batch request to a view that raises exception.
'''
resp = ensure_text_content(
self.client.post(
"/api/v1/batch/",
json.dumps([{"method": "get", "url": "/exception/"}]),
content_type="application/json"
)
)
resp = json.loads(resp.text)[0]
self.assertEqual(resp['status_code'], 500, "Exceptions should return 500.")
self.assertEqual(resp['body'].lower(), "exception", "Exception handling is broken!")
``` |
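Taken together, these tests imply that a well-formed body for `/api/v1/batch/` is a JSON list whose items carry at least `url` and `method`, plus optional `headers` and `body`, mirroring `_batch_request`. A minimal sketch (the paths are placeholders based on the `/views/` endpoint these tests hit):

```python
import json

# Minimal well-formed batch body based on the fields these tests send.
batch_body = json.dumps([
    {"url": "/views/", "method": "get", "headers": {}, "body": ""},
    {"url": "/views/", "method": "post",
     "headers": {"Content-Type": "application/json"},
     "body": json.dumps({"text": "hello"})},
])
```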
{
"source": "jnozsc/GeoIP2-python",
"score": 2
} |
#### File: GeoIP2-python/geoip2/records.py
```python
import ipaddress
# pylint:disable=R0903
from abc import ABCMeta
from geoip2.compat import compat_ip_network
from geoip2.mixins import SimpleEquality
class Record(SimpleEquality):
"""All records are subclasses of the abstract class ``Record``."""
__metaclass__ = ABCMeta
def __repr__(self):
args = ", ".join("%s=%r" % x for x in self.__dict__.items())
return "{module}.{class_name}({data})".format(
module=self.__module__, class_name=self.__class__.__name__, data=args
)
class PlaceRecord(Record):
"""All records with :py:attr:`names` subclass :py:class:`PlaceRecord`."""
__metaclass__ = ABCMeta
def __init__(self, locales=None, names=None):
if locales is None:
locales = ["en"]
self._locales = locales
if names is None:
names = {}
self.names = names
@property
def name(self):
"""Dict with locale codes as keys and localized name as value."""
# pylint:disable=E1101
return next((self.names.get(x) for x in self._locales if x in self.names), None)
class City(PlaceRecord):
"""Contains data for the city record associated with an IP address.
This class contains the city-level data associated with an IP address.
This record is returned by ``city``, ``enterprise``, and ``insights``.
Attributes:
.. attribute:: confidence
A value from 0-100 indicating MaxMind's
confidence that the city is correct. This attribute is only available
from the Insights end point and the GeoIP2 Enterprise database.
:type: int
.. attribute:: geoname_id
The GeoName ID for the city.
:type: int
.. attribute:: name
The name of the city based on the locales list passed to the
constructor.
:type: unicode
.. attribute:: names
A dictionary where the keys are locale codes
and the values are names.
:type: dict
"""
def __init__(self, locales=None, confidence=None, geoname_id=None, names=None, **_):
self.confidence = confidence
self.geoname_id = geoname_id
super(City, self).__init__(locales, names)
class Continent(PlaceRecord):
"""Contains data for the continent record associated with an IP address.
This class contains the continent-level data associated with an IP
address.
Attributes:
.. attribute:: code
A two character continent code like "NA" (North America)
or "OC" (Oceania).
:type: unicode
.. attribute:: geoname_id
The GeoName ID for the continent.
:type: int
.. attribute:: name
Returns the name of the continent based on the locales list passed to
the constructor.
:type: unicode
.. attribute:: names
A dictionary where the keys are locale codes
and the values are names.
:type: dict
"""
def __init__(self, locales=None, code=None, geoname_id=None, names=None, **_):
self.code = code
self.geoname_id = geoname_id
super(Continent, self).__init__(locales, names)
class Country(PlaceRecord):
"""Contains data for the country record associated with an IP address.
This class contains the country-level data associated with an IP address.
Attributes:
.. attribute:: confidence
A value from 0-100 indicating MaxMind's confidence that
the country is correct. This attribute is only available from the
Insights end point and the GeoIP2 Enterprise database.
:type: int
.. attribute:: geoname_id
The GeoName ID for the country.
:type: int
.. attribute:: is_in_european_union
This is true if the country is a member state of the European Union.
:type: bool
.. attribute:: iso_code
The two-character `ISO 3166-1
<http://en.wikipedia.org/wiki/ISO_3166-1>`_ alpha code for the
country.
:type: unicode
.. attribute:: name
The name of the country based on the locales list passed to the
constructor.
:type: unicode
.. attribute:: names
A dictionary where the keys are locale codes and the values
are names.
:type: dict
"""
def __init__(
self,
locales=None,
confidence=None,
geoname_id=None,
is_in_european_union=False,
iso_code=None,
names=None,
**_
):
self.confidence = confidence
self.geoname_id = geoname_id
self.is_in_european_union = is_in_european_union
self.iso_code = iso_code
super(Country, self).__init__(locales, names)
class RepresentedCountry(Country):
"""Contains data for the represented country associated with an IP address.
This class contains the country-level data associated with an IP address
for the IP's represented country. The represented country is the country
represented by something like a military base.
Attributes:
.. attribute:: confidence
A value from 0-100 indicating MaxMind's confidence that
the country is correct. This attribute is only available from the
Insights end point and the GeoIP2 Enterprise database.
:type: int
.. attribute:: geoname_id
The GeoName ID for the country.
:type: int
.. attribute:: is_in_european_union
This is true if the country is a member state of the European Union.
:type: bool
.. attribute:: iso_code
The two-character `ISO 3166-1
<http://en.wikipedia.org/wiki/ISO_3166-1>`_ alpha code for the country.
:type: unicode
.. attribute:: name
The name of the country based on the locales list passed to the
constructor.
:type: unicode
.. attribute:: names
A dictionary where the keys are locale codes and the values
are names.
:type: dict
.. attribute:: type
A string indicating the type of entity that is representing the
country. Currently we only return ``military`` but this could expand to
include other types in the future.
:type: unicode
"""
def __init__(
self,
locales=None,
confidence=None,
geoname_id=None,
is_in_european_union=False,
iso_code=None,
names=None,
# pylint:disable=redefined-builtin
type=None,
**_
):
self.type = type
super(RepresentedCountry, self).__init__(
locales, confidence, geoname_id, is_in_european_union, iso_code, names
)
class Location(Record):
"""Contains data for the location record associated with an IP address.
This class contains the location data associated with an IP address.
This record is returned by ``city``, ``enterprise``, and ``insights``.
Attributes:
.. attribute:: average_income
The average income in US dollars associated with the requested IP
address. This attribute is only available from the Insights end point.
:type: int
.. attribute:: accuracy_radius
The approximate accuracy radius in kilometers around the latitude and
longitude for the IP address. This is the radius where we have a 67%
confidence that the device using the IP address resides within the
circle centered at the latitude and longitude with the provided radius.
:type: int
.. attribute:: latitude
The approximate latitude of the location associated with the IP
address. This value is not precise and should not be used to identify a
particular address or household.
:type: float
.. attribute:: longitude
The approximate longitude of the location associated with the IP
address. This value is not precise and should not be used to identify a
particular address or household.
:type: float
.. attribute:: metro_code
The metro code of the location if the
location is in the US. MaxMind returns the same metro codes as the
`Google AdWords API
<https://developers.google.com/adwords/api/docs/appendix/cities-DMAregions>`_.
:type: int
.. attribute:: population_density
The estimated population per square kilometer associated with the IP
address. This attribute is only available from the Insights end point.
:type: int
.. attribute:: time_zone
The time zone associated with location, as specified by the `IANA Time
Zone Database <http://www.iana.org/time-zones>`_, e.g.,
"America/New_York".
:type: unicode
"""
def __init__(
self,
average_income=None,
accuracy_radius=None,
latitude=None,
longitude=None,
metro_code=None,
population_density=None,
postal_code=None,
postal_confidence=None,
time_zone=None,
**_
):
self.average_income = average_income
self.accuracy_radius = accuracy_radius
self.latitude = latitude
self.longitude = longitude
self.metro_code = metro_code
self.population_density = population_density
self.postal_code = postal_code
self.postal_confidence = postal_confidence
self.time_zone = time_zone
class MaxMind(Record):
"""Contains data related to your MaxMind account.
Attributes:
.. attribute:: queries_remaining
The number of remaining queries you have
for the end point you are calling.
:type: int
"""
def __init__(self, queries_remaining=None, **_):
self.queries_remaining = queries_remaining
class Postal(Record):
"""Contains data for the postal record associated with an IP address.
This class contains the postal data associated with an IP address.
This attribute is returned by ``city``, ``enterprise``, and ``insights``.
Attributes:
.. attribute:: code
The postal code of the location. Postal
codes are not available for all countries. In some countries, this will
only contain part of the postal code.
:type: unicode
.. attribute:: confidence
A value from 0-100 indicating
MaxMind's confidence that the postal code is correct. This attribute is
only available from the Insights end point and the GeoIP2 Enterprise
database.
:type: int
"""
def __init__(self, code=None, confidence=None, **_):
self.code = code
self.confidence = confidence
class Subdivision(PlaceRecord):
"""Contains data for the subdivisions associated with an IP address.
This class contains the subdivision data associated with an IP address.
This attribute is returned by ``city``, ``enterprise``, and ``insights``.
Attributes:
.. attribute:: confidence
This is a value from 0-100 indicating MaxMind's
confidence that the subdivision is correct. This attribute is only
available from the Insights end point and the GeoIP2 Enterprise
database.
:type: int
.. attribute:: geoname_id
This is a GeoName ID for the subdivision.
:type: int
.. attribute:: iso_code
This is a string up to three characters long
    containing the subdivision portion of the `ISO 3166-2 code
<http://en.wikipedia.org/wiki/ISO_3166-2>`_.
:type: unicode
.. attribute:: name
The name of the subdivision based on the locales list passed to the
constructor.
:type: unicode
.. attribute:: names
A dictionary where the keys are locale codes and the
values are names
:type: dict
"""
def __init__(
self,
locales=None,
confidence=None,
geoname_id=None,
iso_code=None,
names=None,
**_
):
self.confidence = confidence
self.geoname_id = geoname_id
self.iso_code = iso_code
super(Subdivision, self).__init__(locales, names)
class Subdivisions(tuple):
"""A tuple-like collection of subdivisions associated with an IP address.
This class contains the subdivisions of the country associated with the
IP address from largest to smallest.
For instance, the response for Oxford in the United Kingdom would have
England as the first element and Oxfordshire as the second element.
This attribute is returned by ``city``, ``enterprise``, and ``insights``.
"""
def __new__(cls, locales, *subdivisions):
subdivisions = [Subdivision(locales, **x) for x in subdivisions]
obj = super(cls, Subdivisions).__new__(cls, subdivisions)
return obj
def __init__(self, locales, *subdivisions): # pylint:disable=W0613
self._locales = locales
super(Subdivisions, self).__init__()
@property
def most_specific(self):
"""The most specific (smallest) subdivision available.
If there are no :py:class:`Subdivision` objects for the response,
this returns an empty :py:class:`Subdivision`.
:type: :py:class:`Subdivision`
"""
try:
return self[-1]
except IndexError:
return Subdivision(self._locales)
class Traits(Record):
"""Contains data for the traits record associated with an IP address.
This class contains the traits data associated with an IP address.
This class has the following attributes:
.. attribute:: autonomous_system_number
The `autonomous system
number <http://en.wikipedia.org/wiki/Autonomous_system_(Internet)>`_
associated with the IP address. This attribute is only available from
the City and Insights web service end points and the GeoIP2 Enterprise
database.
:type: int
.. attribute:: autonomous_system_organization
The organization associated with the registered `autonomous system
number <http://en.wikipedia.org/wiki/Autonomous_system_(Internet)>`_ for
the IP address. This attribute is only available from the City and
Insights web service end points and the GeoIP2 Enterprise database.
:type: unicode
.. attribute:: connection_type
The connection type may take the following values:
- Dialup
- Cable/DSL
- Corporate
- Cellular
Additional values may be added in the future.
This attribute is only available in the GeoIP2 Enterprise database.
:type: unicode
.. attribute:: domain
The second level domain associated with the
IP address. This will be something like "example.com" or
"example.co.uk", not "foo.example.com". This attribute is only available
from the City and Insights web service end points and the GeoIP2
Enterprise database.
:type: unicode
.. attribute:: ip_address
The IP address that the data in the model
is for. If you performed a "me" lookup against the web service, this
will be the externally routable IP address for the system the code is
running on. If the system is behind a NAT, this may differ from the IP
address locally assigned to it.
:type: unicode
.. attribute:: is_anonymous
This is true if the IP address belongs to any sort of anonymous network.
This attribute is only available from GeoIP2 Precision Insights.
:type: bool
.. attribute:: is_anonymous_proxy
This is true if the IP is an anonymous
proxy. See http://dev.maxmind.com/faq/geoip#anonproxy for further
details.
:type: bool
.. deprecated:: 2.2.0
      Use our `GeoIP2 Anonymous IP database
<https://www.maxmind.com/en/geoip2-anonymous-ip-database GeoIP2>`_
instead.
.. attribute:: is_anonymous_vpn
This is true if the IP address is registered to an anonymous VPN
provider.
If a VPN provider does not register subnets under names associated with
them, we will likely only flag their IP ranges using the
``is_hosting_provider`` attribute.
This attribute is only available from GeoIP2 Precision Insights.
:type: bool
.. attribute:: is_hosting_provider
This is true if the IP address belongs to a hosting or VPN provider
(see description of ``is_anonymous_vpn`` attribute).
This attribute is only available from GeoIP2 Precision Insights.
:type: bool
.. attribute:: is_legitimate_proxy
This attribute is true if MaxMind believes this IP address to be a
legitimate proxy, such as an internal VPN used by a corporation. This
attribute is only available in the GeoIP2 Enterprise database.
:type: bool
.. attribute:: is_public_proxy
This is true if the IP address belongs to a public proxy. This attribute
is only available from GeoIP2 Precision Insights.
:type: bool
.. attribute:: is_satellite_provider
This is true if the IP address is from a satellite provider that
provides service to multiple countries.
:type: bool
.. deprecated:: 2.2.0
Due to the increased coverage by mobile carriers, very few
satellite providers now serve multiple countries. As a result, the
output does not provide sufficiently relevant data for us to maintain
it.
.. attribute:: is_tor_exit_node
This is true if the IP address is a Tor exit node. This attribute is
only available from GeoIP2 Precision Insights.
:type: bool
.. attribute:: isp
The name of the ISP associated with the IP address. This attribute is
only available from the City and Insights web service end points and the
GeoIP2 Enterprise database.
:type: unicode
.. attribute:: network
The network associated with the record. In particular, this is the
largest network where all of the fields besides ip_address have the same
value.
:type: ipaddress.IPv4Network or ipaddress.IPv6Network
.. attribute:: organization
The name of the organization associated with the IP address. This
attribute is only available from the City and Insights web service end
points and the GeoIP2 Enterprise database.
:type: unicode
.. attribute:: static_ip_score
An indicator of how static or dynamic an IP address is. The value ranges
from 0 to 99.99 with higher values meaning a greater static association.
For example, many IP addresses with a user_type of cellular have a
lifetime under one. Static Cable/DSL IPs typically have a lifetime above
thirty.
This indicator can be useful for deciding whether an IP address represents
the same user over time. This attribute is only available from GeoIP2
Precision Insights.
:type: float
.. attribute:: user_count
The estimated number of users sharing the IP/network during the past 24
hours. For IPv4, the count is for the individual IP. For IPv6, the count
is for the /64 network. This attribute is only available from GeoIP2
Precision Insights.
:type: int
.. attribute:: user_type
The user type associated with the IP
address. This can be one of the following values:
* business
* cafe
* cellular
* college
* content_delivery_network
* dialup
* government
* hosting
* library
* military
* residential
* router
* school
* search_engine_spider
* traveler
This attribute is only available from the Insights end point and the
GeoIP2 Enterprise database.
:type: unicode
"""
def __init__(
self,
autonomous_system_number=None,
autonomous_system_organization=None,
connection_type=None,
domain=None,
is_anonymous=False,
is_anonymous_proxy=False,
is_anonymous_vpn=False,
is_hosting_provider=False,
is_legitimate_proxy=False,
is_public_proxy=False,
is_satellite_provider=False,
is_tor_exit_node=False,
isp=None,
ip_address=None,
network=None,
organization=None,
prefix_len=None,
static_ip_score=None,
user_count=None,
user_type=None,
**_
):
self.autonomous_system_number = autonomous_system_number
self.autonomous_system_organization = autonomous_system_organization
self.connection_type = connection_type
self.domain = domain
self.is_anonymous = is_anonymous
self.is_anonymous_proxy = is_anonymous_proxy
self.is_anonymous_vpn = is_anonymous_vpn
self.is_hosting_provider = is_hosting_provider
self.is_legitimate_proxy = is_legitimate_proxy
self.is_public_proxy = is_public_proxy
self.is_satellite_provider = is_satellite_provider
self.is_tor_exit_node = is_tor_exit_node
self.isp = isp
self.organization = organization
self.static_ip_score = static_ip_score
self.user_type = user_type
self.user_count = user_count
self.ip_address = ip_address
self._network = network
self._prefix_len = prefix_len
# This code is duplicated for performance reasons
# pylint: disable=duplicate-code
@property
def network(self):
"""The network for the record"""
network = self._network
if isinstance(network, (ipaddress.IPv4Network, ipaddress.IPv6Network)):
return network
if network is None:
ip_address = self.ip_address
prefix_len = self._prefix_len
if ip_address is None or prefix_len is None:
return None
network = "{}/{}".format(ip_address, prefix_len)
network = compat_ip_network(network, False)
self._network = network
return network
``` |
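The `network` property above falls back to building a CIDR string from `ip_address` and `prefix_len` and parsing it non-strictly. Assuming `compat_ip_network` is a thin shim over the standard `ipaddress` module (with its second argument mapping to `strict`), the equivalent behaviour is roughly:

```python
import ipaddress

# strict=False masks off host bits instead of raising ValueError, which is
# what lets an interface address plus prefix length become a network.
net = ipaddress.ip_network(u"192.0.2.130/24", strict=False)
print(net)   # 192.0.2.0/24
```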
{
"source": "jnplonte/flask-api",
"score": 2
} |
#### File: python-app/app/__init__.py
```python
import os
import base64
import datetime
from flask import Flask, jsonify, g
from flask_pymongo import PyMongo
from flask_restful import Resource, Api, abort, request
from app import config
from app.services.api_response import ApiResponse
from app.v1.authentication.token import startToken
from app.v1.core.users.user import startUser
from app.v1.core.users.users import startUsers
from app.models.users import Users
env = os.environ.get('ENV')
port = os.environ.get('PORT')
if env == 'development':
configs = config.DevConfig
elif env == 'production':
configs = config.ProdConfig
else:
configs = config.LocalConfig
print('api environment is %s' % (env.upper() if env is not None else 'local'))
print('api listening to http://localhost:%s' % (os.environ.get('PORT') if env is not None else '8383'))
# ---------------------------------------------------------------------------------------------------------------------
app = Flask(__name__)
app.config["MONGO_URI"] = 'mongodb://%s:%s@%s:%s/%s' % (configs.MONGO_USERNAME, configs.MONGO_PASSWORD, configs.MONGO_HOST, configs.MONGO_PORT, configs.MONGO_DATABASE)
mongo = PyMongo(app)
api = Api(app)
@app.before_request
def tokenCheck():
if request.method == 'OPTIONS':
return
userData = {}
userString = request.headers.get(configs.SECRET_KEY, None)
if userString is None:
response = jsonify(api_response.failed('authentication', ''))
response.status_code = 401
return response
if 'auth' in request.url:
if userString != configs.SECRET_KEY_HASH:
response = jsonify(api_response.failed('authentication', ''))
response.status_code = 401
return response
else:
userKey = request.headers.get('user-key', None)
if userKey is None:
response = jsonify(api_response.failed('authentication', ''))
response.status_code = 401
return response
userData = Users(mongo, False).get({'key': str(userKey)})
if not bool(userData):
response = jsonify(api_response.failed('authentication', ''))
response.status_code = 401
return response
g.user = userData
@app.after_request
def corsCheck(response):
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Headers', '*')
response.headers.add('Access-Control-Allow-Methods', '*')
return response
@app.errorhandler(404)
def not_found(error):
response = jsonify(api_response.failed('notfound', ''))
response.status_code = 404
return response
api_response = ApiResponse()
startToken(api, configs, api_response, mongo)
startUser(api, configs, api_response, mongo)
startUsers(api, configs, api_response, mongo)
if __name__ == "__main__":
app.run(debug=True)
``` |
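Given the `tokenCheck` hook above, every non-`auth` request must carry both the secret header (whose name is taken from `configs.SECRET_KEY`) and a valid `user-key`. A hypothetical client call is sketched below; the header name, hash value and endpoint path are placeholders, not values from this project:

```python
import requests

headers = {
    "example-secret-key": "example-secret-key-hash",  # hypothetical name/value
    "user-key": "example-user-key",                   # hypothetical user key
}
resp = requests.get("http://localhost:8383/v1/users", headers=headers)  # hypothetical path
print(resp.status_code)
```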
{
"source": "jnpoJuwan/just_a_bot",
"score": 3
} |
#### File: bot/cogs/meta.py
```python
import discord
from discord.ext import commands
from ..utils.constants import COLOUR
def fmt(dt=None):
if not dt:
return 'N/A'
return f'{dt:%A, %d/%m/%Y %H:%M UTC}'
class Meta(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(aliases=['github', 'invite', 'source_code'])
async def links(self, ctx):
"""Sends the bot's invite link."""
msg = (
'[Bot Invite](https://discord.com/api/oauth2/authorize?client_id=764106437701140490&permissions=8'
'&scope=bot)\n'
'[Source code](https://github.com/jnpoJuwan/Just-a-bot)'
)
embed = discord.Embed(title='Links', description=msg, colour=COLOUR)
await ctx.send(embed=embed)
@commands.command(aliases=['member', 'user', 'user_info'])
async def member_info(self, ctx, member: discord.Member = None):
"""Sends some information about the member."""
member = member or ctx.author
roles = [role.mention for role in member.roles if role.name != '@everyone'][::-1]
top_role = (member.top_role.mention if member.top_role.name != '@everyone' else member.top_role.name)
embed = discord.Embed(colour=COLOUR)
embed.set_author(name=str(member), icon_url=member.avatar.url)
embed.add_field(name='Nickname', value=member.display_name)
embed.add_field(name='Top Role', value=top_role)
embed.add_field(name=f'Roles ({len(roles)})',
value=' '.join(roles) if len(roles) <= 10 else ' '.join(roles[:10]) + ' ...')
embed.add_field(name='Created:', value=fmt(member.created_at))
embed.add_field(name='Joined:', value=fmt(member.joined_at))
embed.add_field(name='User ID', value=member.id)
embed.set_thumbnail(url=member.avatar.url)
embed.set_footer(text=f'Requested by {ctx.author.display_name}', icon_url=ctx.author.avatar.url)
await ctx.send(embed=embed)
@member_info.error
async def member_error(self, ctx, error):
if isinstance(error, commands.BadArgument):
await ctx.send('Please @mention a member.')
@commands.command(aliases=['server'])
async def server_info(self, ctx):
"""Sends some information about the server."""
guild = ctx.guild
embed = discord.Embed(title=guild.name, colour=COLOUR)
embed.set_thumbnail(url=str(guild.icon_url))
embed.add_field(name='Owner', value=f'<@{guild.owner_id}>')
embed.add_field(name='Region', value=str(guild.region).title())
embed.add_field(name='Member Count', value=str(ctx.guild.member_count))
embed.add_field(name='Created:', value=fmt(guild.created_at))
embed.add_field(name='Server ID', value=guild.id)
embed.set_thumbnail(url=guild.icon_url)
embed.set_footer(text=f'Requested by {ctx.author.display_name}', icon_url=ctx.author.avatar.url)
await ctx.send(embed=embed)
@commands.command()
async def icon(self, ctx):
"""Sends the server's icon."""
embed = discord.Embed(title=f'Icon of {ctx.guild.name}', colour=COLOUR)
embed.set_image(url=ctx.guild.icon_url)
embed.set_footer(text=f'Requested by {ctx.author.display_name}', icon_url=ctx.author.avatar.url)
await ctx.send(embed=embed)
@commands.command(aliases=['pfp'])
async def avatar(self, ctx, member: discord.Member = None):
"""Sends the member's avatar."""
member = member or ctx.author
embed = discord.Embed(title=f'{member.display_name}\'s profile picture', colour=COLOUR)
embed.set_image(url=member.avatar.url)
embed.set_footer(text=f'Requested by {ctx.author.display_name}', icon_url=ctx.author.avatar.url)
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(Meta(bot))
```
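For context, a cog module like this only takes effect once the bot loads it as an extension, at which point the `setup(bot)` function above runs. A minimal sketch, assuming the discord.py 1.x API the rest of this repo appears to target ('bot.cogs.meta' is the dotted path used by the owner cog's reloader):

```python
from discord.ext import commands

bot = commands.Bot(command_prefix="!")   # prefix is a placeholder
bot.load_extension("bot.cogs.meta")      # imports the module and calls setup(bot)
# bot.run("BOT_TOKEN")                   # placeholder token
```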
#### File: bot/cogs/mod.py
```python
from discord.ext import commands
from ..utils import checks
class Mod(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(aliases=['clear', 'delete'])
@commands.cooldown(3, 60.0, commands.BucketType.user)
@checks.is_mod()
async def purge(self, ctx, amount=0):
"""Purges the amount of messages."""
await ctx.message.delete()
limit = 200
if amount > limit:
await ctx.send(f'The amount can\'t exceed {limit} messages.', delete_after=2.5)
return
await ctx.channel.purge(limit=amount)
await ctx.send(f'Successfully purged {amount} message(s).', delete_after=2.5)
@purge.error
async def purge_error(self, ctx, error):
if isinstance(error, commands.BadArgument):
await ctx.send('Please enter an amount of messages.')
def setup(bot):
bot.add_cog(Mod(bot))
```
#### File: bot/cogs/owner.py
```python
import io
import textwrap
import traceback
from contextlib import redirect_stdout
from pathlib import Path
from discord.ext import commands
from ..utils import checks
class Owner(commands.Cog, command_attrs=dict(hidden=True)):
def __init__(self, bot):
self.bot = bot
@commands.command(name='quit', aliases=['die', 'logout', 'sleep'])
@checks.is_bot_owner()
async def _quit(self, ctx):
"""Logout from Discord."""
await ctx.send('**change da world**\n**my final message. Goodb ye**')
await self.bot.logout()
@staticmethod
def cleanup_code(content):
# Remove ```py\n```.
if content.startswith('```') and content.endswith('```'):
return '\n'.join(content.split('\n')[1:-1])
# Remove `foo`.
return content.strip('` \n')
# CRED: @Rapptz (https://github.com/Rapptz/RoboDanny/blob/rewrite/cogs/admin.py)
@commands.command(name='eval', pass_context=True)
@checks.is_bot_owner()
async def eval_(self, ctx, *, body: str):
"""Evaluates Python code."""
env = {
'bot': self.bot,
'ctx': ctx,
'channel': ctx.channel,
'author': ctx.author,
'guild': ctx.guild,
'message': ctx.message
}
env.update(globals())
body = self.cleanup_code(body)
stdout = io.StringIO()
to_compile = f'async def func():\n{textwrap.indent(body, " ")}'
try:
exec(to_compile, env)
except Exception as e:
return await ctx.send(f'```\n{e.__class__.__name__}: {e}\n```')
func = env['func']
try:
with redirect_stdout(stdout):
ret = await func()
except:
value = stdout.getvalue()
await ctx.send(f'```\n{value}{traceback.format_exc()}\n```')
else:
value = stdout.getvalue()
if ret is None:
if value:
await ctx.send(f'```py\n{value}\n```')
else:
self._last_result = ret
await ctx.send(f'```\n{value}{ret}\n```')
# CRED: @Rapptz (https://github.com/Rapptz/RoboDanny/blob/rewrite/cogs/admin.py)
@commands.command()
@checks.is_bot_owner()
async def load(self, ctx, module):
"""Loads a module."""
try:
self.bot.load_extension(module)
except Exception as e:
traceback_msg = ''.join(traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__))
await ctx.send(f'Failed to load cog {module}. Traceback:\n```{traceback_msg}```')
else:
await ctx.send(f'`{module}` has been loaded.')
@commands.command()
@checks.is_bot_owner()
async def unload(self, ctx, module):
"""Unloads a module."""
try:
self.bot.unload_extension(module)
except Exception as e:
traceback_msg = ''.join(traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__))
await ctx.send(f'Failed to load cog {module}. Traceback:\n```{traceback_msg}```')
else:
await ctx.send(f'`{module}` has been unloaded.')
@commands.command()
@checks.is_bot_owner()
async def reload(self, ctx, module):
"""Reloads a module."""
try:
self.bot.reload_extension(module)
except Exception as e:
traceback_msg = ''.join(traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__))
await ctx.send(f'Failed to load cog {module}. Traceback:\n```{traceback_msg}```')
else:
await ctx.send(f'`{module}` has been reloaded.')
@commands.command()
@checks.is_bot_owner()
async def reload_all(self, ctx):
"""Reloads all extensions."""
content = 'Reloading modules...'
message = await ctx.send('Reloading modules...')
for extension_path in Path('bot/cogs').glob('*.py'):
extension_name = extension_path.stem
dotted_path = f'bot.cogs.{extension_name}'
try:
self.bot.reload_extension(dotted_path)
content += f'\nReloaded `{dotted_path}`.'
await message.edit(content=content)
except Exception as e:
traceback_msg = ''.join(traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__))
await ctx.send(f'Failed to load cog {dotted_path}. Traceback:\n```{traceback_msg}```')
content += '\nSuccessfully reloaded all extensions.'
await message.edit(content=content)
def setup(bot):
bot.add_cog(Owner(bot))
``` |
{
"source": "JNPRAutomate/event_driven_automation_with_a_TIG_stack",
"score": 2
} |
#### File: JNPRAutomate/event_driven_automation_with_a_TIG_stack/clean_junos_routes.py
```python
from jnpr.junos import Device
from jnpr.junos.utils.config import Config
def clean_routing_table():
device=Device (host='172.16.31.10', user='jcluser', password='<PASSWORD>')
device.open()
cfg=Config(device, mode='private')
cfg.load(path='junos_configuration/clean_routes.conf', format='text')
cfg.commit()
device.close()
clean_routing_table()
``` |
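A hedged variant of the same routine, using PyEZ context managers and printing the candidate diff before committing; the host and user are copied from above, the password is a placeholder, and `pdiff()` is assumed to be available as in recent PyEZ releases:

```python
from jnpr.junos import Device
from jnpr.junos.utils.config import Config

# Same operation as clean_routing_table(), sketched so the NETCONF session and
# private candidate config are released even if the load or commit fails.
with Device(host="172.16.31.10", user="jcluser", password="***") as dev:
    with Config(dev, mode="private") as cfg:
        cfg.load(path="junos_configuration/clean_routes.conf", format="text")
        cfg.pdiff()    # print the candidate diff before committing
        cfg.commit()
```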
{
"source": "JNPRAutomate/northstar_SDN_controller_automation",
"score": 3
} |
#### File: JNPRAutomate/northstar_SDN_controller_automation/get_active_LSPs.py
```python
import json
import requests
from requests.auth import HTTPBasicAuth
from pprint import pprint
import yaml
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def get_token():
url = 'https://' + my_variables_in_yaml['northstar']['ip'] + ':8443/oauth2/token'
data_to_get_token = {"grant_type":"password","username":authuser,"password":<PASSWORD>}
r = requests.post(url, data=json.dumps(data_to_get_token), auth=(authuser, authpwd), headers=headers, verify=False)
return str('Bearer ' + r.json()['access_token'])
def import_variables_from_file():
my_variables_file=open('variables.yml', 'r')
my_variables_in_string=my_variables_file.read()
my_variables_in_yaml=yaml.load(my_variables_in_string)
my_variables_file.close()
return my_variables_in_yaml
my_variables_in_yaml=import_variables_from_file()
authuser = my_variables_in_yaml['northstar']['username']
authpwd = my_variables_in_yaml['northstar']['password']
url_base = 'http://' + my_variables_in_yaml['northstar']['ip'] + ':8091/NorthStar/API/v2/tenant/'
url = url_base + '1/topology/1/te-lsps'
headers = { 'Accept': 'application/json' }
headers = { 'Content-type': 'application/json' }
# r = requests.get(url, headers=headers, auth=(authuser, authpwd))
get_token()
headers = {'Authorization':get_token(), 'Accept' : 'application/json', 'Content-Type' : 'application/json'}
r = requests.get(url, headers=headers, verify=False)
# type(r.json())
# pprint(r.json())
# This gives the names of all the LSPs that are active
for item in r.json():
if item['operationalStatus'] == 'Active':
print "This LSP is active: " + item['name']
``` |
{
"source": "jnpr-pranav/contrail-controller",
"score": 2
} |
#### File: cat/tests/test_xmpp_peers.py
```python
import argparse
from pyunitreport import HTMLTestRunner
import os
import sys
import unittest
sys.path.append('controller/src/bgp/test/cat/lib')
from cat import *
class TestCAT(unittest.TestCase):
@classmethod
def setUpClass(cls):
CAT.initialize()
return
@classmethod
def tearDownClass(cls):
CAT.delete_all_files()
return
def setUp(self):
self.test = unittest.TestCase.id(self)
def tearDown(self):
CAT.clean_up()
def test_1_single_controlnode_single_agent(self):
print(Fore.CYAN + "\nSingle control-node + single agent" +
Style.RESET_ALL)
c1 = CAT.add_control_node(self.test, "control-node1")
a1 = CAT.add_agent(self.test, "agent1", [c1])
ret, ms = CAT.check_connection([c1], [a1], 2, 3)
self.assertTrue(ret, msg=ms)
def test_2_multi_controlnodes_single_agent(self):
print(Fore.CYAN + "\nMultiple control-nodes + single agent" +
Style.RESET_ALL)
c1 = CAT.add_control_node(self.test, "control-node1")
c2 = CAT.add_control_node(self.test, "control-node2")
a1 = CAT.add_agent(self.test, "agent1", [c1, c2])
ret, ms = CAT.check_connection([c1, c2], [a1], 2, 3)
self.assertTrue(ret, msg=ms)
def test_3_single_controlnode_multi_agents(self):
print(Fore.CYAN + "\nSingle control-node + multiple agents" +
Style.RESET_ALL)
c1 = CAT.add_control_node(self.test, "control-node1")
a1 = CAT.add_agent(self.test, "agent1", [c1])
a2 = CAT.add_agent(self.test, "agent2", [c1])
a3 = CAT.add_agent(self.test, "agent3", [c1])
ret, ms = CAT.check_connection([c1], [a1, a2, a3], 2, 3)
self.assertTrue(ret, msg=ms)
c1.restart_control_node()
ret, ms = CAT.check_connection([c1], [a1, a2, a3], 2, 3)
self.assertTrue(ret, msg=ms)
def test_4_multi_controlnodes_multi_agents(self):
print(Fore.CYAN + "\Multiple control-node + multiple agents" +
Style.RESET_ALL)
c1 = CAT.add_control_node(self.test, "control-node1")
c2 = CAT.add_control_node(self.test, "control-node2")
c3 = CAT.add_control_node(self.test, "control-node3")
a1 = CAT.add_agent(self.test, "agent1", [c1, c2])
a2 = CAT.add_agent(self.test, "agent2", [c1, c3])
ret, ms = CAT.check_connection([c1, c2], [a1], 2, 3)
self.assertTrue(ret, msg=ms)
ret, ms = CAT.check_connection([c1, c3], [a1, a2], 2, 3)
self.assertFalse(ret, msg=ms)
def main():
CAT.parse_arguments()
unittest.main(testRunner=HTMLTestRunner(output=CAT.get_report_dir()))
if __name__ == "__main__":
main()
```
#### File: vnc_cfg_api_server/resources/address_group.py
```python
from vnc_api.gen.resource_common import AddressGroup
from vnc_cfg_api_server.resources._security_base import SecurityResourceBase
class AddressGroupServer(SecurityResourceBase, AddressGroup):
@classmethod
def pre_dbe_create(cls, tenant_name, obj_dict, db_conn):
return cls.check_draft_mode_state(obj_dict)
@classmethod
def pre_dbe_update(cls, id, fq_name, obj_dict, db_conn, **kwargs):
return cls.check_draft_mode_state(obj_dict)
```
#### File: vnc_cfg_api_server/resources/port_profile.py
```python
from vnc_api.gen.resource_common import PortProfile
from vnc_cfg_api_server.resources._resource_base import ResourceMixin
class PortProfileServer(ResourceMixin, PortProfile):
@staticmethod
def validate_storm_control_back_refs(obj_dict):
storm_profile_refs = obj_dict.get('storm_control_profile_refs')
if storm_profile_refs and len(storm_profile_refs) > 1:
ref_list = [ref.get('to') for ref in storm_profile_refs]
return (False, (400, "Port profile %s has more than one "
"storm profile refs %s" % (
obj_dict.get('fq_name'),
ref_list)))
return True, ''
# end validate_storm_control_back_refs
@classmethod
def pre_dbe_create(cls, tenant_name, obj_dict, db_conn):
return cls.validate_storm_control_back_refs(obj_dict)
# end pre_dbe_create
@classmethod
def pre_dbe_update(cls, id, fq_name, obj_dict, db_conn, **kwargs):
return cls.validate_storm_control_back_refs(obj_dict)
# end pre_dbe_update
```
#### File: tests/resources/test_service_appliance.py
```python
import logging
from vnc_api.exceptions import BadRequest
from vnc_api.gen.resource_client import GlobalSystemConfig
from vnc_api.gen.resource_client import PhysicalInterface
from vnc_api.gen.resource_client import PhysicalRouter
from vnc_api.gen.resource_client import ServiceAppliance
from vnc_api.gen.resource_client import ServiceApplianceSet
from vnc_api.gen.resource_xsd import KeyValuePair
from vnc_api.gen.resource_xsd import KeyValuePairs
from vnc_api.gen.resource_xsd import ServiceApplianceInterfaceType
from vnc_cfg_api_server.tests import test_case
logger = logging.getLogger(__name__)
# logger.setLevel(logging.DEBUG)
class TestServiceApplianceBase(test_case.ApiServerTestCase):
@classmethod
def setUpClass(cls, *args, **kwargs):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
super(TestServiceApplianceBase, cls).setUpClass(*args, **kwargs)
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestServiceApplianceBase, cls).tearDownClass(*args, **kwargs)
@property
def api(self):
return self._vnc_lib
class TestPnfServiceAppliance(TestServiceApplianceBase):
def setUp(self):
super(TestPnfServiceAppliance, self).setUp()
logger.debug("setUp called")
# Create Global system config object
self.gsc_obj = self.api.global_system_config_read(
GlobalSystemConfig().fq_name)
self.default_gsc_name = 'default-global-system-config'
# Create Service Appliance Set object
self.sas_obj = ServiceApplianceSet('sas-' + self.id(), self.gsc_obj)
self.sas_obj.set_service_appliance_set_virtualization_type(
'physical-device')
self.api.service_appliance_set_create(self.sas_obj)
# Create PNF Physical Router object
self.pnf_obj = PhysicalRouter('pnf-' + self.id(), self.gsc_obj)
self.pnf_obj.set_physical_router_role('pnf')
self.api.physical_router_create(self.pnf_obj)
# Create spine Physical Router object
self.spine_obj = PhysicalRouter('spine-' + self.id(), self.gsc_obj)
self.api.physical_router_create(self.spine_obj)
# create left, right PNF PI
self.left_pnf_pi_obj = PhysicalInterface(
'ge-0/0/1-' + self.id(), parent_obj=self.pnf_obj)
self.right_pnf_pi_obj = PhysicalInterface(
'ge-0/0/2-' + self.id(), parent_obj=self.pnf_obj)
self.api.physical_interface_create(self.left_pnf_pi_obj)
self.api.physical_interface_create(self.right_pnf_pi_obj)
# create left, right spine PI
self.left_spine_pi_obj = PhysicalInterface(
'xe-0/0/1-' + self.id(), parent_obj=self.spine_obj)
self.right_spine_pi_obj = PhysicalInterface(
'xe-0/0/2-' + self.id(), parent_obj=self.spine_obj)
self.api.physical_interface_create(self.left_spine_pi_obj)
self.api.physical_interface_create(self.right_spine_pi_obj)
def tearDown(self):
super(TestPnfServiceAppliance, self).tearDown()
logger.debug("TearDown called")
# delete PNF PI
self.api.physical_interface_delete(id=self.left_pnf_pi_obj.uuid)
self.api.physical_interface_delete(id=self.right_pnf_pi_obj.uuid)
# delete spine PI
self.api.physical_interface_delete(id=self.left_spine_pi_obj.uuid)
self.api.physical_interface_delete(id=self.right_spine_pi_obj.uuid)
# delete PNF PR and spine PR
self.api.physical_router_delete(id=self.spine_obj.uuid)
self.api.physical_router_delete(id=self.pnf_obj.uuid)
# delete sas
self.api.service_appliance_set_delete(id=self.sas_obj.uuid)
def test_valid_sa(self):
sa_obj = ServiceAppliance('sa-' + self.id(), parent_obj=self.sas_obj)
kvp_array = []
kvp = KeyValuePair(
"left-attachment-point",
self.default_gsc_name +
':' +
'spine-' +
self.id() +
':' +
'xe-0/0/1-' +
self.id())
kvp_array.append(kvp)
kvp = KeyValuePair(
"right-attachment-point",
self.default_gsc_name +
':' +
'spine-' +
self.id() +
':' +
'xe-0/0/2-' +
self.id())
kvp_array.append(kvp)
kvps = KeyValuePairs()
kvps.set_key_value_pair(kvp_array)
sa_obj.set_service_appliance_properties(kvps)
sa_obj.set_service_appliance_virtualization_type('physical-device')
# Add PNF PI refs
attr = ServiceApplianceInterfaceType(interface_type='left')
sa_obj.add_physical_interface(self.left_pnf_pi_obj, attr)
attr = ServiceApplianceInterfaceType(interface_type='right')
sa_obj.add_physical_interface(self.right_pnf_pi_obj, attr)
# Create SA
self.api.service_appliance_create(sa_obj)
self.left_pnf_pi_obj = self.api.physical_interface_read(
id=self.left_pnf_pi_obj.uuid)
self.right_pnf_pi_obj = self.api.physical_interface_read(
id=self.right_pnf_pi_obj.uuid)
# Check if spine PI <-> PNF PI link has been created
self.assertEqual(
self.left_pnf_pi_obj.physical_interface_refs[0].get('uuid'),
self.left_spine_pi_obj.uuid)
self.assertEqual(
self.right_pnf_pi_obj.physical_interface_refs[0].get('uuid'),
self.right_spine_pi_obj.uuid)
# Delete service appliance
self.api.service_appliance_delete(id=sa_obj.uuid)
# Check if spine PI <-> PNF PI link got removed
self.left_pnf_pi_obj = self.api.physical_interface_read(
id=self.left_pnf_pi_obj.uuid)
self.right_pnf_pi_obj = self.api.physical_interface_read(
id=self.right_pnf_pi_obj.uuid)
self.assertFalse(
hasattr(
self.left_pnf_pi_obj,
"physical_interface_refs"))
self.assertFalse(
hasattr(
self.right_pnf_pi_obj,
"physical_interface_refs"))
def test_sa_with_invalid_kvp(self):
sa_obj = ServiceAppliance('sa-' + self.id(), parent_obj=self.sas_obj)
kvp_array = []
kvp = KeyValuePair(
"left-attachment-point",
self.default_gsc_name +
':' +
'spine-' +
self.id() +
':' +
'xe-0/0/1-' +
self.id())
kvp_array.append(kvp)
kvp = KeyValuePair(
"right-attachment-point",
self.default_gsc_name +
':' +
'spine-' +
self.id() +
':' +
'xe-0/0/2-' +
self.id())
# The next line is intentionally commented out
# kvp_array.append(kvp)
kvps = KeyValuePairs()
kvps.set_key_value_pair(kvp_array)
sa_obj.set_service_appliance_properties(kvps)
sa_obj.set_service_appliance_virtualization_type('physical-device')
# Add PNF PI refs
attr = ServiceApplianceInterfaceType(interface_type='left')
sa_obj.add_physical_interface(self.left_pnf_pi_obj, attr)
attr = ServiceApplianceInterfaceType(interface_type='right')
sa_obj.add_physical_interface(self.right_pnf_pi_obj, attr)
# Create SA should raise exception
self.assertRaises(
BadRequest,
self.api.service_appliance_create,
sa_obj)
def test_sa_with_invalid_pr_role(self):
sa_obj = ServiceAppliance('sa-' + self.id(), parent_obj=self.sas_obj)
kvp_array = []
kvp = KeyValuePair(
"left-attachment-point",
self.default_gsc_name +
':' +
'spine-' +
self.id() +
':' +
'xe-0/0/1-' +
self.id())
kvp_array.append(kvp)
kvp = KeyValuePair(
"right-attachment-point",
self.default_gsc_name +
':' +
'spine-' +
self.id() +
':' +
'xe-0/0/2-' +
self.id())
kvp_array.append(kvp)
kvps = KeyValuePairs()
kvps.set_key_value_pair(kvp_array)
sa_obj.set_service_appliance_properties(kvps)
sa_obj.set_service_appliance_virtualization_type('physical-device')
# Add PNF PI refs
attr = ServiceApplianceInterfaceType(interface_type='left')
sa_obj.add_physical_interface(self.left_pnf_pi_obj, attr)
attr = ServiceApplianceInterfaceType(interface_type='right')
sa_obj.add_physical_interface(self.right_pnf_pi_obj, attr)
# Overwrite the role as leaf instead of pnf
self.pnf_obj.set_physical_router_role('leaf')
self.api.physical_router_update(self.pnf_obj)
# Create SA should raise exception
self.assertRaises(BadRequest,
self.api.service_appliance_create,
sa_obj)
```
#### File: tests/resources/test_storm_control_profile.py
```python
import logging
from vnc_api.exceptions import BadRequest
from vnc_api.gen.resource_client import StormControlProfile
from vnc_api.gen.resource_xsd import StormControlParameters
from vnc_cfg_api_server.tests import test_case
logger = logging.getLogger(__name__)
class TestStormControlProfile(test_case.ApiServerTestCase):
@classmethod
def setUpClass(cls, *args, **kwargs):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
super(TestStormControlProfile, cls).setUpClass(*args, **kwargs)
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestStormControlProfile, cls).tearDownClass(*args, **kwargs)
def test_storm_control_profile_create(self):
# create test sc profile object
sc_name = 'strm_ctrl_' + self.id()
bw_percent = 20
traffic_type = ['no-broadcast', 'no-multicast']
actions = ['interface-shutdown']
sc_params_list = StormControlParameters(
storm_control_actions=actions,
bandwidth_percent=bw_percent)
if 'no-broadcast' in traffic_type:
sc_params_list.set_no_broadcast(True)
if 'no-multicast' in traffic_type:
sc_params_list.set_no_multicast(True)
if 'no-registered-multicast' in traffic_type:
sc_params_list.set_no_registered_multicast(True)
if 'no-unknown-unicast' in traffic_type:
sc_params_list.set_no_unknown_unicast(True)
if 'no-unregistered-multicast' in traffic_type:
sc_params_list.set_no_unregistered_multicast(True)
sc_obj = StormControlProfile(
name=sc_name,
storm_control_parameters=sc_params_list
)
self.assertIsNotNone(
self._vnc_lib.storm_control_profile_create(
sc_obj))
def test_storm_control_profile_create_negative_percent(self):
# create test sc profile object
sc_name = 'strm_ctrl_' + self.id()
bw_percent = -20
traffic_type = ['no-broadcast', 'no-multicast']
actions = ['interface-shutdown']
sc_params_list = StormControlParameters(
storm_control_actions=actions,
bandwidth_percent=bw_percent)
if 'no-broadcast' in traffic_type:
sc_params_list.set_no_broadcast(True)
if 'no-multicast' in traffic_type:
sc_params_list.set_no_multicast(True)
if 'no-registered-multicast' in traffic_type:
sc_params_list.set_no_registered_multicast(True)
if 'no-unknown-unicast' in traffic_type:
sc_params_list.set_no_unknown_unicast(True)
if 'no-unregistered-multicast' in traffic_type:
sc_params_list.set_no_unregistered_multicast(True)
sc_obj = StormControlProfile(
name=sc_name,
storm_control_parameters=sc_params_list
)
regex_msg = (r"Invalid bandwidth percentage %d"
% bw_percent)
self.assertRaisesRegexp(
BadRequest, regex_msg,
self._vnc_lib.storm_control_profile_create,
sc_obj)
def test_storm_control_profile_update_invalid_rec_timeout(self):
sc_name = 'strm_ctrl_' + self.id()
bw_percent = 20
traffic_type = ['no-broadcast', 'no-multicast']
actions = ['interface-shutdown']
rec_timeout = 1000
sc_params_list = StormControlParameters(
storm_control_actions=actions,
recovery_timeout=rec_timeout,
bandwidth_percent=bw_percent)
if 'no-broadcast' in traffic_type:
sc_params_list.set_no_broadcast(True)
if 'no-multicast' in traffic_type:
sc_params_list.set_no_multicast(True)
if 'no-registered-multicast' in traffic_type:
sc_params_list.set_no_registered_multicast(True)
if 'no-unknown-unicast' in traffic_type:
sc_params_list.set_no_unknown_unicast(True)
if 'no-unregistered-multicast' in traffic_type:
sc_params_list.set_no_unregistered_multicast(True)
sc_obj = StormControlProfile(
name=sc_name,
storm_control_parameters=sc_params_list
)
self.assertIsNotNone(
self._vnc_lib.storm_control_profile_create(
sc_obj))
# set invalid recovery timeout
rec_timeout = 3800
sc_params_list.set_recovery_timeout(rec_timeout)
sc_obj = StormControlProfile(
name=sc_name,
storm_control_parameters=sc_params_list
)
regex_msg = (r"Invalid recovery timeout %d"
% rec_timeout)
self.assertRaisesRegexp(
BadRequest, regex_msg,
self._vnc_lib.storm_control_profile_update,
sc_obj)
```
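The two negative tests above only establish that a bandwidth percentage of -20 and a recovery timeout of 3800 are rejected with `BadRequest`; the exact limits enforced by the server are not visible in this excerpt. The following is a hedged sketch of the kind of range check they imply, with clearly assumed bounds.

```python
# Sketch only. The bounds below are ASSUMPTIONS inferred from the tests above
# (20% and a 1000s timeout pass, -20% and 3800s fail); the real limits live in
# the api-server resource code, which is not part of this excerpt.
ASSUMED_BW_PERCENT_RANGE = (0, 100)
ASSUMED_RECOVERY_TIMEOUT_RANGE = (10, 3600)   # seconds

def check_storm_control_params(bandwidth_percent, recovery_timeout=None):
    low, high = ASSUMED_BW_PERCENT_RANGE
    if not low <= bandwidth_percent <= high:
        raise ValueError("Invalid bandwidth percentage %d" % bandwidth_percent)
    if recovery_timeout is not None:
        low, high = ASSUMED_RECOVERY_TIMEOUT_RANGE
        if not low <= recovery_timeout <= high:
            raise ValueError("Invalid recovery timeout %d" % recovery_timeout)

check_storm_control_params(20, 1000)   # mirrors the create/update calls that succeed
```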
#### File: tests/resources/test_sub_cluster.py
```python
from builtins import object
import logging
from cfgm_common.exceptions import BadRequest
import mock
from vnc_api.vnc_api import GlobalSystemConfig
from vnc_api.vnc_api import SubCluster
from vnc_cfg_api_server.tests.test_case import ApiServerTestCase
logger = logging.getLogger(__name__)
ASN_2_BYTES = (1 << 16) - 42
ASN_4_BYTES = (1 << 16) + 42
class TestSubClusterBase(ApiServerTestCase):
@classmethod
def setUpClass(cls, *args, **kwargs):
super(TestSubClusterBase, cls).setUpClass(*args, **kwargs)
cls._gsc = cls._vnc_lib.global_system_config_read(
GlobalSystemConfig().fq_name)
if hasattr(cls, '_global_asn') and cls._global_asn:
cls._gsc.set_autonomous_system(cls._global_asn)
if cls._global_asn > 0xFFFF:
cls._gsc.enable_4byte_as = True
cls._vnc_lib.global_system_config_update(cls._gsc)
else:
cls._global_asn = cls._gsc.get_autonomous_system()
# Create a pool of 10 sub-cluster IDs to use for the tests
start = 42
if cls._global_asn <= 0xFFFF:
start += 1 << 16
cls._id_range = set(range(start, start + 10))
@property
def api(self):
return self._vnc_lib
class SubClusterTestSuite(object):
def test_cannot_update_asn(self):
sc = SubCluster('sc-%s' % self.id())
self.api.sub_cluster_create(sc)
sc.set_sub_cluster_asn(43)
self.assertRaises(BadRequest, self.api.sub_cluster_update, sc)
def test_cluster_id_automatically_assigned(self):
sc = SubCluster('sc-%s' % self.id())
self.api.sub_cluster_create(sc)
sc = self.api.sub_cluster_read(id=sc.uuid)
self.assertIsNotNone(sc.get_sub_cluster_id())
zk_db = self._api_server._db_conn._zk_db
self.assertEqual(
zk_db._get_sub_cluster_from_id(sc.get_sub_cluster_id()),
sc.get_fq_name_str(),
)
def test_manually_allocate_sub_cluster_id(self):
sub_cluster_id = self._id_range.pop()
sc = SubCluster('sc-%s' % self.id(), sub_cluster_id=sub_cluster_id)
self.api.sub_cluster_create(sc)
sc = self.api.sub_cluster_read(id=sc.uuid)
self.assertEqual(sc.get_sub_cluster_id(), sub_cluster_id)
zk_db = self._api_server._db_conn._zk_db
self.assertEqual(
zk_db._get_sub_cluster_from_id(sub_cluster_id),
sc.get_fq_name_str(),
)
def test_cannot_use_allocated_sub_cluster_id(self):
# automatically allocated
sc1 = SubCluster('sc1-%s' % self.id())
self.api.sub_cluster_create(sc1)
sc1 = self.api.sub_cluster_read(id=sc1.uuid)
sc2 = SubCluster(
'sc2-%s' % self.id(), sub_cluster_id=sc1.get_sub_cluster_id())
self.assertRaises(BadRequest, self.api.sub_cluster_create, sc2)
# or manually allocated
sc3 = SubCluster(
'sc3-%s' % self.id(), sub_cluster_id=self._id_range.pop())
self.api.sub_cluster_create(sc3)
sc4 = SubCluster(
'sc4-%s' % self.id(),
sub_cluster_id=sc3.get_sub_cluster_id())
self.assertRaises(BadRequest, self.api.sub_cluster_create, sc4)
def test_sub_cluster_id_deallocated_on_delete(self):
sc = SubCluster('sc-%s' % self.id())
self.api.sub_cluster_create(sc)
sc = self.api.sub_cluster_read(id=sc.uuid)
self.api.sub_cluster_delete(id=sc.uuid)
zk_db = self._api_server._db_conn._zk_db
self.assertNotEqual(
zk_db._get_sub_cluster_from_id(sc.get_sub_cluster_id()),
sc.get_fq_name_str(),
)
def test_sub_cluster_id_range(self):
sub_cluster_id = 1 << 32 # more than 4 bytes cluster ID
if self._global_asn > 0xFFFF:
sub_cluster_id = 1 << 16 # more than 2 bytes cluster ID
sc = SubCluster('sc-%s' % self.id(), sub_cluster_id=sub_cluster_id)
with mock.patch.object(self._api_server, '_validate_simple_type',
return_value=sub_cluster_id):
self.assertRaises(BadRequest, self.api.sub_cluster_create, sc)
sc = SubCluster('sc-%s' % self.id(), sub_cluster_id=0)
with mock.patch.object(self._api_server, '_validate_simple_type',
return_value=0):
self.assertRaises(BadRequest, self.api.sub_cluster_create, sc)
def test_update_allocated_id(self):
sc = SubCluster('sc-%s' % self.id())
self.api.sub_cluster_create(sc)
sc = self.api.sub_cluster_read(id=sc.uuid)
allocated_id = sc.get_sub_cluster_id()
self.assertIsNotNone(allocated_id)
sub_cluster_id = self._id_range.pop()
self.assertNotEqual(allocated_id, sub_cluster_id)
sc.set_sub_cluster_id(sub_cluster_id)
self.api.sub_cluster_update(sc)
zk_db = self._api_server._db_conn._zk_db
self.assertEqual(
zk_db._get_sub_cluster_from_id(sub_cluster_id),
sc.get_fq_name_str(),
)
self.assertIsNone(zk_db._get_sub_cluster_from_id(allocated_id))
class TestSubClusterWith2BytesASN(TestSubClusterBase, SubClusterTestSuite):
_global_asn = ASN_2_BYTES
class TestSubClusterWith4BytesASN(TestSubClusterBase, SubClusterTestSuite):
_global_asn = ASN_4_BYTES
class TestSubClusterChangeGlobalASN(TestSubClusterBase):
_global_asn = ASN_2_BYTES
def test_change_global_asn_from_2_to_4_bytes(self):
sc = SubCluster('sc-%s' % self.id())
self.api.sub_cluster_create(sc)
sc = self.api.sub_cluster_read(id=sc.uuid)
allocated_id = sc.get_sub_cluster_id()
self.assertIsNotNone(allocated_id)
self.assertLess(allocated_id, 1 << 16)
self._gsc.set_autonomous_system(ASN_4_BYTES)
self._gsc.enable_4byte_as = True
self.api.global_system_config_update(self._gsc)
sc = self.api.sub_cluster_read(id=sc.uuid)
self.assertEqual(sc.get_sub_cluster_id(), allocated_id)
self._gsc.set_autonomous_system(ASN_2_BYTES)
self._gsc.enable_4byte_as = False
self.api.global_system_config_update(self._gsc)
    def test_change_global_asn_from_2_to_4_bytes_with_too_high_id(self):
sub_cluster_id = self._id_range.pop()
self.assertGreaterEqual(sub_cluster_id, 1 << 16)
sc = SubCluster('sc-%s' % self.id(), sub_cluster_id=sub_cluster_id)
self.api.sub_cluster_create(sc)
self._gsc.set_autonomous_system(ASN_4_BYTES)
self._gsc.enable_4byte_as = True
self.assertRaises(BadRequest, self.api.global_system_config_update,
self._gsc)
self.api.sub_cluster_delete(id=sc.uuid)
self._gsc.set_autonomous_system(ASN_4_BYTES)
self._gsc.enable_4byte_as = True
self.api.global_system_config_update(self._gsc)
sc = SubCluster('sc-%s' % self.id(), sub_cluster_id=sub_cluster_id)
self.assertRaises(BadRequest, self.api.sub_cluster_create, sc)
self._gsc.set_autonomous_system(ASN_2_BYTES)
self._gsc.enable_4byte_as = False
self.api.global_system_config_update(self._gsc)
self.api.sub_cluster_create(sc)
class TestSubClusterFirstAllocatedId(TestSubClusterBase):
def test_first_allocated_id_is_one(self):
sc = SubCluster('sc-%s' % self.id())
self.api.sub_cluster_create(sc)
sc = self.api.sub_cluster_read(id=sc.uuid)
self.assertEqual(sc.get_sub_cluster_id(), 1)
```
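Taken together, the assertions above imply a simple rule tying the sub-cluster ID range to the width of the global ASN: with a 2-byte ASN an ID may occupy up to 4 bytes, with a 4-byte ASN it must fit in 2 bytes, and 0 is never valid. The helper below is a sketch inferred from those assertions, not code from the api-server.

```python
# Inferred from the tests above (test_sub_cluster_id_range and the ASN-change
# tests); the server-side implementation is not shown in this excerpt.
def sub_cluster_id_is_valid(sub_cluster_id, global_asn):
    max_id = (1 << 16) - 1 if global_asn > 0xFFFF else (1 << 32) - 1
    return 1 <= sub_cluster_id <= max_id

ASN_2_BYTES = (1 << 16) - 42
ASN_4_BYTES = (1 << 16) + 42
assert sub_cluster_id_is_valid(1 << 16, ASN_2_BYTES)        # 2-byte ASN: large IDs allowed
assert not sub_cluster_id_is_valid(1 << 16, ASN_4_BYTES)    # 4-byte ASN: must fit in 2 bytes
assert not sub_cluster_id_is_valid(0, ASN_2_BYTES)          # 0 is never allocatable
```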
#### File: vnc_cfg_api_server/tests/test_perms2_target.py
```python
from builtins import str
from builtins import object
from keystoneclient.v2_0 import client
from vnc_api.vnc_api import *
import uuid
import argparse
import keystoneclient.exceptions as kc_exceptions
import cfgm_common
import pprint
import logging
import sys
logger = logging.getLogger(__name__)
PERMS_NONE = 0
PERMS_X = 1
PERMS_W = 2
PERMS_R = 4
PERMS_WX = 3
PERMS_RX = 5
PERMS_RW = 6
PERMS_RWX = 7
class TestPerms(object):
def parse_args(self):
# Eg. python test_perms.py <api-server-ip>
parser = argparse.ArgumentParser(
description="Test API server resource permissions")
parser.add_argument('--api_server_ip', default='127.0.0.1',
help="IP address of API server")
parser.add_argument('--api-server-port', type=int, default=8082,
help="API server port (default 8082)")
self.args = parser.parse_args()
logger.info('API server = %s : %d'\
% (self.args.api_server_ip, self.args.api_server_port))
# end parse_args
# create users specified as array of tuples (name, password, role)
# assumes admin user and tenant exists
class User(object):
def __init__(self, apis_ip, apis_port, kc, name, password, role, project):
self.name = name
self.password = password
self.role = role
self.project = project
self.project_uuid = None
self.project_obj = None
# create user/role/tenant in keystone as needed
kc_users = set([user.name for user in kc.users.list()])
kc_roles = set([user.name for user in kc.roles.list()])
kc_tenants = set([tenant.name for tenant in kc.tenants.list()])
if self.role not in kc_roles:
logger.info('role %s missing from keystone ... creating' % self.role)
kc.roles.create(self.role)
if self.project not in kc_tenants:
logger.info('tenant %s missing from keystone ... creating' % self.project)
kc.tenants.create(self.project)
for tenant in kc.tenants.list():
if tenant.name == self.project:
break
self.project_uuid = tenant.id
if self.name not in kc_users:
logger.info('user %s missing from keystone ... creating' % self.name)
user = kc.users.create(self.name, self.password, '', tenant_id=tenant.id)
role_dict = {role.name:role for role in kc.roles.list()}
user_dict = {user.name:user for user in kc.users.list()}
logger.info('Adding user %s with role %s to tenant %s' \
% (name, role, project))
try:
kc.roles.add_user_role(user_dict[self.name], role_dict[self.role], tenant)
except kc_exceptions.Conflict:
pass
try:
            self.vnc_lib = VncApi(username = self.name, password = self.password,
tenant_name = self.project,
api_server_host = apis_ip,api_server_port = apis_port)
except cfgm_common.exceptions.PermissionDenied:
logger.error('Error creating API server client handle (VncApi)')
logger.error('RBAC disabled or missing user-token middleware in Neutron pipeline? Please verify')
sys.exit(1)
# end __init__
def api_acl_name(self):
rg_name = list(self.project_obj.get_fq_name())
rg_name.append('default-api-access-list')
return rg_name
# display resource id-perms
def print_perms(obj_perms):
share_perms = ['%s:%d' % (x.tenant, x.tenant_access) for x in obj_perms.share]
return '%s/%d %d %s' \
% (obj_perms.owner, obj_perms.owner_access,
obj_perms.global_access, share_perms)
# end print_perms
# set id perms for object
def set_perms(obj, owner=None, owner_access=None, share=None, global_access=None):
try:
perms = obj.get_perms2()
except AttributeError:
logger.error('Unable to set perms2 in object %s' % obj.get_fq_name())
sys.exit()
logger.info('Current perms %s = %s' % (obj.get_fq_name(), print_perms(perms)))
if owner:
perms.owner = owner
if owner_access:
perms.owner_access = owner_access
if share is not None:
perms.share = [ShareType(obj_uuid, obj_crud) for (obj_uuid, obj_crud) in share]
if global_access is not None:
perms.global_access = global_access
obj.set_perms2(perms)
logger.info('New perms %s = %s' % (obj.get_fq_name(), print_perms(perms)))
# end set_perms
# Read VNC object. Return None if object doesn't exists
def vnc_read_obj(vnc, res_type, name = None, obj_uuid = None):
if name is None and obj_uuid is None:
logger.error('Need FQN or UUID to read object')
return None
method_name = res_type.replace('-', '_')
method = getattr(vnc, "%s_read" % (method_name))
try:
if obj_uuid:
if '-' not in obj_uuid:
obj_uuid = str(uuid.UUID(obj_uuid))
return method(id=obj_uuid)
else:
return method(fq_name=name)
except NoIdError:
logger.error('%s %s not found!' % (res_type, name if name else obj_uuid))
return None
except PermissionDenied:
logger.error('Permission denied reading %s %s' % (res_type, name))
raise
# end
def show_rbac_rules(api_access_list_entries):
if api_access_list_entries is None:
logger.info('Empty RBAC group!')
return
# {u'rbac_rule': [{u'rule_object': u'*', u'rule_perms': [{u'role_crud': u'CRUD', u'role_name': u'admin'}], u'rule_field': None}]}
rule_list = api_access_list_entries.get_rbac_rule()
logger.info('Rules (%d):' % len(rule_list))
logger.info('----------')
idx = 1
for rule in rule_list:
o = rule.rule_object
f = rule.rule_field
ps = ''
for p in rule.rule_perms:
ps += p.role_name + ':' + p.role_crud + ','
o_f = "%s.%s" % (o,f) if f else o
logger.info('%2d %-32s %s' % (idx, o_f, ps))
idx += 1
logger.info('')
def build_rule(rule_str):
r = rule_str.split(" ") if rule_str else []
if len(r) < 2:
return None
# [0] is object.field, [1] is list of perms
obj_field = r[0].split(".")
perms = r[1].split(",")
o = obj_field[0]
f = obj_field[1] if len(obj_field) > 1 else None
o_f = "%s.%s" % (o,f) if f else o
# perms eg ['foo:CRU', 'bar:CR']
rule_perms = []
for perm in perms:
p = perm.split(":")
rule_perms.append(RbacPermType(role_name = p[0], role_crud = p[1]))
# build rule
rule = RbacRuleType(
rule_object = o,
rule_field = f,
rule_perms = rule_perms)
return rule
#end
def match_rule(rule_list, rule_str):
extend_rule_list = True
nr = build_rule(rule_str)
for r in rule_list:
if r.rule_object != nr.rule_object or r.rule_field != nr.rule_field:
continue
# object and field match - fix rule in place
extend_rule_list = False
for np in nr.rule_perms:
extend_perms = True
for op in r.rule_perms:
if op.role_name == np.role_name:
# role found - merge incoming and existing crud in place
x = set(list(op.role_crud)) | set(list(np.role_crud))
op.role_crud = ''.join(x)
extend_perms = False
if extend_perms:
r.rule_perms.append(RbacPermType(role_name = np.role_name, role_crud = np.role_crud))
if extend_rule_list:
rule_list.append(nr)
# end match_rule
def vnc_fix_api_access_list(vnc_lib, pobj, rule_str = None):
rg_name = list(pobj.get_fq_name())
rg_name.append('default-api-access-list')
rg = vnc_read_obj(vnc_lib, 'api-access-list', name = rg_name)
create = False
rule_list = []
if rg == None:
rg = ApiAccessList(
name = 'default-api-access-list',
parent_obj = pobj,
api_access_list_entries = None)
create = True
elif rule_str:
api_access_list_entries = rg.get_api_access_list_entries()
rule_list = api_access_list_entries.get_rbac_rule()
if rule_str:
rule = match_rule(rule_list, rule_str)
rentry = RbacRuleEntriesType(rule_list)
rg.set_api_access_list_entries(rentry)
if create:
logger.info('API access list empty. Creating with default rule')
vnc_lib.api_access_list_create(rg)
else:
vnc_lib.api_access_list_update(rg)
show_rbac_rules(rg.get_api_access_list_entries())
def all(ip='127.0.0.1', port=8082, domain_name='default-domain',
proj_name='my-proj', subnet='192.168.1.0', prefix=24, vn_name='my-vn'):
testfail = 0
testpass = 0
fqdn = [domain_name]
pobjs = {}
kc = client.Client(username='admin', password='<PASSWORD>',
tenant_name='admin',
auth_url='http://127.0.0.1:5000/v2.0')
# create admin account first to ensure there are no permission problems
admin = User(ip, port, kc, 'admin', 'contrail123', 'admin', 'admin')
alice = User(ip, port, kc, 'alice', 'alice123', 'alice-role', 'alice-proj')
bob = User(ip, port, kc, 'bob', 'bob123', 'bob-role', 'bob-proj')
# create domain
domain = vnc_read_obj(admin.vnc_lib, 'domain', name = fqdn)
if domain == None:
domain = Domain(domain_name)
admin.vnc_lib.domain_create(domain)
        domain = vnc_read_obj(admin.vnc_lib, 'domain', name = domain.get_fq_name())
logger.info('Created domain %s' % fqdn)
# read projects
alice.project_obj = vnc_read_obj(admin.vnc_lib, 'project', obj_uuid = alice.project_uuid)
logger.info('Created Project object for %s' % alice.project)
bob.project_obj = vnc_read_obj(admin.vnc_lib, 'project', obj_uuid = bob.project_uuid)
logger.info('Created Project object for %s' % bob.project)
# reassign ownership of projects to alice and bob (from admin)
for user in [alice, bob]:
logger.info('Change owner of project %s to %s' % (user.project, user.project_uuid))
set_perms(user.project_obj, owner=user.project_uuid, share = [])
admin.vnc_lib.project_update(user.project_obj)
# delete test VN if it exists
for net_name in [vn_name, 'second-vn', 'bob-vn-in-alice-project']:
vn_fq_name = [domain_name, alice.project, net_name]
vn = vnc_read_obj(admin.vnc_lib, 'virtual-network', name = vn_fq_name)
if vn:
logger.info('%s exists ... deleting to start fresh' % vn_fq_name)
admin.vnc_lib.virtual_network_delete(fq_name = vn_fq_name)
logger.info('########### API ACCESS (CREATE) ##################')
# delete api-access-list for alice and bob and disallow api access to their projects
for user in [alice, bob]:
logger.info("Delete api-acl for project %s to disallow api access" % user.project)
vnc_fix_api_access_list(admin.vnc_lib, user.project_obj, rule_str = None)
logger.info('alice: trying to create VN in her project')
vn = VirtualNetwork(vn_name, alice.project_obj)
try:
alice.vnc_lib.virtual_network_create(vn)
logger.error('Created virtual network %s ... test failed!' % vn.get_fq_name())
testfail += 1
except PermissionDenied as e:
logger.info('Failed to create VN ... Test passes!')
testpass += 1
if testfail > 0:
sys.exit()
# allow permission to create virtual-network
for user in [alice, bob]:
logger.info("%s: project %s to allow full access to role %s" % \
(user.name, user.project, user.role))
# note that collection API is set for create operation
vnc_fix_api_access_list(admin.vnc_lib, user.project_obj,
rule_str = 'virtual-networks %s:C' % user.role)
logger.info('alice: trying to create VN in her project')
try:
alice.vnc_lib.virtual_network_create(vn)
logger.info('Created virtual network %s ... test passed!' % vn.get_fq_name())
testpass += 1
except PermissionDenied as e:
logger.error('Failed to create VN ... Test failed!')
testfail += 1
if testfail > 0:
sys.exit()
logger.info('########### API ACCESS (READ) ##################')
logger.info('alice: trying to read VN in her project (should fail)')
try:
vn2 = vnc_read_obj(alice.vnc_lib, 'virtual-network', name = vn.get_fq_name())
logger.error('Read VN without read permission ... test failed!!!')
testfail += 1
except PermissionDenied as e:
logger.info('Unable to read VN ... test passed')
testpass += 1
if testfail > 0:
sys.exit()
# allow read access
vnc_fix_api_access_list(admin.vnc_lib, alice.project_obj,
rule_str = 'virtual-network %s:R' % alice.role)
logger.info('alice: added permission to read virtual-network')
logger.info('alice: trying to read VN in her project (should succeed)')
try:
vn2 = vnc_read_obj(alice.vnc_lib, 'virtual-network', name = vn.get_fq_name())
logger.info('Read VN successfully ... test passed')
testpass += 1
except PermissionDenied as e:
testfail += 1
logger.info('Read VN failed ... test failed!!!')
if testfail > 0:
sys.exit()
logger.info('########### API ACCESS (UPDATE) ##################')
logger.info('alice: trying to update VN in her project (should fail)')
try:
vn.display_name = "foobar"
alice.vnc_lib.virtual_network_update(vn)
logger.error('Set field in virtual network %s ... test failed!' % vn.get_fq_name())
testfail += 1
except PermissionDenied as e:
logger.info('Unable to update field in VN ... Test succeeded!')
testpass += 1
if testfail > 0:
sys.exit()
vnc_fix_api_access_list(admin.vnc_lib, alice.project_obj,
rule_str = 'virtual-network %s:U' % alice.role)
logger.info('alice: added permission to update virtual-network')
logger.info('alice: trying to set field in her VN ')
try:
vn.display_name = "foobar"
alice.vnc_lib.virtual_network_update(vn)
logger.info('Set field in virtual network %s ... test passed!' % vn.get_fq_name())
testpass += 1
except PermissionDenied as e:
logger.error('Failed to update field in VN ... Test failed!')
testfail += 1
if testfail > 0:
sys.exit()
vn2 = vnc_read_obj(alice.vnc_lib, 'virtual-network', name = vn.get_fq_name())
logger.info('alice: display_name %s' % vn2.display_name)
if vn2.display_name != "foobar":
testfail += 1
logger.error('Failed to update shared field correctly in VN ... Test failed!')
else:
testpass += 1
logger.info('Updated shared field correctly in virtual network %s ... test passed!' % vn.get_fq_name())
if testfail > 0:
sys.exit()
logger.info('########### API ACCESS (update field) ##################')
logger.info('Restricting update of field to admin only ')
vnc_fix_api_access_list(admin.vnc_lib, alice.project_obj,
rule_str = 'virtual-network.display_name admin:U')
try:
vn.display_name = "alice"
alice.vnc_lib.virtual_network_update(vn)
logger.error('Set field in virtual network %s ... test failed!' % vn.get_fq_name())
testfail += 1
except PermissionDenied as e:
logger.info('Failed to update field in VN ... Test passed!')
testpass += 1
if testfail > 0:
sys.exit()
logger.info('########### API ACCESS (DELETE) ##################')
# delete test VN ... should fail
vn_fq_name = [domain_name, alice.project, vn_name]
try:
alice.vnc_lib.virtual_network_delete(fq_name = vn_fq_name)
logger.error('%s: Deleted VN %s ... test failed!' % (alice.name, vn_fq_name))
testfail += 1
except PermissionDenied as e:
logger.info('%s: Error deleting VN %s ... test passed!' % (alice.name, vn_fq_name))
testpass += 1
if testfail > 0:
sys.exit()
logger.info('############### PERMS2 ##########################')
logger.info( 'Giving bob API level access to perform all ops on virtual-network')
vnc_fix_api_access_list(admin.vnc_lib, bob.project_obj,
rule_str = 'virtual-network %s:RUD' % bob.role)
logger.info('bob: trying to create VN in alice project ... should fail')
try:
vn2 = VirtualNetwork('bob-vn-in-alice-project', alice.project_obj)
bob.vnc_lib.virtual_network_create(vn2)
logger.error('Created virtual network %s ... test failed!' % vn2.get_fq_name())
testfail += 1
except PermissionDenied as e:
logger.info('Failed to create VN ... Test passed!')
testpass += 1
if testfail > 0:
sys.exit()
vn = vnc_read_obj(alice.vnc_lib, 'virtual-network', name = vn_fq_name)
logger.info('########### READ (SHARED WITH TENANT) ##################')
logger.info('Disable share in virtual networks for others')
set_perms(vn, share = [], global_access = PERMS_NONE)
alice.vnc_lib.virtual_network_update(vn)
logger.info('Reading VN as bob ... should fail')
try:
net_obj = bob.vnc_lib.virtual_network_read(id=vn.get_uuid())
logger.error('Succeeded in reading VN. Test failed!')
testfail += 1
except PermissionDenied as e:
logger.info('Failed to read VN ... Test passed!')
testpass += 1
if testfail > 0:
sys.exit()
logger.info('Enable share in virtual network for bob project')
set_perms(vn, share = [(bob.project_uuid, PERMS_R)])
alice.vnc_lib.virtual_network_update(vn)
logger.info('Reading VN as bob ... should succeed')
try:
net_obj = bob.vnc_lib.virtual_network_read(id=vn.get_uuid())
logger.info('Succeeded in reading VN. Test passed!')
testpass += 1
except PermissionDenied as e:
logger.error('Failed to read VN ... Test failed!')
testfail += 1
if testfail > 0:
sys.exit()
logger.info('########### READ (GLOBALLY SHARED ) ##################')
logger.info('Disable share in virtual networks for others')
set_perms(vn, share = [])
alice.vnc_lib.virtual_network_update(vn)
logger.info('Reading VN as bob ... should fail')
try:
net_obj = bob.vnc_lib.virtual_network_read(id=vn.get_uuid())
logger.error('Succeeded in reading VN. Test failed!')
testfail += 1
except PermissionDenied as e:
logger.info('Failed to read VN ... Test passed!')
testpass += 1
if testfail > 0:
sys.exit()
logger.info('Enable virtual networks in alice project for global sharing (read only)')
set_perms(vn, share = [], global_access = PERMS_R)
alice.vnc_lib.virtual_network_update(vn)
logger.info('Reading VN as bob ... should succeed')
try:
net_obj = bob.vnc_lib.virtual_network_read(id=vn.get_uuid())
logger.info('Succeeded in reading VN. Test passed!')
testpass += 1
except PermissionDenied as e:
logger.error('Failed to read VN ... Test failed!')
testfail += 1
if testfail > 0:
sys.exit()
logger.info('Writing shared VN as bob ... should fail')
try:
vn.display_name = "foobar"
bob.vnc_lib.virtual_network_update(vn)
logger.error('Succeeded in updating VN. Test failed!!')
testfail += 1
except PermissionDenied as e:
logger.info('Failed to update VN ... Test passed!')
testpass += 1
if testfail > 0:
sys.exit()
logger.info('Enable virtual networks in alice project for global sharing (read, write)')
logger.info('Writing shared VN as bob ... should succeed')
# important: read VN afresh to overwrite display_name update pending status
vn = vnc_read_obj(alice.vnc_lib, 'virtual-network', name = vn_fq_name)
set_perms(vn, global_access = PERMS_RW)
alice.vnc_lib.virtual_network_update(vn)
try:
bob.vnc_lib.virtual_network_update(vn)
logger.info('Succeeded in updating VN. Test passed!')
testpass += 1
except PermissionDenied as e:
logger.error('Failed to update VN ... Test failed!!')
testfail += 1
if testfail > 0:
sys.exit()
logger.info('########################### Collections #################')
logger.info('User should be able to see VN in own project and any shared')
logger.info('alice: get virtual network collection ... should fail')
try:
x = alice.vnc_lib.virtual_networks_list(parent_id = alice.project_uuid)
logger.error('Read VN collection without list permission ... test failed!')
testfail += 1
except PermissionDenied as e:
logger.info('Failed to read VN collection ... test passed')
testpass += 1
if testfail > 0:
sys.exit()
# allow permission to read virtual-network collection
for user in [alice, bob]:
logger.info("%s: project %s to allow collection access to role %s" % \
(user.name, user.project, user.role))
# note that collection API is set for create operation
vnc_fix_api_access_list(admin.vnc_lib, user.project_obj,
rule_str = 'virtual-networks %s:CR' % user.role)
# create one more VN in alice project to differentiate from what bob sees
vn2 = VirtualNetwork('second-vn', alice.project_obj)
alice.vnc_lib.virtual_network_create(vn2)
logger.info('Alice: created additional VN %s in her project' % vn2.get_fq_name())
logger.info('Alice: network list')
x = alice.vnc_lib.virtual_networks_list(parent_id = alice.project_uuid)
for item in x['virtual-networks']:
logger.info(' %s: %s' % (item['uuid'], item['fq_name']))
expected = set(['my-vn', 'second-vn'])
received = set([item['fq_name'][-1] for item in x['virtual-networks']])
if received != expected:
logger.error('Alice: Received incorrect VN list ... test failed!')
testfail += 1
else:
logger.info('Alice: Received correct VN list ... test passed')
testpass += 1
if testfail > 0:
sys.exit()
logger.info('Bob: network list')
y = bob.vnc_lib.virtual_networks_list(parent_id = bob.project_uuid)
for item in y['virtual-networks']:
logger.info(' %s: %s' % (item['uuid'], item['fq_name']))
# need changes in auto code generation for lists
expected = set(['my-vn'])
received = set([item['fq_name'][-1] for item in y['virtual-networks']])
if received != expected:
logger.error('Bob: Received incorrect VN list ... test failed!')
testfail += 1
else:
logger.info('Bob: Received correct VN list ... test passed')
testpass += 1
if testfail > 0:
sys.exit()
logger.info('Tests fail=%d, pass=%d' % (testfail, testpass))
if __name__ == '__main__':
perms = TestPerms()
perms.parse_args()
log_level = 'INFO'
logger.setLevel(log_level)
logformat = logging.Formatter("%(levelname)s: %(message)s")
stdout = logging.StreamHandler()
stdout.setLevel(log_level)
stdout.setFormatter(logformat)
logger.addHandler(stdout)
all(ip=perms.args.api_server_ip, port=perms.args.api_server_port)
```
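The rule strings passed to `vnc_fix_api_access_list()` in the script above (for example `'virtual-networks alice-role:CR'` or `'virtual-network.display_name admin:U'`) follow the form `<object>[.<field>] <role>:<CRUD>[,<role>:<CRUD>...]`, which `build_rule()` turns into `RbacRuleType` objects. The snippet below is a self-contained illustration of that format; it mirrors only the parsing and uses plain tuples instead of the vnc_api types.

```python
# Parsing sketch for the RBAC rule-string format used above; it deliberately
# avoids the RbacRuleType/RbacPermType classes so it runs without vnc_api.
def parse_rule_str(rule_str):
    obj_field, perms = rule_str.split(" ", 1)
    parts = obj_field.split(".")
    obj = parts[0]
    field = parts[1] if len(parts) > 1 else None
    role_perms = [tuple(p.split(":")) for p in perms.split(",")]
    return obj, field, role_perms

print(parse_rule_str('virtual-networks alice-role:CR,bob-role:R'))
# ('virtual-networks', None, [('alice-role', 'CR'), ('bob-role', 'R')])
print(parse_rule_str('virtual-network.display_name admin:U'))
# ('virtual-network', 'display_name', [('admin', 'U')])
```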
#### File: tests/unit/test_dependency_tracker.py
```python
from __future__ import unicode_literals
#
# Copyright (c) 2015 Juniper Networks, Inc. All rights reserved.
#
import unittest
import mock
import six
from cfgm_common.dependency_tracker import DependencyTracker
from cfgm_common.vnc_db import DBBase
#
# Red ------------------
# | |
# | |
# v v
# Blue <--> Green
# | |
# | |
# v v
# Purple <--> White
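# In the mock hierarchy below the arrows translate to: Red is the parent of
# Blue and Green; Blue is the parent of Purple; Green is the parent of White;
# Blue holds refs to Green (seen from Green as blue back-refs) and Purple
# holds refs to White (seen from White as purple back-refs). The reaction
# maps in the tests walk these parent/child and ref edges.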
class DBBaseTM(DBBase):
obj_type = __name__
class RedSM(DBBaseTM):
_dict = {}
obj_type = 'red'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.blues = set()
self.greens = set()
obj_dict = self.update(obj_dict)
self.set_children('blue', obj_dict)
self.set_children('green', obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
return obj
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
del cls._dict[uuid]
# end delete
# end RedSM
class GreenSM(DBBaseTM):
_dict = {}
obj_type = 'green'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.whites = set()
obj_dict = self.update(obj_dict)
self.add_to_parent(obj_dict)
self.red = self.get_parent_uuid(obj_dict)
self.set_children('white', obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.update_single_ref('blue', obj)
return obj
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.remove_from_parent()
obj.update_single_ref('blue', {})
del cls._dict[uuid]
# end delete
# end GreenSM
class BlueSM(DBBaseTM):
_dict = {}
obj_type = 'blue'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.purples = set()
self.greens = set()
obj_dict = self.update(obj_dict)
self.add_to_parent(obj_dict)
self.red = self.get_parent_uuid(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.update_multiple_refs('green', obj)
self.name = obj['fq_name'][-1]
return obj
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.remove_from_parent()
obj.update_multiple_refs('green', {})
del cls._dict[uuid]
# end delete
# end BlueSM
class PurpleSM(DBBaseTM):
_dict = {}
obj_type = 'purple'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
obj_dict = self.update(obj_dict)
self.add_to_parent(obj_dict)
self.blue = self.get_parent_uuid(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.update_single_ref('white', obj)
return obj
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.remove_from_parent()
obj.update_single_ref('white', {})
del cls._dict[uuid]
# end delete
# end PurpleSM
class WhiteSM(DBBaseTM):
_dict = {}
obj_type = 'white'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.purples = set()
obj_dict = self.update(obj_dict)
self.add_to_parent(obj_dict)
self.green = self.get_parent_uuid(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.update_multiple_refs('purple', obj)
self.name = obj['fq_name'][-1]
return obj
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.remove_from_parent()
obj.update_multiple_refs('purple', {})
del cls._dict[uuid]
# end delete
# end WhiteSM
DBBase._OBJ_TYPE_MAP = {
'red': RedSM,
'blue': BlueSM,
'green': GreenSM,
'white': WhiteSM,
'purple': PurpleSM
}
class DepTrackTester(unittest.TestCase):
def green_read(self, obj_type, uuid, **kwargs):
green_obj = {}
green_obj['uuid'] = uuid[0]
green_obj['fq_name'] = ['fake-green-' + uuid[0]]
green_obj['parent_type'] = 'red'
green_obj['parent_uuid'] = 'fake-red-uuid'
return True, [green_obj]
# end green_read
def white_read(self, obj_type, uuid, **kwargs):
white_obj = {}
white_obj['uuid'] = uuid[0]
white_obj['fq_name'] = ['fake-white-' + uuid[0]]
white_obj['parent_type'] = 'green'
white_obj['parent_uuid'] = 'fake-green-uuid'
return True, [white_obj]
# end white_read
def purple_read(self, obj_type, uuid, **kwargs):
purple_obj = {}
purple_obj['uuid'] = uuid[0]
purple_obj['fq_name'] = ['fake-purple-' + uuid[0]]
purple_obj['parent_type'] = 'blue'
purple_obj['parent_uuid'] = 'fake-blue-uuid'
return True, [purple_obj]
# end purple_read
def green_read_with_refs(self, obj_type, uuid, **kwargs):
green_obj = {}
green_obj['uuid'] = 'fake-green-uuid'
green_obj['fq_name'] = ['fake-green-uuid']
green_obj['parent_type'] = 'red'
green_obj['parent_uuid'] = 'fake-red-uuid'
green_obj['blue_back_refs'] = [{'uuid': 'fake-blue-uuid'}]
return True, [green_obj]
# end green_read_with_refs
def green_read_with_update_refs(self, obj_type, uuid, **kwargs):
green_obj = {}
green_obj['uuid'] = 'fake-green-uuid'
green_obj['fq_name'] = ['fake-green-uuid']
green_obj['parent_type'] = 'red'
green_obj['parent_uuid'] = 'fake-red-uuid'
green_obj['blue_back_refs'] = [{'uuid': 'fake-blue-uuid-1'}]
return True, [green_obj]
# end green_read_with_update_refs
def white_read_with_refs(self, obj_type, uuid, **kwargs):
white_obj = {}
white_obj['uuid'] = 'fake-white-uuid'
white_obj['fq_name'] = ['fake-white-uuid']
white_obj['parent_type'] = 'green'
white_obj['parent_uuid'] = 'fake-green-uuid-0'
white_obj['purple_back_refs'] = [{'uuid': 'fake-purple-uuid'}]
return True, [white_obj]
# end white_read_with_refs
def red_read(self, obj_type, uuid, **kwargs):
red_obj = {}
red_obj['uuid'] = 'fake-red-uuid'
red_obj['fq_name'] = ['fake-red-uuid']
return True, [red_obj]
# end red_read
def red_read_with_child(self, obj_type, uuid, **kwargs):
red_obj = {}
red_obj['uuid'] = 'fake-red-uuid'
red_obj['fq_name'] = ['fake-red-uuid']
red_obj['blues'] = [{'uuid': 'fake-blue-uuid'}]
red_obj['greens'] = [{'to': 'fake-green-uuid'}]
return True, [red_obj]
# end red_read_with_child
def blue_read(self, obj_type, uuid, **kwargs):
blue_obj = {}
blue_obj['uuid'] = uuid[0]
blue_obj['fq_name'] = ['fake-blue-' + uuid[0]]
blue_obj['parent_type'] = 'red'
blue_obj['parent_uuid'] = 'fake-red-uuid'
return True, [blue_obj]
# end blue_read
def blue_read_with_refs(self, obj_type, uuid, **kwargs):
blue_obj = {}
blue_obj['uuid'] = 'fake-blue-uuid'
blue_obj['fq_name'] = ['fake-blue-uuid']
blue_obj['parent_type'] = 'red'
blue_obj['parent_uuid'] = 'fake-red-uuid'
blue_obj['green_refs'] = [{'uuid': 'fake-green-uuid'}]
return True, [blue_obj]
# end blue_read_with_refs
def blue_read_with_multi_refs(self, obj_type, uuid, **kwargs):
blue_obj = {}
blue_obj['uuid'] = 'fake-blue-uuid'
blue_obj['fq_name'] = ['fake-blue-uuid']
blue_obj['parent_type'] = 'red'
blue_obj['parent_uuid'] = 'fake-red-uuid'
blue_obj['green_refs'] = [{'uuid': 'fake-green-uuid-0'},
{'uuid': 'fake-green-uuid-1'}]
return True, [blue_obj]
# end blue_read_with_multi_refs
def blue_read_with_new_refs(self, obj_type, uuid, **kwargs):
blue_obj = {}
blue_obj['uuid'] = 'fake-blue-uuid'
blue_obj['fq_name'] = ['fake-blue-uuid']
blue_obj['parent_type'] = 'red'
blue_obj['parent_uuid'] = 'fake-red-uuid'
blue_obj['green_refs'] = [{'uuid': 'fake-green-uuid-2'},
{'uuid': 'fake-green-uuid-3'}]
return True, [blue_obj]
# end blue_read_with_new_refs
def purple_read_with_multi_refs(self, obj_type, uuid, **kwargs):
purple_obj = {}
purple_obj['uuid'] = 'fake-purple-uuid'
purple_obj['fq_name'] = ['fake-purple-uuid']
purple_obj['parent_type'] = 'blue'
purple_obj['parent_uuid'] = 'fake-blue-uuid'
purple_obj['white_refs'] = [{'uuid': 'fake-white-uuid-0'},
{'uuid': 'fake-white-uuid-1'}]
return True, [purple_obj]
# end purple_read_with_multi_refs
def setUp(self):
if six.PY3:
self.assertItemsEqual = self.assertCountEqual
DBBase.init(self, None, None)
DBBase._OBJ_TYPE_MAP['red'] = RedSM
DBBase._OBJ_TYPE_MAP['blue'] = BlueSM
DBBase._OBJ_TYPE_MAP['green'] = GreenSM
DBBase._OBJ_TYPE_MAP['white'] = WhiteSM
DBBase._OBJ_TYPE_MAP['purple'] = PurpleSM
BlueSM._object_db = mock.MagicMock()
BlueSM._object_db.object_read = self.blue_read
RedSM._object_db = mock.MagicMock()
RedSM._object_db.object_read = self.red_read
GreenSM._object_db = mock.MagicMock()
GreenSM._object_db.object_read = self.green_read
WhiteSM._object_db = mock.MagicMock()
WhiteSM._object_db.object_read = self.white_read
PurpleSM._object_db = mock.MagicMock()
PurpleSM._object_db.object_read = self.purple_read
# end setUp
def tearDown(self):
GreenSM.reset()
BlueSM.reset()
RedSM.reset()
PurpleSM.reset()
del DBBase._OBJ_TYPE_MAP['red']
del DBBase._OBJ_TYPE_MAP['blue']
del DBBase._OBJ_TYPE_MAP['green']
del DBBase._OBJ_TYPE_MAP['white']
del DBBase._OBJ_TYPE_MAP['purple']
# end tearDown
def test_add_delete(self):
RedSM.locate("fake-red-uuid")
BlueSM.locate("fake-blue-uuid")
GreenSM.locate("fake-green-uuid")
blues = [blue.uuid for blue in list(BlueSM.values())]
blues_1 = [uuid for uuid, value in list(BlueSM.items())]
blues_2 = [value for value in BlueSM]
self.assertEqual(blues, blues_1)
self.assertEqual(blues_2, blues_1)
self.assertIsNotNone(GreenSM.get("fake-green-uuid"))
self.assertIsNotNone(RedSM.get("fake-red-uuid"))
self.assertIsNotNone(BlueSM.get("fake-blue-uuid"))
red = RedSM.get("fake-red-uuid")
self.assertEqual(len(red.blues), 1)
self.assertEqual(len(red.greens), 1)
green = GreenSM.get("fake-green-uuid")
self.assertEqual(green.red, "fake-red-uuid")
blue = BlueSM.get("fake-blue-uuid")
self.assertEqual(blue.red, "fake-red-uuid")
RedSM.delete("fake-red-uuid")
GreenSM.delete("fake-green-uuid")
BlueSM.delete("fake-blue-uuid")
# end test_add_delete
def fq_name_to_uuid(self, type, name):
return 'fake-green-uuid'
# end fq_name_to_uuid
def test_add_set_child(self):
RedSM._object_db = mock.MagicMock()
RedSM._object_db.object_read = self.red_read_with_child
RedSM._object_db.fq_name_to_uuid = self.fq_name_to_uuid
RedSM.locate("fake-red-uuid")
self.assertIsNotNone(RedSM.get("fake-red-uuid"))
red = RedSM.get("fake-red-uuid")
self.assertEqual(len(red.blues), 1)
self.assertEqual(len(red.greens), 1)
BlueSM.locate("fake-blue-uuid")
GreenSM.locate("fake-green-uuid")
self.assertIsNotNone(GreenSM.get("fake-green-uuid"))
self.assertIsNotNone(BlueSM.get("fake-blue-uuid"))
red = RedSM.get("fake-red-uuid")
green = GreenSM.get("fake-green-uuid")
self.assertEqual(green.red, "fake-red-uuid")
blue = BlueSM.get("fake-blue-uuid")
self.assertEqual(blue.red, "fake-red-uuid")
BlueSM.delete("fake-blue-uuid")
self.assertEqual(len(red.blues), 0)
RedSM.delete("fake-red-uuid")
GreenSM.delete("fake-green-uuid")
# end test_add_set_child
def test_add_with_refs(self):
GreenSM._object_db.object_read = self.green_read_with_refs
BlueSM._object_db.object_read = self.blue_read_with_refs
RedSM.locate("fake-red-uuid")
BlueSM.locate("fake-blue-uuid")
GreenSM.locate("fake-green-uuid")
self.assertIsNotNone(RedSM.get("fake-red-uuid"))
self.assertIsNotNone(GreenSM.get("fake-green-uuid"))
self.assertIsNotNone(BlueSM.get("fake-blue-uuid"))
red = RedSM.get("fake-red-uuid")
self.assertEqual(len(red.blues), 1)
self.assertEqual(len(red.greens), 1)
green = GreenSM.get("fake-green-uuid")
self.assertEqual(green.red, "fake-red-uuid")
self.assertEqual(green.blue, "fake-blue-uuid")
blue = BlueSM.get("fake-blue-uuid")
self.assertEqual(blue.red, "fake-red-uuid")
self.assertEqual(len(blue.greens), 1)
BlueSM.delete("fake-blue-uuid")
self.assertEqual(green.blue, None)
RedSM.delete("fake-red-uuid")
GreenSM.delete("fake-green-uuid")
# end test_add_with_refs
def test_update_with_refs(self):
GreenSM._object_db.object_read = self.green_read_with_refs
BlueSM._object_db.object_read = self.blue_read_with_refs
RedSM.locate("fake-red-uuid")
BlueSM.locate("fake-blue-uuid")
GreenSM.locate("fake-green-uuid")
self.assertIsNotNone(RedSM.get("fake-red-uuid"))
self.assertIsNotNone(GreenSM.get("fake-green-uuid"))
self.assertIsNotNone(BlueSM.get("fake-blue-uuid"))
red = RedSM.get("fake-red-uuid")
self.assertEqual(len(red.blues), 1)
self.assertEqual(len(red.greens), 1)
green = GreenSM.get("fake-green-uuid")
self.assertEqual(green.red, "fake-red-uuid")
self.assertEqual(green.blue, "fake-blue-uuid")
blue = BlueSM.get("fake-blue-uuid")
self.assertEqual(blue.red, "fake-red-uuid")
self.assertEqual(len(blue.greens), 1)
BlueSM.delete("fake-blue-uuid")
BlueSM.locate("fake-blue-uuid-1")
GreenSM._object_db.object_read = self.green_read_with_update_refs
green.update()
green = GreenSM.get("fake-green-uuid")
RedSM.delete("fake-red-uuid")
GreenSM.delete("fake-green-uuid")
# end test_update_with_refs
def test_add_with_multi_refs(self):
GreenSM._object_db.object_read = self.green_read_with_refs
BlueSM._object_db.object_read = self.blue_read_with_multi_refs
RedSM.locate("fake-red-uuid")
BlueSM.locate("fake-blue-uuid")
GreenSM.locate("fake-green-uuid-0")
GreenSM.locate("fake-green-uuid-1")
self.assertIsNotNone(RedSM.get("fake-red-uuid"))
self.assertIsNotNone(GreenSM.get("fake-green-uuid-0"))
self.assertIsNotNone(GreenSM.get("fake-green-uuid-1"))
self.assertIsNotNone(BlueSM.get("fake-blue-uuid"))
red = RedSM.get("fake-red-uuid")
self.assertEqual(len(red.blues), 1)
self.assertEqual(len(red.greens), 2)
green_0 = GreenSM.get("fake-green-uuid-0")
green_1 = GreenSM.get("fake-green-uuid-1")
self.assertEqual(green_0.red, "fake-red-uuid")
self.assertEqual(green_0.blue, "fake-blue-uuid")
self.assertEqual(green_1.red, "fake-red-uuid")
self.assertEqual(green_1.blue, "fake-blue-uuid")
blue = BlueSM.get("fake-blue-uuid")
self.assertEqual(blue.red, "fake-red-uuid")
self.assertEqual(len(blue.greens), 2)
self.assertTrue("fake-green-uuid-0" in (blue.greens))
self.assertTrue("fake-green-uuid-1" in (blue.greens))
RedSM.delete("fake-red-uuid")
GreenSM.delete("fake-green-uuid-0")
GreenSM.delete("fake-green-uuid-1")
BlueSM.delete("fake-blue-uuid")
# end test_add_with_multi_refs
def test_update_with_multi_refs(self):
GreenSM._object_db.object_read = self.green_read_with_refs
BlueSM._object_db.object_read = self.blue_read_with_multi_refs
RedSM.locate("fake-red-uuid")
BlueSM.locate("fake-blue-uuid")
GreenSM.locate("fake-green-uuid-0")
GreenSM.locate("fake-green-uuid-1")
self.assertIsNotNone(RedSM.get("fake-red-uuid"))
self.assertIsNotNone(GreenSM.get("fake-green-uuid-0"))
self.assertIsNotNone(GreenSM.get("fake-green-uuid-1"))
self.assertIsNotNone(BlueSM.get("fake-blue-uuid"))
red = RedSM.get("fake-red-uuid")
self.assertEqual(len(red.blues), 1)
self.assertEqual(len(red.greens), 2)
green_0 = GreenSM.get("fake-green-uuid-0")
green_1 = GreenSM.get("fake-green-uuid-1")
self.assertEqual(green_0.red, "fake-red-uuid")
self.assertEqual(green_0.blue, "fake-blue-uuid")
self.assertEqual(green_1.red, "fake-red-uuid")
self.assertEqual(green_1.blue, "fake-blue-uuid")
blue = BlueSM.get("fake-blue-uuid")
self.assertEqual(blue.red, "fake-red-uuid")
self.assertEqual(len(blue.greens), 2)
self.assertTrue("fake-green-uuid-0" in (blue.greens))
self.assertTrue("fake-green-uuid-1" in (blue.greens))
GreenSM.delete("fake-green-uuid-0")
GreenSM.delete("fake-green-uuid-1")
GreenSM.locate("fake-green-uuid-2")
GreenSM.locate("fake-green-uuid-3")
BlueSM._object_db.object_read = self.blue_read_with_new_refs
blue.update()
green_2 = GreenSM.get("fake-green-uuid-2")
green_3 = GreenSM.get("fake-green-uuid-3")
self.assertEqual(green_2.red, "fake-red-uuid")
self.assertEqual(green_2.blue, "fake-blue-uuid")
self.assertEqual(green_3.red, "fake-red-uuid")
self.assertEqual(green_3.blue, "fake-blue-uuid")
blue = BlueSM.get("fake-blue-uuid")
self.assertEqual(blue.red, "fake-red-uuid")
self.assertEqual(len(blue.greens), 2)
self.assertTrue("fake-green-uuid-2" in (blue.greens))
self.assertTrue("fake-green-uuid-3" in (blue.greens))
self.assertFalse("fake-green-uuid-0" in (blue.greens))
self.assertFalse("fake-green-uuid-1" in (blue.greens))
RedSM.delete("fake-red-uuid")
BlueSM.delete("fake-blue-uuid")
GreenSM.delete("fake-green-uuid-2")
GreenSM.delete("fake-green-uuid-3")
    # end test_update_with_multi_refs
def test_find(self):
GreenSM._object_db.object_read = self.green_read
BlueSM._object_db.object_read = self.blue_read
RedSM.locate("fake-red-uuid")
BlueSM.locate("OK")
GreenSM.locate("fake-green-uuid-0")
GreenSM.locate("fake-green-uuid-1")
self.assertIsNone(BlueSM.find_by_name_or_uuid("fake-blue-NOK"))
self.assertIsNotNone(BlueSM.find_by_name_or_uuid("fake-blue-OK"))
self.assertIsNotNone(BlueSM.find_by_name_or_uuid("OK"))
RedSM.delete("fake-red-uuid")
GreenSM.delete("fake-green-uuid-0")
GreenSM.delete("fake-green-uuid-1")
        BlueSM.delete("OK")
# end test_find
def test_basic_dep_track(self):
reaction_map = {
"red": {
'self': ['blue', 'green'],
},
"blue": {
'self': [],
'red': [],
},
"green": {
'self': [],
'red': [],
},
"purple": {
'self': [],
},
}
dependency_tracker = DependencyTracker(DBBase._OBJ_TYPE_MAP,
reaction_map)
red = RedSM.locate("fake-red-uuid")
BlueSM.locate("fake-blue-uuid")
GreenSM.locate("fake-green-uuid")
dependency_tracker.evaluate('red', red)
self.assertEqual(len(dependency_tracker.resources), 3)
self.assertTrue("red" in dependency_tracker.resources)
self.assertTrue("blue" in dependency_tracker.resources)
self.assertTrue("green" in dependency_tracker.resources)
self.assertEqual(dependency_tracker.resources["red"],
["fake-red-uuid"])
self.assertEqual(dependency_tracker.resources["blue"],
["fake-blue-uuid"])
self.assertEqual(dependency_tracker.resources["green"],
["fake-green-uuid"])
RedSM.delete("fake-red-uuid")
GreenSM.delete("fake-green-uuid")
BlueSM.delete("fake-blue-uuid")
# end test_basic_dep_track
@unittest.skip("Flaky test, sometimes the DT resource list length is one "
"intead of two. Probably issue when tests run in parallel")
def test_basic_dep_track_1(self):
reaction_map = {
"red": {
'self': [],
},
"blue": {
'self': ['green'],
},
"green": {
'self': [],
'blue': [],
},
"purple": {
'self': [],
},
}
GreenSM._object_db.object_read = self.green_read_with_refs
BlueSM._object_db.object_read = self.blue_read_with_multi_refs
dependency_tracker = DependencyTracker(DBBase._OBJ_TYPE_MAP,
reaction_map)
RedSM.locate("fake-red-uuid")
blue = BlueSM.locate("fake-blue-uuid")
GreenSM.locate("fake-green-uuid")
dependency_tracker.evaluate('blue', blue)
self.assertEqual(len(dependency_tracker.resources), 2)
self.assertTrue("blue" in dependency_tracker.resources)
self.assertTrue("green" in dependency_tracker.resources)
self.assertEqual(dependency_tracker.resources["blue"],
["fake-blue-uuid"])
self.assertEqual(dependency_tracker.resources["green"],
["fake-green-uuid"])
RedSM.delete("fake-red-uuid")
GreenSM.delete("fake-green-uuid")
BlueSM.delete("fake-blue-uuid")
# end test_basic_dep_track_1
def test_basic_dep_track_2(self):
reaction_map = {
"red": {
'self': [],
},
"blue": {
'self': ['green'],
},
"green": {
'self': [],
'blue': [],
},
"purple": {
'self': [],
},
}
GreenSM._object_db.object_read = self.green_read_with_refs
BlueSM._object_db.object_read = self.blue_read_with_multi_refs
dependency_tracker = DependencyTracker(DBBase._OBJ_TYPE_MAP,
reaction_map)
RedSM.locate("fake-red-uuid")
blue = BlueSM.locate("fake-blue-uuid")
GreenSM.locate("fake-green-uuid-0")
GreenSM.locate("fake-green-uuid-1")
dependency_tracker.evaluate('blue', blue)
self.assertEqual(len(dependency_tracker.resources), 2)
self.assertTrue("blue" in dependency_tracker.resources)
self.assertTrue("green" in dependency_tracker.resources)
self.assertItemsEqual(dependency_tracker.resources["green"],
["fake-green-uuid-1", "fake-green-uuid-0"])
self.assertEqual(dependency_tracker.resources["blue"],
["fake-blue-uuid"])
RedSM.delete("fake-red-uuid")
GreenSM.delete("fake-green-uuid-0")
GreenSM.delete("fake-green-uuid-1")
BlueSM.delete("fake-blue-uuid")
    # end test_basic_dep_track_2
def test_basic_dep_track_3(self):
reaction_map = {
"red": {
'self': ['green', 'blue'],
},
"blue": {
'self': ['green'],
'red': [],
},
"green": {
'self': ['white'],
'blue': ['white'],
'red': [],
},
"white": {
'self': ['purple'],
'green': ['purple'],
},
"purple": {
'self': ['blue'],
'white': ['blue'],
},
}
GreenSM._object_db.object_read = self.green_read_with_refs
WhiteSM._object_db.object_read = self.white_read_with_refs
BlueSM._object_db.object_read = self.purple_read_with_multi_refs
BlueSM._object_db.object_read = self.blue_read_with_multi_refs
dependency_tracker = DependencyTracker(DBBase._OBJ_TYPE_MAP,
reaction_map)
RedSM.locate("fake-red-uuid")
blue = BlueSM.locate("fake-blue-uuid")
GreenSM.locate("fake-green-uuid-0")
GreenSM.locate("fake-green-uuid-1")
WhiteSM.locate("fake-white-uuid")
PurpleSM.locate("fake-purple-uuid")
dependency_tracker.evaluate('blue', blue)
self.assertEqual(len(dependency_tracker.resources), 4)
self.assertTrue("blue" in dependency_tracker.resources)
self.assertTrue("green" in dependency_tracker.resources)
self.assertTrue("white" in dependency_tracker.resources)
self.assertTrue("purple" in dependency_tracker.resources)
self.assertItemsEqual(dependency_tracker.resources["green"],
["fake-green-uuid-1", "fake-green-uuid-0"])
self.assertEqual(dependency_tracker.resources["blue"],
["fake-blue-uuid"])
self.assertEqual(dependency_tracker.resources["purple"],
["fake-purple-uuid"])
self.assertEqual(dependency_tracker.resources["white"],
["fake-white-uuid"])
RedSM.delete("fake-red-uuid")
GreenSM.delete("fake-green-uuid-0")
GreenSM.delete("fake-green-uuid-1")
BlueSM.delete("fake-blue-uuid")
WhiteSM.delete("fake-white-uuid")
PurpleSM.delete("fake-purple-uuid")
# end test_basic_dep_track_3
def test_basic_dep_track_4(self):
reaction_map = {
"red": {
'self': ['green', 'blue'],
},
"blue": {
'self': ['green'],
'red': [],
},
"green": {
'self': ['white'],
'blue': ['white'],
'red': [],
},
"white": {
'self': ['purple'],
'green': ['purple'],
},
"purple": {
'self': ['blue'],
'white': ['blue'],
},
}
GreenSM._object_db.object_read = self.green_read_with_refs
WhiteSM._object_db.object_read = self.white_read_with_refs
BlueSM._object_db.object_read = self.purple_read_with_multi_refs
BlueSM._object_db.object_read = self.blue_read_with_multi_refs
dependency_tracker = DependencyTracker(DBBase._OBJ_TYPE_MAP,
reaction_map)
red = RedSM.locate("fake-red-uuid")
BlueSM.locate("fake-blue-uuid")
GreenSM.locate("fake-green-uuid-0")
GreenSM.locate("fake-green-uuid-1")
WhiteSM.locate("fake-white-uuid")
PurpleSM.locate("fake-purple-uuid")
dependency_tracker.evaluate('red', red)
self.assertEqual(len(dependency_tracker.resources), 3)
self.assertTrue("blue" in dependency_tracker.resources)
self.assertTrue("green" in dependency_tracker.resources)
self.assertTrue("red" in dependency_tracker.resources)
self.assertItemsEqual(dependency_tracker.resources["green"],
["fake-green-uuid-1", "fake-green-uuid-0"])
self.assertEqual(dependency_tracker.resources["blue"],
["fake-blue-uuid"])
self.assertEqual(dependency_tracker.resources["red"],
["fake-red-uuid"])
RedSM.delete("fake-red-uuid")
GreenSM.delete("fake-green-uuid-0")
GreenSM.delete("fake-green-uuid-1")
BlueSM.delete("fake-blue-uuid")
WhiteSM.delete("fake-white-uuid")
PurpleSM.delete("fake-purple-uuid")
# end test_basic_dep_track_4
def test_basic_dep_track_5(self):
reaction_map = {
"red": {
'self': ['green', 'blue'],
},
"blue": {
'self': ['green'],
'red': [],
'purple': ['green'],
},
"green": {
'self': ['white'],
'blue': ['white'],
'red': [],
},
"white": {
'self': ['purple'],
'green': ['purple'],
},
"purple": {
'self': ['blue'],
'white': ['blue'],
},
}
GreenSM._object_db.object_read = self.green_read_with_refs
WhiteSM._object_db.object_read = self.white_read_with_refs
PurpleSM._object_db.object_read = self.purple_read_with_multi_refs
BlueSM._object_db.object_read = self.blue_read_with_multi_refs
dependency_tracker = DependencyTracker(DBBase._OBJ_TYPE_MAP,
reaction_map)
RedSM.locate("fake-red-uuid")
BlueSM.locate("fake-blue-uuid")
green = GreenSM.locate("fake-green-uuid-0")
GreenSM.locate("fake-green-uuid-1")
WhiteSM.locate("fake-white-uuid")
PurpleSM.locate("fake-purple-uuid")
dependency_tracker.evaluate('green', green)
self.assertEqual(len(dependency_tracker.resources), 4)
self.assertTrue("blue" in dependency_tracker.resources)
self.assertTrue("green" in dependency_tracker.resources)
self.assertTrue("white" in dependency_tracker.resources)
self.assertTrue("purple" in dependency_tracker.resources)
self.assertItemsEqual(dependency_tracker.resources["green"],
["fake-green-uuid-1", "fake-green-uuid-0"])
self.assertEqual(dependency_tracker.resources["blue"],
["fake-blue-uuid"])
self.assertEqual(dependency_tracker.resources["white"],
["fake-white-uuid"])
self.assertEqual(dependency_tracker.resources["purple"],
["fake-purple-uuid"])
RedSM.delete("fake-red-uuid")
GreenSM.delete("fake-green-uuid-0")
GreenSM.delete("fake-green-uuid-1")
BlueSM.delete("fake-blue-uuid")
WhiteSM.delete("fake-white-uuid")
PurpleSM.delete("fake-purple-uuid")
# end test_basic_dep_track_5
def test_basic_dep_track_6(self):
reaction_map = {
"red": {
'self': ['green', 'blue'],
},
"blue": {
'self': ['green'],
'red': [],
'purple': ['green'],
},
"green": {
'self': ['white'],
'blue': ['white'],
'red': [],
},
"white": {
'self': ['purple'],
'green': ['purple'],
},
"purple": {
'self': ['blue'],
'white': ['blue'],
},
}
GreenSM._object_db.object_read = self.green_read_with_refs
WhiteSM._object_db.object_read = self.white_read_with_refs
PurpleSM._object_db.object_read = self.purple_read_with_multi_refs
BlueSM._object_db.object_read = self.blue_read_with_multi_refs
dependency_tracker = DependencyTracker(DBBase._OBJ_TYPE_MAP,
reaction_map)
RedSM.locate("fake-red-uuid")
BlueSM.locate("fake-blue-uuid")
GreenSM.locate("fake-green-uuid-0")
GreenSM.locate("fake-green-uuid-1")
white = WhiteSM.locate("fake-white-uuid")
PurpleSM.locate("fake-purple-uuid")
dependency_tracker.evaluate('white', white)
self.assertEqual(len(dependency_tracker.resources), 4)
self.assertTrue("blue" in dependency_tracker.resources)
self.assertTrue("green" in dependency_tracker.resources)
self.assertTrue("white" in dependency_tracker.resources)
self.assertTrue("purple" in dependency_tracker.resources)
self.assertItemsEqual(dependency_tracker.resources["green"],
["fake-green-uuid-1", "fake-green-uuid-0"])
self.assertEqual(dependency_tracker.resources["blue"],
["fake-blue-uuid"])
self.assertEqual(dependency_tracker.resources["white"],
["fake-white-uuid"])
self.assertEqual(dependency_tracker.resources["purple"],
["fake-purple-uuid"])
RedSM.delete("fake-red-uuid")
GreenSM.delete("fake-green-uuid-0")
GreenSM.delete("fake-green-uuid-1")
BlueSM.delete("fake-blue-uuid")
WhiteSM.delete("fake-white-uuid")
PurpleSM.delete("fake-purple-uuid")
# end test_basic_dep_track_6
def test_basic_dep_track_7(self):
reaction_map = {
"red": {
'self': ['green', 'blue'],
},
"blue": {
'self': ['green'],
'red': [],
'purple': ['green'],
},
"green": {
'self': ['white'],
'blue': ['white'],
'red': [],
},
"white": {
'self': ['purple'],
'green': ['purple'],
},
"purple": {
'self': ['blue'],
'white': ['blue'],
},
}
GreenSM._object_db.object_read = self.green_read_with_refs
WhiteSM._object_db.object_read = self.white_read_with_refs
PurpleSM._object_db.object_read = self.purple_read_with_multi_refs
BlueSM._object_db.object_read = self.blue_read_with_multi_refs
dependency_tracker = DependencyTracker(DBBase._OBJ_TYPE_MAP,
reaction_map)
RedSM.locate("fake-red-uuid")
BlueSM.locate("fake-blue-uuid")
GreenSM.locate("fake-green-uuid-0")
GreenSM.locate("fake-green-uuid-1")
WhiteSM.locate("fake-white-uuid")
purple = PurpleSM.locate("fake-purple-uuid")
dependency_tracker.evaluate('purple', purple)
self.assertEqual(len(dependency_tracker.resources), 4)
self.assertTrue("blue" in dependency_tracker.resources)
self.assertTrue("green" in dependency_tracker.resources)
self.assertTrue("white" in dependency_tracker.resources)
self.assertTrue("purple" in dependency_tracker.resources)
self.assertItemsEqual(dependency_tracker.resources["green"],
["fake-green-uuid-1", "fake-green-uuid-0"])
self.assertEqual(dependency_tracker.resources["blue"],
["fake-blue-uuid"])
self.assertEqual(dependency_tracker.resources["white"],
["fake-white-uuid"])
self.assertEqual(dependency_tracker.resources["purple"],
["fake-purple-uuid"])
RedSM.delete("fake-red-uuid")
GreenSM.delete("fake-green-uuid-0")
GreenSM.delete("fake-green-uuid-1")
BlueSM.delete("fake-blue-uuid")
WhiteSM.delete("fake-white-uuid")
PurpleSM.delete("fake-purple-uuid")
# end test_basic_dep_track_7
def test_basic_dep_track_8(self):
reaction_map = {
"red": {
'self': ['green', 'blue'],
},
"blue": {
'self': ['green'],
'red': ['blue'],
'purple': ['green'],
'green': [],
},
"green": {
'self': ['white'],
'blue': ['white'],
'red': ['blue'],
},
"white": {
'self': ['purple'],
'green': ['purple'],
},
"purple": {
'self': ['blue'],
'white': ['blue'],
},
}
GreenSM._object_db.object_read = self.green_read_with_refs
WhiteSM._object_db.object_read = self.white_read_with_refs
PurpleSM._object_db.object_read = self.purple_read_with_multi_refs
BlueSM._object_db.object_read = self.blue_read_with_multi_refs
dependency_tracker = DependencyTracker(DBBase._OBJ_TYPE_MAP,
reaction_map)
red = RedSM.locate("fake-red-uuid")
BlueSM.locate("fake-blue-uuid")
GreenSM.locate("fake-green-uuid-0")
GreenSM.locate("fake-green-uuid-1")
WhiteSM.locate("fake-white-uuid")
PurpleSM.locate("fake-purple-uuid")
dependency_tracker.evaluate('red', red)
self.assertEqual(len(dependency_tracker.resources), 3)
self.assertTrue("blue" in dependency_tracker.resources)
self.assertTrue("green" in dependency_tracker.resources)
self.assertTrue("red" in dependency_tracker.resources)
self.assertItemsEqual(dependency_tracker.resources["green"],
["fake-green-uuid-1", "fake-green-uuid-0"])
self.assertEqual(dependency_tracker.resources["blue"],
["fake-blue-uuid"])
self.assertEqual(dependency_tracker.resources["red"],
["fake-red-uuid"])
RedSM.delete("fake-red-uuid")
GreenSM.delete("fake-green-uuid-0")
GreenSM.delete("fake-green-uuid-1")
BlueSM.delete("fake-blue-uuid")
WhiteSM.delete("fake-white-uuid")
PurpleSM.delete("fake-purple-uuid")
# end test_basic_dep_track_8
def test_basic_dep_track_update_1(self):
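# Same topology as above, but blue's green references are switched from
# uuid-0/1 to uuid-2/3 (via blue_read_with_new_refs and blue.update()), so a
# second evaluate() should report only the new green uuids.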
reaction_map = {
"red": {
'self': ['green', 'blue'],
},
"blue": {
'self': ['green'],
'red': ['blue'],
'purple': ['green'],
'green': [],
},
"green": {
'self': ['white'],
'blue': ['white'],
'red': ['blue'],
},
"white": {
'self': ['purple'],
'green': ['purple'],
},
"purple": {
'self': ['blue'],
'white': ['blue'],
},
}
GreenSM._object_db.object_read = self.green_read_with_refs
WhiteSM._object_db.object_read = self.white_read_with_refs
PurpleSM._object_db.object_read = self.purple_read_with_multi_refs
BlueSM._object_db.object_read = self.blue_read_with_multi_refs
dependency_tracker = DependencyTracker(DBBase._OBJ_TYPE_MAP,
reaction_map)
red = RedSM.locate("fake-red-uuid")
blue = BlueSM.locate("fake-blue-uuid")
GreenSM.locate("fake-green-uuid-0")
GreenSM.locate("fake-green-uuid-1")
WhiteSM.locate("fake-white-uuid")
PurpleSM.locate("fake-purple-uuid")
dependency_tracker.evaluate('red', red)
GreenSM.delete("fake-green-uuid-0")
GreenSM.delete("fake-green-uuid-1")
GreenSM.locate("fake-green-uuid-2")
GreenSM.locate("fake-green-uuid-3")
BlueSM._object_db.object_read = self.blue_read_with_new_refs
blue.update()
dependency_tracker.resources = {}
dependency_tracker.evaluate('red', red)
self.assertEqual(len(dependency_tracker.resources), 3)
self.assertTrue("blue" in dependency_tracker.resources)
self.assertTrue("green" in dependency_tracker.resources)
self.assertTrue("red" in dependency_tracker.resources)
self.assertItemsEqual(dependency_tracker.resources["green"],
["fake-green-uuid-2", "fake-green-uuid-3"])
self.assertEqual(dependency_tracker.resources["blue"],
["fake-blue-uuid"])
self.assertEqual(dependency_tracker.resources["red"],
["fake-red-uuid"])
RedSM.delete("fake-red-uuid")
GreenSM.delete("fake-green-uuid-2")
GreenSM.delete("fake-green-uuid-3")
BlueSM.delete("fake-blue-uuid")
WhiteSM.delete("fake-white-uuid")
PurpleSM.delete("fake-purple-uuid")
# end test_basic_dep_track_update_1
def test_basic_dep_track_update_2(self):
reaction_map = {
"red": {
'self': ['green', 'blue'],
},
"blue": {
'self': ['green'],
'red': ['blue'],
'purple': ['green'],
'green': [],
},
"green": {
'self': ['white'],
'blue': ['white'],
'red': ['blue'],
},
"white": {
'self': ['purple'],
'green': ['purple'],
},
"purple": {
'self': ['blue'],
'white': ['blue'],
},
}
GreenSM._object_db.object_read = self.green_read_with_refs
WhiteSM._object_db.object_read = self.white_read_with_refs
PurpleSM._object_db.object_read = self.purple_read_with_multi_refs
BlueSM._object_db.object_read = self.blue_read_with_multi_refs
dependency_tracker = DependencyTracker(DBBase._OBJ_TYPE_MAP,
reaction_map)
RedSM.locate("fake-red-uuid")
blue = BlueSM.locate("fake-blue-uuid")
GreenSM.locate("fake-green-uuid-0")
GreenSM.locate("fake-green-uuid-1")
WhiteSM.locate("fake-white-uuid")
PurpleSM.locate("fake-purple-uuid")
dependency_tracker.evaluate('blue', blue)
GreenSM.delete("fake-green-uuid-0")
GreenSM.delete("fake-green-uuid-1")
GreenSM.locate("fake-green-uuid-2")
GreenSM.locate("fake-green-uuid-3")
BlueSM._object_db.object_read = self.blue_read_with_new_refs
blue.update()
dependency_tracker.resources = {}
dependency_tracker.evaluate('blue', blue)
self.assertEqual(len(dependency_tracker.resources), 2)
self.assertTrue("blue" in dependency_tracker.resources)
self.assertTrue("green" in dependency_tracker.resources)
self.assertItemsEqual(dependency_tracker.resources["green"],
["fake-green-uuid-2", "fake-green-uuid-3"])
self.assertEqual(dependency_tracker.resources["blue"],
["fake-blue-uuid"])
RedSM.delete("fake-red-uuid")
GreenSM.delete("fake-green-uuid-2")
GreenSM.delete("fake-green-uuid-3")
BlueSM.delete("fake-blue-uuid")
WhiteSM.delete("fake-white-uuid")
PurpleSM.delete("fake-purple-uuid")
# end test_basic_dep_track_update_2
def test_basic_dep_track_update_3(self):
reaction_map = {
"red": {
'self': ['green', 'blue'],
},
"blue": {
'self': ['green'],
'red': [],
'purple': ['green'],
},
"green": {
'self': ['white'],
'blue': ['white'],
'red': [],
},
"white": {
'self': ['purple'],
'green': ['purple'],
},
"purple": {
'self': ['blue'],
'white': ['blue'],
},
}
GreenSM._object_db.object_read = self.green_read_with_refs
WhiteSM._object_db.object_read = self.white_read_with_refs
PurpleSM._object_db.object_read = self.purple_read_with_multi_refs
BlueSM._object_db.object_read = self.blue_read_with_multi_refs
dependency_tracker = DependencyTracker(DBBase._OBJ_TYPE_MAP,
reaction_map)
RedSM.locate("fake-red-uuid")
blue = BlueSM.locate("fake-blue-uuid")
GreenSM.locate("fake-green-uuid-0")
GreenSM.locate("fake-green-uuid-1")
WhiteSM.locate("fake-white-uuid")
PurpleSM.locate("fake-purple-uuid")
dependency_tracker.evaluate('blue', blue)
self.assertEqual(len(dependency_tracker.resources), 4)
self.assertTrue("blue" in dependency_tracker.resources)
self.assertTrue("green" in dependency_tracker.resources)
self.assertTrue("white" in dependency_tracker.resources)
self.assertTrue("purple" in dependency_tracker.resources)
self.assertItemsEqual(dependency_tracker.resources["green"],
["fake-green-uuid-1", "fake-green-uuid-0"])
self.assertEqual(dependency_tracker.resources["blue"],
["fake-blue-uuid"])
self.assertEqual(dependency_tracker.resources["white"],
["fake-white-uuid"])
self.assertEqual(dependency_tracker.resources["purple"],
["fake-purple-uuid"])
# update
GreenSM.delete("fake-green-uuid-0")
GreenSM.delete("fake-green-uuid-1")
GreenSM.locate("fake-green-uuid-2")
GreenSM.locate("fake-green-uuid-3")
BlueSM._object_db.object_read = self.blue_read_with_new_refs
blue.update()
dependency_tracker.resources = {}
dependency_tracker.evaluate('blue', blue)
self.assertEqual(len(dependency_tracker.resources), 2)
self.assertTrue("blue" in dependency_tracker.resources)
self.assertTrue("green" in dependency_tracker.resources)
self.assertItemsEqual(dependency_tracker.resources["green"],
["fake-green-uuid-3", "fake-green-uuid-2"])
self.assertEqual(dependency_tracker.resources["blue"],
["fake-blue-uuid"])
RedSM.delete("fake-red-uuid")
GreenSM.delete("fake-green-uuid-2")
GreenSM.delete("fake-green-uuid-3")
BlueSM.delete("fake-blue-uuid")
WhiteSM.delete("fake-white-uuid")
PurpleSM.delete("fake-purple-uuid")
# end test_basic_dep_track_update_3
# end DepTrackTester(unittest.TestCase):
```
#### File: tests/unit/test_utils.py
```python
from __future__ import unicode_literals
import unittest
from cfgm_common.utils import CacheContainer
from cfgm_common.utils import decode_string
from cfgm_common.utils import encode_string
class TestCacheContainer(unittest.TestCase):
def test_cache_container_trimming(self):
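# CacheContainer(5) keeps at most five entries; inserting more than five keys
# should evict the older ones so that only the last five inserted remain.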
c = CacheContainer(5)
lst = ['a', 'b', 'c', 'd', 'e', 'f', 'h', 'i', 'j', 'k', 'm']
for index, value in enumerate(lst):
c[value] = index + 1
self.assertEqual(len(list(c.dictionary.keys())), 5)
self.assertEqual(set(lst[-5:]), set(c.dictionary.keys()))
def test_cache_container_fetch(self):
c = CacheContainer(5)
lst = ['a', 'b', 'c', 'd', 'e']
for index, value in enumerate(lst):
c[value] = index + 1
self.assertEqual(set(lst), set(c.dictionary.keys()))
# get the oldest value and check that it's not lost on the next set
c['a']
c['f'] = 6
self.assertEqual(set(['c', 'd', 'e', 'f', 'a']),
set(c.dictionary.keys()))
# put a value for the oldest key and check it's not lost
c['c'] = 'x'
self.assertEqual(c['c'], 'x')
self.assertEqual(set(['d', 'e', 'f', 'a', 'c']),
set(c.dictionary.keys()))
c['g'] = 7
self.assertEqual(set(['e', 'f', 'a', 'c', 'g']),
set(c.dictionary.keys()))
class TestFqNameEncode(unittest.TestCase):
def test_fq_name_encoding(self):
test_suite = [
('only-ascii', 'only-ascii'),
('only ascii with space', 'only ascii with space'),
('only/ascii/with/forward/slash', 'only/ascii/with/forward/slash'),
('only!ascii!with!exclamatory', 'only!ascii!with!exclamatory'),
('only~ascii~tilde', 'only~ascii~tilde'),
('foo=bar', 'foo=bar'),
# (, ),
# ('non-ascii-é', 'non-ascii-%C3%A9'),
# ('non ascii with space é', 'non+ascii+with+space+%C3%A9'),
# ('non-ascii-encoded-\xe9', 'non-ascii-encoded-%C3%A9'),
# (b'binary', TypeError),
]
for string, expected_result in test_suite:
if (isinstance(expected_result, type) and
issubclass(expected_result, Exception)):
self.assertRaises(expected_result, encode_string, string)
else:
self.assertEqual(expected_result, encode_string(string))
self.assertEqual(decode_string(expected_result), string)
```
#### File: common/cfgm_common/vnc_greenlets.py
```python
from __future__ import unicode_literals
#
# Copyright (c) 2016 Juniper Networks, Inc. All rights reserved.
#
import os
import socket
from pysandesh.sandesh_base import *
from pysandesh.sandesh_logger import *
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
from cfgm_common.uve.greenlets import ttypes as sandesh
import gc
import gevent
import traceback
from greenlet import greenlet
from gevent import Greenlet
class VncGreenlet(Greenlet):
def __init__(self, module_name, handler):
Greenlet.__init__(self, self.super_greenlet_handler,
handler, module_name)
self.start()
# end __init__
@staticmethod
def register_sandesh_handler():
sandesh.GreenletObjectReq.handle_request = \
VncGreenlet.sandesh_greenlet_object_handle_request
def super_greenlet_handler(self, greenlet_handler, module_name):
# Create a flag/variable local to the greenlet so that
# it can be identified as a "well known" greenlet.
gevent.getcurrent().greenlet_name = module_name
if callable(greenlet_handler):
greenlet_handler()
@staticmethod
def build_greenlet_sandesh(name, traces):
sandesh_greenlet = sandesh.GreenletObject()
sandesh_greenlet.greenlet_name = name
sandesh_greenlet.greenlet_traces = traces
return sandesh_greenlet
@classmethod
def sandesh_greenlet_object_handle_request(cls, req):
greenlet_resp = sandesh.GreenletObjectListResp(greenlets=[])
anonymous_cnt = 0
for obj in gc.get_objects():
if obj is None:
continue
if not isinstance(obj, greenlet):
continue
# If the GC object is an instance of greenlet,
# but does not have the flag/variable 'greenlet_name'
# then try to find the run function.
if not hasattr(obj, 'greenlet_name'):
try:
name = obj._run.__name__
except AttributeError:
name = 'Anonymous'
else:
name = obj.greenlet_name
if (req.greenlet_name is None or
req.greenlet_name == name):
sandesh_greenlet = VncGreenlet.build_greenlet_sandesh(
name,
''.join(traceback.format_stack(obj.gr_frame)))
greenlet_resp.greenlets.append(sandesh_greenlet)
greenlet_resp.response(req.context())
# end sandesh_greenlet_object_handle_request
# end class VncGreenlet
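# Illustrative usage (handler name is hypothetical): spawn a named greenlet so
# the sandesh introspection handler above can report it with a traceback.
#   VncGreenlet.register_sandesh_handler()
#   VncGreenlet('MyModule', my_handler)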
```
#### File: device-manager/device_manager/device_job_manager.py
```python
import ast
from builtins import map
from builtins import object
from builtins import str
import json
import os
import signal
import socket
import subprocess
import time
import traceback
from cfgm_common import vnc_greenlets
from cfgm_common.exceptions import NoIdError
from cfgm_common.exceptions import ResourceExistsError
from cfgm_common.uve.vnc_api.ttypes import FabricJobExecution, FabricJobUve, \
PhysicalRouterJobExecution, PhysicalRouterJobUve
from cfgm_common.zkclient import ZookeeperClient
import gevent
from job_manager.job_exception import JobException
from job_manager.job_log_utils import JobLogUtils
from job_manager.job_utils import JobStatus, JobVncApi
class DeviceJobManager(object):
JOB_REQUEST_EXCHANGE = "job_request_exchange"
JOB_REQUEST_CONSUMER = "job_request_consumer"
JOB_REQUEST_ROUTING_KEY = "job.request"
JOB_ABORT_CONSUMER = "job_abort_consumer"
JOB_ABORT_ROUTING_KEY = "job.abort"
JOB_STATUS_EXCHANGE = "job_status_exchange"
JOB_STATUS_CONSUMER = "job_status_consumer."
JOB_STATUS_ROUTING_KEY = "job.status."
JOB_STATUS_TTL = 5 * 60
FABRIC_ZK_LOCK = "fabric-job-monitor"
_instance = None
def __init__(self, amqp_client, db_conn, args, dm_logger):
"""Initialize ZooKeeper, RabbitMQ, Sandesh, DB conn etc."""
DeviceJobManager._instance = self
self._amqp_client = amqp_client
# create zk client for devicejobmanager with call_back
self.client_reconnect_gl = None
self._zookeeper_client = ZookeeperClient("device-job-manager",
args.zk_server_ip,
args.host_ip)
self._zookeeper_client.set_lost_cb(self.client_reconnect)
self._db_conn = db_conn
self._args = args
self._job_mgr_statistics = {
'max_job_count': self._args.max_job_count,
'running_job_count': 0
}
# dict of exec_id:job_status (key/value pairs)
self.job_status = {}
# map of running job instances. Key is the pid and value is job
# instance info
self._job_mgr_running_instances = {}
job_args = {
'collectors': self._args.collectors,
'fabric_ansible_conf_file': self._args.fabric_ansible_conf_file,
'host_ip': self._args.host_ip,
'zk_server_ip': self._args.zk_server_ip,
'cluster_id': self._args.cluster_id
}
self._job_args = json.dumps(job_args)
# initialize the job logger
self._job_log_utils = JobLogUtils(
sandesh_instance_id="DeviceJobManager" + str(time.time()),
config_args=self._job_args,
sandesh_instance=dm_logger._sandesh)
self._logger = self._job_log_utils.config_logger
self._sandesh = self._logger._sandesh
self._amqp_client.add_exchange(self.JOB_STATUS_EXCHANGE, type='direct')
# add dummy consumer to initialize the exchange
self._amqp_client.add_consumer(
self.JOB_STATUS_CONSUMER + "dummy",
self.JOB_STATUS_EXCHANGE,
routing_key=self.JOB_STATUS_ROUTING_KEY + "dummy",
auto_delete=True)
self._amqp_client.add_exchange(self.JOB_REQUEST_EXCHANGE,
type='direct')
self._amqp_client.add_consumer(
self.JOB_REQUEST_CONSUMER,
self.JOB_REQUEST_EXCHANGE,
routing_key=self.JOB_REQUEST_ROUTING_KEY,
callback=self.handle_execute_job_request)
abort_q_name = '.'.join([self.JOB_ABORT_CONSUMER,
socket.getfqdn(self._args.host_ip)])
self._amqp_client.add_consumer(
abort_q_name,
self.JOB_REQUEST_EXCHANGE,
routing_key=self.JOB_ABORT_ROUTING_KEY,
callback=self.handle_abort_job_request)
# end __init__
@classmethod
def get_instance(cls):
return cls._instance
# end get_instance
@classmethod
def destroy_instance(cls):
inst = cls.get_instance()
if not inst:
return
cls._instance = None
# end destroy_instance
def client_reconnect(self):
if self.client_reconnect_gl is None:
self.client_reconnect_gl =\
vnc_greenlets.VncGreenlet("djm reconnect",
self.zk_reconnect)
# end client_reconnect
def zk_reconnect(self):
self._zookeeper_client.connect()
self.client_reconnect_gl = None
def db_read(self, obj_type, obj_id, obj_fields=None,
ret_readonly=False):
try:
(ok, cassandra_result) = self._db_conn.object_read(
obj_type, [obj_id], obj_fields, ret_readonly=ret_readonly)
except NoIdError as e:
# if NoIdError is for obj itself (as opposed to say for parent
# or ref), let caller decide if this can be handled gracefully
# by re-raising
if e._unknown_id == obj_id:
raise
return (False, str(e))
return (ok, cassandra_result[0])
# end db_read
def is_max_job_threshold_reached(self):
if self._job_mgr_statistics.get('running_job_count') < \
self._job_mgr_statistics.get('max_job_count'):
return False
return True
# end is_max_job_threshold_reached
def publish_job_status_notification(self, job_execution_id, status):
try:
msg = {'job_execution_id': job_execution_id,
'job_status': status}
self._amqp_client.publish(
msg, self.JOB_STATUS_EXCHANGE,
routing_key=self.JOB_STATUS_ROUTING_KEY + job_execution_id,
serializer='json', retry=True,
retry_policy={'max_retries': 5,
'interval_start': 2,
'interval_step': 3,
'interval_max': 15},
expiration=self.JOB_STATUS_TTL)
except Exception:
self._logger.error("Failed to send job status change notification"
" %s %s" % (job_execution_id, status))
# end publish_job_status_notification
def get_job_template_id(self, job_template_fq_name):
try:
return self._db_conn.fq_name_to_uuid("job_template",
job_template_fq_name)
except Exception as e:
msg = "Error while reading job_template_id: " + str(e)
self._logger.error(msg)
raise
# end get_job_template_id
def handle_execute_job_request(self, body, message):
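# High-level flow (as implemented below): ack or requeue the request based on
# the running-job threshold, enrich it with device/fabric data and the job
# concurrency level, emit STARTING UVEs/status notifications, then fork a
# job_mgr subprocess and remember its pid for the SIGCHLD handler.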
job_input_params = body
job_execution_id = job_input_params.get('job_execution_id')
# check if the max job processing threshold is reached
if not self.is_max_job_threshold_reached():
message.ack()
self._logger.info("SENT JOB REQUEST: {}".format(
job_execution_id))
else:
# requeue the message if the max threshold is reached, to be picked
# by another job manager or wait until the current job mgr is free
message.reject(requeue=True)
self._logger.info("REQUEUE JOB REQUEST: {}".format(
job_execution_id))
gevent.sleep(1)
return
acfg = job_input_params.get('input', {}).get('device_abstract_config')
if acfg:
job_input_params['input']['device_abstract_config'] = \
json.loads(acfg)
update_uve_on_failure = False
device_list = None
extra_params = job_input_params.get('params')
if extra_params is not None:
device_list = extra_params.get('device_list')
is_delete = job_input_params.get('input').get('is_delete')
job_template_fq_name = job_input_params.get('job_template_fq_name')
job_template_id = job_input_params.get('job_template_id')
fabric_fq_name = None
fabric_job_uve_name = ''
job_input_params['vnc_api_init_params'] = {
"admin_user": self._args.admin_user,
"admin_password": self._args.admin_password,
"admin_tenant_name": self._args.admin_tenant_name,
"api_server_port": self._args.api_server_port,
"api_server_use_ssl": self._args.api_server_use_ssl
}
try:
# populate job template id if not present in input_param
if job_template_id is None:
job_template_id = self.get_job_template_id(
job_template_fq_name)
job_input_params["job_template_id"] = job_template_id
# read the device object and pass the necessary data to the job
if device_list:
self.read_device_data(device_list, job_input_params,
job_execution_id, is_delete)
else:
self.read_fabric_data(job_input_params, job_execution_id,
is_delete)
# read the job concurrency level from job template
job_concurrency = self.get_job_concurrency(job_template_id,
job_execution_id)
job_input_params['job_concurrency'] = job_concurrency
fabric_fq_name = job_input_params.get('fabric_fq_name')
fabric_job_uve_name_list = job_template_fq_name
fabric_job_uve_name_list.insert(0, fabric_fq_name)
fabric_job_uve_name = ':'.join(map(str, fabric_job_uve_name_list))
device_fqnames = []
# create the UVE
if fabric_fq_name != "__DEFAULT__" and not device_list:
self.create_fabric_job_uve(fabric_job_uve_name,
job_input_params.get(
'job_execution_id'),
JobStatus.STARTING.value, 0.0)
if device_list:
device_fqnames = self.create_physical_router_job_uve(
device_list, job_input_params, fabric_job_uve_name,
JobStatus.STARTING.value, 0.0)
# after creating the UVE, flag indicates to update the
# UVE upon any failures
update_uve_on_failure = True
# check if there is any other job running for the fabric
if job_concurrency is not None and job_concurrency == "fabric":
existing_job = self._is_existing_job_for_fabric(
fabric_fq_name, job_execution_id)
if existing_job:
msg = "Another job for the same fabric is in" \
" progress. Please wait for the job to finish"
self.mark_failure(
msg, job_template_fq_name,
job_execution_id,
fabric_fq_name, mark_uve=True,
device_list=device_list,
fabric_job_uve_name=fabric_job_uve_name,
job_params=job_input_params)
return
start_time = time.time()
signal_var = {
'fabric_name': fabric_job_uve_name,
'fabric_fq_name': fabric_fq_name,
'start_time': start_time,
'exec_id': job_execution_id,
'device_fqnames': device_fqnames,
'job_concurrency': job_concurrency
}
self.job_status.update(
{job_execution_id: JobStatus.STARTING.value})
self.publish_job_status_notification(job_execution_id,
JobStatus.STARTING.value)
# handle process exit signal
signal.signal(signal.SIGCHLD, self.job_mgr_signal_handler)
# write the abstract config to file if needed
self.save_abstract_config(job_input_params)
# add params needed for sandesh connection
job_input_params['args'] = self._job_args
# create job manager subprocess
job_mgr_path = os.path.dirname(
__file__) + "/../job_manager/job_mgr.py"
job_process = subprocess.Popen(["python", job_mgr_path, "-i",
json.dumps(job_input_params)],
cwd="/", close_fds=True)
self._job_mgr_running_instances[str(job_process.pid)] = signal_var
self._job_mgr_statistics['running_job_count'] = len(
self._job_mgr_running_instances)
self._logger.notice("Created job manager process. Execution id: "
"%s" % job_execution_id)
self._logger.info(
"Current number of job_mgr processes running %s"
% self._job_mgr_statistics.get('running_job_count'))
except Exception as e:
msg = "Exception while processing the job request %s %s %s : " \
"%s %s" % (job_template_fq_name, job_execution_id,
fabric_fq_name, repr(e), traceback.format_exc())
self.mark_failure(msg, job_template_fq_name, job_execution_id,
fabric_fq_name, mark_uve=update_uve_on_failure,
device_list=device_list,
fabric_job_uve_name=fabric_job_uve_name,
job_params=job_input_params)
# end handle_execute_job_request
def _abort_job(self, pid, job_instance, abort_mode):
self._logger.info("ABORT: pid={}, job_instance={}, mode={}".
format(pid, job_instance, abort_mode))
# Force abort or graceful abort
os.kill(int(pid), signal.SIGABRT if abort_mode == "force"
else signal.SIGUSR1)
def handle_abort_job_request(self, body, message):
message.ack()
inp = body.get('input')
job_execution_ids = inp.get('job_execution_ids')
abort_mode = inp.get('abort_mode')
self._logger.info("Abort job request: job_ids={}, mode={}".
format(job_execution_ids, abort_mode))
# Search through running job instances to find this job
for pid, job_instance in list(self._job_mgr_running_instances.items()):
# Abort one job
if job_execution_ids:
if job_instance.get('exec_id') in job_execution_ids:
self._abort_job(pid, job_instance, abort_mode)
# Abort next job
else:
self._abort_job(pid, job_instance, abort_mode)
# end handle_abort_job_request
def create_fabric_job_uve(self, fabric_job_uve_name,
execution_id, job_status,
percentage_completed):
job_execution_data = FabricJobExecution(
name=fabric_job_uve_name,
execution_id=execution_id,
job_start_ts=int(round(time.time() * 1000)),
job_status=job_status,
percentage_completed=percentage_completed
)
job_execution_uve = FabricJobUve(data=job_execution_data,
sandesh=self._sandesh)
job_execution_uve.send(sandesh=self._sandesh)
# end create_fabric_job_uve
def create_physical_router_job_uve(self, device_list, job_input_params,
fabric_job_uve_name, job_status,
percentage_completed):
device_fqnames = []
for device_id in device_list:
device_fqname = job_input_params.get(
'device_json').get(device_id).get('device_fqname')
device_fqname = ':'.join(map(str, device_fqname))
prouter_uve_name = device_fqname + ":" + \
fabric_job_uve_name
prouter_job_data = PhysicalRouterJobExecution(
name=prouter_uve_name,
execution_id=job_input_params.get('job_execution_id'),
job_start_ts=int(round(time.time() * 1000)),
job_status=job_status,
percentage_completed=percentage_completed
)
prouter_job_uve = PhysicalRouterJobUve(
data=prouter_job_data, sandesh=self._sandesh)
prouter_job_uve.send(sandesh=self._sandesh)
device_fqnames.append(prouter_uve_name)
return device_fqnames
# end create_physical_router_job_uve
def mark_failure(self, msg, job_template_fq_name, job_execution_id,
fabric_fq_name, mark_uve=True, device_list=None,
fabric_job_uve_name=None, job_params=None):
self._logger.error("Marked job as failed %s %s %s " %
(job_template_fq_name, job_execution_id, msg))
# send job object log for failure
self._job_log_utils.send_job_log(job_template_fq_name,
job_execution_id, fabric_fq_name, msg,
JobStatus.FAILURE.value)
# update the in memory job status for the job
self.job_status[job_execution_id] = JobStatus.FAILURE.value
self.publish_job_status_notification(job_execution_id,
JobStatus.FAILURE.value)
# update the UVE
if mark_uve:
if fabric_fq_name != "__DEFAULT__" and not device_list:
self.create_fabric_job_uve(fabric_job_uve_name,
job_execution_id,
JobStatus.FAILURE.value, 100.0)
if device_list:
self.create_physical_router_job_uve(device_list,
job_params,
fabric_job_uve_name,
JobStatus.FAILURE.value,
100.0)
# end mark_failure
def _load_job_log(self, marker, input_str):
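# Take whatever follows the first occurrence of `marker` on the line and parse
# it, preferring JSON and falling back to ast.literal_eval (the fallback
# presumably covers payloads written with Python repr-style quoting).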
json_str = input_str.split(marker)[1]
try:
return json.loads(json_str)
except ValueError:
return ast.literal_eval(json_str)
# end _load_job_log
def _extracted_file_output(self, execution_id):
status = "FAILURE"
prouter_info = {}
device_op_results = {}
failed_devices_list = []
try:
with open("/tmp/" + execution_id, "r") as f_read:
for line in f_read:
if 'PROUTER_LOG##' in line:
job_log = self._load_job_log('PROUTER_LOG##', line)
fqname = ":".join(job_log.get('prouter_fqname'))
prouter_info[fqname] = job_log.get('onboarding_state')
if line.startswith('job_summary'):
job_log = self._load_job_log('JOB_LOG##', line)
status = job_log.get('job_status')
failed_devices_list = job_log.get(
'failed_devices_list')
if 'GENERIC_DEVICE##' in line:
job_log = self._load_job_log(
'GENERIC_DEVICE##', line)
device_name = job_log.get('device_name')
device_op_results[device_name] = job_log.get(
'command_output')
except Exception as e:
msg = "File corresponding to execution id %s not found: %s\n%s" % (
execution_id, str(e), traceback.format_exc())
self._logger.error(msg)
return status, prouter_info, device_op_results, failed_devices_list
# end _extracted_file_output
def job_mgr_signal_handler(self, signalnum, frame):
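# SIGCHLD handler: reap the finished job_mgr subprocess, parse its
# /tmp/<execution_id> output file, publish the final job status, refresh the
# fabric/physical-router UVEs and clean up locks and bookkeeping.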
pid = None
signal_var = None
try:
# get the child process id that called the signal handler
pid = os.waitpid(-1, os.WNOHANG)
signal_var = self._job_mgr_running_instances.get(str(pid[0]))
if not signal_var:
self._logger.error(
"Job mgr process %s not found in the instance "
"map" % str(pid))
return
msg = "Entered job_mgr_signal_handler for: %s" % signal_var
self._logger.notice(msg)
exec_id = signal_var.get('exec_id')
status, prouter_info, device_op_results, failed_devices_list = \
self._extracted_file_output(exec_id)
self.job_status[exec_id] = status
self.publish_job_status_notification(exec_id, status)
if signal_var.get('fabric_name') != "__DEFAULT__"\
and not signal_var.get('device_fqnames'):
job_execution_data = FabricJobExecution(
name=signal_var.get('fabric_name'),
job_status=status,
percentage_completed=100)
job_execution_uve = FabricJobUve(data=job_execution_data,
sandesh=self._sandesh)
job_execution_uve.send(sandesh=self._sandesh)
else:
for prouter_uve_name in signal_var.get('device_fqnames'):
prouter_status = status
device_name = prouter_uve_name.split(":")[1]
if device_name in failed_devices_list:
prouter_status = "FAILURE"
prouter_job_data = PhysicalRouterJobExecution(
name=prouter_uve_name,
job_status=prouter_status,
percentage_completed=100,
device_op_results=json.dumps(
device_op_results.get(device_name, {}))
)
prouter_job_uve = PhysicalRouterJobUve(
data=prouter_job_data, sandesh=self._sandesh)
prouter_job_uve.send(sandesh=self._sandesh)
for k, v in list(prouter_info.items()):
prouter_uve_name = "%s:%s" % (k, signal_var.get('fabric_name'))
prouter_job_data = PhysicalRouterJobExecution(
name=prouter_uve_name,
execution_id=exec_id,
job_start_ts=int(round(signal_var.get('start_time'
) * 1000)),
prouter_state=v
)
prouter_job_uve = PhysicalRouterJobUve(
data=prouter_job_data, sandesh=self._sandesh)
prouter_job_uve.send(sandesh=self._sandesh)
self._clean_up_job_data(signal_var, str(pid[0]))
self._logger.info("Job : %s finished. Current number of job_mgr "
"processes running now %s " %
(signal_var, self._job_mgr_statistics[
'running_job_count']))
except OSError as process_error:
self._logger.error("Could not retrieve the child process id. "
"OS call returned with error %s" %
str(process_error))
except Exception as unknown_exception:
self._clean_up_job_data(signal_var, str(pid[0]))
self._logger.error("Failed in job signal handler %s" %
str(unknown_exception))
# end job_mgr_signal_handler
def _clean_up_job_data(self, signal_var, pid):
# remove the pid entry of the processed job_mgr process
del self._job_mgr_running_instances[pid]
# clean up fabric level lock
if signal_var.get('job_concurrency') \
is not None and signal_var.get('job_concurrency') == "fabric":
self._release_fabric_job_lock(signal_var.get('fabric_fq_name'))
self._cleanup_job_lock(signal_var.get('fabric_fq_name'))
self._job_mgr_statistics['running_job_count'] = len(
self._job_mgr_running_instances)
# end _clean_up_job_data
def _is_existing_job_for_fabric(self, fabric_fq_name, job_execution_id):
is_fabric_job_running = False
# build the zk lock path
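# e.g. '/job-manager/<fabric_fq_name>/fabric-job-monitor' (shape derived from
# the concatenation below)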
fabric_node_path = '/job-manager/' + fabric_fq_name + '/' + \
self.FABRIC_ZK_LOCK
# check if the lock is already taken if not taken, acquire the lock
# by creating a node
try:
self._zookeeper_client.create_node(fabric_node_path,
value=job_execution_id,
ephemeral=True)
self._logger.info("Acquired fabric lock"
" for %s " % fabric_node_path)
except ResourceExistsError:
# means the lock was acquired by some other job
value = self._zookeeper_client.read_node(fabric_node_path)
self._logger.error("Fabric lock is already acquired by"
" job %s " % value)
is_fabric_job_running = True
return is_fabric_job_running
# end _is_existing_job_for_fabric
def _release_fabric_job_lock(self, fabric_fq_name):
# build the zk lock path
fabric_node_path = '/job-manager/' + fabric_fq_name + '/' + \
self.FABRIC_ZK_LOCK
try:
self._zookeeper_client.delete_node(fabric_node_path)
self._logger.info("Released fabric lock"
" for %s " % fabric_node_path)
except Exception as zk_error:
self._logger.error("Exception while releasing the zookeeper lock"
" %s " % repr(zk_error))
# end _release_fabric_job_lock
def _cleanup_job_lock(self, fabric_fq_name):
fabric_node_path = '/job-manager/' + fabric_fq_name
try:
if not self._zookeeper_client.get_children(fabric_node_path):
self._zookeeper_client.delete_node(fabric_node_path)
self._logger.info("Released fabric node"
" for %s " % fabric_node_path)
except Exception as zk_error:
self._logger.error("Exception while releasing the fabric node for "
"%s: %s " % (fabric_node_path, str(zk_error)))
# end _cleanup_job_lock
def save_abstract_config(self, job_params):
# Saving device abstract config to a local file as it could be large
# config. There will be one local file per device and this file gets
# removed when device is removed from database.
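# Resulting path (derived from the code below):
#   /opt/contrail/fabric_ansible_playbooks/config/<management_ip>/abstract_cfg.json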
dev_abs_cfg = job_params.get('input', {}).get('device_abstract_config')
if dev_abs_cfg:
dev_mgt_ip = dev_abs_cfg.get('system', {}).get('management_ip')
if not dev_mgt_ip:
raise ValueError('Missing management IP in abstract config')
dev_cfg_dir = '/opt/contrail/fabric_ansible_playbooks/config/' +\
dev_mgt_ip
if not os.path.exists(dev_cfg_dir):
os.makedirs(dev_cfg_dir)
if dev_cfg_dir:
with open(dev_cfg_dir + '/abstract_cfg.json', 'w') as f:
f.write(json.dumps(dev_abs_cfg, indent=4))
job_params.get('input').pop('device_abstract_config')
# end save_abstract_config
def get_job_concurrency(self, job_template_id, job_exec_id):
(ok, result) = self.db_read("job-template", job_template_id,
['job_template_concurrency_level'])
if not ok:
msg = "Error while reading the job concurrency " \
"from the job template with id %s : %s" %\
(job_template_id, result)
raise JobException(msg, job_exec_id)
return result.get('job_template_concurrency_level')
# end get_job_concurrency
def read_device_data(self, device_list, request_params,
job_exec_id, is_delete=False):
device_data = dict()
for device_id in device_list:
if not is_delete:
try:
(ok, result) = self.db_read(
"physical-router", device_id,
['physical_router_user_credentials',
'physical_router_management_ip', 'fq_name',
'physical_router_device_family',
'physical_router_vendor_name',
'physical_router_product_name',
'fabric_refs'])
if not ok:
msg = "Error while reading the physical router " \
"with id %s : %s" % (device_id, result)
raise JobException(msg, job_exec_id)
except NoIdError as ex:
msg = "Device not found" \
"%s: %s" % (device_id, str(ex))
raise JobException(msg, job_exec_id)
except Exception as e:
msg = "Exception while reading device %s %s " % \
(device_id, str(e))
raise JobException(msg, job_exec_id)
device_fq_name = result.get('fq_name')
device_mgmt_ip = result.get(
'physical_router_management_ip')
user_cred = result.get('physical_router_user_credentials')
device_family = result.get("physical_router_device_family")
device_vendor_name = result.get("physical_router_vendor_name")
device_product_name = result.get(
"physical_router_product_name")
fabric_refs = result.get('fabric_refs')
if fabric_refs:
fabric_fq_name = result.get('fabric_refs')[0].get('to')
fabric_fq_name_str = ':'.join(fabric_fq_name)
request_params['fabric_fq_name'] = fabric_fq_name_str
else:
device_mgmt_ip = request_params.get('input', {}).get(
'device_management_ip')
device_abs_cfg = request_params.get('input', {}).get(
'device_abstract_config')
system = device_abs_cfg.get('system', {})
device_name = system.get('name')
device_username = system.get('credentials', {}).get(
'user_name')
device_password = system.get('credentials', {}).get('password')
user_cred = {
"username": device_username,
"password": <PASSWORD>
}
device_family = system.get('device_family')
device_vendor_name = system.get('vendor_name')
device_product_name = system.get('product_name')
device_fq_name = [
"default-global-system-config",
device_name
]
self.read_fabric_data(request_params, job_exec_id, is_delete)
device_json = {"device_management_ip": device_mgmt_ip}
device_json.update({"device_fqname": device_fq_name})
if user_cred:
device_json.update(
{"device_username": user_cred.get('username')})
decrypt_password = JobVncApi.decrypt_password(
encrypted_password=user_cred.get('password'),
pwd_key=device_id)
device_json.update({"device_password":
decrypt_password})
if device_family:
device_json.update({"device_family": device_family})
if device_vendor_name:
device_json.update({"device_vendor": device_vendor_name})
if device_product_name:
device_json.update({"device_product": device_product_name})
device_data.update({device_id: device_json})
if len(device_data) > 0:
request_params.update({"device_json": device_data})
# end read_device_data
def read_fabric_data(self, request_params, job_execution_id,
is_delete=False):
if request_params.get('input') is None:
err_msg = "Missing job input"
raise JobException(err_msg, job_execution_id)
fabric_fq_name = None
if request_params.get('input').get('fabric_fq_name'):
fabric_fq_name = request_params.get('input').get('fabric_fq_name')
elif request_params.get('input').get('fabric_uuid'):
# get the fabric fq_name from the db if fabric_uuid is provided
fabric_uuid = request_params.get('input').get('fabric_uuid')
try:
fabric_fq_name = self._db_conn.uuid_to_fq_name(fabric_uuid)
except NoIdError as e:
raise JobException(str(e), job_execution_id)
else:
if "device_deletion_template" in request_params.get(
'job_template_fq_name'):
fabric_fq_name = ["__DEFAULT__"]
elif not is_delete:
err_msg = "Missing fabric details in the job input"
raise JobException(err_msg, job_execution_id)
if fabric_fq_name:
fabric_fq_name_str = ':'.join(map(str, fabric_fq_name))
request_params['fabric_fq_name'] = fabric_fq_name_str
# end read_fabric_data
```
#### File: device-manager/test/test_dm_ip_clos.py
```python
import gevent
import json
from attrdict import AttrDict
from device_manager.device_manager import DeviceManager
from cfgm_common.tests.test_common import retries
from cfgm_common.tests.test_common import retry_exc_handler
from vnc_api.vnc_api import *
from .test_dm_ansible_common import TestAnsibleCommonDM
from .test_dm_utils import FakeJobHandler
class TestAnsibleUnderlayIpClosDM(TestAnsibleCommonDM):
def test_dm_ansible_underlay_ip_clos(self):
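# Builds a minimal two-device IP-Clos underlay (one spine, one leaf) connected
# back-to-back on xe-0/0/0, assigns 192.168.2.1/.2 as peering addresses via
# instance IPs on an infra virtual network, then verifies the generated
# 'underlay-ip-clos' BGP abstract config on both routers.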
self.create_feature_objects_and_params()
jt1 = self.create_job_template('job-template-1')
jt2 = self.create_job_template('job-template-2')
name = "test_ipclos"
fabric_uuid = self.create_fabric(name)
fabric = self._vnc_lib.fabric_read(id=fabric_uuid)
asn_ranges = [{'asn_min': 64501, 'asn_max': 64601}]
ns_fq_name = fabric.fq_name + ["test_ipclos_ns_eBGP-ASN-pool"]
fabric_namespace = FabricNamespace(
name='test_ipclos_ns_eBGP-ASN-pool',
fq_name=ns_fq_name,
parent_type='fabric',
fabric_namespace_type='ASN_RANGE',
fabric_namespace_value=NamespaceValue(asn_ranges=asn_ranges)
)
tag_obj = Tag(
name="label=fabric-ebgp-as-number",
tag_type_name="label",
tag_value="fabric-ebgp-as-number",
tag_predefined=True)
self._vnc_lib.tag_create(tag_obj)
fabric_namespace.add_tag(tag_obj)
self._vnc_lib.fabric_namespace_create(fabric_namespace)
np1, rc1 = self.create_node_profile(
'node-profile-1',
device_family='junos-qfx',
role_mappings=[
AttrDict(
{'physical_role': 'spine',
'rb_roles': ['crb-gateway']}
)], job_template=jt1)
np2, rc2 = self.create_node_profile(
'node-profile-2', device_family='junos-qfx',
role_mappings=[
AttrDict(
{'physical_role': 'leaf',
'rb_roles': ['crb-access']}
)], job_template=jt2)
pr1_mgmtip = '10.10.0.1'
pr2_mgmtip = '10.10.0.2'
bgp_router1, pr1 = self.create_router(
'router1', pr1_mgmtip, role='spine',
ignore_bgp=False, fabric=fabric,
node_profile=np1,
physical_role=self.physical_roles['spine'],
overlay_role=self.overlay_roles[
'crb-gateway'],
rb_roles=['crb-gateway'],
product='qfx10k', family='junos-qfx')
bgp_router2, pr2 = self.create_router(
'router2', pr2_mgmtip, role='leaf',
ignore_bgp=False, fabric=fabric,
node_profile=np2,
physical_role=self.physical_roles['leaf'],
overlay_role=self.overlay_roles[
'crb-access'],
rb_roles=['crb-access'],
product='qfx5110', family='junos-qfx')
pr1.set_physical_router_underlay_managed(True)
pr2.set_physical_router_underlay_managed(True)
pr1.set_physical_router_loopback_ip('1.1.1.1')
pr2.set_physical_router_loopback_ip('1.1.1.2')
self._vnc_lib.physical_router_update(pr1)
self._vnc_lib.physical_router_update(pr2)
pi1 = PhysicalInterface(name='xe-0/0/0', parent_obj=pr1)
pi2 = PhysicalInterface(name='xe-0/0/0', parent_obj=pr2)
self._vnc_lib.physical_interface_create(pi1)
self._vnc_lib.physical_interface_create(pi2)
pi1.set_physical_interface(pi2)
pi2.set_physical_interface(pi1)
self._vnc_lib.physical_interface_update(pi1)
self._vnc_lib.physical_interface_update(pi2)
subnet1 = SubnetType(ip_prefix='192.168.2.0', ip_prefix_len=24)
net_ipam1 = NetworkIpam()
li1 = LogicalInterface(name='0', parent_obj=pi1)
self._vnc_lib.logical_interface_create(li1)
li2 = LogicalInterface(name='0', parent_obj=pi2)
self._vnc_lib.logical_interface_create(li2)
vn_obj1 = VirtualNetwork(
name='vn1',
virtual_network_category='infra',
address_allocation_mode='flat-subnet-only')
vnsubnet_obj = VnSubnetsType(
ipam_subnets=[
IpamSubnetType(
subnet=subnet1)])
self._vnc_lib.virtual_network_create(vn_obj1)
vn_obj1.set_network_ipam(net_ipam1, vnsubnet_obj)
self._vnc_lib.virtual_network_update(vn_obj1)
ins_ip1 = InstanceIp(
name='ip1',
instance_ip_address='192.168.2.1',
instance_ip_family='v4')
ins_ip1.set_logical_interface(li1)
ins_ip1.set_virtual_network(vn_obj1)
self._vnc_lib.instance_ip_create(ins_ip1)
ins_ip2 = InstanceIp(
name='ip2',
instance_ip_address='192.168.2.2',
instance_ip_family='v4')
ins_ip2.set_logical_interface(li2)
ins_ip2.set_virtual_network(vn_obj1)
self._vnc_lib.instance_ip_create(ins_ip2)
pr1.set_physical_router_product_name('qfx10k-6s-4c')
self._vnc_lib.physical_router_update(pr1)
self.validate_abstract_configs(pr1, pr2)
self.delete_objects()
@retries(5, hook=retry_exc_handler)
def validate_abstract_configs(self, pr1, pr2):
abstract_config = FakeJobHandler.get_dev_job_input(pr1.name)
device_abstract_config1 = abstract_config.get('device_abstract_config')
pr2.set_physical_router_product_name('qfx5110-6s-4c')
self._vnc_lib.physical_router_update(pr2)
abstract_config = FakeJobHandler.get_dev_job_input(pr2.name)
device_abstract_config2 = abstract_config.get('device_abstract_config')
system1 = device_abstract_config1.get('system')
bgp_config1 = device_abstract_config1.get('features', {}).get(
'underlay-ip-clos', {}).get('bgp', [])
bgp_peer1 = bgp_config1[0].get('peers', [])
system2 = device_abstract_config2.get('system')
bgp_config2 = device_abstract_config2.get('features', {}).get(
'underlay-ip-clos', {}).get('bgp', [])
bgp_peer2 = bgp_config2[0].get('peers', [])
self.assertEqual(bgp_config1[0].get('ip_address'), '192.168.2.1')
self.assertEqual(bgp_peer1[0].get('ip_address'), '192.168.2.2')
self.assertEqual(bgp_config2[0].get('ip_address'), '192.168.2.2')
self.assertEqual(bgp_peer2[0].get('ip_address'), '192.168.2.1')
def create_feature_objects_and_params(self):
self.create_features(['underlay-ip-clos'])
self.create_physical_roles(['leaf', 'spine'])
self.create_overlay_roles(['crb-gateway', 'crb-access'])
self.create_role_definitions([
AttrDict({
'name': 'crb-gateway@spine',
'physical_role': 'spine',
'overlay_role': 'crb-gateway',
'features': ['underlay-ip-clos'],
'feature_configs': {}
}),
AttrDict({
'name': 'crb-access@leaf',
'physical_role': 'leaf',
'overlay_role': 'crb-access',
'features': ['underlay-ip-clos'],
'feature_configs': {}
})
])
def create_fabric(self, name):
fab = Fabric(
name=name,
manage_underlay=True,
fabric_enterprise_style=False,
fabric_ztp=True,
fabric_credentials={
'device_credential': [{
'credential': {
'username': 'root', 'password': '<PASSWORD>'
},
'vendor': 'Juniper',
'device_family': None
}]
}
)
fab_uuid = self._vnc_lib.fabric_create(fab)
return fab_uuid
@retries(5, hook=retry_exc_handler)
def delete_objects(self):
# Seeing intermittent issues with the JSON decoder (the response is
# sometimes not received), so retry logic is applied to the deletion
# of objects as well.
vpg_list = self._vnc_lib.virtual_port_groups_list().get(
'virtual-port-groups')
for vpg in vpg_list:
vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg['uuid'])
vpg_obj.set_virtual_machine_interface_list([])
vpg_obj.set_physical_interface_list([])
self._vnc_lib.virtual_port_group_update(vpg_obj)
vmi_list = self._vnc_lib.virtual_machine_interfaces_list().get(
'virtual-machine-interfaces', [])
for vmi in vmi_list:
self._vnc_lib.virtual_machine_interface_delete(id=vmi['uuid'])
instance_ip_list = self._vnc_lib.instance_ips_list().get(
'instance-ips', [])
for ip in instance_ip_list:
self._vnc_lib.instance_ip_delete(id=ip['uuid'])
logical_interfaces_list = self._vnc_lib.logical_interfaces_list().get(
'logical-interfaces', [])
for logical_interface in logical_interfaces_list:
self._vnc_lib.logical_interface_delete(id=logical_interface['uuid'])
pi_list = self._vnc_lib.physical_interfaces_list().get(
'physical-interfaces', [])
for pi in pi_list:
self._vnc_lib.physical_interface_delete(id=pi['uuid'])
for vpg in vpg_list:
self._vnc_lib.virtual_port_group_delete(id=vpg['uuid'])
pr_list = self._vnc_lib.physical_routers_list().get(
'physical-routers', [])
for pr in pr_list:
self._vnc_lib.physical_router_delete(id=pr['uuid'])
bgpr_list = self._vnc_lib.bgp_routers_list().get('bgp-routers', [])
for br in bgpr_list:
self._vnc_lib.bgp_router_delete(id=br['uuid'])
rc_list = self._vnc_lib.role_configs_list().get('role-configs', [])
for rc in rc_list:
self._vnc_lib.role_config_delete(id=rc['uuid'])
ports_list = self._vnc_lib.ports_list().get('ports', [])
for port in ports_list:
self._vnc_lib.port_delete(id=port['uuid'])
nodes_list = self._vnc_lib.nodes_list().get('nodes', [])
for node in nodes_list:
self._vnc_lib.node_delete(id=node['uuid'])
fabric_namespace_list = self._vnc_lib.fabric_namespaces_list().get(
'fabric-namespaces', [])
for fabric_namespace in fabric_namespace_list:
self._vnc_lib.fabric_namespace_delete(id=fabric_namespace['uuid'])
fab_list = self._vnc_lib.fabrics_list().get('fabrics', [])
for fab in fab_list:
self._vnc_lib.fabric_delete(id=fab['uuid'])
vn_list = self._vnc_lib.virtual_networks_list().get(
'virtual-networks', [])
for vn in vn_list:
self._vnc_lib.virtual_network_delete(id=vn['uuid'])
np_list = self._vnc_lib.node_profiles_list().get('node-profiles', [])
for np in np_list:
self._vnc_lib.node_profile_delete(id=np['uuid'])
jt_list = self._vnc_lib.job_templates_list().get('job-templates', [])
for jt in jt_list:
self._vnc_lib.job_template_delete(id=jt['uuid'])
pp_list = self._vnc_lib.port_profiles_list().get('port-profiles', [])
for pp in pp_list:
self._vnc_lib.port_profile_delete(id=pp['uuid'])
sc_list = self._vnc_lib.storm_control_profiles_list().get(
'storm-control-profiles', [])
for sc in sc_list:
self._vnc_lib.storm_control_profile_delete(id=sc['uuid'])
self.delete_role_definitions()
self.delete_overlay_roles()
self.delete_physical_roles()
self.delete_features()
self.wait_for_features_delete()
```
#### File: sanity/tools/debug.py
```python
from __future__ import print_function
from vnc_api.vnc_api import VncApi
import json
api = VncApi()
GSC = 'default-global-system-config'
def dump(res_type, fq_name):
obj = api._object_read(res_type=res_type, fq_name=fq_name)
dumpobj(obj)
def dumpobj(obj):
print(json.dumps(api.obj_to_dict(obj), indent=4))
def dumplist(res_type, detail=False):
refs = api._objects_list(res_type)
if refs:
refs = refs.get(res_type + 's')
if detail:
obj_list = []
for ref in refs or []:
obj = api._object_read(res_type, id=ref.get('uuid'))
obj_list.append(api.obj_to_dict(obj))
print(json.dumps({ 'objs': obj_list }, indent=4))
else:
print(json.dumps(refs, indent=4))
def dump_pr(name):
dump('physical-router', [GSC, name])
def dump_pi(pr_name, pi_name):
dump('physical-interface', [GSC, pr_name, pi_name])
def dump_li(pr_name, pi_name, unit=0):
dump('logical-interface', [GSC, pr_name, pi_name, "%s.%d" % (pi_name, unit)])
def validate_json(filepath):
with open(filepath, 'r') as f:
data = json.load(f)
print(json.dumps(data, indent=4))
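# Illustrative usage from a python shell on a node with VNC API access
# (object names below are hypothetical):
#   dump_pr('leaf-1')
#   dump_li('leaf-1', 'xe-0/0/0', unit=0)
#   dumplist('physical-router', detail=True)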
```
#### File: test/units/test_swift_fileutil.py
```python
import unittest
from flexmock import flexmock
import swiftclient
import swiftclient.utils
from ansible.modules.network.fabric import swift_fileutil
from test_fabric_base import TestFabricModule
from test_fabric_base import set_module_args
from ansible.module_utils import fabric_utils
class TestSwiftFileUtilModule(TestFabricModule):
module = swift_fileutil
def setUp(self):
super(TestSwiftFileUtilModule, self).setUp()
# Mocking the swift connection object
self.mockobj = flexmock().should_receive('get_account').and_return(['storageurl']).mock()
flexmock(swiftclient.client).should_receive('Connection').and_return(self.mockobj)
flexmock(self.mockobj).should_receive('post_account').and_return(None)
flexmock(self.mockobj).url = "storage_url"
flexmock(self.mockobj).should_receive("close").and_return(None)
fake_logger = flexmock()
flexmock(fake_logger).should_receive('error')
flexmock(fake_logger).should_receive('debug')
flexmock(fabric_utils).should_receive('fabric_ansible_logger').and_return(fake_logger)
self.args_dict = dict(authtoken="<PASSWORD>", authurl="auth_url", user="admin", key="contrail", tenant_name="project",
auth_version="3.0", temp_url_key="temp_url_key1",
temp_url_key_2="temp_url_key2", chosen_temp_url_key="temp_url_key",
container_name="container", filename="sample.txt", expirytime=3600)
# Testing the swift utility module
def test_fileutility01(self):
fake_image_url = "/v1/sample.txt"
flexmock(swiftclient.utils).should_receive('generate_temp_url').and_return(fake_image_url)
set_module_args(self.args_dict)
result = self.execute_module()
self.assertTrue(result["url"])
self.assertEqual(result["url"], fake_image_url)
# Testing when generate_temp_url returns None
def test_fileutility02(self):
flexmock(swiftclient.utils).should_receive('generate_temp_url').and_return(None)
set_module_args(self.args_dict)
self.assertRaises(Exception, self.execute_module())
# Testing when generate_temp_url raises exception
def test_fileutility_03(self):
flexmock(swiftclient.utils).should_receive('generate_temp_url').and_raise(Exception)
set_module_args(self.args_dict)
self.assertRaises(Exception, self.execute_module())
# #Testing the swift connection after retry
def test_fileutility04(self):
flexmock(swiftclient.client).should_receive('Connection').and_return(None)
self.args_dict['connection_retry_count'] = 1
set_module_args(self.args_dict)
self.assertRaises(Exception, self.execute_module())
# Testing the update account error
def test_fileutility05(self):
flexmock(self.mockobj).should_receive('post_account').and_raise(Exception)
set_module_args(self.args_dict)
self.assertRaises(Exception, self.execute_module())
# Testing the case where optional args are not passed and it should take default value
def test_fileutility06(self):
for e in ["tenant_name","auth_version","chosen_temp_url_key","connection_retry_count"]:
self.args_dict.pop(e, None)
set_module_args(self.args_dict)
fake_image_url = "/v1/sample.txt"
flexmock(swiftclient.utils).should_receive('generate_temp_url').and_return(fake_image_url)
result = self.execute_module()
self.assertTrue(result["url"])
self.assertEqual(result["url"], fake_image_url)
```
#### File: fabric-ansible/job_manager/playbook_helper.py
```python
import argparse
from builtins import object
from builtins import str
from collections import namedtuple
import errno
import json
import sys
import traceback
from ansible import constants as CONST
from ansible.executor.playbook_executor import PlaybookExecutor
from ansible.inventory.manager import InventoryManager
from ansible.module_utils._text import to_bytes, to_text
from ansible.parsing.dataloader import DataLoader
from ansible.utils.color import stringc
import ansible.utils.display as default_display
from ansible.utils.display import Display
from ansible.vars.manager import VariableManager
from job_manager.fabric_logger import fabric_ansible_logger
from job_manager.job_manager_logger import job_mgr_logger
from job_manager.job_messages import MsgBundle
from job_manager.job_utils import (
JobFileWrite, PLAYBOOK_EOL_PATTERN
)
verbosity = CONST.DEFAULT_VERBOSITY or 0
logger = fabric_ansible_logger("ansible")
# Overrides the default display processing from ansible/utils/display.py
# The default display method suppresses certain output for remote hosts
# while we want this information sent to the logs
def fabric_ansible_display(self, msg, color=None, stderr=False,
screen_only=False, log_only=False):
# Display a message to the user
# Note: msg *must* be a unicode string to prevent UnicodeError traceback
nocolor = msg
if color:
msg = stringc(msg, color)
if not log_only:
if not msg.endswith(u'\n'):
msg2 = msg + u'\n'
else:
msg2 = msg
msg2 = to_bytes(msg2, encoding=self._output_encoding(stderr=stderr))
if sys.version_info >= (3,):
# Convert back to text string on python3
# We first convert to a byte string so that we get rid of
# characters that are invalid in the user's locale
msg2 = to_text(msg2, self._output_encoding(stderr=stderr),
errors='replace')
# Note: After Display() class is refactored need to update the
# log capture code in 'bin/ansible-connection' (and other
# relevant places).
if not stderr:
fileobj = sys.stdout
else:
fileobj = sys.stderr
fileobj.write(msg2)
try:
fileobj.flush()
except IOError as e:
# Ignore EPIPE in case fileobj has been prematurely closed, eg.
# when piping to "head -n1"
if e.errno != errno.EPIPE:
raise
if logger:
msg2 = nocolor.lstrip(u'\n')
msg2 = to_bytes(msg2)
if sys.version_info >= (3,):
# Convert back to text string on python3
# We first convert to a byte string so that we get rid of
# characters that are invalid in the user's locale
msg2 = to_text(msg2, self._output_encoding(stderr=stderr))
if color == CONST.COLOR_ERROR:
logger.error(msg2)
else:
logger.info(msg2)
# Use custom display function
default_display.logger = logger
display = Display(verbosity)
JM_LOGGER = job_mgr_logger("FabricAnsible")
default_display.Display.display = fabric_ansible_display
class PlaybookHelper(object):
def __init__(self):
"""Playbook helper initializer. Creates the playbook log util class."""
self._job_file_write = JobFileWrite(logger)
def get_plugin_output(self, pbex):
output_json = pbex._tqm._variable_manager._nonpersistent_fact_cache[
'localhost'].get('output')
return output_json
def execute_playbook(self, playbook_info):
output = None
try:
loader = DataLoader()
inventory = InventoryManager(loader=loader, sources=['localhost'])
variable_manager = VariableManager(loader=loader,
inventory=inventory)
Options = namedtuple('Options',
['listtags', 'listtasks', 'listhosts',
'syntax', 'connection', 'module_path',
'forks', 'remote_user', 'private_key_file',
'ssh_common_args', 'ssh_extra_args',
'sftp_extra_args', 'scp_extra_args',
'become', 'become_method', 'become_user',
'verbosity', 'check', 'diff'])
options = Options(listtags=False, listtasks=False, listhosts=False,
syntax=False, connection='ssh', module_path=None,
forks=100, remote_user=None,
private_key_file=None, ssh_common_args=None,
ssh_extra_args=None, sftp_extra_args=None,
scp_extra_args=None, become=None,
become_method=None, become_user=None,
verbosity=None, check=False, diff=False)
variable_manager.extra_vars = playbook_info['extra_vars']
pbex = PlaybookExecutor(playbooks=[playbook_info['uri']],
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
options=options, passwords=None)
ret_val = pbex.run()
output = self.get_plugin_output(pbex)
if ret_val != 0:
msg = MsgBundle.getMessage(MsgBundle.
PLAYBOOK_RETURN_WITH_ERROR)
raise Exception(msg)
if output is None or output.get('status') is None:
msg = MsgBundle.getMessage(MsgBundle.
PLAYBOOK_OUTPUT_MISSING)
raise Exception(msg)
if output.get('status').lower() == "failure":
msg = MsgBundle.getMessage(MsgBundle.
PLAYBOOK_STATUS_FAILED)
raise Exception(msg)
return output
except Exception as exp:
msg = MsgBundle.getMessage(MsgBundle.PLAYBOOK_EXECUTE_ERROR,
playbook_uri=playbook_info['uri'],
execution_id=playbook_info['extra_vars']
['playbook_input']['job_execution_id'],
exc_msg=repr(exp))
            exp_message = getattr(exp, 'message', None)
            if exp_message:
                msg = msg + "\n" + exp_message
JM_LOGGER.error(msg)
# after handling exception, write an END
# to stop listening to the file if created
unique_pb_id = playbook_info['extra_vars'][
'playbook_input']['unique_pb_id']
exec_id = playbook_info['extra_vars']['playbook_input'][
'job_execution_id']
self._job_file_write.write_to_file(
exec_id,
unique_pb_id,
JobFileWrite.PLAYBOOK_OUTPUT,
json.dumps(output)
)
with open("/tmp/" + exec_id, "a") as f:
f.write(unique_pb_id + 'END' + PLAYBOOK_EOL_PATTERN)
sys.exit(msg)
def parse_args():
parser = argparse.ArgumentParser(description='Ansible playbook input '
'parameters')
parser.add_argument('-i', '--playbook_input', nargs=1,
help='Playbook input json')
return parser.parse_args()
if __name__ == "__main__":
playbook_input_json = None
try:
playbook_params = parse_args()
playbook_input_json = json.loads(playbook_params.playbook_input[0])
if playbook_input_json is None:
sys.exit(MsgBundle.getMessage(MsgBundle.NO_PLAYBOOK_INPUT_DATA))
except Exception as exp:
ERR_MSG = "Failed to start playbook due to Exception: %s" %\
traceback.print_stack()
JM_LOGGER.error(ERR_MSG)
sys.exit(MsgBundle.getMessage(MsgBundle.PLAYBOOK_INPUT_PARSING_ERROR,
exc_msg=repr(exp)))
playbook_helper = PlaybookHelper()
pb_output = playbook_helper.execute_playbook(playbook_input_json)
# if it comes here, it implies the pb_output is of correct
    # format and is present with status Success. So derive the
# playbook output to be written to file and finally write END to the file
unique_pb_id = playbook_input_json['extra_vars'][
'playbook_input']['unique_pb_id']
exec_id = playbook_input_json['extra_vars']['playbook_input'][
'job_execution_id']
try:
playbook_helper._job_file_write.write_to_file(
exec_id, unique_pb_id, JobFileWrite.PLAYBOOK_OUTPUT,
json.dumps(pb_output)
)
except Exception as exc:
ERR_MSG = "Error while trying to parse output"\
" from playbook due to exception: %s"\
% str(exc)
JM_LOGGER.error(ERR_MSG)
# not stopping execution just because of parsing error
# no sys.exit therefore
finally:
with open("/tmp/" + exec_id, "a") as f:
f.write(unique_pb_id + 'END' + PLAYBOOK_EOL_PATTERN)
```
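A hedged sketch of the input this helper expects: `parse_args` takes a single `-i/--playbook_input` JSON string, and `execute_playbook` reads the playbook URI plus `extra_vars.playbook_input.job_execution_id` / `unique_pb_id` from it. The paths and IDs below are placeholders.
```python
import json
import subprocess

# Placeholder payload mirroring the fields playbook_helper.py dereferences.
playbook_info = {
    "uri": "/opt/contrail/fabric_ansible_playbooks/example.yml",
    "extra_vars": {
        "playbook_input": {
            "job_execution_id": "1700000000000-1",
            "unique_pb_id": "0f1e2d3c",
        }
    }
}

# Roughly how the job manager would shell out to this script.
subprocess.call(["python", "playbook_helper.py",
                 "-i", json.dumps(playbook_info)])
```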
#### File: schema_transformer/resources/bgp_vpn.py
```python
from schema_transformer.resources._resource_base import ResourceBaseST
class BgpvpnST(ResourceBaseST):
_dict = {}
obj_type = 'bgpvpn'
prop_fields = ['route_target_list', 'import_route_target_list',
'export_route_target_list']
def __init__(self, name, obj=None):
self.name = name
self.virtual_networks = set()
self.logical_routers = set()
self.rt_list = set()
self.import_rt_list = set()
self.export_rt_list = set()
self.update(obj)
self.update_multiple_refs('virtual_network', self.obj)
self.update_multiple_refs('logical_router', self.obj)
def update(self, obj=None):
changed = self.update_vnc_obj(obj)
if set(self.prop_fields) & set(changed):
self.get_route_target_lists()
return changed
def delete_obj(self):
self.update_multiple_refs('virtual_network', {})
self.update_multiple_refs('logical_router', {})
def get_route_target_lists(self):
old_lists = (self.rt_list, self.import_rt_list, self.export_rt_list)
rt_list = self.obj.get_route_target_list()
if rt_list:
self.rt_list = set(rt_list.get_route_target())
else:
self.rt_list = set()
rt_list = self.obj.get_import_route_target_list()
if rt_list:
self.import_rt_list = set(rt_list.get_route_target())
else:
self.import_rt_list = set()
rt_list = self.obj.get_export_route_target_list()
if rt_list:
self.export_rt_list = set(rt_list.get_route_target())
else:
self.export_rt_list = set()
# if any RT exists in both import and export, just add it to rt_list
self.rt_list |= self.import_rt_list & self.export_rt_list
# if any RT exists in rt_list, remove it from import/export lists
self.import_rt_list -= self.rt_list
self.export_rt_list -= self.rt_list
return old_lists != (self.rt_list, self.import_rt_list,
self.export_rt_list)
def handle_st_object_req(self):
resp = super(BgpvpnST, self).handle_st_object_req()
resp.obj_refs = [
self._get_sandesh_ref_list('virtual_network'),
self._get_sandesh_ref_list('logical_router'),
]
return resp
# end BgpvpnST
```
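The normalization at the end of `get_route_target_lists` is easiest to see on concrete values; the route targets below are arbitrary examples, not real configuration.
```python
# Assumed sample values, mirroring the last three statements of
# BgpvpnST.get_route_target_lists.
rt_list = {'target:64512:1'}
import_rt_list = {'target:64512:2', 'target:64512:3'}
export_rt_list = {'target:64512:3'}

# An RT present in both import and export is promoted to the common list...
rt_list |= import_rt_list & export_rt_list
# ...and anything already in the common list is dropped from the directional lists.
import_rt_list -= rt_list
export_rt_list -= rt_list

assert rt_list == {'target:64512:1', 'target:64512:3'}
assert import_rt_list == {'target:64512:2'}
assert export_rt_list == set()
```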
#### File: schema_transformer/resources/virtual_machine.py
```python
from schema_transformer.resources._resource_base import ResourceBaseST
from schema_transformer.sandesh.st_introspect import ttypes as sandesh
class VirtualMachineST(ResourceBaseST):
_dict = {}
obj_type = 'virtual_machine'
ref_fields = ['service_instance']
def __init__(self, name, obj=None):
self.name = name
self.virtual_machine_interfaces = set()
self.service_instance = None
self.update(obj)
self.uuid = self.obj.uuid
self.update_multiple_refs('virtual_machine_interface', self.obj)
# end __init__
def get_vmi_by_service_type(self, service_type):
for vmi_name in self.virtual_machine_interfaces:
vmi = ResourceBaseST.get_obj_type_map().get(
'virtual_machine_interface').get(vmi_name)
if vmi and vmi.service_interface_type == service_type:
return vmi
return None
def get_service_mode(self):
if self.service_instance is None:
return None
si_obj = ResourceBaseST.get_obj_type_map().get(
'service_instance').get(self.service_instance)
if si_obj is None:
self._logger.error("service instance %s not found"
% self.service_instance)
return None
return si_obj.get_service_mode()
# end get_service_mode
def handle_st_object_req(self):
resp = super(VirtualMachineST, self).handle_st_object_req()
resp.obj_refs.extend([
self._get_sandesh_ref_list('virtual_machine_interface'),
])
resp.properties.extend([
sandesh.PropList('service_mode', self.get_service_mode()),
])
return resp
# end handle_st_object_req
# end VirtualMachineST
```
#### File: schema-transformer/schema_transformer/utils.py
```python
import socket
import cfgm_common as common
from cfgm_common import jsonutils as json
from cfgm_common.uve.config_req.ttypes import SystemConfigReq
from cfgm_common.uve.config_req.ttypes import SystemConfigReqTrace
import jsonpickle
from schema_transformer.sandesh.st_introspect import ttypes as sandesh
_PROTO_STR_TO_NUM_IPV4 = {
'icmp6': '58',
'icmp': '1',
'tcp': '6',
'udp': '17',
'any': 'any',
}
_PROTO_STR_TO_NUM_IPV6 = {
'icmp6': '58',
'icmp': '58',
'tcp': '6',
'udp': '17',
'any': 'any',
}
RULE_IMPLICIT_ALLOW_UUID = common.RULE_IMPLICIT_ALLOW_UUID
RULE_IMPLICIT_DENY_UUID = common.RULE_IMPLICIT_DENY_UUID
def _raise_and_send_uve_to_sandesh(obj_type, err_info, sandesh):
config_req_err = SystemConfigReq(obj_type=obj_type,
err_info=err_info)
config_req_err.name = socket.getfqdn()
config_req_trace = SystemConfigReqTrace(data=config_req_err,
sandesh=sandesh)
config_req_trace.send(sandesh=sandesh)
def _pp_json_object(obj):
parsed = json.loads(jsonpickle.encode(obj, unpicklable=False))
return json.dumps(parsed, indent=4)
def _create_pprinted_prop_list(name, value):
return sandesh.PropList(name, _pp_json_object(value))
```
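A small, assumed illustration of the pretty-printing helpers above: `_pp_json_object` flattens an arbitrary object through jsonpickle and renders it as indented JSON, which `_create_pprinted_prop_list` wraps in a sandesh PropList.
```python
# Hypothetical object; any plain Python object with attributes works.
class FakeRule(object):
    def __init__(self):
        self.action = 'pass'
        self.protocol = 'tcp'

print(_pp_json_object(FakeRule()))
# {
#     "action": "pass",
#     "protocol": "tcp"
# }

prop = _create_pprinted_prop_list('rule', FakeRule())
# prop wraps the name 'rule' together with the indented JSON rendering above
```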
#### File: drivers/f5/f5_driver_v2.py
```python
import sys
from oslo_config import cfg
from f5_openstack_agent.lbaasv2.drivers.bigip import icontrol_driver
from f5_openstack_agent.lbaasv2.drivers.bigip import agent_manager
from svc_monitor.services.loadbalancer.drivers import abstract_driver
from svc_monitor.config_db import *
class OpencontrailF5LoadbalancerDriver(
abstract_driver.ContrailLoadBalancerAbstractDriver):
def __init__(self, name, manager, api, db, args=None):
self._name = name
self._manager = manager
self._api = api
self.db = db
args_driver = dict(args.config_sections.items(self._name))
args_auth = dict(args.config_sections.items('KEYSTONE'))
self.service = {}
conf = cfg.ConfigOpts()
conf.register_opts(agent_manager.OPTS)
conf.register_opts(icontrol_driver.OPTS)
conf.register_opt(cfg.BoolOpt('debug', default = False))
conf.f5_global_routed_mode = True
        if 'device_ip' not in args_driver:
return None
if 'ha_mode' in args_driver:
conf.f5_ha_type = args_driver['ha_mode']
conf.icontrol_hostname = args_driver['device_ip']
conf.icontrol_username = args_driver['user']
conf.icontrol_password = args_driver['password']
conf.cert_manager = 'f5_openstack_agent.lbaasv2.drivers.bigip' \
'.barbican_cert.BarbicanCertManager'
if 'v2.0' in args_auth['auth_url']:
conf.auth_version = 'v2'
conf.os_username = args_auth['admin_user']
conf.os_password = <PASSWORD>_auth['<PASSWORD>']
conf.os_auth_url = args_auth['auth_url']
conf.os_tenant_name = args_auth['admin_tenant_name']
elif 'v3' in args_auth['auth_url']:
conf.auth_version = 'v3'
conf.os_username = args_auth['admin_user']
conf.os_password = <PASSWORD>_auth['<PASSWORD>']
conf.os_auth_url = args_auth['auth_url']
conf.os_project_name = args_auth['admin_tenant_name']
conf.os_user_domain_name = 'default'
conf.os_project_domain_name = 'default'
self.icontrol = icontrol_driver.iControlDriver(conf,
registerOpts = False)
def _service_lb(self, lb_id, status = 'ACTIVE'):
self.service = {}
lb = LoadbalancerSM.get(lb_id)
if not lb:
self.service['loadbalancer'] = None
return
tenant = ProjectSM.get(lb.parent_uuid)
vmi = VirtualMachineInterfaceSM.get(lb.virtual_machine_interface)
vn = VirtualNetworkSM.get(vmi.virtual_network)
conf = {'id': lb.uuid,
'config': None,
'tenant_id': tenant.name,
'name': lb.name,
'description': "",
'vip_subnet_id': lb.params['vip_subnet_id'],
'vip_address': lb.params['vip_address'],
'port_id': lb.virtual_machine_interface,
'provisioning_status': status,
'status': lb.params['status'],
'network_id': vn.name}
self.service['loadbalancer'] = conf
self.service['networks'] = {vn.name: {'provider:network_type': 'flat'}}
def _service_listener(self, l_id, status = 'ACTIVE'):
lb_conf = self.service['loadbalancer']
l = LoadbalancerListenerSM.get(l_id)
conf = {'id': lb_conf['name'] + '_' + l.name,
'config': None,
'tenant_id': lb_conf['tenant_id'],
'name': l.name,
'protocol': l.params['protocol'],
'protocol_port': l.params['protocol_port'],
'provisioning_status': status,
'admin_state_up': l.params['admin_state'],
'description': ""}
if l.params['protocol'] == 'TERMINATED_HTTPS':
conf['default_tls_container_id'] = l.params['default_tls_container']
conf['sni_containers'] = []
for sni_container in l.params['sni_containers']:
c = {'tls_container_id': sni_container}
conf['sni_containers'].append(c)
self.service['listeners'] = [conf]
def _service_pool(self, p_id, status = 'ACTIVE'):
lb_conf = self.service['loadbalancer']
l_conf = self.service['listeners'][0]
p = LoadbalancerPoolSM.get(p_id)
conf = {'id': lb_conf['name'] + '_' + p.name,
'config': None,
'tenant_id': lb_conf['tenant_id'],
'provisioning_status': status,
'listeners': [{'id': l_conf['id']}],
'description': ""}
self.service['pools'] = [conf]
def _service_member(self, m_id, status = 'ACTIVE'):
lb_conf = self.service['loadbalancer']
p_conf = self.service['pools'][0]
m = LoadbalancerMemberSM.get(m_id)
conf = {'id': m_id,
'config': None,
'tenant_id': lb_conf['tenant_id'],
'provisioning_status': status,
'pool_id': p_conf['id'],
'address': m.params['address'],
'protocol_port': m.params['protocol_port'],
'subnet_id': None,
'network_id': None,
'description': ""}
self.service['members'] = [conf]
def _service_healthmonitor(self, hm_id, status = 'ACTIVE'):
lb_conf = self.service['loadbalancer']
p_conf = self.service['pools'][0]
hm = HealthMonitorSM.get(hm_id)
conf = {'id': hm_id,
'config': None,
'tenant_id': lb_conf['tenant_id'],
'provisioning_status': status,
'pool_id': p_conf['id'],
'type': hm.params['monitor_type'],
'url_path': hm.params['url_path'],
'delay': hm.params['delay'],
'max_retries': hm.params['max_retries'],
'timeout': hm.params['timeout'],
'expected_codes': hm.params['expected_codes'],
'description': ""}
self.service['healthmonitors'] = [conf]
def create_loadbalancer(self, loadbalancer):
self._service_lb(loadbalancer['id'])
if self.service['loadbalancer']:
self.icontrol.create_loadbalancer(None, self.service)
def update_loadbalancer(self, old_loadbalancer, loadbalancer):
pass
def delete_loadbalancer(self, loadbalancer):
self._service_lb(loadbalancer['id'], status = 'PENDING_DELETE')
self.icontrol.delete_loadbalancer(None, self.service)
def create_listener(self, listener):
self._service_lb(listener['loadbalancer_id'])
self._service_listener(listener['id'])
self.icontrol.create_listener(None, self.service)
def update_listener(self, old_listener, listener):
pass
def delete_listener(self, listener):
self._service_lb(listener['loadbalancer_id'])
self._service_listener(listener['id'], status = 'PENDING_DELETE')
self.icontrol.delete_listener(None, self.service)
def create_pool(self, pool):
self._service_lb(pool['loadbalancer_id'])
pool_sm = LoadbalancerPoolSM.get(pool['id'])
self._service_listener(pool_sm.loadbalancer_listener)
self._service_pool(pool['id'])
self.icontrol.create_pool(None, self.service)
def update_pool(self, old_pool, pool):
pass
def delete_pool(self, pool):
self._service_lb(pool['loadbalancer_id'])
pool_sm = LoadbalancerPoolSM.get(pool['id'])
self._service_listener(pool_sm.loadbalancer_listener)
self._service_pool(pool['id'], status = 'PENDING_DELETE')
self.icontrol.delete_pool(None, self.service)
def create_member(self, member):
pool_sm = LoadbalancerPoolSM.get(member['pool_id'])
listener_sm = LoadbalancerListenerSM.get(pool_sm.loadbalancer_listener)
self._service_lb(listener_sm.loadbalancer)
self._service_listener(listener_sm.uuid)
self._service_pool(pool_sm.uuid)
self._service_member(member['id'])
self.icontrol.create_member(None, self.service)
def update_member(self, old_member, member):
pass
def delete_member(self, member):
pool_sm = LoadbalancerPoolSM.get(member['pool_id'])
listener_sm = LoadbalancerListenerSM.get(pool_sm.loadbalancer_listener)
self._service_lb(listener_sm.loadbalancer)
self._service_listener(listener_sm.uuid)
self._service_pool(pool_sm.uuid)
self._service_member(member['id'], status = 'PENDING_DELETE')
self.icontrol.delete_member(None, self.service)
def create_health_monitor(self, health_monitor, pool_id):
pool_sm = LoadbalancerPoolSM.get(pool_id)
listener_sm = LoadbalancerListenerSM.get(pool_sm.loadbalancer_listener)
self._service_lb(pool_sm.loadbalancer_id)
self._service_listener(listener_sm.uuid)
self._service_pool(pool_id)
self._service_healthmonitor(health_monitor['id'])
self.icontrol.create_health_monitor(None, self.service)
def update_health_monitor(self, old_health_monitor, health_monitor,
pool_id):
pass
def delete_health_monitor(self, health_monitor, pool_id):
pool_sm = LoadbalancerPoolSM.get(pool_id)
listener_sm = LoadbalancerListenerSM.get(pool_sm.loadbalancer_listener)
self._service_lb(pool_sm.loadbalancer_id)
self._service_listener(listener_sm.uuid)
self._service_pool(pool_id)
self._service_healthmonitor(health_monitor['id'],
status = 'PENDING_DELETE')
self.icontrol.delete_health_monitor(None, self.service)
def create_vip(self, vip):
pass
def update_vip(self, old_vip, vip):
pass
def delete_vip(self, vip):
pass
def stats(self, pool_id):
pass
def set_config_v2(self, lb_id):
pass
```
#### File: ha_proxy/custom_attributes/haproxy_validator.py
```python
from builtins import str
from builtins import range
from builtins import object
import logging
import inspect
import os
class CustomAttr(object):
"""This type handles non-flat data-types like
int, str, bool.
"""
def __init__(self, key, value):
self._value = value
self._key = key
def validate(self):
pass
def post_validation(self):
pass
class CustomAttrTlsContainer(CustomAttr):
def __init__(self, key, value):
super(CustomAttrTlsContainer, self).__init__(key, value)
def validate(self):
return True
def post_validation(self):
return self._value
def validate_custom_attributes(custom_attributes_dict, section,
custom_attributes):
section_dict = {}
if custom_attributes and section in custom_attributes_dict:
for key, value in list(custom_attributes.items()):
if key in custom_attributes_dict[section]:
#Sanitize the value
try:
type_attr = custom_attributes_dict[section][key]['type']
limits = custom_attributes_dict[section][key]['limits']
if type_attr == 'int':
value = int(value)
if value in range(limits[0], limits[1]):
section_dict.update({key:value})
else:
logging.info("Skipping key: %s, value: %s due to" \
"validation failure" % (key, value))
elif type_attr == 'str':
if len(value) in range(limits[0], limits[1]):
section_dict.update({key:value})
else:
logging.info("Skipping key: %s, value: %s due to" \
"validation failure" % (key, value))
elif type_attr == 'bool':
if value in limits:
if value == 'True':
value = ''
elif value == 'False':
value = 'no '
section_dict.update({key:value})
else:
logging.info("Skipping key: %s, value: %s due to" \
"validation failure" % (key, value))
elif inspect.isclass(eval(type_attr)):
new_custom_attr = eval(type_attr)(key, value)
if new_custom_attr.validate():
value = new_custom_attr.post_validation()
section_dict.update({key:value})
else:
logging.info("Skipping key: %s, value: %s due to" \
"validation failure" % (key, value))
except Exception as e:
logging.error(str(e))
continue
return section_dict
```
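A hedged usage sketch for `validate_custom_attributes`; the schema dict below is invented for illustration and is not the real haproxy attribute table.
```python
# Invented schema: each key declares a type and validation limits.
custom_attributes_dict = {
    'global': {
        'max_conn': {'type': 'int', 'limits': [1, 65535]},
        'ssl_ciphers': {'type': 'str', 'limits': [1, 100]},
    }
}
# Raw key/value pairs as they might arrive from object annotations.
custom_attributes = {'max_conn': '5000', 'unknown_key': 'dropped'}

section_dict = validate_custom_attributes(
    custom_attributes_dict, 'global', custom_attributes)
# Keys missing from the schema or failing the type/limit checks are skipped.
assert section_dict == {'max_conn': 5000}
```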
#### File: svc_monitor/tests/test_virtual_machine_manager.py
```python
from __future__ import absolute_import
import mock
import unittest
from vnc_api.vnc_api import *
from svc_monitor.virtual_machine_manager import VirtualMachineManager
from svc_monitor.config_db import *
from . import test_common_utils as test_utils
class VirtualMachineManagerTest(unittest.TestCase):
def setUp(self):
VirtualMachineSM._object_db = mock.MagicMock()
VirtualMachineSM._object_db.object_read = test_utils.vm_db_read
VirtualMachineInterfaceSM._object_db = mock.MagicMock()
VirtualMachineInterfaceSM._object_db.object_read = test_utils.vmi_db_read
InstanceIpSM._object_db = mock.MagicMock()
InstanceIpSM._object_db.object_read = test_utils.iip_db_read
InterfaceRouteTableSM._object_db = mock.MagicMock()
InterfaceRouteTableSM._object_db.object_read = test_utils.irt_db_read
self.mocked_vnc = mock.MagicMock()
self.mocked_vnc.fq_name_to_id = test_utils.get_vn_id_for_fq_name
self.mocked_vnc.virtual_network_create = test_utils.vn_create
self.mocked_vnc.virtual_machine_interface_create = test_utils.vmi_create
self.mocked_vnc.instance_ip_create = test_utils.iip_create
self.nova_mock = mock.MagicMock()
self.mocked_db = mock.MagicMock()
self.mocked_args = mock.MagicMock()
self.mocked_args.availability_zone = 'default-availability-zone'
self.log_mock = mock.MagicMock()
self.vm_manager = VirtualMachineManager(
db=self.mocked_db, logger=self.log_mock,
vnc_lib=self.mocked_vnc, vrouter_scheduler=mock.MagicMock(),
nova_client=self.nova_mock, args=self.mocked_args,
agent_manager=mock.MagicMock())
def tearDown(self):
ServiceTemplateSM.reset()
ServiceInstanceSM.reset()
InstanceIpSM.reset()
VirtualMachineInterfaceSM.reset()
VirtualMachineSM.reset()
del InterfaceRouteTableSM._object_db
del VirtualMachineSM._object_db
def test_virtual_machine_create(self):
test_utils.create_test_project('fake-domain:fake-project')
test_utils.create_test_security_group('fake-domain:fake-project:default')
test_utils.create_test_virtual_network('fake-domain:fake-project:left-vn')
test_utils.create_test_virtual_network('fake-domain:fake-project:right-vn')
st = test_utils.create_test_st(name='vm-template',
virt_type='virtual-machine',
intf_list=[['management', False], ['left', True], ['right', False]])
si = test_utils.create_test_si(name='vm-instance', count=2,
intf_list=['', 'left-vn', 'right-vn'])
def nova_oper(resource, oper, proj_name, **kwargs):
if resource == 'servers' and oper == 'create':
nova_vm = test_utils.FakeNovaServer('fake-vm-uuid', kwargs['name'])
return nova_vm
else:
return mock.MagicMock()
self.nova_mock.oper = nova_oper
self.vm_manager.create_service(st, si)
self.mocked_vnc.virtual_machine_create.assert_any_call(test_utils.VMObjMatcher(1))
self.mocked_vnc.virtual_machine_create.assert_any_call(test_utils.VMObjMatcher(2))
        self.assertEqual(si.availability_zone, 'default-availability-zone')
def test_virtual_machine_delete(self):
vm = test_utils.create_test_virtual_machine('fake-vm-uuid')
self.vm_manager.delete_service(vm)
def test_missing_image_in_template(self):
test_utils.create_test_project('fake-domain:fake-project')
test_utils.create_test_security_group('fake-domain:fake-project:default')
test_utils.create_test_virtual_network('fake-domain:fake-project:left-vn')
test_utils.create_test_virtual_network('fake-domain:fake-project:right-vn')
st = test_utils.create_test_st(name='vm-template',
virt_type='virtual-machine',
intf_list=[['management', False], ['left', True], ['right', False]])
si = test_utils.create_test_si(name='vm-instance', count=2,
intf_list=['', 'left-vn', 'right-vn'])
st.params['image_name'] = None
self.vm_manager.create_service(st, si)
self.log_mock.error.assert_called_with("Image not present in %s" % ((':').join(st.fq_name)))
def test_missing_image_in_nova(self):
test_utils.create_test_project('fake-domain:fake-project')
test_utils.create_test_security_group('fake-domain:fake-project:default')
test_utils.create_test_virtual_network('fake-domain:fake-project:left-vn')
test_utils.create_test_virtual_network('fake-domain:fake-project:right-vn')
st = test_utils.create_test_st(name='vm-template',
virt_type='virtual-machine',
intf_list=[['management', False], ['left', True], ['right', False]])
si = test_utils.create_test_si(name='vm-instance', count=2,
intf_list=['', 'left-vn', 'right-vn'])
def nova_oper(resource, oper, proj_name, **kwargs):
if resource == 'images' and oper == 'find':
return None
else:
return mock.MagicMock()
self.nova_mock.oper = nova_oper
self.vm_manager.create_service(st, si)
self.log_mock.error.assert_called_with("Image not found %s" % si.image)
def test_nova_vm_create_fail(self):
test_utils.create_test_project('fake-domain:fake-project')
test_utils.create_test_security_group('fake-domain:fake-project:default')
test_utils.create_test_virtual_network('fake-domain:fake-project:left-vn')
test_utils.create_test_virtual_network('fake-domain:fake-project:right-vn')
st = test_utils.create_test_st(name='vm-template',
virt_type='virtual-machine',
intf_list=[['management', False], ['left', True], ['right', False]])
si = test_utils.create_test_si(name='vm-instance', count=2,
intf_list=['', 'left-vn', 'right-vn'])
def nova_oper(resource, oper, proj_name, **kwargs):
if resource == 'servers' and oper == 'create':
return None
else:
return mock.MagicMock()
self.nova_mock.oper = nova_oper
self.vm_manager.create_service(st, si)
self.log_mock.error.assert_any_call(test_utils.AnyStringWith('Nova vm create failed'))
def test_missing_flavor_in_template(self):
test_utils.create_test_project('fake-domain:fake-project')
test_utils.create_test_security_group('fake-domain:fake-project:default')
test_utils.create_test_virtual_network('fake-domain:fake-project:left-vn')
test_utils.create_test_virtual_network('fake-domain:fake-project:right-vn')
st = test_utils.create_test_st(name='vm-template',
virt_type='virtual-machine',
intf_list=[['management', False], ['left', True], ['right', False]])
si = test_utils.create_test_si(name='vm-instance', count=2,
intf_list=['', 'left-vn', 'right-vn'])
def nova_oper(resource, oper, proj_name, **kwargs):
if resource == 'flavors' and oper == 'find':
return None
else:
return mock.MagicMock()
self.nova_mock.oper = nova_oper
st.params['flavor'] = None
self.vm_manager.create_service(st, si)
self.log_mock.error.assert_called_with(test_utils.AnyStringWith("Flavor not found"))
def test_availability_zone_setting(self):
test_utils.create_test_project('fake-domain:fake-project')
test_utils.create_test_security_group('fake-domain:fake-project:default')
test_utils.create_test_virtual_network('fake-domain:fake-project:left-vn')
test_utils.create_test_virtual_network('fake-domain:fake-project:right-vn')
st = test_utils.create_test_st(name='vm-template',
virt_type='virtual-machine',
intf_list=[['management', False], ['left', True], ['right', False]])
si = test_utils.create_test_si(name='vm-instance', count=2,
intf_list=['', 'left-vn', 'right-vn'])
def nova_oper(resource, oper, proj_name, **kwargs):
if resource == 'servers' and oper == 'create':
nova_vm = test_utils.FakeNovaServer('fake-vm-uuid', kwargs['name'])
return nova_vm
else:
return mock.MagicMock()
self.nova_mock.oper = nova_oper
st.params['availability_zone_enable'] = True
si.params['availability_zone'] = 'test-availability-zone'
self.vm_manager.create_service(st, si)
        self.assertEqual(si.availability_zone, 'test-availability-zone')
def test_network_config_validation(self):
test_utils.create_test_project('fake-domain:fake-project')
test_utils.create_test_security_group('fake-domain:fake-project:default')
test_utils.create_test_virtual_network('fake-domain:fake-project:left-vn')
test_utils.create_test_virtual_network('fake-domain:fake-project:right-vn')
st = test_utils.create_test_st(name='vm-template',
virt_type='virtual-machine',
intf_list=[['management', False], ['left', True], ['right', False]])
si = test_utils.create_test_si(name='vm-instance', count=2,
intf_list=['', 'left-vn', 'right-vn'])
st.params['interface_type'] = []
self.vm_manager.create_service(st, si)
self.log_mock.notice.assert_called_with("Interface list empty for ST %s SI %s" %
((':').join(st.fq_name), (':').join(si.fq_name)))
def test_virtual_machine_exists(self):
test_utils.create_test_project('fake-domain:fake-project')
test_utils.create_test_security_group('fake-domain:fake-project:default')
test_utils.create_test_virtual_network('fake-domain:fake-project:left-vn')
test_utils.create_test_virtual_network('fake-domain:fake-project:right-vn')
st = test_utils.create_test_st(name='vm-template',
virt_type='virtual-machine',
intf_list=[['management', False], ['left', True], ['right', False]])
si = test_utils.create_test_si(name='vm-instance', count=2,
intf_list=['', 'left-vn', 'right-vn'])
def nova_oper(resource, oper, proj_name, **kwargs):
if resource == 'servers' and oper == 'create':
nova_vm = test_utils.FakeNovaServer(kwargs['name'], kwargs['name'])
return nova_vm
else:
return mock.MagicMock()
self.nova_mock.oper = nova_oper
self.mocked_vnc.virtual_machine_create = test_utils.vm_create
self.vm_manager.create_service(st, si)
self.log_mock.info.assert_any_call(test_utils.AnyStringWith('Launching VM :'))
self.log_mock.info.assert_any_call(test_utils.AnyStringWith('Created VM :'))
self.log_mock.info.assert_any_call(test_utils.AnyStringWith(si.name))
self.log_mock.reset_mock()
self.vm_manager.create_service(st, si)
        self.assertEqual(self.log_mock.info.call_count, 1)
def test_virtual_machine_static_routes(self):
test_utils.create_test_project('fake-domain:fake-project')
test_utils.create_test_security_group('fake-domain:fake-project:default')
test_utils.create_test_virtual_network('fake-domain:fake-project:left-vn')
test_utils.create_test_virtual_network('fake-domain:fake-project:right-vn')
st = test_utils.create_test_st(name='vm-template',
virt_type='virtual-machine',
intf_list=[['management', False], ['left', True, True], ['right', False]])
si = test_utils.create_test_si(name='vm-instance', count=2,
intf_list=['', 'left-vn', 'right-vn'])
def nova_oper(resource, oper, proj_name, **kwargs):
if resource == 'servers' and oper == 'create':
nova_vm = test_utils.FakeNovaServer('fake-vm-uuid', kwargs['name'])
return nova_vm
else:
return mock.MagicMock()
self.nova_mock.oper = nova_oper
self.vm_manager.create_service(st, si)
self.mocked_vnc.virtual_machine_create.assert_any_call(test_utils.VMObjMatcher(1))
self.mocked_vnc.virtual_machine_create.assert_any_call(test_utils.VMObjMatcher(2))
```
#### File: mockzoo/mockzoo/mockzoo.py
```python
import os
import subprocess
import logging
import socket
from kazoo.client import KazooClient
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
zookeeper_version = 'zookeeper-3.4.5'
zookeeper_dl = '/zookeeper-3.4.5.tar.gz'
zookeeper_bdir = '/tmp/cache-' + os.environ['USER'] + '-systemless_test'
def start_zoo(cport):
'''
Client uses this function to start an instance of zookeeper
Arguments:
cport : An unused TCP port for zookeeper to use as the client port
'''
if not os.path.exists(zookeeper_bdir):
output,_ = call_command_("mkdir " + zookeeper_bdir)
zookeeper_download = 'wget -O ' + zookeeper_bdir + zookeeper_dl + \
' https://github.com/Juniper/contrail-third-party-cache/blob/master/zookeeper' + \
zookeeper_dl + '?raw=true'
if not os.path.exists(zookeeper_bdir + zookeeper_dl):
process = subprocess.Popen(zookeeper_download.split(' '))
process.wait()
        if process.returncode != 0:
logging.info('Cannot download Zk: %s' % zookeeper_download)
return False
basefile = zookeeper_version
cassbase = "/tmp/zoo.%s.%d/" % (os.getenv('USER', 'None'), cport)
confdir = cassbase + basefile + "/conf/"
output,_ = call_command_("mkdir " + cassbase)
logging.info('Installing zookeeper in ' + cassbase)
os.system("cat " + zookeeper_bdir + zookeeper_dl + " | tar -xpzf - -C " + cassbase)
output,_ = call_command_("cp " + confdir + "zoo_sample.cfg " + confdir + "zoo.cfg")
logging.info('zookeeper Client Port %d' % cport)
replace_string_(confdir + "zoo.cfg", \
[("dataDir=/tmp/zookeeper", "dataDir="+cassbase),
("clientPort=2181", "clientPort="+str(cport))])
replace_string_(cassbase + basefile + "/bin/zkServer.sh", \
[('_ZOO_DAEMON_OUT="$ZOO_LOG_DIR/', '_ZOO_DAEMON_OUT="%s' % cassbase)])
output,_ = call_command_("chmod +x " + cassbase + basefile + "/bin/zkServer.sh")
output,_ = call_command_(cassbase + basefile + "/bin/zkServer.sh start")
zk = KazooClient(hosts='127.0.0.1:'+str(cport))
try:
zk.start()
except:
logging.info("Zookeeper client cannot connect. Zk logfile below:")
with open(cassbase+"zookeeper.out", 'r') as fin:
logging.info(fin.read())
return False
zk.stop()
return True
def stop_zoo(cport):
'''
Client uses this function to stop an instance of zookeeper
This will only work for zookeeper instances that were started by this module
Arguments:
        cport : The client port of the zookeeper instance to be stopped
'''
cassbase = "/tmp/zoo.%s.%d/" % (os.getenv('USER', 'None'), cport)
basefile = zookeeper_version
logging.info('Killing zookeeper %d' % cport)
output,_ = call_command_(cassbase + basefile + '/bin/zkServer.sh stop')
output,_ = call_command_("rm -rf " + cassbase)
def replace_string_(filePath, findreplace):
"replaces all findStr by repStr in file filePath"
print(filePath)
tempName=filePath+'~~~'
input = open(filePath)
output = open(tempName,'w')
s=input.read()
for couple in findreplace:
outtext=s.replace(couple[0],couple[1])
s=outtext
output.write(outtext)
output.close()
input.close()
os.rename(tempName,filePath)
def call_command_(command):
process = subprocess.Popen(command.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return process.communicate()
if __name__ == "__main__":
cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
cs.bind(("",0))
cport = cs.getsockname()[1]
cs.close()
start_zoo(cport)
```
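A short usage sketch pairing `start_zoo` with `stop_zoo`, mirroring the free-port trick in the `__main__` block above; the import path is an assumption.
```python
import socket
from mockzoo.mockzoo import start_zoo, stop_zoo  # import path assumed

# Grab an unused TCP port for the throwaway zookeeper instance.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
cport = s.getsockname()[1]
s.close()

if start_zoo(cport):
    # ... exercise code against 127.0.0.1:<cport> here ...
    stop_zoo(cport)
```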
#### File: kube-manager/kube_manager/kube_manager.py
```python
import random
import os
import sys
import time
import gc
import traceback
import greenlet
from gevent.queue import Queue
import gevent
from cfgm_common.vnc_amqp import VncAmqpHandle
from cfgm_common.zkclient import ZookeeperClient
from .common import logger as common_logger
from .common import args as kube_args
from .kube import (
kube_monitor,
namespace_monitor,
pod_monitor,
service_monitor,
network_policy_monitor,
endpoint_monitor,
ingress_monitor,
network_monitor
)
from .vnc import config_db
from .vnc import reaction_map
from .vnc import vnc_kubernetes
from .sandesh.kube_introspect import ttypes as introspect
class KubeNetworkManager(object):
_kube_network_manager = None
def __init__(self, args=None, logger=None, kube_api_connected=False,
queue=None, vnc_kubernetes_config_dict=None, is_master=True):
self.q = queue if queue else Queue()
self.args = args
self.logger = logger
self.is_master = is_master
# All monitors supported by this manager.
self.monitors = {}
self.kube = None
self.greenlets = []
while not kube_api_connected:
try:
self.kube = kube_monitor.KubeMonitor(
args=self.args, logger=self.logger, q=self.q)
self.monitors['namespace'] = namespace_monitor.NamespaceMonitor(
args=self.args, logger=self.logger, q=self.q)
self.monitors['network'] = \
network_monitor.NetworkMonitor(
args=self.args, logger=self.logger, q=self.q)
self.monitors['pod'] = pod_monitor.PodMonitor(
args=self.args, logger=self.logger, q=self.q)
self.monitors['service'] = service_monitor.ServiceMonitor(
args=self.args, logger=self.logger, q=self.q)
self.monitors['network_policy'] =\
network_policy_monitor.NetworkPolicyMonitor(
args=self.args, logger=self.logger, q=self.q)
self.monitors['endpoint'] = \
endpoint_monitor.EndPointMonitor(
args=self.args, logger=self.logger, q=self.q)
self.monitors['ingress'] = \
ingress_monitor.IngressMonitor(
args=self.args, logger=self.logger, q=self.q)
kube_api_connected = True
except Exception: # FIXME: Except clause is too broad
time.sleep(30)
# Register all the known monitors.
for monitor in list(self.monitors.values()):
monitor.register_monitor()
self.vnc = vnc_kubernetes.VncKubernetes(
args=self.args, logger=self.logger, q=self.q, kube=self.kube,
vnc_kubernetes_config_dict=vnc_kubernetes_config_dict)
# end __init__
def _kube_object_cache_enabled(self):
return self.args.kube_object_cache == 'True'
def launch_timer(self):
self.logger.info("KubeNetworkManager - kube_timer_interval(%ss)"
% self.args.kube_timer_interval)
time.sleep(int(self.args.kube_timer_interval))
while True:
gevent.sleep(int(self.args.kube_timer_interval))
try:
self.timer_callback()
except Exception: # FIXME: Except clause is too broad
pass
def timer_callback(self):
self.vnc.vnc_timer()
def start_tasks(self):
self.logger.info("Starting all tasks.")
self.greenlets = [gevent.spawn(self.vnc.vnc_process)]
for monitor in list(self.monitors.values()):
self.greenlets.append(gevent.spawn(monitor.event_callback))
if not self.args.kube_timer_interval.isdigit():
self.logger.emergency("set seconds for kube_timer_interval "
"in contrail-kubernetes.conf. \
example: kube_timer_interval=60")
sys.exit()
if int(self.args.kube_timer_interval) > 0:
self.greenlets.append(gevent.spawn(self.launch_timer))
gevent.joinall(self.greenlets)
@staticmethod
def reset():
for cls in list(config_db.DBBaseKM.get_obj_type_map().values()):
cls.reset()
@classmethod
def get_instance(cls):
return cls._kube_network_manager
@classmethod
def destroy_instance(cls):
inst = cls.get_instance()
if inst is None:
return
inst.logger.sandesh_uninit()
for gl in inst.greenlets:
gl.kill()
inst.greenlets = []
inst.vnc.destroy_instance()
inst.vnc = None
inst.q = None
KubeNetworkManager._kube_network_manager = None
@classmethod
def sandesh_handle_greenlet_stack_list_request(cls, request):
greenlets = [introspect.KubeGreenletStackInstance(stack=stack)
for stack in [traceback.format_stack(ob.gr_frame)
for ob in gc.get_objects()
if isinstance(ob, greenlet.greenlet)]]
resp = introspect.KubeGreenletStackListResp(greenlets=greenlets)
resp.response(request.context())
@classmethod
def sandesh_handle_kube_api_connection_status_request(cls, request):
statuses = {
'endpoint_monitor': False,
'ingress_monitor': False,
'namespace_monitor': False,
'network_policy_monitor': False,
'pod_monitor': False,
'service_monitor': False
}
kube_manager = cls.get_instance()
if kube_manager is not None:
for key, value in list(kube_manager.monitors.items()):
statuses[key + '_monitor'] = value._is_kube_api_server_alive()
response = introspect.KubeApiConnectionStatusResp(
connections=introspect.KubeApiConnections(**statuses))
response.response(request.context())
@classmethod
def sandesh_handle_mastership_status_request(cls, request):
kube_manager = cls.get_instance()
response = introspect.MastershipStatusResp(
is_master=(kube_manager.is_master if kube_manager else False))
response.response(request.context())
def run_kube_manager(km_logger, args, kube_api_skip, event_queue,
vnc_kubernetes_config_dict=None):
km_logger.notice("Elected master kube-manager node. Initializing...")
km_logger.introspect_init()
kube_nw_mgr = KubeNetworkManager(
args, km_logger,
kube_api_connected=kube_api_skip,
queue=event_queue,
vnc_kubernetes_config_dict=vnc_kubernetes_config_dict,
is_master=True)
KubeNetworkManager._kube_network_manager = kube_nw_mgr
# Register introspect handlers
introspect.KubeGreenletStackList.handle_request = \
KubeNetworkManager.sandesh_handle_greenlet_stack_list_request
introspect.KubeApiConnectionStatus.handle_request =\
KubeNetworkManager.sandesh_handle_kube_api_connection_status_request
introspect.MastershipStatus.handle_request =\
KubeNetworkManager.sandesh_handle_mastership_status_request
kube_nw_mgr.start_tasks()
def main(args_str=None, kube_api_skip=False, event_queue=None,
vnc_kubernetes_config_dict=None):
_zookeeper_client = None
args = kube_args.parse_args(args_str)
client_pfx = ''
zk_path_pfx = ''
if args.cluster_id:
client_pfx += args.cluster_id + '-'
zk_path_pfx += args.cluster_id + '/'
client_pfx += args.cluster_name + '-'
zk_path_pfx += args.cluster_name + '/'
# randomize collector list
args.random_collectors = args.collectors
if args.collectors:
args.random_collectors = random.sample(args.collectors,
len(args.collectors))
km_logger = common_logger.KubeManagerLogger(args, http_server_port=-1)
if args.nested_mode == '0':
# Initialize AMQP handler then close it to be sure remain queue of a
# precedent run is cleaned
rabbitmq_cfg = kube_args.rabbitmq_args(args)
try:
vnc_amqp = VncAmqpHandle(
km_logger._sandesh,
km_logger,
config_db.DBBaseKM,
reaction_map.REACTION_MAP,
client_pfx + 'kube_manager',
rabbitmq_cfg,
args.host_ip
)
vnc_amqp.establish()
vnc_amqp.close()
except Exception: # FIXME: Except clause is too broad
pass
finally:
km_logger.debug("Removed remained AMQP queue")
# Ensure zookeeper is up and running before starting kube-manager
_zookeeper_client = ZookeeperClient(client_pfx + "kube-manager",
args.zk_server_ip, args.host_ip)
km_logger.notice("Waiting to be elected as master...")
_zookeeper_client.master_election(
zk_path_pfx + "kube-manager",
os.getpid(),
run_kube_manager,
km_logger,
args,
kube_api_skip,
event_queue,
vnc_kubernetes_config_dict)
else: # nested mode, skip zookeeper mastership check
run_kube_manager(km_logger, args, kube_api_skip, event_queue,
vnc_kubernetes_config_dict)
if __name__ == '__main__':
main()
```
#### File: mesos_manager/vnc/vnc_common.py
```python
from __future__ import absolute_import
from builtins import object
from .vnc_mesos_config import VncMesosConfig as vnc_mesos_config
from vnc_api.vnc_api import (KeyValuePair,KeyValuePairs)
from .config_db import DBBaseMM
class VncCommon(object):
"""VNC mesos common functionality.
"""
def __init__(self, mesos_obj_kind):
self.annotations = {}
self.annotations['kind'] = mesos_obj_kind
def get_annotations(self):
return self.annotations
```
#### File: dns/scripts/disassociate_virtual_dns.py
```python
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import object
import sys
import argparse
from six.moves import configparser
from provision_dns import DnsProvisioner
from requests.exceptions import ConnectionError
class DisassociateVirtualDns(object):
def __init__(self, args_str = None):
self._args = None
if not args_str:
args_str = ' '.join(sys.argv[1:])
self._parse_args(args_str)
try:
dp_obj = DnsProvisioner(self._args.admin_user, self._args.admin_password,
self._args.admin_tenant_name,
self._args.api_server_ip, self._args.api_server_port)
except ConnectionError:
print('Connection to API server failed ')
return
dp_obj.disassociate_vdns_from_ipam(self._args.ipam_fqname, self._args.vdns_fqname)
#end __init__
def _parse_args(self, args_str):
'''
Eg. python disassociate_virtual_dns.py --ipam_fqname default-domain:demo:ipam1
--vdns_fqname default-domain:vdns2
'''
# Source any specified config/ini file
# Turn off help, so we print all options in response to -h
conf_parser = argparse.ArgumentParser(add_help = False)
args, remaining_argv = conf_parser.parse_known_args(args_str.split())
defaults = {
'api_server_ip' : '127.0.0.1',
'api_server_port' : '8082',
'admin_user': None,
'admin_password': None,
'admin_tenant_name': None
}
        # Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser],
# print script description with -h/--help
description=__doc__,
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.set_defaults(**defaults)
parser.add_argument("--ipam_fqname", help = "Fully qualified IPAM Name")
parser.add_argument("--vdns_fqname", help = "Fully qualified Virtual DNS Name")
parser.add_argument("--api_server_ip", help = "IP address of api server")
parser.add_argument("--api_server_port", help = "Port of api server")
parser.add_argument("--admin_user", help = "Name of keystone admin user")
parser.add_argument("--admin_password", help = "Password of <PASSWORD>")
parser.add_argument("--admin_tenant_name", help = "Tenamt name for keystone admin user")
self._args = parser.parse_args(remaining_argv)
#end _parse_args
# end class DisassociateVirtualDns
def main(args_str = None):
DisassociateVirtualDns(args_str)
#end main
if __name__ == "__main__":
main()
```
#### File: src/nodemgr/main.py
```python
doc = """\
Node manager listens to process state change events and
other flag value change events to provide advanced service
management functionality.
"""
from gevent import monkey
import argparse
from six.moves.configparser import SafeConfigParser, NoOptionError
import gevent
import os
import socket
import sys
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
from pysandesh.sandesh_base import Sandesh, SandeshConfig
from nodemgr.analytics_nodemgr.event_manager import AnalyticsEventManager
from nodemgr.analytics_alarm_nodemgr.event_manager import AnalyticsAlarmEventManager
from nodemgr.analytics_snmp_nodemgr.event_manager import AnalyticsSNMPEventManager
from nodemgr.config_nodemgr.event_manager import ConfigEventManager
from nodemgr.control_nodemgr.event_manager import ControlEventManager
from nodemgr.analytics_database_nodemgr.event_manager import AnalyticsDatabaseEventManager
from nodemgr.config_database_nodemgr.event_manager import ConfigDatabaseEventManager
from nodemgr.vrouter_nodemgr.event_manager import VrouterEventManager
monkey.patch_all()
node_properties = {
'contrail-analytics': {
'config_file': '/etc/contrail/contrail-analytics-nodemgr.conf',
'event_manager': AnalyticsEventManager,
'unit_names': [
'contrail-collector',
'contrail-analytics-api',
'contrail-analytics-nodemgr'
],
},
'contrail-analytics-snmp': {
'config_file': '/etc/contrail/contrail-analytics-snmp-nodemgr.conf',
'event_manager': AnalyticsSNMPEventManager,
'unit_names': [
'contrail-snmp-collector',
'contrail-topology',
'contrail-analytics-snmp-nodemgr',
],
},
'contrail-analytics-alarm': {
'config_file': '/etc/contrail/contrail-analytics-alarm-nodemgr.conf',
'event_manager': AnalyticsAlarmEventManager,
'unit_names': [
'contrail-alarm-gen',
'kafka',
'contrail-analytics-alarm-nodemgr',
],
},
'contrail-config': {
'config_file': '/etc/contrail/contrail-config-nodemgr.conf',
'event_manager': ConfigEventManager,
'unit_names': [
'contrail-api',
'contrail-schema',
'contrail-svc-monitor',
'contrail-device-manager',
'contrail-config-nodemgr'
],
},
'contrail-config-database': {
'config_file': '/etc/contrail/contrail-config-database-nodemgr.conf',
'event_manager': ConfigDatabaseEventManager,
'unit_names': [
'cassandra',
'zookeeper',
'contrail-config-database-nodemgr'
],
},
'contrail-control': {
'config_file': '/etc/contrail/contrail-control-nodemgr.conf',
'event_manager': ControlEventManager,
'unit_names': [
'contrail-control',
'contrail-dns',
'contrail-named',
'contrail-control-nodemgr'
],
},
'contrail-vrouter': {
'config_file': '/etc/contrail/contrail-vrouter-nodemgr.conf',
'event_manager': VrouterEventManager,
'unit_names': [
'contrail-vrouter-agent',
'contrail-vrouter-nodemgr'
],
},
'contrail-database': {
'config_file': '/etc/contrail/contrail-database-nodemgr.conf',
'event_manager': AnalyticsDatabaseEventManager,
'unit_names': [
'cassandra',
'contrail-query-engine',
'contrail-database-nodemgr'
],
},
}
def print_usage_and_exit():
print(doc)
sys.exit(255)
def main(args_str=' '.join(sys.argv[1:])):
# Parse Arguments
node_parser = argparse.ArgumentParser(add_help=False)
node_parser.add_argument("--nodetype",
default='contrail-analytics',
help='Type of node which nodemgr is managing')
try:
args, remaining_argv = node_parser.parse_known_args(args_str.split())
except Exception:
print_usage_and_exit()
default = {'rules': '',
'collectors': [],
'db_port': '9042',
'db_jmx_port': '7199',
'db_user': None,
'db_password': None,
'db_use_ssl': False,
'minimum_diskgb': 256,
'corefile_path': '/var/crashes',
'cassandra_repair_interval': 24,
'cassandra_repair_logdir': '/var/log/contrail/',
'log_local': False,
'log_level': SandeshLevel.SYS_DEBUG,
'log_category': '',
'log_file': Sandesh._DEFAULT_LOG_FILE,
'use_syslog': False,
'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY,
'hostname': None
}
try:
default['hostip'] = socket.gethostbyname(socket.getfqdn())
except Exception:
pass
default.update(SandeshConfig.get_default_options(['DEFAULTS']))
sandesh_opts = SandeshConfig.get_default_options()
node_type = args.nodetype
if node_type not in node_properties:
sys.stderr.write("Node type '" + str(node_type) + "' is incorrect\n")
sys.exit(1)
config_file_path = ""
config_file_path += node_properties[node_type]['config_file']
    if not os.path.exists(config_file_path):
sys.stderr.write("config file '" + config_file_path + "' is not present\n")
sys.exit(1)
config = SafeConfigParser()
config.read([config_file_path])
if 'DEFAULTS' in config.sections():
default.update(dict(config.items('DEFAULTS')))
if 'COLLECTOR' in config.sections():
try:
collector = config.get('COLLECTOR', 'server_list')
default['collectors'] = collector.split()
except NoOptionError:
pass
SandeshConfig.update_options(sandesh_opts, config)
parser = argparse.ArgumentParser(
parents=[node_parser],
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
default.update(sandesh_opts)
parser.set_defaults(**default)
parser.add_argument("--rules",
help='Rules file to use for processing events')
parser.add_argument("--collectors",
nargs='+',
help='Collector addresses in format'
+ 'ip1:port1 ip2:port2')
parser.add_argument("--log_local", action="store_true",
help="Enable local logging of sandesh messages")
parser.add_argument(
"--log_level",
help="Severity level for local logging of sandesh messages")
parser.add_argument(
"--log_category",
help="Category filter for local logging of sandesh messages")
parser.add_argument("--log_file",
help="Filename for the logs to be written to")
parser.add_argument("--use_syslog", action="store_true",
help="Use syslog for logging")
parser.add_argument("--syslog_facility",
help="Syslog facility to receive log lines")
parser.add_argument("--corefile_path",
help="Location where coredump files are stored")
parser.add_argument("--hostname",
help="Custom Hostname")
SandeshConfig.add_parser_arguments(parser, add_dscp=True)
if (node_type == 'contrail-database'
or node_type == 'contrail-config-database'):
parser.add_argument("--minimum_diskGB",
type=int,
dest='minimum_diskgb',
help="Minimum disk space in GB's")
parser.add_argument("--hostip",
help="IP address of host")
parser.add_argument("--db_port",
help="Cassandra DB cql port")
parser.add_argument("--db_jmx_port",
help="Cassandra DB jmx port")
parser.add_argument("--db_user",
help="Cassandra DB cql username")
parser.add_argument("--db_password",
help="Cassandra DB cql password")
parser.add_argument("--db_use_ssl",
help="Cassandra DB behind SSL or not")
parser.add_argument("--cassandra_repair_interval", type=int,
help="Time in hours to periodically run "
"nodetool repair for cassandra maintenance")
parser.add_argument("--cassandra_repair_logdir",
help="Directory for storing repair logs")
try:
_args = parser.parse_args(remaining_argv)
except Exception:
print_usage_and_exit()
_args.config_file_path = config_file_path
_args.db_use_ssl = (str(_args.db_use_ssl).lower() == 'true')
# done parsing arguments
# TODO: restore rule_file logic somehow if needed for microservices
# rule_file = _args.rules
unit_names = node_properties[node_type]['unit_names']
event_manager = node_properties[node_type]['event_manager'](_args, unit_names)
event_manager.send_init_data()
gevent.joinall([
gevent.spawn(event_manager.runforever),
gevent.spawn(
event_manager.run_periodically(
event_manager.do_periodic_events, 60))
])
if __name__ == '__main__':
main()
```
#### File: opencontrail-vrouter-netns/opencontrail_vrouter_netns/vrouter_control.py
```python
from contrail_vrouter_api.vrouter_api import ContrailVRouterApi
def interface_register(vm, vmi, iface_name, project=None, vrouter_api=None):
api = vrouter_api or ContrailVRouterApi()
mac = vmi.virtual_machine_interface_mac_addresses.mac_address[0]
if project:
proj_id = project.uuid
else:
proj_id = None
api.add_port(vm.uuid, vmi.uuid, iface_name, mac, display_name=vm.name,
vm_project_id=proj_id)
def interface_unregister(vmi_uuid):
api = ContrailVRouterApi()
api.delete_port(vmi_uuid)
```
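For reference, a hedged sketch of calling these helpers directly; the UUIDs, MAC and interface name are placeholders, and a mock API object is injected so the example does not need a running vrouter agent.
```python
from types import SimpleNamespace
from unittest import mock

# Stand-in objects carrying only the attributes interface_register reads.
vm = SimpleNamespace(uuid='11111111-2222-3333-4444-555555555555',
                     name='instance-1')
vmi = SimpleNamespace(
    uuid='aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',
    virtual_machine_interface_mac_addresses=SimpleNamespace(
        mac_address=['02:00:00:00:00:01']))

# Inject a mock API so the sketch runs without contacting a vrouter agent.
api = mock.MagicMock()
interface_register(vm, vmi, 'tap-xyz', vrouter_api=api)
api.add_port.assert_called_once()
```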
#### File: provisioning/contrail_vrouter_provisioning/common.py
```python
from __future__ import print_function
from __future__ import division
from builtins import str
from builtins import range
from past.utils import old_div
import os
import socket
import netaddr
import logging
import netifaces
import tempfile
from contrail_vrouter_provisioning import local
from contrail_vrouter_provisioning.base import ContrailSetup
from contrail_vrouter_provisioning.network import ComputeNetworkSetup
log = logging.getLogger('contrail_vrouter_provisioning.common')
def insert_line_to_file(line, file_name, pattern=None):
if pattern:
local('sed -i \'/%s/d\' %s' % (pattern, file_name), warn_only=True)
local('printf "%s\n" >> %s' % (line, file_name))
class CommonComputeSetup(ContrailSetup, ComputeNetworkSetup):
def __init__(self, args):
super(CommonComputeSetup, self).__init__()
self._args = args
# Using keystone admin password for nova/neutron if not supplied
if not self._args.neutron_password:
self._args.neutron_password = self._args.keystone_admin_password
self.multi_net = False
if self._args.non_mgmt_ip:
self.multi_net = True
self.vhost_ip = self._args.non_mgmt_ip
else:
self.vhost_ip = self._args.self_ip
self.dev = None # Will be physical device
if self._args.physical_interface:
# During re-provision/upgrade vhost0 will be present
# so vhost0 should be treated as dev,
# which is used to get netmask/gateway
if 'vhost0' in netifaces.interfaces():
self.dev = 'vhost0'
            # During initial provision actual interface should be treated as dev
# which is used to get netmask/gateway
elif self._args.physical_interface in netifaces.interfaces():
self.dev = self._args.physical_interface
else:
                raise KeyError('Interface %s not present' %
self._args.physical_interface)
else:
# Get the physical device and provision status
# if reprov is False, it means fresh install
# True, it means reprovision
(self.dev, self.reprov) = self.get_device_info(self.vhost_ip)
def fixup_config_files(self):
self.add_dev_tun_in_cgroup_device_acl()
self.fixup_contrail_vrouter_agent()
self.add_qos_config()
self.fixup_contrail_vrouter_nodemgr()
self.fixup_contrail_lbaas()
def setup_lbaas_prereq(self):
if self.pdist in ['centos', 'redhat']:
local('sudo groupadd -f nogroup')
cmd = "sudo sed -i s/'Defaults requiretty'/'#Defaults "
cmd += "requiretty'/g /etc/sudoers"
local(cmd)
def add_dev_tun_in_cgroup_device_acl(self):
# add /dev/net/tun in cgroup_device_acl needed
# for type=ethernet interfaces
fl = "/etc/libvirt/qemu.conf"
ret = local("sudo grep -q '^cgroup_device_acl' %s" % fl,
warn_only=True)
if ret.failed:
if self.pdist in ['centos', 'redhat']:
local('sudo echo "clear_emulator_capabilities = 1" >> %s' % fl,
warn_only=True)
local('sudo echo \'user = "root"\' >> %s' % fl, warn_only=True)
local('sudo echo \'group = "root"\' >> %s' % fl,
warn_only=True)
cmds = ['echo \'cgroup_device_acl = [\' >> %s' % fl,
'echo \' "/dev/null", "/dev/full", "/dev/zero",\''
+ ' >> %s' % fl,
'echo \' "/dev/random", "/dev/urandom",\''
+ ' >> %s' % fl,
'echo \' "/dev/ptmx", "/dev/kvm", "/dev/kqemu",\''
+ ' >> %s' % fl,
'echo \' "/dev/rtc", "/dev/hpet", "/dev/net/tun",\''
+ ' >> %s' % fl,
'echo \']\' >> %s' % fl]
for cmd in cmds:
local('sudo ' + cmd, warn_only=True)
self._fixed_qemu_conf = True
# add "alias bridge off" in /etc/modprobe.conf for Centos
if self.pdist in ['centos', 'redhat']:
local('sudo echo "alias bridge off" > /etc/modprobe.conf',
warn_only=True)
def fixup_contrail_vrouter_nodemgr(self):
# Workaround https://bugs.launchpad.net/juniperopenstack/+bug/1681172
cfgfile = '/etc/contrail/contrail-vrouter-nodemgr.conf'
if not os.path.isfile(cfgfile):
local('sudo touch %s' % cfgfile)
collector_list = ' '.join('%s:%s' % (server, '8086')
for server in self._args.collectors)
self.set_config(cfgfile, 'COLLECTOR', 'server_list', collector_list)
self.set_config(cfgfile, 'SANDESH', 'sandesh_ssl_enable',
self._args.sandesh_ssl_enable)
self.set_config(cfgfile, 'SANDESH', 'introspect_ssl_enable',
self._args.introspect_ssl_enable)
def setup_hugepages_node(self, dpdk_args):
"""Setup hugepages on one or list of nodes
"""
# How many times DPDK inits hugepages (rte_eal_init())
# See function map_all_hugepages() in DPDK
DPDK_HUGEPAGES_INIT_TIMES = 2
# get required size of hugetlbfs
factor = int(dpdk_args['huge_pages'])
print(dpdk_args)
if factor == 0:
factor = 1
# set number of huge pages
memsize = local("sudo grep MemTotal /proc/meminfo |"
" tr -s ' ' | cut -d' ' -f 2 | tr -d '\n'",
capture=True, warn_only=True)
pagesize = local("sudo grep Hugepagesize /proc/meminfo"
" | tr -s ' 'i | cut -d' ' -f 2 | tr -d '\n'",
capture=True, warn_only=True)
reserved = local("sudo grep HugePages_Total /proc/meminfo"
" | tr -s ' 'i | cut -d' ' -f 2 | tr -d '\n'",
capture=True, warn_only=True)
if (reserved == ""):
reserved = "0"
requested = old_div((old_div((int(memsize) * factor), 100)), int(pagesize))
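        # Illustrative arithmetic (hypothetical numbers, not from this file):
        # with MemTotal = 16384000 kB, huge_pages = 10 (percent) and a
        # Hugepagesize of 2048 kB, requested = ((16384000 * 10) // 100) // 2048
        # = 800 huge pages to be reserved via vm.nr_hugepages.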
if (requested > int(reserved)):
pattern = "^vm.nr_hugepages ="
line = "vm.nr_hugepages = %d" % requested
insert_line_to_file(pattern=pattern, line=line,
file_name='/etc/sysctl.conf')
current_max_map_count = local("sudo sysctl -n "
"vm.max_map_count")
if current_max_map_count == "":
current_max_map_count = 0
current_huge_pages = max(int(requested), int(reserved))
requested_max_map_count = (DPDK_HUGEPAGES_INIT_TIMES
* int(current_huge_pages))
if int(requested_max_map_count) > int(current_max_map_count):
pattern = "^vm.max_map_count ="
line = "vm.max_map_count = %d" % requested_max_map_count
insert_line_to_file(pattern=pattern, line=line,
file_name='/etc/sysctl.conf')
local('sudo sysctl -p', warn_only=True)
mounted = local("sudo mount | grep hugetlbfs | cut -d' ' -f 3",
capture=True, warn_only=False)
if (mounted != ""):
print("hugepages already mounted on %s" % mounted)
else:
local("sudo mkdir -p /hugepages", warn_only=False)
pattern = "^hugetlbfs"
line = "hugetlbfs "\
"/hugepages hugetlbfs defaults 0 0"
insert_line_to_file(pattern=pattern, line=line,
file_name='/etc/fstab')
local("sudo mount -t hugetlbfs hugetlbfs /hugepages",
warn_only=False)
def search_and_replace(self, lvalue, rvalue, position, vrouter_file):
"""Search and replace strings in the format <key>=<filepath> <args>
- 'position' determines where the <rvalue> needs to be inserted
- If it is "Begin", the string becomes:
<key>=<rvalue> <filepath> <args>
- If it is "End", the string becomes:
<key>=<filepath> <args> <rvalue>
- If <rvalue> already exists in <args>, it deletes it first
        - If <rvalue> already precedes <filepath> it deletes it first
Input:
- lvalue = <key>
- rvalue = <arg> to be searched and replaced
- position = Begin/End
- vrouter_file = path of vrouter file
"""
if position == "Begin":
regexp_del = r"'s/\(^ *%s *=\)\(.*\)\( \/.*\)/\1\3/'" % (lvalue)
regexp_add = r"'s/\(^%s=\)\(.*\)/\1%s \2/'" % (lvalue, rvalue)
regexp = "sed -i.bak -e %s -e %s %s" \
% (regexp_del, regexp_add, vrouter_file)
local(regexp, warn_only=False)
elif position == "End":
regexp_del = r"'s/\(^ *%s *=.*\) \(%s [^ ]*\)\(.*\) *$/\1\3/'" \
% (lvalue, rvalue.split(' ')[0])
regexp_add = r"'s/\(^ *%s *=.*\)/\1 %s/'" % (lvalue, rvalue)
regexp = "sed -i.bak -e %s -e %s %s" \
% (regexp_del, regexp_add, vrouter_file)
local(regexp, warn_only=False)
def setup_coremask_node(self, dpdk_args):
"""Setup core mask on one or list of nodes
"""
try:
coremask = dpdk_args['coremask']
except KeyError:
raise RuntimeError("Core mask for host %s is not defined."
% (dpdk_args))
if not coremask:
raise RuntimeError("Core mask for host %s is not defined."
% dpdk_args)
# if a list of cpus is provided, -c flag must be passed to taskset
if (',' in coremask) or ('-' in coremask):
taskset_param = ' -c'
else:
taskset_param = ''
# supported coremask format: hex: (0x3f); list: (0,3-5), (0,1,2,3,4,5)
# try taskset on a dummy command
if local('sudo taskset%s %s true' % (taskset_param, coremask),
capture=True, warn_only=False).succeeded:
self.search_and_replace(self.command_key, '\/usr\/bin\/taskset ' + coremask,
"Begin", self.vrouter_file)
else:
raise RuntimeError("Error: Core mask %s for host %s is invalid."
% (coremask, dpdk_args))
def setup_vm_coremask_node(self, q_coremask, dpdk_args):
"""
Setup CPU affinity for QEMU processes based on
vRouter/DPDK core affinity on a given node.
Supported core mask format:
vRouter/DPDK: hex (0x3f), list (0,1,2,3,4,5), range (0,3-5)
QEMU/nova.conf: list (0,1,2,3,4,5), range (0,3-5),
exclusion (0-5,^4)
QEMU needs to be pinned to different cores than vRouter. Because of
different core mask formats, it is not possible to just set QEMU to
<not vRouter cores>. This function takes vRouter core mask from
testbed, changes it to list of cores and removes them from list
of all possible cores (generated as a list from 0 to N-1, where
N = number of cores). This is changed back to string and passed to
openstack-config.
"""
try:
vr_coremask = dpdk_args['coremask']
except KeyError:
raise RuntimeError("vRouter core mask for "
"host %s is not defined." % (dpdk_args))
if not vr_coremask:
raise RuntimeError("vRouter core mask for host "
"%s is not defined." % dpdk_args)
if not q_coremask:
try:
cpu_count = int(local(
'sudo grep -c processor /proc/cpuinfo',
capture=True))
except ValueError:
log.info("Cannot count CPUs on host %s. VM core "
"mask cannot be computed." % (dpdk_args))
raise
if not cpu_count or cpu_count == -1:
raise ValueError("Cannot count CPUs on host %s. "
"VM core mask cannot be computed."
% (dpdk_args))
all_cores = [x for x in range(cpu_count)]
if 'x' in vr_coremask: # String containing hexadecimal mask.
vr_coremask = int(vr_coremask, 16)
"""
Convert hexmask to a string with numbers of cores to be
used, eg.
0x19 -> 11001 -> 10011 -> [(0,1), (1,0), (2,0),
(3,1), (4,1)] -> '0,3,4'
"""
vr_coremask = [
x[0] for x in enumerate(reversed(bin(vr_coremask)[2:]))
if x[1] == '1']
# Range or list of cores.
elif (',' in vr_coremask) or ('-' in vr_coremask):
# Get list of core numbers and/or core ranges.
vr_coremask = vr_coremask.split(',')
# Expand ranges like 0-4 to 0, 1, 2, 3, 4.
vr_coremask_expanded = []
for rng in vr_coremask:
if '-' in rng: # If it's a range - expand it.
a, b = rng.split('-')
vr_coremask_expanded += list(range(int(a), int(b) + 1))
else: # If not, just add to the list.
vr_coremask_expanded.append(int(rng))
vr_coremask = vr_coremask_expanded
else: # A single core.
try:
single_core = int(vr_coremask)
except ValueError:
log.error("vRouter core mask %s for host %s is invalid."
% (vr_coremask, dpdk_args))
raise
vr_coremask = []
vr_coremask.append(single_core)
# From list of all cores remove list of vRouter cores
# and stringify.
diff = set(all_cores) - set(vr_coremask)
q_coremask = ','.join(str(x) for x in diff)
# If we have no spare cores for VMs
if not q_coremask:
raise RuntimeError("Setting QEMU core mask for host %s "
"failed - empty string."
% (dpdk_args))
# This can fail eg. because openstack-config is not present.
# There's no sanity check in openstack-config.
if local("sudo crudini --set /etc/nova/nova.conf "
"DEFAULT vcpu_pin_set %s"
% q_coremask, capture=True, warn_only=False).succeeded:
log.info("QEMU coremask on host %s set to %s."
% (dpdk_args, q_coremask))
else:
raise RuntimeError("Error: setting QEMU core mask %s for "
"host %s failed." % (vr_coremask, dpdk_args))
def setup_uio_driver(self, dpdk_args):
"""Setup UIO driver to use for DPDK
(igb_uio, uio_pci_generic or vfio-pci)
"""
vrouter_agent_file = '/etc/contrail/contrail-vrouter-agent.conf'
if 'uio_driver' in dpdk_args:
uio_driver = dpdk_args['uio_driver']
if uio_driver == "vfio-pci":
self.setup_sriov_grub(uio_driver)
else:
print("No UIO driver defined for host, skipping...")
return
if local('sudo modprobe %s'
% (uio_driver), capture=True, warn_only=False).succeeded:
log.info("Setting UIO driver to %s for host..." % uio_driver)
local('sudo contrail-config --set %s DEFAULT '\
'physical_uio_driver %s' % (vrouter_agent_file, uio_driver))
else:
raise RuntimeError("Error: invalid UIO driver %s for host"
% (uio_driver))
def dpdk_increase_vrouter_limit(self,
vrouter_module_params_args):
"""Increase the maximum number of mpls label
and nexthop on tsn node"""
vr_params = {
'flow_entries': '524288',
'oflow_entries': '3000',
'mpls_labels': '5120',
'nexthops': '65536',
'vrfs': '5120',
'macs': {'bridge_entries': '262144'},
}
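        # The loop below turns each entry into a "--vr_<name> <value>" option
        # appended to the vrouter command line, e.g. flow_entries ->
        # "--vr_flow_entries 524288" and the nested 'macs' entry ->
        # "--vr_bridge_entries 262144" (values may be overridden via
        # vrouter_module_params_args).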
for param in vr_params:
if isinstance(vr_params[param], dict):
for p in vr_params[param]:
param_name = p
param_val = vrouter_module_params_args.setdefault(
param, vr_params[param][p])
else:
param_name = param
param_val = vrouter_module_params_args.setdefault(
param, vr_params[param])
param = "--vr_" + param_name + " " + param_val
self.search_and_replace(self.command_key, param,
"End", self.vrouter_file)
def fixup_contrail_vrouter_agent(self):
compute_ip = self._args.self_ip
non_mgmt_gw = self._args.non_mgmt_gw
vgw_public_subnet = self._args.vgw_public_subnet
vgw_public_vn_name = self._args.vgw_public_vn_name
vgw_intf_list = self._args.vgw_intf_list
vgw_gateway_routes = self._args.vgw_gateway_routes
compute_as_gateway = self._args.compute_as_gateway
flow_thread_count = self._args.flow_thread_count
self.mac = None
# Fresh install
if self.dev and not self.reprov:
self.mac = netifaces.ifaddresses(self.dev)[netifaces.AF_LINK][0][
'addr']
if not self.mac:
raise KeyError('Interface %s Mac %s' % (str(self.dev),
str(self.mac)))
self.netmask = netifaces.ifaddresses(self.dev)[
netifaces.AF_INET][0]['netmask']
if self.multi_net:
self.gateway = non_mgmt_gw
else:
self.gateway = self.find_gateway(self.dev)
self.cidr = netaddr.IPNetwork('%s/%s' % (self.vhost_ip,
self.netmask))
elif self.dev:
# Reprovision
cfg_file = "/etc/contrail/contrail-vrouter-agent.conf"
section = "DEFAULT"
key = "physical_interface_mac"
self.mac = self.get_config(cfg_file, section, key).strip()
section = "VIRTUAL-HOST-INTERFACE"
key = "ip"
self.cidr = netaddr.IPNetwork(self.get_config(cfg_file, section, key).strip())
section = "VIRTUAL-HOST-INTERFACE"
key = "gateway"
self.gateway = self.get_config(cfg_file, section, key).strip()
self.netmask = "255.255.255.0"
if self.dev:
if vgw_public_subnet:
os.chdir(self._temp_dir_name)
# Manipulating the string to use in agent_param
vgw_public_subnet_str = []
for i in vgw_public_subnet[1:-1].split(";"):
j = i[1:-1].split(",")
j = ";".join(j)
vgw_public_subnet_str.append(j)
vgw_public_subnet_str = str(tuple(
vgw_public_subnet_str)).replace("'", "")
vgw_public_subnet_str = vgw_public_subnet_str.replace(" ", "")
vgw_intf_list_str = str(tuple(
vgw_intf_list[1:-1].split(";"))).replace(" ", "")
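            # Illustrative input (assumed format, not from this file): a
            # vgw_public_subnet value of "[[1.1.1.0/24,2.2.2.0/24];[3.3.3.0/24]]"
            # is rewritten to "(1.1.1.0/24;2.2.2.0/24,3.3.3.0/24)" before being
            # substituted into the agent_param template.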
cmds = ["sudo sed 's@dev=.*@dev=%s@g;" % self.dev,
"s@vgw_subnet_ip=.*@vgw_subnet_ip=%s@g;" %
vgw_public_subnet_str,
"s@vgw_intf=.*@vgw_intf=%s@g'" % vgw_intf_list_str,
" /etc/contrail/agent_param.tmpl > agent_param.new"]
local(' '.join(cmds))
local("sudo mv agent_param.new /etc/contrail/agent_param")
else:
os.chdir(self._temp_dir_name)
cmds = ["sudo sed 's/dev=.*/dev=%s/g' " % self.dev,
"/etc/contrail/agent_param.tmpl > agent_param.new"]
local(''.join(cmds))
local("sudo mv agent_param.new /etc/contrail/agent_param")
vmware_dev = None
gateway_mode = None
if (self._args.mode == 'vcenter' or
self._args.hypervisor == 'vmware'):
vmware_dev = self.get_secondary_device(self.dev)
if compute_as_gateway == 'server':
gateway_mode = "server"
# Set template options for DPDK mode
pci_dev = ""
platform_mode = "default"
if self._args.dpdk:
dpdk_args = dict(
u.split("=") for u in self._args.dpdk.split(","))
log.info(dpdk_args)
platform_mode = "dpdk"
supervisor_vrouter_file = ('/etc/contrail/' +
'supervisord_vrouter_files/' +
'contrail-vrouter-dpdk.ini')
systemd_vrouter_file = ('/lib/systemd/system/' +
'contrail-vrouter-dpdk.service')
if os.path.isfile(supervisor_vrouter_file):
self.vrouter_file = supervisor_vrouter_file
self.command_key = "command"
elif os.path.isfile(systemd_vrouter_file):
self.vrouter_file = systemd_vrouter_file
self.command_key = "ExecStart"
else:
raise RuntimeError("Vrouter Supervisor/Systemd not found.")
self.setup_hugepages_node(dpdk_args)
self.setup_coremask_node(dpdk_args)
self.setup_vm_coremask_node(False, dpdk_args)
self.setup_uio_driver(dpdk_args)
if self._args.dpdk and not self.reprov:
iface = self.dev
if self.is_interface_vlan(self.dev):
iface = self.get_physical_interface_of_vlan(self.dev)
local("ls /opt/contrail/bin/dpdk_nic_bind.py", warn_only=False)
cmd = "sudo /opt/contrail/bin/dpdk_nic_bind.py --status | "
cmd += "sudo grep -w %s | cut -d' ' -f 1" % iface.strip()
pci_dev = local(cmd, capture=True, warn_only=False)
# If there is no PCI address, the device is a bond.
# Bond interface in DPDK has zero PCI address.
if not pci_dev:
pci_dev = "0000:00:00.0"
elif self._args.dpdk and self.reprov:
cfg_file = "/etc/contrail/contrail-vrouter-agent.conf"
section = "DEFAULT"
key = "physical_interface_address"
pci_dev = self.get_config(cfg_file, section, key).strip()
if self.pdist == 'Ubuntu':
# Fix /dev/vhost-net permissions. It is required for
# multiqueue operation
local('sudo echo \'KERNEL=="vhost-net", '
'GROUP="kvm", MODE="0660"\' > '
'/etc/udev/rules.d/vhost-net.rules', warn_only=True)
# The vhost-net module has to be loaded at startup to
# ensure the correct permissions while the qemu is being
# launched
pattern = "vhost-net"
line = "vhost-net"
insert_line_to_file(pattern=pattern, line=line,
file_name='/etc/modules')
if not self._args.dpdk:
self.setup_vrouter_kmod_hugepages()
vrouter_kmod_1G_page = ''
vrouter_kmod_2M_page = ''
if self._args.vrouter_1G_hugepages != '0':
if (os.path.isfile('/mnt/hugepage_1G/vrouter_1G_mem_0')):
vrouter_kmod_1G_page = '/mnt/hugepage_1G/vrouter_1G_mem_0'
if (os.path.isfile('/mnt/hugepage_1G/vrouter_1G_mem_1')):
vrouter_kmod_1G_page = vrouter_kmod_1G_page + ' /mnt/hugepage_1G/vrouter_1G_mem_1'
if self._args.vrouter_2M_hugepages != '0':
if (os.path.isfile('/mnt/hugepage_2M/vrouter_2M_mem_0')):
vrouter_kmod_2M_page = '/mnt/hugepage_2M/vrouter_2M_mem_0'
if (os.path.isfile('/mnt/hugepage_2M/vrouter_2M_mem_1')):
vrouter_kmod_2M_page = vrouter_kmod_2M_page + ' /mnt/hugepage_2M/vrouter_2M_mem_1'
control_servers = ' '.join('%s:%s' % (server, '5269')
for server in self._args.control_nodes)
dns_servers = ' '.join('%s:%s' % (server, '53')
for server in self._args.control_nodes)
collector_servers = ' '.join('%s:%s' % (server, '8086')
for server in self._args.collectors)
if self._args.tsn_evpn_mode and self._args.tsn_servers:
tsn_servers = ' '.join(self._args.tsn_servers)
else:
tsn_servers = ''
configs = {
'DEFAULT': {
'platform': platform_mode,
'gateway_mode': gateway_mode or '',
'physical_interface_address': pci_dev,
'physical_interface_mac': self.mac,
'collectors': collector_servers,
'xmpp_auth_enable': self._args.xmpp_auth_enable,
'xmpp_dns_auth_enable': self._args.xmpp_dns_auth_enable,
'tsn_servers': tsn_servers,
'agent_mode': ''},
'NETWORKS': {
'control_network_ip': compute_ip},
'VIRTUAL-HOST-INTERFACE': {
'name': 'vhost0',
'ip': str(self.cidr),
'gateway': self.gateway,
'physical_interface': self.dev},
'HYPERVISOR': {
'type': ('kvm' if self._args.hypervisor == 'libvirt'
else self._args.hypervisor),
'vmware_mode': self._args.mode or '',
'vmware_physical_interface': vmware_dev or ''},
'CONTROL-NODE': {
'servers': control_servers},
'DNS': {
'servers': dns_servers},
'SANDESH': {
'sandesh_ssl_enable': self._args.sandesh_ssl_enable,
'introspect_ssl_enable':
self._args.introspect_ssl_enable},
'FLOWS': {
'thread_count': flow_thread_count},
'METADATA': {
'metadata_proxy_secret': self._args.metadata_secret,
'metadata_use_ssl': self._args.metadata_use_ssl,
'metadata_client_cert': ('/etc/contrail/ssl/certs/server.pem'
if self._args.metadata_use_ssl else ''),
'metdata_client_cert_type': ('PEM' if self._args.metadata_use_ssl
else ''),
'metadata_client_key': ('/etc/contrail/ssl/private/server-privkey.pem'
if self._args.metadata_use_ssl else '')},
'RESTART': {
'huge_page_2M': vrouter_kmod_2M_page,
'huge_page_1G': vrouter_kmod_1G_page,
'backup_enable': (True
if self._args.resource_backup_restore else False),
'backup_dir': ('/var/lib/contrail/backup'),
'backup_file_count': (self._args.backup_file_count),
'backup_idle_timeout': (self._args.backup_idle_timeout),
'restore_enable': (True
if self._args.resource_backup_restore else False),
'restore_audit_timeout': (self._args.restore_audit_timeout)},
}
# VGW configs
if vgw_public_vn_name and vgw_public_subnet:
vgw_public_vn_name = vgw_public_vn_name[1:-1].split(';')
vgw_public_subnet = vgw_public_subnet[1:-1].split(';')
vgw_intf_list = vgw_intf_list[1:-1].split(';')
if vgw_gateway_routes is not None:
vgw_gateway_routes = vgw_gateway_routes[1:-1].split(';')
for i in range(len(vgw_public_vn_name)):
ip_blocks = ''
if vgw_public_subnet[i].find("[") != -1:
for ele in vgw_public_subnet[i][1:-1].split(","):
ip_blocks += ele[1:-1] + " "
else:
ip_blocks += vgw_public_subnet[i]
routes = ''
if (vgw_gateway_routes is not None and
i < len(vgw_gateway_routes)):
if vgw_gateway_routes[i] != '[]':
if vgw_gateway_routes[i].find("[") != -1:
for ele in vgw_gateway_routes[i][1:-1].split(
","):
routes += ele[1:-1] + " "
else:
routes += vgw_gateway_routes[i]
configs['GATEWAY-%s' % i] = {'interface': vgw_intf_list[i],
'ip_blocks': ip_blocks,
'routes': routes,
'routing_instance': vgw_public_vn_name[i]}
for section, key_vals in list(configs.items()):
for key, val in list(key_vals.items()):
self.set_config(
'/etc/contrail/contrail-vrouter-agent.conf',
section, key, val)
if self.running_in_container:
self.config_vhost0_interface_in_container()
else:
self.fixup_vhost0_interface_configs()
def config_vhost0_interface_in_container(self):
if self.reprov:
log.info("vhost0 configuration already present")
return
# Insert vrouter and setup vrouter vifs
insert_cmd = "source /opt/contrail/bin/vrouter-functions.sh && "
insert_cmd += "insert_vrouter"
local(insert_cmd, executable='/bin/bash')
# Move ip address from vrouter physical device to vhost
config_vhost0_cmd = "ip address delete %s/%s dev %s && " % (
self.vhost_ip, self.cidr.prefixlen, self.dev)
config_vhost0_cmd += "ip address add %s/%s dev vhost0 && " % (
self.vhost_ip, self.cidr.prefixlen)
config_vhost0_cmd += "ip link set dev vhost0 up"
local(config_vhost0_cmd)
# Add default gateway to new device as link local if /32 IP Address
if self.cidr.prefixlen == 32:
local("ip route add unicast %s dev vhost0 scope link" %
self.gateway)
if not self.multi_net:
# Add default gateway to vhost
local("ip route add default via %s dev vhost0" % self.gateway)
def fixup_contrail_lbaas(self):
auth_url = self._args.keystone_auth_protocol + '://'
auth_url += self._args.keystone_ip
auth_url += ':' + self._args.keystone_auth_port
auth_url += '/' + 'v2.0'
configs = {
'BARBICAN': {
'admin_tenant_name': 'service',
'admin_user': 'neutron',
'admin_password': <PASSWORD>,
'auth_url': auth_url,
'region': 'RegionOne'}
}
# Workaround https://bugs.launchpad.net/juniperopenstack/+bug/1681172
cfgfile = '/etc/contrail/contrail-lbaas-auth.conf'
if not os.path.isfile(cfgfile):
local('sudo touch %s' % cfgfile)
for section, key_vals in list(configs.items()):
for key, val in list(key_vals.items()):
self.set_config(cfgfile, section, key, val)
def fixup_vhost0_interface_configs(self):
if self.reprov:
log.info("fixup_vhost0_interface_configs() not applicable")
return
if self.pdist in ['centos', 'fedora', 'redhat']:
# make ifcfg-vhost0
with open('%s/ifcfg-vhost0' % self._temp_dir_name, 'w') as f:
f.write('''#Contrail vhost0
DEVICE=vhost0
ONBOOT=yes
BOOTPROTO=none
IPV6INIT=no
USERCTL=yes
IPADDR=%s
NETMASK=%s
NM_CONTROLLED=no
#NETWORK MANAGER BUG WORKAROUND
SUBCHANNELS=1,2,3
''' % (self.vhost_ip, self.netmask))
# Don't set gateway and DNS on vhost0 if on non-mgmt network
if not self.multi_net:
if self.gateway:
f.write('GATEWAY=%s\n' % self.gateway)
dns_list = self.get_dns_servers(self.dev)
for i, dns in enumerate(dns_list):
f.write('DNS%d=%s\n' % (i + 1, dns))
domain_list = self.get_domain_search_list()
if domain_list:
f.write('DOMAIN="%s"\n' % domain_list)
prsv_cfg = []
mtu = self.get_if_mtu(self.dev)
if mtu:
dcfg = 'MTU=%s' % str(mtu)
f.write(dcfg + '\n')
prsv_cfg.append(dcfg)
f.flush()
if self.dev != 'vhost0':
src = "%s/ifcfg-vhost0" % self._temp_dir_name
dst = "/etc/sysconfig/network-scripts/ifcfg-vhost0"
local("sudo mv %s %s" % (src, dst), warn_only=True)
local("sudo sync", warn_only=True)
# make ifcfg-$dev
ifcfg = "/etc/sysconfig/network-scripts/ifcfg-%s" % self.dev
ifcfg_bkp = "/etc/sysconfig/network-scripts/orig.ifcfg-%s.rpmsave"\
% self.dev
if not os.path.isfile(ifcfg_bkp):
local("sudo cp %s %s" % (ifcfg, ifcfg_bkp), warn_only=True)
ifcfg_tmp = '%s/ifcfg-%s' % (self._temp_dir_name, self.dev)
self._rewrite_ifcfg_file(ifcfg_tmp, self.dev, prsv_cfg)
if self.multi_net:
self.migrate_routes(self.dev)
local("sudo mv %s /etc/contrail/" % ifcfg_tmp, warn_only=True)
if self.pdist not in ['Ubuntu']:
local("sudo chkconfig network on", warn_only=True)
local("sudo chkconfig supervisor-vrouter on",
warn_only=True)
# end self.pdist == centos | fedora | redhat
# setup lbaas prereqs
self.setup_lbaas_prereq()
if self.pdist in ['Ubuntu']:
self._rewrite_net_interfaces_file(
self.dev, self.mac, self.vhost_ip, self.netmask,
self.gateway, self._args.vmware,
self._args.vmware_vmpg_vswitch_mtu,
self._args.vmware_datanic_mtu)
# end self.pdist == ubuntu
def run_services(self):
if self.pdist not in ['Ubuntu']:
for svc in ['supervisor-vrouter']:
local('sudo chkconfig %s on' % svc)
if self.running_in_container:
for svc in ['contrail-vrouter-agent', 'contrail-vrouter-nodemgr']:
local('sudo service %s restart' % svc)
def add_vnc_config(self):
compute_ip = self._args.self_ip
compute_hostname = socket.gethostname()
use_ssl = False
if self._args.apiserver_auth_protocol == 'https':
use_ssl = True
prov_args = "--host_name %s --host_ip %s --api_server_ip %s "\
"--oper add --admin_user %s --admin_password %s "\
"--admin_tenant_name %s --openstack_ip %s "\
"--api_server_use_ssl %s" \
% (compute_hostname, compute_ip, self._args.cfgm_ip,
self._args.keystone_admin_user,
self._args.keystone_admin_password,
self._args.keystone_admin_tenant_name,
self._args.keystone_ip,
use_ssl)
if self._args.dpdk:
prov_args += " --dpdk_enabled"
cmd = "sudo python /opt/contrail/utils/provision_vrouter.py "
local(cmd + prov_args)
def add_qos_config(self):
qos_logical_queue = self._args.qos_logical_queue
qos_queue_id_list = self._args.qos_queue_id
default_hw_queue_qos = self._args.default_hw_queue_qos
qos_priority_tagging = self._args.qos_priority_tagging
priority_id_list = self._args.priority_id
priority_scheduling = self._args.priority_scheduling
priority_bandwidth = self._args.priority_bandwidth
agent_conf = "/etc/contrail/contrail-vrouter-agent.conf"
conf_file = "contrail-vrouter-agent.conf"
configs = {}
# Clean existing qos config
ltemp_dir = tempfile.mkdtemp()
local("sudo cp %s %s/" % (agent_conf, ltemp_dir))
local(
"sudo sed -i -e '/^\[QOS\]/d' -e '/^\[QUEUE-/d' -e '/^logical_queue/d' -e '/^default_hw_queue/d' -e '/^priority_tagging/d' %s/%s" %
(ltemp_dir, conf_file))
local(
"sudo sed -i -e '/^\[QOS-NIANTIC\]/d' -e '/^\[PG-/d' -e '/^scheduling/d' -e '/^bandwidth/d' %s/%s" %
(ltemp_dir, conf_file))
local("sudo cp %s/%s %s" % (ltemp_dir, conf_file, agent_conf))
local('sudo rm -rf %s' % (ltemp_dir))
# Set qos_enabled in agent_param to false
self.set_config(
'/etc/contrail/agent_param',
sec="''",
var='qos_enabled',
val='false')
# QOS configs
if qos_queue_id_list is not None:
self.set_config(
agent_conf,
'QOS',
'priority_tagging',
qos_priority_tagging)
num_sections = len(qos_logical_queue)
if(len(qos_logical_queue) == len(qos_queue_id_list) and
default_hw_queue_qos):
num_sections = num_sections - 1
for i in range(num_sections):
configs['QUEUE-%s' % qos_queue_id_list[i]] = {
'logical_queue':
'[%s]' % qos_logical_queue[i].replace(",", ", ")}
if (default_hw_queue_qos):
if(len(qos_logical_queue) == len(qos_queue_id_list)):
logical_queue = '[%s]' %\
qos_logical_queue[-1].replace(",", ", ")
else:
logical_queue = '[ ]'
configs['QUEUE-%s' % qos_queue_id_list[-1]] = {
'default_hw_queue': 'true',
'logical_queue': logical_queue}
for section, key_vals in list(configs.items()):
for key, val in list(key_vals.items()):
self.set_config(
agent_conf,
section, key, val)
if priority_id_list is not None:
local(
'sudo contrail-config --set /etc/contrail/contrail-vrouter-agent.conf QOS-NIANTIC')
for i in range(len(priority_id_list)):
configs['PG-%s' % priority_id_list[i]] = {
'scheduling': priority_scheduling[i],
'bandwidth': priority_bandwidth[i]}
for section, key_vals in list(configs.items()):
for key, val in list(key_vals.items()):
self.set_config(
agent_conf,
section, key, val)
if (qos_queue_id_list or priority_id_list):
# Set qos_enabled in agent_param
self.set_config(
'/etc/contrail/agent_param',
sec="''",
var='qos_enabled',
val='true')
# Run qosmap script on physical interface (on all members for bond
# interface)
physical_interface = local(
"sudo openstack-config --get /etc/contrail/contrail-vrouter-agent.conf VIRTUAL-HOST-INTERFACE physical_interface")
if os.path.isdir('/sys/class/net/%s/bonding' % physical_interface):
physical_interfaces_str = local(
"sudo cat /sys/class/net/%s/bonding/slaves | tr ' ' '\n' | sort | tr '\n' ' '" %
physical_interface)
else:
physical_interfaces_str = physical_interface
local(
"cd /opt/contrail/utils; python qosmap.py --interface_list %s " %
physical_interfaces_str)
def disable_nova_compute(self):
# Check if nova-compute is present in nova service list
# Disable nova-compute on TSN node
if local("nova service-list | grep nova-compute", warn_only=True).succeeded:
# Stop the service
local("sudo service nova-compute stop", warn_only=True)
if self.pdist in ['Ubuntu']:
local('sudo echo "manual" >> /etc/init/nova-compute.override')
else:
local('sudo chkconfig nova-compute off')
def add_tsn_vnc_config(self):
tsn_ip = self._args.self_ip
self.tsn_hostname = socket.gethostname()
prov_args = "--host_name %s --host_ip %s --api_server_ip %s --oper add "\
"--admin_user %s --admin_password %s --admin_tenant_name %s "\
"--openstack_ip %s --router_type tor-service-node --disable_vhost_vmi "\
% (self.tsn_hostname, tsn_ip, self._args.cfgm_ip,
self._args.keystone_admin_user,
self._args.keystone_admin_password,
self._args.keystone_admin_tenant_name, self._args.keystone_ip)
if self._args.apiserver_auth_protocol == 'https':
prov_args += " --api_server_use_ssl True"
local(
"python /opt/contrail/utils/provision_vrouter.py %s" %
(prov_args))
def start_tsn_service(self):
nova_conf_file = '/etc/contrail/contrail-vrouter-agent.conf'
mode = 'tsn'
if self._args.tsn_evpn_mode:
mode = 'tsn-no-forwarding'
local(
"openstack-config --set %s DEFAULT agent_mode %s" %
(nova_conf_file, mode))
def setup_tsn_node(self):
self.disable_nova_compute()
self.add_tsn_vnc_config()
self.start_tsn_service()
def increase_vrouter_limit(self):
"""Increase the maximum number of mpls label
and nexthop on tsn node"""
if self._args.vrouter_module_params:
vrouter_module_params = self._args.vrouter_module_params.rstrip(
',')
vrouter_module_params_args = dict(
u.split("=") for u in
vrouter_module_params.split(","))
if self._args.dpdk:
self.dpdk_increase_vrouter_limit(
vrouter_module_params_args)
else:
cmd = "options vrouter"
if 'mpls_labels' in list(vrouter_module_params_args.keys()):
cmd += " vr_mpls_labels=%s" % vrouter_module_params_args['mpls_labels']
if 'nexthops' in list(vrouter_module_params_args.keys()):
cmd += " vr_nexthops=%s" % vrouter_module_params_args['nexthops']
if 'vrfs' in list(vrouter_module_params_args.keys()):
cmd += " vr_vrfs=%s" % vrouter_module_params_args['vrfs']
if 'macs' in list(vrouter_module_params_args.keys()):
cmd += " vr_bridge_entries=%s" % vrouter_module_params_args['macs']
if 'flow_entries' in list(vrouter_module_params_args.keys()):
cmd += " vr_flow_entries=%s" % vrouter_module_params_args['flow_entries']
if 'oflow_entries' in list(vrouter_module_params_args.keys()):
cmd += " vr_oflow_entries=%s" % vrouter_module_params_args['oflow_entries']
if 'mac_oentries' in list(vrouter_module_params_args.keys()):
cmd += " vr_bridge_oentries=%s" % vrouter_module_params_args['mac_oentries']
if 'flow_hold_limit' in list(vrouter_module_params_args.keys()):
cmd += " vr_flow_hold_limit=%s" % vrouter_module_params_args['flow_hold_limit']
if 'max_interface_entries' in list(vrouter_module_params_args.keys()):
cmd += " vr_interfaces=%s" % vrouter_module_params_args['max_interface_entries']
if 'vrouter_dbg' in list(vrouter_module_params_args.keys()):
cmd += " vrouter_dbg=%s" % vrouter_module_params_args['vrouter_dbg']
if 'vr_memory_alloc_checks' in list(vrouter_module_params_args.keys()):
cmd += " vr_memory_alloc_checks=%s" % vrouter_module_params_args['vr_memory_alloc_checks']
local(
"echo %s > %s" %
(cmd, '/etc/modprobe.d/vrouter.conf'), warn_only=True)
def setup_vrouter_kmod_hugepages(self):
"""Setup 1G and 2M hugepages for vrouter"""
no_of_pages = 2
# Update vrouter kernel mode hugepage config
self.setup_vrouter_kmod_hugepage_grub()
# Delete vrouter kernel mode 1G hugepage config
if os.path.isfile('/etc/fstab'):
pattern = "hugepage_1G"
line = ""
insert_line_to_file(pattern=pattern, line=line,
file_name='/etc/fstab')
pattern = "vrouter_kmod_1G_hugepages"
line = "vrouter_kmod_1G_hugepages=0"
insert_line_to_file(pattern=pattern, line=line,
file_name='/etc/contrail/agent_param')
# Delete vrouter kernel mode 2M hugepage config
if os.path.isfile('/etc/fstab'):
pattern = "hugepage_2M"
line = ""
insert_line_to_file(pattern=pattern, line=line,
file_name='/etc/fstab')
pattern = "vrouter_kmod_2M_hugepages"
line = "vrouter_kmod_2M_hugepages=0"
insert_line_to_file(pattern=pattern, line=line,
file_name='/etc/contrail/agent_param')
# Configure vrouter kernel mode 1G hugepages
if self._args.vrouter_1G_hugepages != '0':
if int(self._args.vrouter_1G_hugepages) > 0 and int(self._args.vrouter_1G_hugepages) <= 2:
no_of_pages = int(self._args.vrouter_1G_hugepages)
mounted = local("sudo mount | grep hugepage_1G | cut -d' ' -f 3",
capture=True, warn_only=False)
if (mounted != ""):
print("hugepages already mounted on %s" % mounted)
else:
local("sudo mkdir -p /mnt/hugepage_1G", warn_only=False)
local("sudo mount -t hugetlbfs -o pagesize=1G none /mnt/hugepage_1G", warn_only=False)
if os.path.isdir('/mnt/hugepage_1G'):
for i in range(no_of_pages):
local("sudo touch /mnt/hugepage_1G/vrouter_1G_mem_%s " % i, warn_only=False)
pattern = "hugepage_1G"
line = "hugetlbfs "\
"/mnt/hugepage_1G hugetlbfs defaults,pagesize=1G 0 0"
insert_line_to_file(pattern=pattern, line=line,
file_name='/etc/fstab')
pattern = "vrouter_kmod_1G_hugepages"
line = "vrouter_kmod_1G_hugepages=%s" % no_of_pages
insert_line_to_file(pattern=pattern, line=line,
file_name='/etc/contrail/agent_param')
# Configure vrouter kernel mode 2M hugepages
if self._args.vrouter_2M_hugepages != '0' and self._args.vrouter_1G_hugepages != '0':
if int(self._args.vrouter_2M_hugepages) >= 0 and int(self._args.vrouter_2M_hugepages) <= 2:
no_of_pages = int(self._args.vrouter_2M_hugepages)
mounted = local("sudo mount | grep hugepage_2M | cut -d' ' -f 3",
capture=True, warn_only=False)
if (mounted != ""):
print("hugepages already mounted on %s" % mounted)
else:
local("sudo mkdir -p /mnt/hugepage_2M", warn_only=False)
local("sudo mount -t hugetlbfs -o pagesize=2M none /mnt/hugepage_2M", warn_only=False)
if os.path.isdir('/mnt/hugepage_2M'):
for i in range(no_of_pages):
local("sudo touch /mnt/hugepage_2M/vrouter_2M_mem_%s " % i, warn_only=False)
pattern = "hugepage_2M"
line = "hugetlbfs "\
"/mnt/hugepage_2M hugetlbfs defaults,pagesize=2M 0 0"
insert_line_to_file(pattern=pattern, line=line,
file_name='/etc/fstab')
pattern = "vrouter_kmod_2M_hugepages"
line = "vrouter_kmod_2M_hugepages=%s" % no_of_pages
insert_line_to_file(pattern=pattern, line=line,
file_name='/etc/contrail/agent_param')
def setup(self):
self.disable_selinux()
self.disable_iptables()
self.setup_coredump()
self.fixup_config_files()
self.increase_vrouter_limit()
self.setup_sriov_grub()
if self._args.tsn_mode or self._args.tsn_evpn_mode:
self.setup_tsn_node()
self.run_services()
else:
self.run_services()
if self._args.register and not self.reprov:
self.add_vnc_config()
```
#### File: contrail_vrouter_provisioning/toragent/setup.py
```python
import os
import sys
import argparse
import platform
import socket
from contrail_vrouter_provisioning import local
from contrail_vrouter_provisioning.base import ContrailSetup
from contrail_vrouter_provisioning.toragent.templates import tor_agent_conf
from contrail_vrouter_provisioning.toragent.templates import tor_agent_ini
from contrail_vrouter_provisioning.toragent.templates import tor_agent_service
from distutils.version import LooseVersion
(PLATFORM, VERSION, EXTRA) = platform.linux_distribution()
class TorAgentBaseSetup(ContrailSetup):
def __init__(self, tor_agent_args, args_str=None):
super(TorAgentBaseSetup, self).__init__()
self._args = tor_agent_args
# if the non_mgmt_ip is set use this as control ip
if self._args.non_mgmt_ip:
self._args.self_ip = self._args.non_mgmt_ip
if self._args.tor_agent_name is None:
            self._args.tor_agent_name = socket.getfqdn() + '-' + str(self._args.tor_id)
def fixup_tor_agent(self):
#if self._args.tor_ovs_protocol.lower() == 'pssl':
self.ssl_cacert = '/etc/contrail/ssl/certs/tor-ca-cert.pem'
self.ssl_cert = '/etc/contrail/ssl/certs/tor.' + self._args.tor_id + '.cert.pem'
self.ssl_privkey = '/etc/contrail/ssl/private/tor.' + self._args.tor_id + '.private.pem'
control_servers = ' '.join('%s:%s' % (server, '5269')
for server in self._args.control_nodes)
collector_servers = ' '.join('%s:%s' % (server, '8086')
for server in self._args.collectors)
dns_servers = ' '.join('%s:%s' % (server, '53')
for server in self._args.control_nodes)
template_vals = {
'__contrail_control_ip__': self._args.self_ip,
'__contrail_tor_name__': self._args.tor_name,
'__contrail_http_server_port__': self._args.http_server_port,
'__contrail_tor_ip__': self._args.tor_ip,
'__contrail_tor_id__': self._args.tor_id,
'__contrail_tsn_ovs_port__': self._args.tor_ovs_port,
'__contrail_tsn_ip__': self._args.tsn_ip,
'__contrail_tor_ovs_protocol__': self._args.tor_ovs_protocol,
'__contrail_tor_agent_ovs_ka__': self._args.tor_agent_ovs_ka,
'__contrail_tor_agent_name__': self._args.tor_agent_name,
'__contrail_tor_tunnel_ip__': self._args.tor_tunnel_ip,
'__contrail_tor_product_name__': self._args.tor_product_name,
'__contrail_tor_vendor_name__': self._args.tor_vendor_name,
'__contrail_tor_ssl_cert__': self.ssl_cert,
'__contrail_tor_ssl_privkey__': self.ssl_privkey,
'__contrail_tor_ssl_cacert__': self.ssl_cacert,
'__contrail_control_servers__': control_servers,
'__contrail_collector_servers__': collector_servers,
'__contrail_dns_servers__': dns_servers,
'__xmpp_dns_auth_enable__': self._args.xmpp_dns_auth_enable,
'__xmpp_auth_enable__': self._args.xmpp_auth_enable,
}
self._template_substitute_write(
tor_agent_conf.template, template_vals,
self._temp_dir_name + '/tor_agent_conf')
self.tor_file_name = ('contrail-tor-agent-' + self._args.tor_id +
'.conf')
local("sudo mv %s/tor_agent_conf /etc/contrail/%s" %
(self._temp_dir_name, self.tor_file_name))
def fixup_tor_ini(self):
self.tor_process_name = 'contrail-tor-agent-' + self._args.tor_id
self.tor_log_file_name = self.tor_process_name + '-stdout.log'
template_vals = {
'__contrail_tor_agent__': self.tor_process_name,
'__contrail_tor_agent_conf_file__': self.tor_file_name,
'__contrail_tor_agent_log_file__': self.tor_log_file_name
}
self._template_substitute_write(
tor_agent_ini.template, template_vals,
self._temp_dir_name + '/tor_agent_ini')
self.tor_ini_file_name = self.tor_process_name + '.ini'
src = "%s/tor_agent_ini" % self._temp_dir_name
dst = "/etc/contrail/supervisord_vrouter_files/%s" %\
self.tor_ini_file_name
local("sudo mv %s %s" % (src, dst))
def fixup_tor_service(self):
self.tor_process_name = 'contrail-tor-agent-' + self._args.tor_id
template_vals = {
'__contrail_tor_agent_conf_file__': self.tor_file_name,
}
self._template_substitute_write(
tor_agent_service.template, template_vals,
self._temp_dir_name + '/tor_agent_service')
self.tor_service_file_name = self.tor_process_name + '.service'
src = "%s/tor_agent_service" % self._temp_dir_name
dst = "/lib/systemd/system/%s" %\
self.tor_service_file_name
local("sudo mv %s %s" % (src, dst))
def create_init_file(self):
local("sudo cp /etc/init.d/contrail-vrouter-agent /etc/init.d/%s" %(self.tor_process_name))
def add_vnc_config(self):
cmd = "sudo python /opt/contrail/utils/provision_vrouter.py"
cmd += " --host_name %s" % self._args.tor_agent_name
cmd += " --host_ip %s" % self._args.self_ip
cmd += " --api_server_ip %s" % self._args.cfgm_ip
cmd += " --admin_user %s" % self._args.admin_user
cmd += " --admin_password %s" % self._args.admin_password
cmd += " --admin_tenant_name %s" % self._args.admin_tenant_name
cmd += " --openstack_ip %s" % self._args.authserver_ip
cmd += " --router_type tor-agent"
if self._args.auth_protocol == 'https':
cmd += " --api_server_use_ssl True"
cmd += " --oper add "
local(cmd)
def add_physical_device(self):
cmd = "sudo python /opt/contrail/utils/provision_physical_device.py"
cmd += " --device_name %s" % self._args.tor_name
cmd += " --vendor_name %s" % self._args.tor_vendor_name
cmd += " --device_mgmt_ip %s" % self._args.tor_ip
cmd += " --device_tunnel_ip %s" % self._args.tor_tunnel_ip
cmd += " --device_tor_agent %s" % self._args.tor_agent_name
cmd += " --device_tsn %s" % self._args.tor_tsn_name
cmd += " --api_server_ip %s" % self._args.cfgm_ip
cmd += " --admin_user %s" % self._args.admin_user
cmd += " --admin_password %s" % self._args.admin_password
cmd += " --admin_tenant_name %s" % self._args.admin_tenant_name
cmd += " --openstack_ip %s" % self._args.authserver_ip
if self._args.auth_protocol == 'https':
cmd += " --api_server_use_ssl True"
if self._args.tor_product_name:
cmd += " --product_name %s" %(self._args.tor_product_name)
cmd += " --oper add "
local(cmd)
def update_supervisor(self):
if self._args.restart:
local("sudo supervisorctl -c /etc/contrail/supervisord_vrouter.conf update", warn_only=True)
def run_services(self):
if self._args.restart:
cmd = "sudo service %s start" % self.tor_process_name
local(cmd)
cmd = "sudo systemctl enable %s" % self.tor_process_name
local(cmd)
def setup(self):
self.fixup_tor_agent()
if (('ubuntu' in PLATFORM.lower()) and
(LooseVersion(VERSION) >= LooseVersion('16.04'))):
self.fixup_tor_service()
self.add_vnc_config()
self.add_physical_device()
self.run_services()
else:
self.fixup_tor_ini()
self.create_init_file()
self.add_vnc_config()
self.add_physical_device()
self.update_supervisor()
class TorAgentSetup(ContrailSetup):
def __init__(self, args_str=None):
super(TorAgentSetup, self).__init__()
self._args = None
self.global_defaults = {
'cfgm_ip': '127.0.0.1',
'authserver_ip': '127.0.0.1',
            'admin_user': None,
            'admin_passwd': None,
            'admin_tenant_name': 'admin',
            'auth_protocol': None,
            'tor_agent_name': None,
            'tor_vendor_name': '',
            'tor_product_name': '',
            'http_server_port': 9090,
            'tor_agent_ovs_ka': 10000,
            'restart': True,
'xmpp_auth_enable': False,
'xmpp_dns_auth_enable': False,
}
self.parse_args(args_str)
def parse_args(self, args_str):
'''
Eg. setup-vnc-tor-agent --agent_name contrail-tor-1
--http_server_port 9090 --discovery_server_ip 10.204.217.39
--tor_id 1 --tor_ip 10.204.221.35 --tor_ovs_port 9999
--tsn_ip 10.204.221.33 --tor_ovs_protocol tcp
--tor_agent_ovs_ka 10000
'''
parser = argparse.ArgumentParser()
parser.add_argument("--cfgm_ip", help = "IP Address of the config node")
parser.add_argument("--self_ip", help="IP Address of this(compute) node")
parser.add_argument(
"--non_mgmt_ip",
help="IP Address of non-management interface(fabric network)"
"on the compute node")
parser.add_argument("--authserver_ip", help = "IP Address of the authserver(keystone) node")
parser.add_argument("--admin_user", help = "Authserver admin tenants user name")
parser.add_argument("--admin_password", help = "AuthServer admin user's password")
parser.add_argument("--admin_tenant_name", help = "AuthServer admin tenant name")
parser.add_argument("--auth_protocol", help = "AuthServer(keystone) running protocol")
parser.add_argument("--tor_name", help="Name of the TOR agent")
parser.add_argument("--http_server_port", help="Port number for the HTTP server.")
parser.add_argument("--tor_ip", help="TOR Switch IP")
parser.add_argument("--tor_id", help="Unique ID for the TOR")
parser.add_argument("--tor_ovs_port", help="OVS Port Number")
parser.add_argument("--tsn_ip", help="Tor tunnel Node IP")
parser.add_argument("--tor_ovs_protocol", help="TOR OVS Protocol. Currently Only TCP and ssl supported")
parser.add_argument("--tor_vendor_name", help="TOR Vendor name")
parser.add_argument("--tor_tsn_name", help="TOR Tsn name")
parser.add_argument("--tor_agent_ovs_ka", help="TOR Agent OVS Keepalive timer value in millisecs")
parser.add_argument("--tor_agent_name", help="TOR Agent name")
parser.add_argument("--tor_tunnel_ip", help="TOR tunnel ip")
parser.add_argument("--tor_product_name", help="TOR product name ")
parser.add_argument(
"--control-nodes",
help="List of IP of the VNC control-nodes",
nargs='+', type=str)
parser.add_argument(
"--collectors",
help="List of IP of the VNC collectors",
nargs='+', type=str)
parser.add_argument("--tor_id_list", help="tor id list", nargs='+', type=str)
parser.add_argument(
"--xmpp_auth_enable", help="Enable xmpp auth",
action="store_true")
parser.add_argument(
"--xmpp_dns_auth_enable", help="Enable DNS xmpp auth",
action="store_true")
parser.set_defaults(**self.global_defaults)
self._args = parser.parse_args(args_str)
def main(args_str=None):
tor_agent_args = TorAgentSetup(args_str)._args
tor_agent = TorAgentBaseSetup(tor_agent_args)
tor_agent.setup()
if __name__ == "__main__":
main()
``` |
{
"source": "jnpr-tjiang/devmgr",
"score": 2
} |
#### File: api/resources/device.py
```python
from flask import request
from flask_restful import Resource
from sqlalchemy.exc import IntegrityError
from devmgr.models import Device, Annotation, Label
from devmgr.extensions import ma, db
from devmgr.common.pagination import paginate
class AnnotationSchema(ma.ModelSchema):
class Meta:
model = Annotation
fields = ['name', 'value']
class DeviceSchema(ma.ModelSchema):
annotations = ma.Nested(AnnotationSchema, many=True)
class Meta:
model = Device
class DeviceResource(Resource):
"""Single device resource
"""
def get(self, device_id):
schema = DeviceSchema()
device = Device.query.get_or_404(device_id)
return {
'device': schema.dump(device).data
}
def put(self, device_id):
schema = DeviceSchema()
device = Device.query.get_or_404(device_id)
device, errors = schema.load(request.json, instance=device)
if errors:
return errors, 422
db.session.commit()
return {
'msg': 'device updated',
'device': schema.dump(device).data
}
def delete(self, device_id):
device = Device.query.get_or_404(device_id)
db.session.delete(device)
db.session.commit()
return {
'msg': "Device (%s) deleted" % device.name
}
class DeviceList(Resource):
"""Device resource creation and get list of device resources
"""
def get(self):
schema = DeviceSchema(many=True)
query = Device.query
return paginate(query, schema)
def post(self):
schema = DeviceSchema()
device, errors = schema.load(request.json)
if errors:
return errors, 422
try:
db.session.add(device)
db.session.commit()
except IntegrityError as ex:
db.session.rollback()
return {'msg': str(ex)}, 422
return {
'msg': 'Device %s created' % device.name,
'device': schema.dump(device).data
}, 201
```
#### File: devmgr/devmgr/app.py
```python
from flask import Flask
from devmgr import api
from devmgr.extensions import db
def create_app(testing=False, cli=False):
"""Application factory, used to create application
"""
app = Flask('devmgr')
app.config.from_object('devmgr.config')
if testing is True:
app.config['TESTING'] = True
configure_extensions(app, cli)
register_blueprints(app)
return app
def configure_extensions(app, cli):
"""Configure Flask extensions
"""
db.init_app(app)
def register_blueprints(app):
"""Register blueprints
"""
app.register_blueprint(api.views.blueprint)
```
#### File: devmgr/models/annotation.py
```python
from devmgr.extensions import db
class Annotation(db.Model):
"""Key-value annotations for any config object. Annotated value is usually
a json blob that is not indexed
"""
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(100))
value = db.Column(db.JSON)
device_id = db.Column(
db.Integer, db.ForeignKey('device.id'), nullable=False
)
__table_args__ = (
db.UniqueConstraint('name', 'device_id', name='unique_key_value'),
)
def __repr__(self):
return '<Annotation %s>' % self.name
``` |
{
"source": "jnqd/simiki",
"score": 2
} |
#### File: simiki/tests/test_parse_config.py
```python
from __future__ import unicode_literals
import os.path
import unittest
import datetime
from simiki.config import parse_config, get_default_config
class TestParseConfig(unittest.TestCase):
def setUp(self):
self.expected_config = get_default_config()
self.expected_config.update({
"author": "<NAME>",
"debug": True,
"default_ext": "markdown",
"description": "This is a simiki's config sample, \u6d4b\u8bd5\u6837\u4f8b",
"destination": "destination",
"keywords": "wiki, simiki, python, \u7ef4\u57fa",
"root": "/wiki/",
"source": "source",
"attach": "attach",
"theme": "mytheme",
"themes_dir": "simiki_themes",
"title": "\u6211\u7684Wiki",
"url": "http://wiki.tankywoo.com"
})
self.config_file = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"tests", "configs", "config_sample.yml")
def test_parse_config(self):
config = parse_config(self.config_file)
self.expected_config.pop('time')
_date = config.pop('time')
if hasattr(unittest.TestCase, 'assertIsInstance'):
self.assertIsInstance(_date, datetime.datetime)
else:
assert isinstance(_date, datetime.datetime), \
'%s is not an instance of %r' % \
(repr(_date), datetime.datetime)
self.assertEqual(
config,
self.expected_config
)
def test_parse_config_not_exist(self):
not_exist_config_file = os.path.join(self.config_file, "not_exist")
self.assertRaises(Exception,
lambda: parse_config(not_exist_config_file))
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jnrbsn/daemonocle",
"score": 2
} |
#### File: daemonocle/daemonocle/helpers.py
```python
import os
import posixpath
from collections.abc import Callable
from concurrent.futures import ThreadPoolExecutor, as_completed
from operator import itemgetter
import click
import psutil
from daemonocle._utils import (
format_elapsed_time, json_encode, to_bytes, waitstatus_to_exitcode)
from daemonocle.core import Daemon, expose_action, get_action, list_actions
from daemonocle.exceptions import DaemonError
class FHSDaemon(Daemon):
"""A Daemon subclass that makes opinionatedly complies with the
Filesystem Hierarchy Standard"""
def __init__(
self, name=None, prefix='/opt', log_prefix='', **kwargs):
if name is not None:
self.name = name
elif not getattr(self, 'name', None):
raise DaemonError('name must be defined for FHSDaemon')
kwargs.update({
'chrootdir': None,
'detach': True,
})
prefix = posixpath.realpath(prefix)
if prefix == '/opt':
kwargs.update({
'pid_file': '/var/opt/{name}/run/{name}.pid',
'stdout_file': '/var/opt/{name}/log/{log_prefix}out.log',
'stderr_file': '/var/opt/{name}/log/{log_prefix}err.log',
})
elif prefix == '/usr/local':
kwargs.update({
'pid_file': '/var/local/run/{name}/{name}.pid',
'stdout_file': '/var/local/log/{name}/{log_prefix}out.log',
'stderr_file': '/var/local/log/{name}/{log_prefix}err.log',
})
elif prefix == '/usr':
kwargs.update({
'pid_file': '/var/run/{name}/{name}.pid',
'stdout_file': '/var/log/{name}/{log_prefix}out.log',
'stderr_file': '/var/log/{name}/{log_prefix}err.log',
})
else:
kwargs.update({
'pid_file': posixpath.join(prefix, 'run/{name}.pid'),
'stdout_file': posixpath.join(
prefix, 'log/{log_prefix}out.log'),
'stderr_file': posixpath.join(
prefix, 'log/{log_prefix}err.log'),
})
# Format paths
for key in ('pid_file', 'stdout_file', 'stderr_file'):
kwargs[key] = kwargs[key].format(
name=self.name, log_prefix=log_prefix)
if 'work_dir' in kwargs:
work_dir = posixpath.realpath(kwargs['work_dir'])
if work_dir == prefix and not posixpath.isdir(work_dir):
# Normally, the work_dir is required to exist, but if the
# work_dir is the same as the prefix, automatically create it
# if it doesn't exist.
umask = kwargs.get('umask', 0o22)
uid = kwargs.get('uid', os.getuid())
gid = kwargs.get('gid', os.getgid())
os.makedirs(work_dir, 0o777 & ~umask)
os.chown(work_dir, uid, gid)
super(FHSDaemon, self).__init__(**kwargs)
class MultiDaemon(object):
"""Daemon wrapper class that manages multiple copies of the same worker"""
def __init__(self, num_workers, daemon_cls=Daemon, **daemon_kwargs):
if num_workers < 1:
raise DaemonError('num_workers must be >= 1 for MultiDaemon')
self.num_workers = num_workers
self.worker = daemon_kwargs.get('worker', None)
self._daemons = []
kwargs_to_format = {'name', 'work_dir'}
if issubclass(daemon_cls, FHSDaemon):
kwargs_to_format.add('prefix')
else:
kwargs_to_format.update(('pid_file', 'stdout_file', 'stderr_file'))
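        # Values for these keys may contain '{n}', which is replaced with the
        # worker index below. A hypothetical example: pid_file=
        # '/tmp/myapp-{n}.pid' with num_workers=2 yields /tmp/myapp-0.pid and
        # /tmp/myapp-1.pid, keeping the per-worker PID files unique.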
pid_files = set()
for n in range(self.num_workers):
kwargs = daemon_kwargs.copy()
kwargs.update({
'chrootdir': None,
'detach': True,
})
for key in kwargs_to_format:
if key in kwargs:
kwargs[key] = kwargs[key].format(n=n)
daemon = daemon_cls(**kwargs)
# Enable multi mode
daemon.worker_id = n
if daemon.pid_file is None:
raise DaemonError('pid_file must be defined for MultiDaemon')
pid_files.add(daemon.pid_file)
self._daemons.append(daemon)
if len(pid_files) < self.num_workers:
raise DaemonError('PID files must be unique for MultiDaemon')
@classmethod
def list_actions(cls):
return list_actions(cls)
def get_action(self, action):
return get_action(self, action)
def do_action(self, action, *args, **kwargs):
func = self.get_action(action)
return func(*args, **kwargs)
def cli(self, *args, **kwargs):
from daemonocle.cli import DaemonCLI
cli = DaemonCLI(daemon=self)
return cli(*args, **kwargs)
@expose_action
def start(self, worker_id=None, debug=False, *args, **kwargs):
"""Start the daemon."""
if worker_id is not None:
daemons = [self._daemons[worker_id]]
else:
daemons = self._daemons
# Do first part of detaching
pid = os.fork()
if pid:
status = os.waitpid(pid, 0)
exit(waitstatus_to_exitcode(status[1]))
# All workers will be in the same session, and each worker will
# be in its own process group (handled in Daemon class).
os.setsid()
try:
ctx = click.get_current_context()
except RuntimeError:
ctx = None
for daemon in daemons:
if ctx is not None:
ctx.obj = daemon
daemon.start(debug=debug, *args, **kwargs)
@expose_action
def stop(self, worker_id=None, timeout=None, force=False):
"""Stop the daemon."""
if worker_id is not None:
daemons = [self._daemons[worker_id]]
else:
daemons = self._daemons
for daemon in daemons:
daemon.stop(timeout=timeout, force=force)
@expose_action
def restart(
self, worker_id=None, timeout=None, force=False, debug=False,
*args, **kwargs):
"""Stop then start the daemon."""
self.stop(worker_id=worker_id, timeout=timeout, force=force)
self.start(worker_id=worker_id, debug=debug, *args, **kwargs)
def get_status_single(self, worker_id, fields=None):
return self._daemons[worker_id].get_status(fields=fields)
def get_status(self, fields=None):
"""Get the statuses of all the workers in parallel."""
statuses = []
with ThreadPoolExecutor(max_workers=self.num_workers) as executor:
future_to_worker_id = {}
for n in range(self.num_workers):
future = executor.submit(
self.get_status_single, n, fields=fields)
future_to_worker_id[future] = n
for future in as_completed(future_to_worker_id):
n = future_to_worker_id[future]
statuses.append((n, future.result()))
return [s[1] for s in sorted(statuses, key=itemgetter(0))]
@expose_action
def status(self, worker_id=None, json=False, fields=None):
"""Get the status of the daemon."""
if worker_id is not None:
statuses = [self.get_status_single(worker_id, fields=fields)]
else:
statuses = self.get_status(fields=fields)
if json:
status_width = max(len(json_encode(s)) for s in statuses)
term_width, term_height = click.get_terminal_size()
if status_width + 3 > term_width:
message = json_encode(statuses, pretty=True)
else:
message = ['[']
for i, status in enumerate(statuses):
message.append(
' ' + json_encode(status) +
(',' if i < len(statuses) - 1 else '')
)
message.append(']')
message = '\n'.join(message)
else:
message = []
for status in statuses:
if status.get('status') == psutil.STATUS_DEAD:
message.append('{name} -- not running'.format(
name=status['name']))
else:
status['uptime'] = format_elapsed_time(status['uptime'])
template = (
'{name} -- pid: {pid}, status: {status}, '
'uptime: {uptime}, %cpu: {cpu_percent:.1f}, '
'%mem: {memory_percent:.1f}')
message.append(template.format(**status))
message = '\n'.join(message)
click.echo(message)
class ExecWorker(Callable):
"""A worker class that simply executes another program"""
def __init__(self, name, *args):
self.name = to_bytes(name)
self.args = tuple(to_bytes(a) for a in args)
if b'/' in self.name:
self.name = posixpath.realpath(self.name)
self.__doc__ = 'Run "{}" as a daemon.'.format(
self.name.decode('ascii', errors='backslashreplace'))
def __call__(self): # pragma: no cover
exec_prog = os.execv if self.name[0] == b'/' else os.execvp
exec_prog(self.name, (self.name,) + self.args)
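# A hypothetical use of ExecWorker: Daemon(worker=ExecWorker('sleep', '60'),
# pid_file='/tmp/sleeper.pid') would daemonize and then exec "sleep 60" in
# place of the worker process.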
```
#### File: daemonocle/tests/test_utils.py
```python
import errno
import os
import posixpath
import subprocess
import pytest
from daemonocle import DaemonEnvironmentError
from daemonocle._utils import check_dir_exists, proc_get_open_fds
def test_check_dir_exists(temp_dir):
test_dir = posixpath.join(temp_dir, 'foo')
os.mkdir(test_dir)
check_dir_exists(test_dir)
with pytest.raises(OSError) as exc_info:
check_dir_exists(posixpath.join(temp_dir, 'baz'))
assert exc_info.value.errno == errno.ENOENT
assert 'No such file or directory' in str(exc_info.value)
test_file = posixpath.join(temp_dir, 'bar')
with open(test_file, 'w') as f:
f.write('bar\n')
with pytest.raises(OSError) as exc_info:
check_dir_exists(test_file)
assert exc_info.value.errno == errno.ENOTDIR
assert 'Not a directory' in str(exc_info.value)
def test_proc_get_open_fds_fallbacks_current_proc(temp_dir, monkeypatch):
# Normal
result_1 = proc_get_open_fds()
def mock_os_listdir_scandir(path='.'):
raise OSError(errno.ENOENT, 'No such file or directory', path)
monkeypatch.setattr(os, 'listdir', mock_os_listdir_scandir)
if hasattr(os, 'scandir'):
monkeypatch.setattr(os, 'scandir', mock_os_listdir_scandir)
# Simulating not being able to read "/proc/<pid>/fd"
result_2 = proc_get_open_fds()
temp_lsof = posixpath.join(temp_dir, 'lsof')
with open(temp_lsof, 'w') as f:
f.write('#!/bin/sh\nexit 42\n')
os.chmod(temp_lsof, 0o755)
orig_env_path = os.environ.get('PATH', '')
temp_env_path = (
':'.join((temp_dir, orig_env_path)) if orig_env_path else temp_dir)
try:
os.environ['PATH'] = temp_env_path
# Simulating "lsof" error
result_3 = proc_get_open_fds()
finally:
os.environ['PATH'] = orig_env_path
assert result_1 == result_2 == result_3
def test_proc_get_open_fds_fallbacks_other_proc(temp_dir, monkeypatch):
proc = subprocess.Popen(['sleep', '60'])
pid = proc.pid
try:
# Normal
result_1 = proc_get_open_fds(pid)
def mock_os_listdir_scandir(path='.'):
raise OSError(errno.ENOENT, 'No such file or directory', path)
monkeypatch.setattr(os, 'listdir', mock_os_listdir_scandir)
if hasattr(os, 'scandir'):
monkeypatch.setattr(os, 'scandir', mock_os_listdir_scandir)
# Simulating not being able to read "/proc/<pid>/fd"
result_2 = proc_get_open_fds(pid)
assert result_1 == result_2
temp_lsof = posixpath.join(temp_dir, 'lsof')
with open(temp_lsof, 'w') as f:
f.write('#!/bin/sh\nexit 42\n')
os.chmod(temp_lsof, 0o755)
orig_env_path = os.environ.get('PATH', '')
temp_env_path = (
':'.join((temp_dir, orig_env_path)) if orig_env_path else temp_dir)
os.environ['PATH'] = temp_env_path
try:
# Simulating "lsof" error
with pytest.raises(DaemonEnvironmentError) as exc_info:
proc_get_open_fds(pid)
assert 'Unable to get open file descriptors' in str(exc_info.value)
finally:
os.environ['PATH'] = orig_env_path
finally:
proc.terminate()
proc.wait()
def test_exit(pyscript):
result = pyscript("""
from daemonocle._utils import exit
exit(0)
""").run()
assert result.returncode == 0
result = pyscript("""
from daemonocle._utils import exit
exit(42)
""").run()
assert result.returncode == 42
result = pyscript("""
from daemonocle._utils import exit
exit(9999)
""").run()
assert result.returncode == 9999 & 0xff
result = pyscript("""
from daemonocle._utils import exit
exit(-15)
""").run()
assert result.returncode == 128 + 15
result = pyscript("""
from daemonocle._utils import exit
exit('27Quj7FzhWRVidZHygXUtE1WNcVGGxiTEAP')
""").run()
assert result.returncode == 1
assert result.stderr == b'27Quj7FzhWRVidZHygXUtE1WNcVGGxiTEAP\n'
``` |
{
"source": "jnrbsn/python-click-prettify",
"score": 2
} |
#### File: jnrbsn/python-click-prettify/click_prettify.py
```python
import re
import subprocess
import sys
import traceback
import click
import click.core
import click.exceptions
_re_error_message_keywords = re.compile(
r'^(error|aborted|failed|fatal)[:!]', flags=re.IGNORECASE | re.MULTILINE)
def _echo_stderr_red(message, *args, **kwargs):
    if sys.exc_info()[0] is not None and kwargs.get('file') is sys.stderr:
# Only modify the message if an exception is currently being
# handled and we're printing to STDERR.
# Capitalize keywords in the error message
message = _re_error_message_keywords.sub(
lambda m: m.group(0).upper(), message)
# Make it red
message = click.style(message, fg='red')
return click.echo(message, *args, **kwargs)
click.exceptions.echo = _echo_stderr_red
click.core.echo = _echo_stderr_red
class _UsageErrorWithFullHelp(click.exceptions.UsageError):
def show(self, *args, **kwargs):
# Swap out get_usage for get_help
get_usage = self.ctx.get_usage
self.ctx.get_usage = self.ctx.get_help
try:
return super().show(*args, **kwargs)
finally:
# Change it back
self.ctx.get_usage = get_usage
click.exceptions.UsageError = _UsageErrorWithFullHelp
click.UsageError = _UsageErrorWithFullHelp
def _excepthook_red(etype, value, tb):
# Print exception in red
click.secho(
''.join(traceback.format_exception(etype, value, tb)),
fg='red', file=sys.stderr, nl=False,
)
sys.exit(1)
sys.excepthook = _excepthook_red
def hecho(message, level=1):
"""Print a heading in a style inspired by Homebrew"""
heading_colors = {
1: 'green',
2: 'blue',
3: 'magenta',
}
arrow_color = heading_colors[level]
click.echo(' '.join((
click.style('==>', fg=arrow_color),
click.style(message, bold=True),
)))
def h1echo(msg):
"""Print a level 1 heading"""
hecho(msg, level=1)
def h2echo(msg):
"""Print a level 2 heading"""
hecho(msg, level=2)
def h3echo(msg):
"""Print a level 3 heading"""
hecho(msg, level=3)
click.hecho = hecho
click.h1echo = h1echo
click.h2echo = h2echo
click.h3echo = h3echo
_subproc_run_default_hmsg = '$ {command}'
def subproc_run(
args, hmsg=_subproc_run_default_hmsg, hlevel=1, fg=True, **kwargs):
"""User-friendly wrapper around subprocess.run"""
check = kwargs.get('check', True)
kwargs['check'] = False
kwargs['shell'] = False
if not fg:
kwargs['capture_output'] = True
if hmsg is _subproc_run_default_hmsg:
hmsg = None
command = ' '.join((f"'{a}'" if ' ' in a else a) for a in args)
if hmsg:
hecho(hmsg.format(command=command, args=args), level=hlevel)
proc_result = subprocess.run(args, **kwargs)
if check and proc_result.returncode != 0:
error_message = (
f'ERROR: Command "{command}" returned non-zero exit status '
f'{proc_result.returncode}'
)
if proc_result.stderr:
error_message += f'\nSTDERR:\n{proc_result.stderr}'
click.secho(error_message, fg='red')
sys.exit(1)
return proc_result
click.subproc_run = subproc_run
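# A minimal usage sketch of the helpers defined above; the headings and the
# echo command are arbitrary examples, and the guard keeps this out of the
# way when the module is merely imported.
if __name__ == '__main__':
    h1echo('Building project')
    h2echo('Running tests')
    subproc_run(['echo', 'hello world'])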
``` |
{
"source": "jnrbsn/python-latest-user-agents",
"score": 2
} |
#### File: jnrbsn/python-latest-user-agents/tests.py
```python
import json
import os
import shutil
import time
from tempfile import mkdtemp
from threading import Thread
import pytest
import requests
from freezegun import freeze_time
import latest_user_agents
from latest_user_agents import (
clear_user_agent_cache, get_latest_user_agents, get_random_user_agent)
__all__ = []
fake_user_agents = ['Mozilla/5.0 (Foo) Bar', 'Mozilla/5.0 (Baz) Qux']
def _items_equal(a, b):
return len(a) == len(b) and sorted(a) == sorted(b)
@pytest.fixture(scope='function', autouse=True)
def patch_cache(monkeypatch):
"""Make all tests use a fresh cache.
The cache directory will be a unique temporary directory for all test
functions, and the in-memory cache will be cleared before each test.
"""
tmp_dir = mkdtemp(prefix='latest_user_agents_test_')
try:
# Change cache location on disk
monkeypatch.setattr(latest_user_agents, '_cache_dir', tmp_dir)
monkeypatch.setattr(
latest_user_agents, '_cache_file',
os.path.join(tmp_dir, 'user-agents.sqlite'))
# Clear in-memory cache
monkeypatch.setattr(latest_user_agents, '_cached_user_agents', None)
yield
finally:
if os.path.isdir(tmp_dir):
shutil.rmtree(tmp_dir)
@pytest.fixture(scope='function', autouse=True)
def patch_requests(monkeypatch):
"""Mock HTTP requests wtih fake data instead of making real requests."""
class MockResponse(object):
text = json.dumps(fake_user_agents, indent=4).strip() + '\n'
@staticmethod
def json():
return fake_user_agents
@staticmethod
def raise_for_status():
pass
def mock_requests_get(*args, **kwargs):
return MockResponse()
monkeypatch.setattr(requests, 'get', mock_requests_get)
@pytest.fixture(scope='function')
def spy_download(mocker):
"""Spy on downloads of new user agent data."""
return mocker.spy(latest_user_agents, '_download')
@pytest.fixture(scope='function')
def spy_read_cache(mocker):
"""Spy on reads of the cache file."""
return mocker.spy(latest_user_agents, '_read_cache')
@pytest.fixture(name='clear_in_memory_cache', scope='function')
def clear_in_memory_cache_fixture(monkeypatch):
"""Return a function that can clear the in-memory cache."""
def fixture():
monkeypatch.setattr(latest_user_agents, '_cached_user_agents', None)
return fixture
@pytest.mark.parametrize(
('test_func', 'assertion'),
(
(get_latest_user_agents, lambda x: _items_equal(x, fake_user_agents)),
(get_random_user_agent, lambda x: x in fake_user_agents),
),
)
def test_latest_user_agents(
test_func, assertion, spy_download, spy_read_cache,
clear_in_memory_cache):
"""Test basic/normal usage of main functions."""
# This should download new data
assert assertion(test_func())
spy_download.assert_called_once()
spy_download.reset_mock()
clear_in_memory_cache()
# This should NOT download new data, but read the cache on-disk since
# we cleared the in-memory cache above
assert assertion(test_func())
spy_download.assert_not_called()
spy_read_cache.assert_called_once()
spy_download.reset_mock()
spy_read_cache.reset_mock()
# This should neither download new data nor read the cache because
# it hits the in-memory cache
assert assertion(test_func())
spy_download.assert_not_called()
spy_read_cache.assert_not_called()
spy_download.reset_mock()
clear_user_agent_cache()
# This should download new data again since we cleared all the cache above
assert assertion(test_func())
spy_download.assert_called_once()
def test_expired_cache(spy_download, clear_in_memory_cache):
"""Test normal cache expiration."""
with freeze_time() as frozen_time:
# This should download new data
assert get_latest_user_agents() == fake_user_agents
spy_download.assert_called_once()
spy_download.reset_mock()
clear_in_memory_cache()
frozen_time.tick(25 * 3600)
# This should download new data again since the cache expired
assert get_latest_user_agents() == fake_user_agents
spy_download.assert_called_once()
def test_double_clear_cache():
"""Test that clearing the cache multiple times doesn't cause errors."""
clear_user_agent_cache()
clear_user_agent_cache()
def test_download_error(
monkeypatch, spy_download, spy_read_cache, clear_in_memory_cache):
"""Test cache expiration when we're unable to download new data."""
with freeze_time() as frozen_time:
# This should download new data
assert get_latest_user_agents() == fake_user_agents
spy_download.assert_called_once()
spy_download.reset_mock()
clear_in_memory_cache()
frozen_time.tick(25 * 3600)
# Mock a download failure
class MockResponse(object):
@staticmethod
def raise_for_status():
raise requests.ConnectionError()
def mock_requests_get(*args, **kwargs):
return MockResponse()
monkeypatch.setattr(requests, 'get', mock_requests_get)
# This will attempt to download new data but end up reading the cache
assert _items_equal(get_latest_user_agents(), fake_user_agents)
spy_download.assert_called_once()
assert isinstance(spy_download.spy_exception, requests.ConnectionError)
spy_read_cache.assert_called_once()
spy_download.reset_mock()
spy_read_cache.reset_mock()
clear_in_memory_cache()
# This should NOT download new data, but read the cache on-disk since
# we cleared the in-memory cache above
assert _items_equal(get_latest_user_agents(), fake_user_agents)
spy_download.assert_not_called()
spy_read_cache.assert_called_once()
spy_download.reset_mock()
spy_read_cache.reset_mock()
clear_in_memory_cache()
frozen_time.tick(7 * 86400)
# Since the cache is too old, this will raise an exception
with pytest.raises(latest_user_agents.LatestUserAgentsError):
get_latest_user_agents()
def test_racing_locks(spy_download, spy_read_cache):
"""Test multiple threads racing to get a lock."""
# Acquire the lock
latest_user_agents._cache_lock.acquire()
# Start another thread and give it enough time to block while trying
# to acquire the lock itself
thread = Thread(target=get_latest_user_agents)
thread.start()
time.sleep(1)
# Fill the cache while the other thread is waiting for the lock
assert get_latest_user_agents() == fake_user_agents
spy_download.reset_mock()
spy_read_cache.reset_mock()
# Release the lock and check that the other thread used the cache
# that we just filled
latest_user_agents._cache_lock.release()
thread.join()
spy_download.assert_not_called()
spy_read_cache.assert_not_called()
``` |
{
"source": "jnrizvi/CMPUT404-assignment-web-client",
"score": 3
} |
#### File: jnrizvi/CMPUT404-assignment-web-client/httpclient.py
```python
import sys
import socket
import re
# you may use urllib to encode data appropriately
import urllib.parse
def help():
print("httpclient.py [GET/POST] [URL]\n")
# I don't like doing it like this, but regex is not working
# status_codes = ['200', '300', '301', '302', '400', '404']
class HTTPResponse(object):
def __init__(self, code=200, body=""):
self.code = code
self.body = body
def pretty_print(self):
print("Code:", self.code)
print("Body:", self.body)
class HTTPClient(object):
#def get_host_port(self,url):
def connect(self, host, port):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((host, port))
return None
# TODO - String format the result returned by recvall
def get_code(self, data):
headers = data.split("\r\n\r\n")[0]
http_status_header = headers.split("\r\n")[0]
# print(http_status_header)
match = re.search(r"\s\d{3}\s", http_status_header)
if match:
# print(match.group().strip())
return match.group().strip()
# for code in status_codes:
# if code in http_status_header:
# # print(code)
# return code
return 500
# TODO - String format the result returned by recvall
def get_headers(self,data):
headers = data.split("\r\n\r\n")[0]
headers_as_list = headers.split("\r\n")
# print(headers_as_list)
return headers_as_list
# TODO - String format the result returned by recvall
def get_body(self, data):
body = data.split("\r\n\r\n")[1]
# print(body)
return body
def sendall(self, data):
self.socket.sendall(data.encode('utf-8'))
def close(self):
self.socket.close()
# read everything from the socket
def recvall(self, sock):
buffer = bytearray()
done = False
while not done:
part = sock.recv(1024)
if (part):
buffer.extend(part)
else:
done = not part
return buffer.decode('utf-8')
# TODO - handle 404 requests and 200 requests
def GET(self, url, args=None):
code = 500
body = ""
components = urllib.parse.urlparse(url)
port = components.port
        if port is None:
            # if there's no port in the URL, default to 80
            port = 80
host = components.hostname
path = components.path
if len(path) == 0:
path = "/"
# host, port from url
self.connect(host, port)
# make up payload string, format with path and host.
        payload = f'GET {path} HTTP/1.1\r\nHost: {host}\r\nAccept: */*\r\nConnection: Close\r\n\r\n'
# payload = f'GET {path} HTTP/1.1\r\nHost: {host}\r\nConnection: Close\r\n\r\n'
# payload = f'GET {path} HTTP/1.1\r\nHost: {host}\r\nAccept: /*/\r\n\r\n'
# payload goes in here to be sent
self.sendall(payload)
result = self.recvall(self.socket)
# status code (within headers)
code = self.get_code(result)
# headers
headers = self.get_headers(result)
# body
body = self.get_body(result)
print(body)
self.close()
return HTTPResponse(int(code), body)
# TODO - handle 404 requests and 200 requests
# I think args is referring to what's in the url: parameter=value
def POST(self, url, args=None):
code = 500
body = ""
# can we use this?
components = urllib.parse.urlparse(url)
port = components.port
        if port is None:
            # if there's no port in the URL, default to 80
            port = 80
host = components.hostname
path = components.path
if len(path) == 0:
path = "/"
# host, port from url
self.connect(host, port)
keyValues = []
if args == None:
args = ''
else:
# build a string of fields and values from the args, if provided. Cast the dict to a string and add it to payload
args = str(urllib.parse.urlencode(args))
# make up payload string, format with path and host.
# Args should go in here as well if they are not None
# Need to close the connection too
# Need content length too (length of args string)
        payload = f'POST {path} HTTP/1.1\r\nHost: {host}\r\nAccept: */*\r\nConnection: Close\r\nContent-Type: application/x-www-form-urlencoded\r\nContent-Length: {len(args)}\r\n\r\n{args}\r\n\r\n'
# (encoded) payload goes in here?
self.sendall(payload)
result = self.recvall(self.socket)
# status code (within headers)
code = self.get_code(result)
# headers
headers = self.get_headers(result)
# body
body = self.get_body(result)
print(body)
self.close()
return HTTPResponse(int(code), body)
def command(self, url, command="GET", args=None):
if (command == "POST"):
return self.POST( url, args )
else:
return self.GET( url, args )
if __name__ == "__main__":
client = HTTPClient()
command = "GET"
if (len(sys.argv) <= 1):
help()
sys.exit(1)
elif (len(sys.argv) == 3):
print(client.command( sys.argv[2], sys.argv[1] ))
# client.command( sys.argv[2], sys.argv[1] ).pretty_print()
else:
print(client.command( sys.argv[1] ))
# Remember that a client (socket) starts a connection to a server socket that is listening.
# There is no server in this assignment, which is why we just print out the result of GET/POST
# just use urllib.parseurl and send the data? with a standard GET / POST request
# then print the body that gets returned
# http://www.cs.ualberta.ca/
# http://softwareprocess.es/static/SoftwareProcess.es.html
# http://c2.com/cgi/wiki?CommonLispHyperSpec
# http://slashdot.org
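# A rough sketch of driving the client from other code rather than the CLI
# (the POST host below is a made-up placeholder):
#
#   client = HTTPClient()
#   resp = client.GET("http://slashdot.org/")
#   print(resp.code, len(resp.body))
#   resp = client.POST("http://example-host/form", args={"field": "value"})
#   print(resp.code)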
``` |
{
"source": "jnrizvi/CMPUT404-assignment-webserver",
"score": 3
} |
#### File: jnrizvi/CMPUT404-assignment-webserver/server.py
```python
import socketserver
import re
import os
# from PIL import Image
# Copyright 2013 <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Furthermore it is derived from the Python documentation examples thus
# some of the code is Copyright © 2001-2013 Python Software
# Foundation; All Rights Reserved
#
# http://docs.python.org/2/library/socketserver.html
#
# run: python freetests.py
# try: curl -v -X GET http://127.0.0.1:8080/
# FROM: https://stackoverflow.com/questions/47726865/html-page-not-displaying-using-python-socket-programming 2021-01-27 by lappet 2017
# filename = 'static/index.html'
# f = open(filename, 'r')
# csock.sendall(str.encode("HTTP/1.0 200 OK\n",'iso-8859-1'))
# csock.sendall(str.encode('Content-Type: text/html\n', 'iso-8859-1'))
# csock.send(str.encode('\r\n'))
# # send data per line
# for l in f.readlines():
# print('Sent ', repr(l))
# csock.sendall(str.encode(""+l+"", 'iso-8859-1'))
# l = f.read(1024)
# f.close()
# END FROM
class MyWebServer(socketserver.BaseRequestHandler):
def handle(self):
self.data = self.request.recv(1024).strip()
        # Need to parse self.data. Look at the path to see which file/folder should be returned. Look at the HTTP method and see if it's GET. POST/PUT/DELETE are not supported.
# print ("Got a request of: %s\n" % self.data)
# print(self.data)
request_snippet = self.data.decode("utf-8").split("?")[0]
print(request_snippet)
# Just grab the HTTP method and the file path using a regular expression
# THIS REGEX NEEDS TO BE UPDATED TO HANDLE DEEP PATH
match = re.match(r"GET\s\/([A-Za-z]+\.[A-Za-z]+)?\s", request_snippet)
if match:
method_and_path = match.group().split(" ")
print(method_and_path)
# from self.data, extract the path to know which files should be sent.
# If / is the path, the index.html should be the file to send
if method_and_path[1] == "/":
filename = '/index.html'
else:
# otherwise, serve the other requested file (css, maybe favicon?)
filename = method_and_path[1]
# print(filename)
# print(os.listdir("www"))
# Send a 404 status code if the file in the request does not match a file in the directory
if filename[1:] in os.listdir("www") == False:
self.request.sendall(bytearray("HTTP/1.1 404 FILE_NOT_FOUND\r\n", 'utf-8'))
else:
f = open("www" + filename, "r")
l = f.read()
self.request.sendall(bytearray("HTTP/1.1 200 OK\n",'utf-8'))
mimetype = "text/html"
if "css" in filename:
mimetype = "text/css"
self.request.sendall(bytearray(f'Content-Type: {mimetype}\n', 'utf-8'))
self.request.send(bytearray('\n', 'utf-8'))
self.request.sendall(bytearray(""+l+"", 'utf-8'))
f.close()
else:
self.request.sendall(bytearray("HTTP/1.1 404 FILE_NOT_FOUND\r\n", 'utf-8'))
if __name__ == "__main__":
HOST, PORT = "localhost", 8080
socketserver.TCPServer.allow_reuse_address = True
# Create the server, binding to localhost on port 8080
server = socketserver.TCPServer((HOST, PORT), MyWebServer)
# Activate the server; this will keep running until you
# interrupt the program with Ctrl-C
server.serve_forever()
# # Reading the file and sending it
# python3 -m http.server
# # Check against it? What do you mean?
# GET http://127
``` |
{
"source": "jnrksv/ha-tide",
"score": 3
} |
#### File: custom_components/tide/config_flow.py
```python
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import ATTR_ATTRIBUTION, CONF_LATITUDE, CONF_LONGITUDE
from . import DOMAIN
@config_entries.HANDLERS.register(DOMAIN)
class TideFlowHandler(config_entries.ConfigFlow):
"""Config flow for Tide."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
def __init__(self):
"""Initialize."""
self._errors = {}
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
if user_input is not None:
return self.async_create_entry(
title="Tide", data=user_input)
return self.async_show_form(step_id="user", data_schema=vol.Schema({
vol.Required(CONF_LATITUDE,
default=1.0): vol.Coerce(float),
vol.Required(CONF_LONGITUDE,
default=1.0): vol.Coerce(float)
}), errors=self._errors)
async def async_step_import(self, user_input=None):
"""Import a config flow from configuration."""
return self.async_create_entry(title="configuration.yaml", data={})
``` |
{
"source": "jnromero/steep",
"score": 2
} |
#### File: code/html/monitor.py
```python
import imp
import sys
import netifaces as ni
from pathlib import Path
def getPage(config):
pf = imp.load_source('pf',str(Path(config['webServerRoot'])/Path(config['packageFolder']).joinpath("html","pageFunctions.py")))
thisVersion=sys.version.split("\n")[0]
thisVersion=thisVersion.replace("\"","'")
string=""
string+="\n\nwindow.pythonVersion=\"%s\";\n"%(thisVersion)
string+="window.pythonExecutable=\"%s\";\n"%(sys.executable)
ips=[]
for k in ni.interfaces():
ni.ifaddresses(k)
try:
ip = ni.ifaddresses(k)[2][0]['addr']
ips.append(["%s"%(k),"%s"%(ip)])
except Exception as e:
# print("error getting ips",str(e))
"no address"
ips=[[str(y.encode("utf-8")) for y in x] for x in ips]
string+="window.ipAddresses=%s;\n"%(ips)
files=pf.getFiles(config)
this=''
this+='<html>\n'
this+='\t<head>\n'
this+='\t\t<title>STEEP: Monitor</title>\n'
this+=pf.addExternalFiles(config,"headStart")
this+=pf.javascriptLine(files["exp"]['config.js'])
this+=pf.javascriptLine(files['common']['jquery.js'])
this+=pf.javascriptLine(files["exp"]['monitor.js'])
this+=pf.javascriptLine(files['common']['common.js'])
this+=pf.javascriptLine(files['common']['websocketConnect.js'])
this+=pf.javascriptLine(files['common']['chat.js'])
this+=pf.cssLine(files['common']['monitor.css'])
this+=pf.cssLine(files['common']['common.css'])
this+=pf.cssLine(files['common']['chat.css'])
this+=pf.addExternalFiles(config,"headEnd")
this+='\t</head>\n'
this+='\t<body>\n'
this+=pf.addExternalFiles(config,"bodyStart")
this+='\t\t<div id="mainDiv"></div>\n'
this+='\t\t\t<script type="text/javascript">%s;</script>\n'%(string)
this+=pf.javascriptLine(files['common']['monitor.js'])
this+=pf.addExternalFiles(config,"bodyEnd")
this+='\t</body>\n'
this+='</html>'
return this
```
#### File: python/modules/instructions.py
```python
from __future__ import print_function,division,absolute_import
import pickle
import json
import time
from twisted.internet import reactor
import random
from mutagen.mp3 import MP3
from pathlib import Path
class SteepInstructions():
def __init__(self):
"self.doNothering=1"
self.instructionsAudioFile=Path(self.config['domain'])/Path(self.config['currentExperiment'])/Path(self.config['instructionsFolder']).joinpath("generatedFiles","output.mp3")
self.instructionsFinishedCaption="Instructions Finished. Please wait patiently for experiment to continue."
#so video can be like a regular subject with a status
self.instructionsPlaybackSpeed=1
self.createSubject("video")
self.getInstructionsDuration()
self.getInstructionsTasks()
self.getInstructionsCaptions()
self.taskCalls={}
self.captionCalls={}
def toggleInsructionsPauseOnClient(self,message,client):
sid=client.subjectID
if self.data['subjects'][sid].instructionsPlaying==1:
timeIN=time.time()-self.data['subjects'][sid].instructionsStartTime
params={}
params['startTime']="%.02f"%(timeIN)
self.cancelInstructionsCalls(sid)
self.setURLParameters(params,sid,"send")
self.runJavascriptFunction("pauseInstructions",sid,"send")
self.data['subjects'][sid].instructionsPlaying=0
else:
timeIN=self.data['subjects'][sid].queryParameters["startTime"][0]
self.instructionsDemo(sid,float(timeIN))
def changeInstructionsTime(self,message,client):
sid=client.subjectID
if sid=="monitor":
sid="allPlusVideo"
if "amount" in message:
timeIN=min(self.instructionsLength-30,max(0,time.time()-self.data['subjects'][sid].instructionsStartTime+message['amount']))
self.setURLParameters(params,sid,"send")
elif "percentage" in message:
timeIN=self.instructionsLength*message['percentage']
params={}
params['startTime']="%.02f"%(timeIN)
self.cancelInstructionsCalls(sid)
self.instructionsDemo(sid,timeIN)
def changeInstructionsTimeFromMonitor(self,message,client):
sid="allPlusVideo"
self.stopInstructions({},{})
timeIN=self.instructionsLength*message['percentage']
self.data['instructionsTime']=timeIN
self.playInstructions()
def instructionsDemo(self,sid,timeIN=0):
if timeIN==0 and "startTime" in self.data['subjects'][sid].queryParameters:
timeIN=float(self.data['subjects'][sid].queryParameters["startTime"][0])
if timeIN>self.instructionsLength:
timeIN=0
print("DMO");
msgs=[]
msgList=self.loadInstructionsOnClient(sid,"return")
msgs+=msgList
#draw cursor overlay
msgList=self.runJavascriptFunction("drawCursorOverlay",sid,"return")
msgs+=msgList
#draw caption overlay
msgList=self.runJavascriptFunction("drawCaptionOverlay",sid,"return")
msgs+=msgList
#draw instructions timer
msgList=self.drawInstructionsTimer(sid,"return")
msgs+=msgList
for s in self.getSubjectIDList(sid):
self.data['subjects'][s].instructionsPlaying=1
msgList=self.runJavascriptFunction("clearAllInstructions",sid,"return")
msgs+=msgList
#dont end instructions for demo
# self.initializeTimer(sid,self.instructionsLength,self.endInstructions)
self.initializeTimer(sid,self.instructionsLength,self.endInstructions)
self.data['timers'][sid]=[time.time(),time.time()-timeIN,self.instructionsLength]
# self.data['subjects'][sid].timer=[time.time(),time.time()-timeIN,self.instructionsLength]
print(self.data['timers'][sid])
kwargs={"sid":sid}
[taskMsgs,index,timeToNext]=self.catchUpTasks(sid,timeIN)
for s in self.getSubjectIDList(sid):
self.data['subjects'][s].taskIndex=index
self.taskCalls[sid]=reactor.callLater(float(timeToNext)/self.instructionsPlaybackSpeed,self.runAnotherTask,**kwargs)
msgs+=taskMsgs
[msgList,index,timeToNext]=self.catchUpCaptions(sid,timeIN)
for s in self.getSubjectIDList(sid):
self.data['subjects'][s].captionIndex=index
self.captionCalls[sid]=reactor.callLater(float(timeToNext)/self.instructionsPlaybackSpeed,self.displayCaption,**kwargs)
msgs+=msgList
msgList=self.startAudio(sid,"return",timeIN)
msgs+=msgList
msgList=self.runJavascriptFunction("drawInstructionsControls",sid,"return")
msgs+=msgList
self.sendListOfMessages(msgs)
def getInstructionsTasks(self):
filename=Path(self.config['webServerRoot'])/Path(self.config['currentExperiment'])/Path(self.config['instructionsFolder']).joinpath("generatedFiles","taskTimes.json")
file = open(filename,'r')
self.taskTimes=json.load(file)
file.close()
self.data['taskIndex']=-1
filename=Path(self.config['webServerRoot'])/Path(self.config['currentExperiment'])/Path(self.config['instructionsFolder']).joinpath("generatedFiles","tasksOut.json")
file = open(filename,'r')
self.tasks=json.load(file)
file.close()
def getInstructionsCaptions(self):
#is a list of lists with [caption,time to next,total time at end of this caption display]
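        # e.g. [["Welcome to the experiment.", 4.0, 4.0],
        #       ["Click the start button.", 6.5, 10.5]]  (illustrative values)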
filename=Path(self.config['webServerRoot'])/Path(self.config['currentExperiment'])/Path(self.config['instructionsFolder']).joinpath("generatedFiles","captions.json")
file = open(filename,'r')
self.captions=json.load(file)
file.close()
self.captions.append([self.instructionsFinishedCaption,100000,100000])
self.data['captionIndex']=-1
def getInstructionsDuration(self):
mp3Path=Path(self.config['webServerRoot'])/Path(self.config['currentExperiment'])/Path(self.config['instructionsFolder']).joinpath("generatedFiles","output.mp3")
instructionLength=MP3(mp3Path).info.length
# instructionLength=MP3(self.instructionsAudioFile)
self.instructionsLength=float(instructionLength)/self.instructionsPlaybackSpeed
self.instructionsLength+=10#add 10 second buffer at end.
def getCurrentInstructionsTime(self):
#sets self.instructionsTime
if self.data['serverStatus']['instructions']['playing']==1:
self.data['instructionsTime']=time.time()-self.data['instructionsStartTime']
def setInstructionsStartTime(self):
#sets self.instructionsStartTime
if self.data['serverStatus']['instructions']['playing']==1:
self.data['instructionsStartTime']=time.time()-self.data['instructionsTime']
def reconnectInstructions(self,sid="all",output="send",timeIN="none"):
print("reconnecting "+sid)
msgs=[]
msgList=self.loadInstructionsOnClient(sid,"return")
msgs+=msgList
#draw cursor overlay
msgList=self.runJavascriptFunction("drawCursorOverlay",sid,"return")
msgs+=msgList
#draw caption overlay
msgList=self.runJavascriptFunction("drawCaptionOverlay",sid,"return")
msgs+=msgList
#draw instructions timer
msgList=self.drawInstructionsTimer(sid,"return")
msgList=self.drawInstructionsTimer("allPlusVideo","return")
msgs+=msgList
[taskMsgs,index,timeToNext]=self.catchUpTasks(sid,timeIN)
msgs+=taskMsgs
[msgList,index,timeToNext]=self.catchUpCaptions(sid,timeIN)
msgs+=msgList
if self.data['serverStatus']['instructions']['playing']==1:
msgList=self.startAudio(sid,"return")
msgs+=msgList
else:
# self.initializeTimer("all",self.instructionsLength,self.endInstructions)
self.data['timer']=[time.time(),time.time()-self.data['instructionsTime'],self.instructionsLength]
msgList=self.runJavascriptFunction("pauseInstructions",sid,"return")
msgs+=msgList
self.sendListOfMessages(msgs)
def loadInstructions(self,message,client):
#this ensures that the server knows that the instructions are loaded
print("Loading Instructions....")
self.data['serverStatus']['page']="instructions"
self.data['instructionsTime']=0
self.data['instructionsStartTime']=0
self.data['serverStatus']['instructions']['lastCaption']=self.captions[0][0]
self.data['serverStatus']['instructions']['loaded']=1
self.loadInstructionsOnClient("allPlusVideo")
self.monitorMessage()
self.updateTaskTable()
def loadInstructionsOnClient(self,sid="all",output="send"):
print(len(self.data['subjectIDs']))
print("Loading Instructions on client....",sid)
messages=[]
# def sendListOfMessages(self,messages):
# def messageToId(self,msg,sid="all",output="send"):
#update status message
for s in self.getSubjectIDList(sid):
self.data['subjects'][s].status['page']="generic"
self.data['subjects'][s].status['message']=["The instruction video will start shortly...."]
msgList=self.updateStatus(sid,"return")
messages+=msgList
#load Instructions message
msg={}
msg['type']="loadInstructions"
msg['source']=self.instructionsAudioFile
msg['length']=self.instructionsLength
msgList=self.messageToId(msg,sid,"return")
messages+=msgList
# #Load Click moved to load instructions
# msgList=self.runJavascriptFunction("loadClickSound",sid,"return")
# messages+=msgList
if output=="send":
self.sendListOfMessages(messages)
elif output=="return":
return messages
def runJavascriptFunction(self,functionName,sid="all",output="send"):
msg={}
msg['type']=functionName
return self.messageToId(msg,sid,output)
def placeCursor(self,x,y,sid="all",output="send"):
msg={}
msg['type']='placeCursor'
msg['x']=x
msg['y']=y
return self.messageToId(msg,sid,output)
def changeCaptionBottom(self,y,sid="all",output="send"):
msg={}
msg['type']='changeCaptionBottom'
msg['y']=y
return self.messageToId(msg,sid,output)
def moveCursor(self,endX,endY,time=0.5,delay=0,sid="all",output="send"):
msg={}
msg['type']='moveCursor'
msg['endX']=endX
msg['endY']=endY
msg['time']=time
msg['delay']=delay
return self.messageToId(msg,sid,output)
def moveCursorToDiv(self,divName,time=0.5,delay=0,anchor="none",sid="all",output="send"):
msg={}
msg['type']='moveCursorToDiv'
msg['anchor']=anchor
msg['divName']=divName
msg['time']=time
msg['delay']=delay
return self.messageToId(msg,sid,output)
def cursorDown(self,delay,position,sid="all",output="send"):
msg={}
msg['type']='cursorDown'
msg['position']=position
msg['delay']=delay
return self.messageToId(msg,sid,output)
def cursorDownDiv(self,delay,divName,anchor="none",sid="all",output="send"):
msg={}
msg['type']='cursorDownDiv'
msg['divName']=divName
msg['anchor']=anchor
msg['delay']=delay
print(sid,output)
print("Cursordown")
return self.messageToId(msg,sid,output)
def mouseSequence(self,sequence,sid="all",output="send"):
kwargs={"sid":sid,"output":"return"}
msgs=[]
sequenceTime=0
currentPosition=[0,0]
for s in sequence:
if s[0]=="place":
msgList=self.placeCursor(s[1],s[2],**kwargs)
msgs+=msgList
currentPosition=[s[1],s[2]]
elif s[0]=="toPoint":
msgList=self.moveCursor(s[1],s[2],s[3],sequenceTime,**kwargs)
msgs+=msgList
sequenceTime+=s[3]
currentPosition=[s[1],s[2]]
elif s[0]=="toDiv":
for kw in kwargs:
s[1][kw]=kwargs[kw]
s[1]['delay']=sequenceTime
msgList=self.moveCursorToDiv(**s[1])
msgs+=msgList
if "time" not in s[1]:
thisTime=0.5
else:
thisTime=s[1]['time']
sequenceTime+=thisTime
elif s[0]=="click" or s[0]=="clickDiv":
if s[0]=="click":
msgList=self.cursorDown(sequenceTime,currentPosition,**kwargs)
msgs+=msgList
elif s[0]=="clickDiv":
theseArgs={}
for kw in ['divName','anchor']:
if kw in s[1]:
theseArgs[kw]=s[1][kw]
for kw in ['sid','output']:
theseArgs[kw]=kwargs[kw]
theseArgs['delay']=sequenceTime
msgList=self.cursorDownDiv(**theseArgs)
msgs+=msgList
clickArgs={}
clickArgs['func']=s[1]['func']
clickArgs['args']=s[1]['args']
clickArgs['sid']=sid
clickArgs['output']="send"
self.taskCalls[sid]=reactor.callLater(float(sequenceTime+.15)/self.instructionsPlaybackSpeed,self.runSingleTask,**clickArgs)
sequenceTime+=.65
if output=="send":
self.sendListOfMessages(msgs)
else:
return msgs
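    # Sketch of the sequence format consumed by mouseSequence above; the values
    # and the "startButton" div name are illustrative only:
    #   [["place", 100, 200],
    #    ["toPoint", 300, 220, 0.5],
    #    ["toDiv", {"divName": "startButton", "time": 0.5}],
    #    ["clickDiv", {"divName": "startButton",
    #                  "func": "runJavascriptFunction",
    #                  "args": {"functionName": "clearAllInstructions"}}]]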
def startAudio(self,sid="all",output="send",timeIN="none"):
#self.instructionsTime should be set before this is run
if timeIN=="none" or sid in ["all","allPlusVideo"]:
timeIN=self.data['instructionsTime']
self.data['instructionsStartTime']=time.time()-timeIN
else:
self.data['subjects'][sid].instructionsStartTime=time.time()-timeIN
msg={}
msg['currentTime']=timeIN
msg['playbackRate']=self.instructionsPlaybackSpeed
msg['type']='startAudio'
return self.messageToId(msg,sid,output)
def changeBackgroundColor(self,color,sid="all",output="send"):
msg={}
msg['type']='changeBackgroundColor'
msg['color']=color
return self.messageToId(msg,sid,output)
def highlightDiv(self,divName,sid="all",output="send"):
msg={}
msg['type']='highlightDiv'
msg['divName']=divName
return self.messageToId(msg,sid,output)
def unHighlightDiv(self,divName,sid="all",output="send"):
msg={}
msg['type']='unHighlightDiv'
msg['divName']=divName
return self.messageToId(msg,sid,output)
def placeText(self,divID,text,left,top,fontSize,color,fadeTime,textAlign,sid="all",output="send"):
msg={}
msg['type']='placeText'
msg['divid']=divID
msg['text']=text
msg['left']=left
msg['top']=top
msg['fontSize']=fontSize
msg['color']=color
msg['fadeTime']=fadeTime
msg['textAlign']=textAlign
return self.messageToId(msg,sid,output)
def playInstructions(self):
msgs=[]
self.data['serverStatus']['instructions']['playing']=1
# if self.data['serverStatus']['instructions']['loaded']==0
self.data['serverStatus']['instructions']['started']=1
self.data['serverStatus']['instructions']['time']=float(self.data['instructionsTime'])/self.instructionsLength
#draw cursor overlay
msgList=self.runJavascriptFunction("drawCursorOverlay","allPlusVideo","return")
msgs+=msgList
#draw caption overlay
msgList=self.runJavascriptFunction("drawCaptionOverlay","allPlusVideo","return")
msgs+=msgList
#draw instructions timer
msgList=self.drawInstructionsTimer("allPlusVideo","return")
msgs+=msgList
msgList=self.startAudio("allPlusVideo","return")
msgs+=msgList
msgList=self.runJavascriptFunction("clearAllInstructions","allPlusVideo","return")
msgs+=msgList
# self.initializeTimer("all",self.instructionsLength,self.endInstructions)
self.initializeTimer("all",self.instructionsLength-self.data['instructionsTime'],self.endInstructions)
# self.data['timer']=[time.time(),time.time()-self.data['instructionsTime'],self.instructionsLength]
[taskMsgs,index,timeToNext]=self.catchUpTasks()
self.data['taskIndex']=index
kwargs={"sid":"allPlusVideo"}
self.taskCalls['allPlusVideo']=reactor.callLater(float(timeToNext)/self.instructionsPlaybackSpeed,self.runAnotherTask,**kwargs)
msgs+=taskMsgs
[msgList,index,timeToNext]=self.catchUpCaptions()
self.data['captionIndex']=index
kwargs={"sid":"allPlusVideo"}
self.captionCalls['allPlusVideo']=reactor.callLater(float(timeToNext)/self.instructionsPlaybackSpeed,self.displayCaption,**kwargs)
msgs+=msgList
self.sendListOfMessages(msgs)
self.monitorMessage()
self.updateTaskTable()
def resumeInstructions(self,message,client):
self.playInstructions()
def restartInstructions(self,message,client):
if self.data['serverStatus']['instructions']['playing']==1:
self.stopInstructions({},{})
for sid in ['video']+self.data['subjectIDs']:
self.data['subjects'][sid].status['page']="generic"
self.data['subjects'][sid].status['message']=["The instruction video will start shortly...."]
self.updateStatus(sid)
self.data['instructionsTime']=0
self.data['serverStatus']['instructions']['time']=float(self.data['instructionsTime'])/self.instructionsLength
self.data['serverStatus']['instructions']['playing']=0
self.monitorMessage()
self.updateTaskTable()
def cancelInstructionsCalls(self,sid):
if sid in self.taskCalls:
if self.taskCalls[sid].cancelled==0:
if self.taskCalls[sid].called==0:
self.taskCalls[sid].cancel()
else:
print("trying to cancel already called Task",sid)
if sid in self.captionCalls:
if self.captionCalls[sid].cancelled==0:
if self.captionCalls[sid].called==0:
self.captionCalls[sid].cancel()
else:
print("trying to cancel already called Caption",sid)
def stopInstructions(self,message,client):
self.cancelInstructionsCalls("allPlusVideo")
self.runJavascriptFunction("pauseInstructions","allPlusVideo","send")
self.data['instructionsTime']=time.time()-self.data['instructionsStartTime']
self.data['serverStatus']['instructions']['time']=float(self.data['instructionsTime'])/self.instructionsLength
self.data['serverStatus']['instructions']['playing']=0
self.monitorMessage()
self.updateTaskTable()
def runSingleTask(self,func,args,sid="all",output="send"):
args['sid']=sid
args['output']="return"
thisFunction = getattr(self,func)
msgList=thisFunction(**args)
if output=="send":
self.sendListOfMessages(msgList)
else:
return msgList
def catchUpTasks(self,sid="all",timeIN="none"):
print("catchUpTasks")
if timeIN=="none":
self.getCurrentInstructionsTime()
timeIN=self.data['instructionsTime']
print(timeIN)
toBeRun=[{"func":"runJavascriptFunction","args":{"functionName":"clearAllInstructions"}}]
lastPosition=[-23,-23]
for index in range(len(self.taskTimes)):
taskTime=self.taskTimes[index]
thisTask=self.tasks[index]
if taskTime<timeIN:
if "args" in thisTask:
if "functionName" in thisTask['args']:
if thisTask['args']['functionName']=="clearAllInstructions":
toBeRun=[]
lastPosition=[-23,-23]
if thisTask['func']=="mouseSequence":
thisSequence= thisTask['args']['sequence']
for k in thisSequence:
if k[0]=="place":
lastPosition=[k[1],k[2]]
elif k[0]=="place":
lastPosition=[k[1],k[2]]
elif k[0]=="toDiv":
lastPosition=["div",k[1]["divName"]]
elif k[0]=="click" or k[0]=="clickDiv":
clickArgs={}
clickArgs['func']=k[1]['func']
clickArgs['args']=k[1]['args']
clickArgs['args']['sid']=sid
toBeRun.append(clickArgs)
else:
toBeRun.append(thisTask)
else:
#all tasks after time don't need to be looked at
break
#index is the index of the next task to be run.
#place cursor:
if lastPosition==[-23,-23]:
"do nothing"
elif lastPosition[0]=="div":
this={"func":"mouseSequence","args":{"sequence":[["toDiv",{"divName":lastPosition[1],"time":0}]]}}
toBeRun.append(this)
else:
this={"func":"placeCursor","args":{"x":lastPosition[0],"y":lastPosition[1]}}
toBeRun.append(this)
messages=[]
for task in toBeRun:
msgList=self.runSingleTask(task['func'],task['args'],sid,"return")
messages+=msgList
timeToNextTask=self.taskTimes[index]-timeIN
return [messages,index-1,timeToNextTask]
def testRunAnotherTask(self,message,client):
thisIndex=self.data['taskIndex']
if "testInstructionTimeIndex" not in self.data:
self.data['testInstructionTimeIndex']=0
timeForThisTask=float(self.taskTimes[self.data['testInstructionTimeIndex']])-.1
self.data['instructionsTime']=timeForThisTask
self.data['serverStatus']['instructions']['playing']=1
self.reconnectInstructions("allPlusVideo","send",timeForThisTask)
self.data['testInstructionTimeIndex']+=1
def runAnotherTask(self,sid="all"):
runTask=0
if self.config['serverType']=="regularExperiment":
self.data['taskIndex']+=1
thisIndex=self.data['taskIndex']
sendToWho="allPlusVideo"
runTask=1
elif self.config['serverType']=="demoExperiment":
            if sid in self.data['subjects']:
self.data['subjects'][sid].taskIndex+=1
thisIndex=self.data['subjects'][sid].taskIndex
sendToWho=sid
runTask=1
if runTask==1:
timeForThisTask=float(self.taskTimes[thisIndex])
task=self.tasks[thisIndex]
msgList=self.runSingleTask(task['func'],task['args'],sendToWho,"send")
# self.getCurrentInstructionsTime()
if thisIndex<len(self.taskTimes)-1:
timeForNextThisTask=float(self.taskTimes[thisIndex+1])
timeToNext=timeForNextThisTask-timeForThisTask
kwargs={"sid":sid}
self.taskCalls[sid]=reactor.callLater(float(timeToNext)/self.instructionsPlaybackSpeed,self.runAnotherTask,**kwargs)
self.updateInstructionTimeToMonitor(sid)
def catchUpCaptions(self,sid="all",timeIN="none"):
if timeIN=="none":
self.getCurrentInstructionsTime()
timeIN=self.data['instructionsTime']
for index in range(len(self.captions)):
caption=self.captions[index]
if caption[-1]>timeIN:
break
currentCaption=self.captions[index][0]
timeToNext=self.captions[index][-1]-timeIN
msgList=self.setCaption(currentCaption,sid,"return")
return [msgList,index,timeToNext]
def updateInstructionTimeToMonitor(self,sid="all"):
if sid=="all" or sid=="allPlusVideo":
self.data['instructionsTime']=time.time()-self.data['instructionsStartTime']
self.data['serverStatus']['instructions']['time']=float(self.data['instructionsTime'])/self.instructionsLength
else:
self.data['subjects'][sid].instructionsTime=time.time()-self.data['subjects'][sid].instructionsStartTime
self.data['serverStatus']['instructions']['time']=float(self.data['subjects'][sid].instructionsTime)/self.instructionsLength
self.monitorMessage()
self.updateTaskTable()
def drawInstructionsTimer(self,sid="all",output="send"):
msg={}
msg['type']='drawInstructionsTimer'
if sid=="allPlusVideo":
msg['whichTimer']="all"
else:
msg['whichTimer']=sid
# if self.config['serverType']=="regularExperiment":
# msg['whichTimer']="all"
# elif self.config['serverType']=="demoExperiment":
# msg['whichTimer']="selfTimer"
return self.messageToId(msg,sid,output)
def setCaption(self,caption,sid="all",output="send"):
msg={}
msg['type']='setCaptions'
msg['caption']=caption
msg['length']=self.instructionsLength
if sid=="allPlusVideo":
msg['whichTimer']="all"
else:
msg['whichTimer']=sid
return self.messageToId(msg,sid,output)
def resyncAudio(self,sid="all",output="send"):
# self.data['timers']['all'][1]+=1
msg={}
msg['type']='resyncAudio'
msg['length']=self.instructionsLength
if sid=="allPlusVideo":
msg['whichTimer']="all"
else:
msg['whichTimer']=sid
return self.messageToId(msg,sid,output)
def displayCaption(self,sid="all"):
setCaption=0
if self.config['serverType']=="regularExperiment":
self.data['captionIndex']+=1
thisIndex=self.data['captionIndex']
sendToWho="allPlusVideo"
setCaption=1
elif self.config['serverType']=="demoExperiment":
            if sid in self.data['subjects']:
self.data['subjects'][sid].captionIndex+=1
thisIndex=self.data['subjects'][sid].captionIndex
sendToWho=sid
setCaption=1
if setCaption==1:
thisCaption=self.captions[thisIndex][0]
timeToNext=float(self.captions[thisIndex][1])
self.setCaption(thisCaption,sendToWho,"send")
if self.captions[thisIndex][0]==self.instructionsFinishedCaption:
'do nothing'
else:
kwargs={"sid":sid}
self.captionCalls[sid]=reactor.callLater(float(timeToNext)/self.instructionsPlaybackSpeed,self.displayCaption,**kwargs)
self.data['serverStatus']['instructions']['lastCaption']=thisCaption
self.updateInstructionTimeToMonitor(sid)
def endInstructionsMessage(self,message,client):
self.stopInstructions({},{})
self.endInstructions()
def endInstructions(self):
print("Instructions Finished")
self.data['serverStatus']['instructions']['started']=0
self.data['serverStatus']['instructions']['loaded']=0
self.data['serverStatus']['instructions']['playing']=0
self.data['serverStatus']['instructions']['finished']=1
self.data['serverStatus']['page']="none"
self.monitorMessage()
self.updateTaskTable()
msg={}
msg['type']="endInstructions"
# for sid in self.getSubjectIDList("allPlusVideo"):
# self.data['subjects'][sid].status["page"]="generic"
# self.data['subjects'][sid].status["message"]=["The Instructions Have Ended. <br> Plese wait for experiment to continue."]
return self.messageToId(msg,"allPlusVideo","send")
```
#### File: python/modules/logger.py
```python
from __future__ import print_function,division,absolute_import
import os
from twisted.python import log
import time
import sys
import pickle
from builtins import bytes
from pathlib import Path
class logCounter():
def __init__(self):
self.counter=0
self.fileCount=1
self.totalCounter=0
self.currentTab=1
def increment(self):
self.counter+=1
self.totalCounter+=1
out=False
if self.counter>500:
self.fileCount+=1
self.counter=0
out=True
return out
def createBlankFile(filename):
file = open(filename,'wb')
file.close()
def getFilenames(config,fileCount):
txtFile=Path(config['logFolder']).joinpath("txt","%s.txt"%(fileCount))
pickleFile=Path(config['logFolder']).joinpath("pickle","%s.pickle"%(fileCount))
return [txtFile,pickleFile]
def newFiles(config,fileCount):
[txtFile,pickleFile]=getFilenames(config,fileCount)
createBlankFile(txtFile)
createBlankFile(pickleFile)
return [txtFile,pickleFile]
class SteepLogger(object):
def __init__(self,stream,streamType,config,thisCounter,consoleMessage):
self.stream=stream
self.config=config
self.counter=thisCounter
self.type=streamType#stdErr or stdOut
self.consoleMessage=consoleMessage
[self.txtLogFilename,self.pickleLogFilename]=newFiles(self.config,self.counter.fileCount)
self.newLine()
def newLine(self):
self.currentLine=""
def write(self,obj,**kwargs):
#open files for appending
#split into lines
lines=obj.split("\n")
if "type" in kwargs:
thisType=kwargs['type']
else:
thisType=self.type
for k in range(len(lines)):
self.currentLine+=lines[k]
if k<len(lines)-1:
#write to text file
with open(self.txtLogFilename,'ab') as f:
f.write(bytes(self.currentLine+"\n", encoding="UTF-8"))
#write to pickle file ([type,line,linecounter,kwargs])
with open(self.pickleLogFilename,'ab') as f:
pickle.dump([thisType,self.currentLine.replace(" "," "),self.counter.totalCounter,kwargs],f,protocol=2)
#write to console
self.stream.write(thisType+" "*(15-len(thisType))+" "+self.currentLine+"\n")
self.stream.flush()
self.currentLine=""
if self.counter.increment():
print("Incrementing, and Creating New Log Files")
[self.txtLogFilename,self.pickleLogFilename]=newFiles(self.config,self.counter.fileCount)
self.consoleMessage()
def flush(self):
self.stream.flush()
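# Each pickle log file written above is a stream of [type, line, totalCounter,
# kwargs] records appended with pickle.dump, so it can be read back roughly
# like this (path shortened for illustration):
#
#   with open("1.pickle", "rb") as f:
#       while True:
#           try:
#               entry = pickle.load(f)
#           except EOFError:
#               break
#           print(entry[0], entry[1])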
def twistedObserver(eventDict):
kwargs={"type":"twisted"}
if 'log_namespace' not in eventDict:
sys.stdout.write(eventDict+"\n",**kwargs)
elif eventDict['isError']==1:
kwargs={"type":"twistedError"}
if 'log_text' in eventDict:
for k in eventDict['log_text'].split("\n"):
sys.stdout.write(k+"\n",**kwargs)
elif 'log_io' in eventDict:
sys.stdout.write(eventDict['log_io']+"\n",**kwargs)
else:
sys.stdout.write(str(eventDict)+"\n",**kwargs)
elif eventDict['log_namespace']=='stdout':
sys.stdout.write(eventDict['log_io']+"\n",**kwargs)
elif eventDict['log_namespace']=='twisted.python.log' and 'log_io' in eventDict:
if eventDict['log_io'].find("\"GET")==-1:
sys.stdout.write(eventDict['log_io']+"\n",**kwargs)
else:
if "log_text" in eventDict:
sys.stdout.write(eventDict['log_text']+"\n",**kwargs)
elif "log_format" in eventDict:
sys.stdout.write(eventDict['log_format']+"\n",**kwargs)
else:
sys.stdout.write(eventDict['message']+"\n",**kwargs)
def log(text,**kwargs):
print(text)
```
#### File: python/modules/monitor.py
```python
from __future__ import print_function,division,absolute_import
import json
import pickle
import time
class monitorClass():
def __init__(self):
#This should be set in experiment.py
#self.monitorTaskList=['loadInstructions','startQuiz','startExperiment']
# self.data['taskList']=self.monitorTaskList
self.monitorTasks()
self.data['taskDoneTime']=[]
self.data['monitorTableInfo']={}
self.clientInfoSpecificMonitorTableEntries()
def sendMessageToMonitorClients(self,msg):
try:
for client in self.monitorClients:
if client.page=="monitor":
msg=self.getExtraMonitorPageInfo(msg,client)
client.sendMessage(json.dumps(msg).encode('utf8'))
        except Exception as thisExcept:
            print(thisExcept)
print("can't send message to monitor, from sendMessageToMonitorClients")
#def monitorMessage(self): in mainServer.py so that it can use reactor.
def updateMonitorPage(self,client):
self.updateMonitorHeader(client)
if client.page=="monitor":
self.updateMonitorTable()
self.updateTaskTable()
elif client.page=="console":
self.lastConsoleMessageTime=time.time()-10#this ensures a page refresh
self.consoleMessage()
elif client.page=="serverInfo":
self.showServerInfo(client)
def showServerInfo(self,client):
try:
msg={'type':"showServerInfo"}
msg=self.getExtraMonitorPageInfo(msg,client)
client.sendMessage(json.dumps(msg).encode('utf8'))
        except Exception as thisExcept:
            print(thisExcept)
print("can't send message to monitor, from showServerInfo")
def updateMonitorHeader(self,client):
msg={"type":"updateMonitorHeader"}
msg['currentPage']=client.page
msg['serverStatus']=self.data['serverStatus']
msg=self.getExtraMonitorPageInfo(msg,client)
client.sendMessage(json.dumps(msg).encode('utf8'))
def newMonitorTable(self):
for client in self.monitorClients:
client.currentMonitorTable=self.currentMonitorTable
self.updateMonitorHeader(client)
self.updateMonitorTable()
self.updateTaskTable()
def updateMonitorTable(self):
try:
for client in self.monitorClients:
if client.page=="monitor":
msg={"type":"updateMonitorTable"}
msg['table']=self.getMonitorTable(client)
msg['serverStatus']=self.data['serverStatus']
msg=self.getExtraMonitorPageInfo(msg,client)
client.sendMessage(json.dumps(msg).encode('utf8'))
        except Exception as thisExcept:
            print(thisExcept)
print("can't send message to monitor, from updateMonitorTable")
def updateTaskTable(self):
msg={"type":"updateTaskTable"}
msg['taskList']=[x['name'] if isinstance(x,dict) else x for x in self.monitorTaskList]
msg['taskStatus']=self.data['taskStatus']
msg['serverStatus']=self.data['serverStatus']
msg['dataFile']=self.config['dataFilePath']
msg['dataFileURL']=self.config['dataFileURL']
msg['dataFolderURL']=self.config['dataFolderURL']
self.sendMessageToMonitorClients(msg)
def toggleAcceptingSwitch(self,message,client):
self.data['serverStatus']['acceptingClients']=1-self.data['serverStatus']['acceptingClients']
if self.data['serverStatus']['acceptingClients']==0:
print("Done Accepting Clients!")
print("%s Clients Connected"%(len(self.data['subjectIDs'])))
#check to see if notAcceptingClientsAnymore is defined in experiment
notAcceptingClientsAnymore = getattr(self,"notAcceptingClientsAnymore",None)
if callable(notAcceptingClientsAnymore):
self.notAcceptingClientsAnymore()
else:
self.notAcceptingClientsAnymoreDefault()
else:
print("Accepting Clients Now!")
self.finalPayoffsSpecificMonitorTableEntries()
self.monitorMessage()
self.updateTaskTable()
def clientInfoSpecificMonitorTableEntries(self):
self.currentMonitorTable="clientInfo"
self.data['monitorTableInfo']['clientInfo']=[
['Refresh' ,'"<a href=\'javascript:void(0)\' onclick=\'refreshClient([\\\"%s\\\"]);\'>%s</a>"%(sid,sid)'],
['Connection' ,'self.data["subjects"][sid].connectionStatus'],
['IP Address' ,'self.clientsById[sid].peer'],
['Disconnect','"<a href=\'javascript:void(0)\' onclick=\'disconnectClient([\\\"%s\\\"]);\'>%s</a>"%(sid,sid)'],
['Accept/Reject','"<a href=\'javascript:void(0)\' onclick=\'%sClient([\\\"%s\\\"]);\'>%s</a>"%("reject"*int(sid in self.data["subjectIDs"])+"accept"*int(sid in self.data["rejectIDs"]),sid,"Reject"*int(sid in self.data["subjectIDs"])+"Accept"*int(sid in self.data["rejectIDs"]))']
]
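        # Each row above is [column title, expression string]; getMonitorTableValue()
        # evaluates the expression with "self" and "sid" in scope, so a hypothetical
        # extra column could look like:
        #   ['Group', 'self.data["subjects"][sid].group']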
def finalPayoffsSpecificMonitorTableEntries(self):
try:
self.currentMonitorTable="finalPayoffs"
sid=self.data['subjectIDs'][0]
thisTable=[]
for k in self.data['subjects'][sid].finalPayoffs:
thisTable.append([k,"'$%%.02f'%%(self.data['subjects'][sid].finalPayoffs['%s'])"%(k)])
self.data['monitorTableInfo']['finalPayoffs']=thisTable
except Exception as e:
print("can't set final payoffs monitor table",str(e))
def getMonitorTableValue(self,sid,item):
try:
out=eval(item[1])
except:
out="NA"
return out
def getMonitorTable(self,client):
thisMonitorTable=[["subjectID","sid"]]+self.data['monitorTableInfo'][client.currentMonitorTable]
tableData={}
connectedRejects=[x for x in self.data['rejectIDs'] if x in self.clientsById]
connectedDuplicates=[x for x in self.data['duplicateIDs'] if x in self.clientsById]
tableData['subjectIDs']=connectedRejects+connectedDuplicates+self.data['subjectIDs']
tableData['connected']={}
#get Connection statius
for sid in tableData['subjectIDs']:
if sid in self.data['rejectIDs']:
value="rejected"
elif sid in self.data['duplicateIDs']:
value="duplicate"
else:
value=self.data['subjects'][sid].connectionStatus
tableData['connected'][sid]=value
#get communication statius
tableData['communication']={}
for sid in tableData['subjectIDs']:
tableData['communication'][sid]=self.data['subjects'][sid].communicationStatus[0]
#get Values
tableData['titles']=[x[0] for x in thisMonitorTable]
for sid in tableData['subjectIDs']:
tableData[sid]={}
for item in thisMonitorTable:
string=item[0]
value=self.getMonitorTableValue(sid,item)
tableData[sid][string]=value
tableData=self.sortMonitorTable(tableData,client.monitorTableSortColumn)
return tableData
def sortMonitorTable(self,tableData,sortColumns):
toBeSorted=[]
for sid in tableData['subjectIDs']:
thisList=[]
for c in sortColumns:
index=c[0]
                sortType=c[1]
table=c[2]
item=self.data['monitorTableInfo'][table][index-1]
if index==0:
thisList.append(sid)
else:
thisList.append(self.getMonitorTableValue(sid,item))
if sid in self.data['subjectIDs']:
thisList.append("ZZZ")
elif sid in self.data['rejectIDs']:
thisList.append("BBB")
elif sid in self.data['duplicateIDs']:
thisList.append("AAA")
toBeSorted.append(thisList)
index=0
for c in sortColumns:
index+=1
if c[1]=="reg":
toBeSorted.sort(key=lambda x: x[index])#reverse=True)
else:
toBeSorted.sort(key=lambda x: x[index],reverse=True)
tableData['subjectIDs']=[x[1] for x in toBeSorted]
return tableData
def sortMonitorTableMessage(self,message,client):
if client.monitorTableSortColumn[-1][0]==int(message['col']) and client.monitorTableSortColumn[-1][2]==client.currentMonitorTable:
if client.monitorTableSortColumn[-1][1]=="rev":
client.monitorTableSortColumn[-1][1]="reg"
elif client.monitorTableSortColumn[-1][1]=="reg":
client.monitorTableSortColumn[-1][1]="rev"
else:
client.monitorTableSortColumn.append([int(message['col']),"reg",client.currentMonitorTable])
client.monitorTableSortColumn=client.monitorTableSortColumn[1:]
self.monitorMessage()
def changeMonitorTable(self,message,client):
client.currentMonitorTable=message["table"]
client.page="monitor"
self.updateMonitorPage(client)
def changeMonitorPage(self,message,client):
client.page=message["page"]
self.updateMonitorPage(client)
def changeConsoleTab(self,message,client):
if message["tab"]=="first":
client.consoleTab=1
elif message["tab"]=="next":
client.consoleTab+=1
client.consoleTab=min(client.consoleTab,self.logCounter.fileCount)
elif message["tab"]=="previous":
client.consoleTab-=1
client.consoleTab=max(client.consoleTab,1)
elif message["tab"]=="last":
client.consoleTab=self.logCounter.fileCount
client.page="console"
self.updateMonitorPage(client)
def getExtraMonitorPageInfo(self,msg,client):
msg['consoleTabs']=[client.consoleTab,self.logCounter.fileCount]
msg['monitorTables']=[x for x in self.data['monitorTableInfo']]
msg['currentMonitorTable']=client.currentMonitorTable
return msg
def taskDone(self,message):
task=message['type']
self.data['taskDoneTime'].append([task,time.time()])
self.data['taskStatus'][task]['status']="Done"
self.saveData()
self.newMonitorTable()
def taskDoneMany(self,message):
task=message['type']
self.data['taskDoneTime'].append([task,time.time()])
self.data['taskStatus'][task]['status']=""
self.saveData()
self.newMonitorTable()
def taskToTitle(self,taskName):
title=""
for l in taskName:
if l.isupper():
title+=" "
title+=l
title=title.title()
return title
def monitorTasks(self):
self.data.setdefault('taskStatus',{})
for task in self.monitorTaskList:
taskName=task['name'] if isinstance(task,dict) else task
taskType=task if isinstance(task,dict) else {"name":taskName,"type":"regular"}
self.data['taskStatus'].setdefault(taskName,{})
self.data['taskStatus'][taskName].setdefault('status',"")
self.data['taskStatus'][taskName].setdefault('title',self.taskToTitle(taskName))
self.data['taskStatus'][taskName].setdefault('type',taskType)
``` |
{
"source": "JNRowe-retired/Dolt",
"score": 2
} |
#### File: dolt/apis/github.py
```python
from dolt import Dolt
class GitHub(Dolt):
def __init__(self, api_version="v2", *args, **kwargs):
super(GitHub, self).__init__(*args, **kwargs)
self._api_url = "https://github.com/api/%s/json" % api_version
self._url_template = "%(domain)s/%(generated_url)s"
```
#### File: dolt/apis/statsmix.py
```python
from dolt import Dolt
import httplib2
from xml.dom import minidom
class StatsMixHttp(object):
def __init__(self, api_key=None, http=None, *args, **kwargs):
self.api_key = api_key
self.http = http or httplib2.Http()
def request(self, *args, **kwargs):
if not 'headers' in kwargs:
kwargs['headers'] = {}
kwargs['headers']['X-StatsMix-Token'] = self.api_key
return self.http.request(*args, **kwargs)
class StatsMix(Dolt):
def __init__(self, api_key, *args, **kwargs):
http = StatsMixHttp(api_key, *args, **kwargs)
kwargs['http'] = http
super(StatsMix, self).__init__(*args, **kwargs)
self._api_url = "http://statsmix.com/api/v1"
def _handle_response(self, response, data):
return minidom.parseString(data)
```
#### File: Dolt/dolt/__init__.py
```python
import httplib2
import urllib
try:
import json
except ImportError:
import simplejson as json
try:
from decorator import decorator
except ImportError:
# No decorator package available. Create a no-op "decorator".
def decorator(f):
def decorate(_func):
def inner(*args, **kwargs):
return f(_func, *args, **kwargs)
return inner
return decorate
@decorator
def _makes_clone(_func, *args, **kw):
"""
A decorator that returns a clone of the current object so that
we can re-use the object for similar requests.
"""
self = args[0]._clone()
_func(self, *args[1:], **kw)
return self
class Dolt(object):
"""
A dumb little wrapper around RESTful interfaces.
Subclass `Dolt` to create specific APIs.
Example::
class MyApi(Dolt):
_api_url = 'https://api.example.com'
_url_template = '%(domain)s/%(generated_url)s.json'
api = MyApi()
print api.images()
"""
_api_url = ''
"""The base url for this API"""
_url_template = '%(domain)s/%(generated_url)s'
"""
Template used to generate URLs.
- `%(domain)s` is the `_api_url`
- `%(generated_url)s` is where the URL parts go.
"""
_stack_collapser = '/'.join
_params_template = '?%s'
def __init__(self, http=None):
self._supported_methods = ("GET", "POST", "PUT", "HEAD", "DELETE", "OPTIONS")
self._attribute_stack = []
self._method = "GET"
self._body = None
self._http = http or httplib2.Http()
self._params = {}
self._headers = {}
def __call__(self, *args, **kwargs):
url = self.get_url(*[str(a) for a in args], **kwargs)
response, data = self._http.request(url, self._method, body=self._body, headers=self._headers)
return self._handle_response(response, data)
def _generate_params(self, params):
return self._params_template % urllib.urlencode(params)
def _handle_response(self, response, data):
"""
Deserializes JSON if the content-type matches, otherwise returns the response
body as is.
"""
        # Content-Type headers can include additional parameters (RFC 1521), so
# we split on ; to match against only the type/subtype
if data and response.get('content-type', '').split(';')[0] in (
'application/json',
'application/x-javascript',
'text/javascript',
'text/x-javascript',
'text/x-json'
):
return json.loads(data)
else:
return data
@_makes_clone
def __getitem__(self, name):
"""
Adds `name` to the URL path.
"""
self._attribute_stack.append(name)
return self
@_makes_clone
def __getattr__(self, name):
"""
Sets the HTTP method for the request or adds `name` to the URL path.
::
>>> dolt.GET._method == 'GET'
True
>>> dolt.foo.bar.get_url()
'/foo/bar'
"""
if name in self._supported_methods:
self._method = name
elif not name.endswith(')'):
self._attribute_stack.append(name)
return self
@_makes_clone
def with_params(self, **params):
"""
Add/overwrite URL query parameters to the request.
"""
self._params.update(params)
return self
@_makes_clone
def with_body(self, body=None, **params):
"""
Add a body to the request.
When `body` is a:
- string, it will be used as is.
- dict or list of (key, value) pairs, it will be form encoded
- None, remove request body
- anything else, a TypeError will be raised
If `body` is a dict or None you can also pass in keyword
arguments to add to the body.
::
>>> dolt.with_body(dict(key='val'), foo='bar')._body
'foo=bar&key=val'
"""
if isinstance(body, (tuple, list)):
body = dict(body)
if params:
# Body must be None or able to be a dict
if isinstance(body, dict):
body.update(params)
elif body is None:
body = params
else:
raise ValueError('Body must be None or a dict if used with params, got: %r' % body)
if isinstance(body, basestring):
self._body = body
elif isinstance(body, dict):
self._body = urllib.urlencode(body)
elif body is None:
self._body = None
else:
raise TypeError('Invalid body type %r' % body)
return self
def with_json(self, data=None, **params):
"""
Add a json body to the request.
:param data: A json string, a dict, or a list of key, value pairs
:param params: A dict of key value pairs to JSON encode
"""
if isinstance(data, (tuple, list)):
data = dict(data)
if params:
# data must be None or able to be a dict
if isinstance(data, dict):
data.update(params)
elif data is None:
data = params
else:
raise ValueError('Data must be None or a dict if used with params, got: %r' % data)
req = self.with_headers({'Content-Type': 'application/json', 'Accept': 'application/json'})
if isinstance(data, basestring):
# Looks like it's already been encoded
return req.with_body(data)
else:
return req.with_body(json.dumps(data))
@_makes_clone
def with_headers(self, headers=None, **params):
"""
Add headers to the request.
:param headers: A dict, or a list of key, value pairs
:param params: A dict of key value pairs
"""
if isinstance(headers, (tuple, list)):
headers = dict(headers)
if params:
if isinstance(headers, dict):
headers.update(params)
elif headers is None:
headers = params
self._headers.update(headers)
return self
def get_url(self, *paths, **params):
"""
Returns the URL for this request.
:param paths: Additional URL path parts to add to the request
:param params: Additional query parameters to add to the request
"""
path_stack = self._attribute_stack[:]
if paths:
path_stack.extend(paths)
u = self._stack_collapser(path_stack)
url = self._url_template % {
"domain": self._api_url,
"generated_url" : u,
}
if self._params or params:
internal_params = self._params.copy()
internal_params.update(params)
url += self._generate_params(internal_params)
return url
def _clone(self):
"""
Clones the state of the current operation.
The state is cloned so that you can freeze the state at a certain point for re-use.
::
>>> cat = dolt.cat
>>> cat.get_url()
'/cat'
>>> o = cat.foo
>>> o.get_url()
'/cat/foo'
>>> cat.get_url()
'/cat'
"""
cls = self.__class__
q = cls.__new__(cls)
q.__dict__ = self.__dict__.copy()
q._params = self._params.copy()
q._headers = self._headers.copy()
q._attribute_stack = self._attribute_stack[:]
return q
try:
__IPYTHON__
def __dir__(self):
return [
'_supported_methods',
'_attribute_stack',
'_method',
'_body',
'_http',
'_params',
'_headers',
'_api_url',
'_url_template',
'_stack_collapser',
'_params_template',
'__init__',
'__call__',
'_handle_response',
'__getattr__',
'get_url',
'__dir__',
]
_getAttributeNames = trait_names = __dir__
except NameError:
pass
class Simpleton(Dolt):
"""
A dumber little wrapper around RESTful interfaces.
Example::
api = Simpleton('http://api.example.com')
print api.images()
"""
def __init__(self, base_url, http=None):
super(Simpleton, self).__init__(http=http)
self._api_url = base_url
``` |
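A short sketch of how the pieces above compose: attribute access and item access build the URL path, `with_params` adds query parameters, and the HTTP verb is selected by attribute name. The host and path are hypothetical and nothing is sent over the wire:

```python
# Illustrative only: URL building with Simpleton, no request is issued.
api = Simpleton("http://api.example.com")
repos = api.users["jnrowe"].repos.with_params(page=2)
print(repos.get_url())    # http://api.example.com/users/jnrowe/repos?page=2
print(repos.GET._method)  # GET
```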
{
"source": "JNRowe-retired/github-cli",
"score": 3
} |
#### File: src/github/issues.py
```python
import sys
import urllib
import webbrowser as browser
from optparse import OptionParser
try:
import simplejson as json
except ImportError:
try:
import json
except ImportError:
print "error: simplejson required"
sys.exit(1)
from github.utils import urlopen2, get_remote_info, edit_text, \
get_remote_info_from_option, get_prog, Pager, wrap_text, get_underline
from github.version import get_version
def smart_unicode(text):
try:
return str(text)
except UnicodeEncodeError:
return text.encode('utf-8')
def format_issue(issue, verbose=True):
output = []
if verbose:
indent = ""
else:
indent = " " * (5 - len(str(issue['number'])))
title = "%s%s. %s" % (indent, issue['number'], issue['title'])
title = smart_unicode(title)
if not verbose:
output.append(title[:80])
if verbose:
title = wrap_text(title)
output.append(title)
underline = get_underline(title)
output.append(underline)
if issue['body']:
body = smart_unicode(wrap_text(issue['body']))
output.append(body)
output.append(" state: %s" % issue['state'])
output.append(" user: %s" % issue['user'])
output.append(" votes: %s" % issue['votes'])
output.append(" created: %s" % issue['created_at'])
updated = issue.get('updated_at')
if updated and not updated == issue['created_at']:
output.append(" updated: %s" % updated)
output.append(" comments: %s" % issue.get('comments', 0))
output.append(" ")
return output
def format_comment(comment, nr, total):
timestamp = comment.get("updated_at", comment["created_at"])
title = "comment %s of %s by %s (%s)" % (nr, total, comment["user"],
timestamp)
title = smart_unicode(title)
output = [title]
underline = get_underline(title)
output.append(underline)
body = smart_unicode(wrap_text(comment['body']))
output.append(body)
return output
def pprint_issue(issue, verbose=True):
lines = format_issue(issue, verbose)
lines.insert(0, " ") # insert empty first line
print "\n".join(lines)
def handle_error(result):
output = []
for msg in result['error']:
if msg == result['error'][0]:
output.append(msg['error'])
else:
output.append("error: %s" % msg['error'])
error_msg = "\n".join(output)
raise Exception(error_msg)
def validate_number(number, example):
msg = "number required\nexample: %s" % example.replace("%prog", get_prog())
if not number:
raise Exception(msg)
else:
try:
int(number)
except:
raise Exception(msg)
def get_key(data, key):
try:
return data[key]
except KeyError:
raise Exception("unexpected failure")
def create_edit_issue(issue=None, text=None):
main_text = """# Please explain the issue.
# The first line will be used as the title.
# Lines starting with `#` will be ignored."""
if issue:
issue['main'] = main_text
template = """%(title)s
%(body)s
%(main)s
#
# number: %(number)s
# user: %(user)s
# votes: %(votes)s
# state: %(state)s
# created: %(created_at)s""" % issue
else:
template = "\n%s" % main_text
if text:
# \n on the command-line becomes \\n; undoing this:
text = text.replace("\\n", "\n")
else:
text = edit_text(template)
if not text:
raise Exception("can not submit an empty issue")
lines = text.splitlines()
title = lines[0]
body = "\n".join(lines[1:]).strip()
return {'title': title, 'body': body}
def create_comment(issue):
inp = """
# Please enter a comment.
# Lines starting with `#` will be ignored.
#
# number: %(number)s
# user: %(user)s
# votes: %(votes)s
# state: %(state)s
# created: %(created_at)s""" % issue
out = edit_text(inp)
if not out:
raise Exception("can not submit an empty comment")
lines = out.splitlines()
comment = "\n".join(lines).strip()
return comment
class Commands(object):
def __init__(self, user, repo):
self.user = user
self.repo = repo
self.url_template = "http://github.com/api/v2/json/issues/%s/%s/%s"
def search(self, search_term=None, state='open', verbose=False, **kwargs):
if not search_term:
example = "%s search experimental" % get_prog()
msg = "error: search term required\nexample: %s" % example
print msg
sys.exit(1)
search_term_quoted = urllib.quote_plus(search_term)
search_term_quoted = search_term_quoted.replace(".", "%2E")
        result = self.__submit('search', search_term_quoted, state)
issues = get_key(result, 'issues')
header = "# searching for '%s' returned %s issues" % (search_term,
len(issues))
printer = Pager()
printer.write(header)
for issue in issues:
lines = format_issue(issue, verbose)
printer.write("\n".join(lines))
printer.close()
def list(self, state='open', verbose=False, webbrowser=False, **kwargs):
if webbrowser:
issues_url_template = "http://github.com/%s/%s/issues/%s"
if state == "closed":
issues_url = issues_url_template % (self.user, self.repo,
state)
else:
issues_url = issues_url_template % (self.user, self.repo, "")
try:
browser.open(issues_url)
except:
print "error: opening page in web browser failed"
else:
sys.exit(0)
if state == 'all':
states = ['open', 'closed']
else:
states = [state]
printer = Pager()
for st in states:
header = "# %s issues on %s/%s" % (st, self.user, self.repo)
printer.write(header)
result = self.__submit('list', st)
issues = get_key(result, 'issues')
if issues:
for issue in issues:
lines = format_issue(issue, verbose)
printer.write("\n".join(lines))
else:
printer.write("no %s issues available" % st)
if not st == states[-1]:
printer.write() # new line between states
printer.close()
def show(self, number=None, verbose=False, webbrowser=False, **kwargs):
validate_number(number, example="%prog show 1")
if webbrowser:
issue_url_template = "http://github.com/%s/%s/issues/%s/find"
issue_url = issue_url_template % (self.user, self.repo, number)
try:
browser.open(issue_url)
except:
print "error: opening page in web browser failed"
else:
sys.exit(0)
issue = self.__get_issue(number)
if not verbose:
pprint_issue(issue)
else:
printer = Pager()
lines = format_issue(issue, verbose=True)
lines.insert(0, " ")
printer.write("\n".join(lines))
if issue.get("comments", 0) > 0:
comments = self.__submit('comments', number)
comments = get_key(comments, 'comments')
lines = [] # reset
total = len(comments)
for i in range(total):
comment = comments[i]
lines.extend(format_comment(comment, i+1, total))
lines.append(" ")
printer.write("\n".join(lines))
printer.close()
def open(self, message=None, **kwargs):
post_data = create_edit_issue(text=message)
result = self.__submit('open', data=post_data)
issue = get_key(result, 'issue')
pprint_issue(issue)
def close(self, number=None, **kwargs):
validate_number(number, example="%prog close 1")
result = self.__submit('close', number)
issue = get_key(result, 'issue')
pprint_issue(issue)
def reopen(self, number=None, **kwargs):
validate_number(number, example="%prog open 1")
result = self.__submit('reopen', number)
issue = get_key(result, 'issue')
pprint_issue(issue)
def edit(self, number=None, **kwargs):
validate_number(number, example="%prog edit 1")
gh_issue = self.__get_issue(number)
output = {'title': gh_issue['title'], 'body': gh_issue['body']}
post_data = create_edit_issue(gh_issue)
if post_data['title'] == output['title'] and \
post_data['body'].splitlines() == output['body'].splitlines():
print "no changes found"
sys.exit(1)
result = self.__submit('edit', number, data=post_data)
issue = get_key(result, 'issue')
pprint_issue(issue)
def label(self, command, label, number=None, **kwargs):
validate_number(number, example="%prog label %s %s 1" % (command,
label))
if command not in ['add', 'remove']:
msg = "label command should use either 'add' or 'remove'\n"\
"example: %prog label add %s %s" % (label, number)
raise Exception(msg)
label = urllib.quote(label)
label = label.replace(".", "%2E") # this is not done by urllib.quote
result = self.__submit('label/%s' % command, label, number)
labels = get_key(result, 'labels')
if labels:
print "labels for issue #%s:" % number
for label in labels:
print "- %s" % label
else:
print "no labels found for issue #%s" % number
def comment(self, number=None, **kwargs):
validate_number(number, example="%prog comment 1")
gh_issue = self.__get_issue(number)
comment = create_comment(gh_issue)
post_data = {'comment': comment}
result = self.__submit('comment', number, data=post_data)
returned_comment = get_key(result, 'comment')
if returned_comment:
print "comment for issue #%s submitted successfully" % number
def __get_issue(self, number):
result = self.__submit('show', number)
return get_key(result, 'issue')
def __submit(self, action, *args, **kwargs):
base_url = self.url_template % (action, self.user, self.repo)
args_list = list(args)
args_list.insert(0, base_url)
url = "/".join(args_list)
page = urlopen2(url, **kwargs)
result = json.load(page)
page.close()
if result.get('error'):
handle_error(result)
else:
return result
def main():
usage = """usage: %prog command [args] [options]
Examples:
%prog list [-s open|closed|all] show open, closed or all issues
(default: open)
%prog [-s o|c|a] -v same as above, but with issue details
%prog same as: %prog list
%prog -v same as: %prog list -v
%prog [-s o|c] -w show issues' GitHub page in web browser
(default: open)
%prog show <nr> show issue <nr>
%prog show <nr> -v same as above, but with comments
%prog <nr> same as: %prog show <nr>
%prog <nr> -w show issue <nr>'s GitHub page in web
browser
%prog open (o) create a new issue (with $EDITOR)
%prog open (o) -m <msg> create a new issue with <msg> content
(optionally, use \\n for new lines; first
line will be the issue title)
%prog close (c) <nr> close issue <nr>
%prog open (o) <nr> reopen issue <nr>
%prog edit (e) <nr> edit issue <nr> (with $EDITOR)
%prog label add (al) <label> <nr> add <label> to issue <nr>
%prog label remove (rl) <label> <nr> remove <label> from issue <nr>
%prog search (s) <term> search for <term> (default: open)
%prog s <term> [-s o|c] -v same as above, but with details
%prog s <term> -s closed only search in closed issues
%prog comment (m) <nr> create a comment for issue <nr>
(with $EDITOR)
%prog -r <user>/<repo> specify a repository (can be used for
all commands)
%prog -r <repo> specify a repository (gets user from
global git config)"""
description = """Description:
command-line interface to GitHub's Issues API (v2)"""
parser = OptionParser(usage=usage, description=description)
parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
default=False, help="show issue details (only for show, list and "\
"search commands) [default: False]")
parser.add_option("-s", "--state", action="store", dest="state",
type='choice', choices=['o', 'open', 'c', 'closed', 'a', 'all'],
default='open', help="specify state (only for list and search "\
"(except `all`) commands) choices are: open (o), closed (c), all "\
"(a) [default: open]")
parser.add_option("-m", "--message", action="store", dest="message",
default=None, help="message content for opening an issue without "\
"using the editor")
parser.add_option("-r", "--repo", "--repository", action="store",
dest="repo", help="specify a repository (format: "\
"`user/repo` or just `repo` (latter will get the user from the "\
"global git config))")
parser.add_option("-w", "--web", "--webbrowser", action="store_true",
dest="webbrowser", default=False, help="show issue(s) GitHub page "\
"in web browser (only for list and show commands) [default: False]")
parser.add_option("-V", "--version", action="store_true",
dest="show_version", default=False,
help="show program's version number and exit")
class CustomValues:
pass
(options, args) = parser.parse_args(values=CustomValues)
kwargs = dict([(k, v) for k, v in options.__dict__.items() \
if not k.startswith("__")])
if kwargs.get('show_version'):
print("ghi %s" % get_version('short'))
sys.exit(0)
if kwargs.get('state'):
kwargs['state'] = {'o': 'open', 'c': 'closed', 'a': 'all'}.get(
kwargs['state'], kwargs['state'])
if args:
cmd = args[0]
try:
nr = str(int(cmd))
if cmd == nr:
cmd = 'show'
args = (cmd, nr)
except:
pass
else:
cmd = 'list' # default command
if cmd == 'search':
search_term = " ".join(args[1:])
args = (args[0], search_term)
# handle command aliases
cmd = {'o': 'open', 'c': 'close', 'e': 'edit', 'm': 'comment',
's': 'search'}.get(cmd, cmd)
if cmd == 'open' and len(args) > 1:
cmd = 'reopen'
if cmd == 'al' or cmd == 'rl':
alias = cmd
cmd = 'label'
args_list = [cmd, {'a': 'add', 'r': 'remove'}[alias[0]]]
args_list.extend(args[1:])
args = tuple(args_list)
try:
repository = kwargs.get('repo')
if repository:
user, repo = get_remote_info_from_option(repository)
else:
user, repo = get_remote_info()
commands = Commands(user, repo)
getattr(commands, cmd)(*args[1:], **kwargs)
except AttributeError:
return "error: command '%s' not implemented" % cmd
except Exception, info:
return "error: %s" % info
if __name__ == '__main__':
main()
```
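The formatting helpers above can be exercised without touching the network; with `verbose=False`, `format_issue` collapses an issue to a single right-aligned summary line. The issue dict below is made up for illustration and mimics the shape the handlers above expect:

```python
# Illustrative only: a fake issue dict in the shape the API handlers expect.
issue = {'number': 7, 'title': 'Example issue', 'body': 'Some details',
         'state': 'open', 'user': 'someone', 'votes': 0,
         'created_at': '2011/01/01 12:00:00 -0700', 'comments': 0}
print("\n".join(format_issue(issue, verbose=False)))
#     7. Example issue
```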
#### File: github-cli/tests/test_version.py
```python
from nose.tools import eq_
import github.version
def test_get_version():
get_version = github.version.get_version
for tup, short, normal, verbose in [
((0, 2, 0, 'final', 0), '0.2.0', '0.2', '0.2 final'),
((0, 2, 7, 'final', 0), '0.2.7', '0.2.7', '0.2.7 final'),
((0, 2, 0, 'alpha', 1), '0.2a1', '0.2 alpha 1', '0.2 alpha 1'),
((0, 2, 7, 'beta', 1), '0.2.7b1', '0.2.7 beta 1', '0.2.7 beta 1'),
((0, 2, 0, 'release candidate', 1),
'0.2rc1', '0.2 release candidate 1', '0.2 release candidate 1'),
((1, 0, 0, 'alpha', 0), '1.0a', '1.0 pre-alpha', '1.0 pre-alpha'),
]:
github.version.VERSION = tup
yield eq_, get_version('short'), short
yield eq_, get_version('normal'), normal
yield eq_, get_version('verbose'), verbose
``` |
{
"source": "JNRowe-retired/http",
"score": 4
} |
#### File: http/http/response.py
```python
from headers import Headers
from url import Url
class Response(object):
"""
The ``Response`` object encapsulates HTTP style responses.
"""
def __init__(self, status, headers=Headers(), content=None,
message=None, request=None):
"""
Construct a new ``Response`` object.
:param status: HTTP status code for the response
:type status: integer
:param headers: HTTP headers
        :type headers: a list of tuples or a class:`Headers` object
:param content: content
:param message: HTTP message for the response
:param request: origin Request object used
:type request: class:`Request`
.. attribute:: redirects
List of redirections
"""
self._status = status
self.message = message
self.redirects = list()
if (not isinstance(headers, Headers)):
headers = Headers(headers)
self._headers = headers
self._content = content
self._request = request
@property
def status(self):
"""
Returns the HTTP status
:rtype: int
"""
return int(self._status)
@property
def is_info(self):
"""
Returns if the response was informational
:rtype: boolean
"""
if self.status >= 100 and self.status < 200:
return True
return False
@property
def is_success(self):
"""
        Returns if the response was a success
        :rtype: boolean
"""
if self.status >= 200 and self.status < 300:
return True
return False
@property
def is_redirect(self):
"""
        Returns if the response was a redirect
:rtype: boolean
"""
if self.status >= 300 and self.status < 400:
return True
return False
@property
def is_client_error(self):
"""
Returns if the response was a client error
:rtype: boolean
"""
if self.status >= 400 and self.status < 500:
return True
return False
@property
def is_server_error(self):
"""
        Returns if the response was a server error
        :rtype: boolean
        """
        if self.status >= 500 and self.status < 600:
            return True
        return False
@property
def is_error(self):
"""
Returns if the response was an error
:rtype: boolean
"""
if self.is_client_error or self.is_server_error:
return True
return False
@property
def base(self):
"""
Returns the base URI for this response
:rtype: class:`Url` or None
"""
url = None
if self.header('Content-Base'):
url = self.header('Content-Base')
if self.header('Content-Location'):
url = self.header('Content-Location')
if url is None and self.request:
url = self.request.url
if not url:
return None
if not isinstance(url, Url):
url = Url(url)
return url
@property
def request(self):
"""
Returns the request object that caused that response
:rtype: class:`Request`
"""
return self._request
@property
def content(self):
"""
Returns the actual content of the response
:rtype: string
"""
return self._content
@content.setter
def content(self, content):
"""
Set the actual content of the response
"""
self._content = content
def header(self, name):
"""
Returns the value for a given header
:rtype: string
"""
return self._headers.get(name)
@property
def headers(self):
"""
Returns the class:`Headers` object
:rtype: class:`Headers`
"""
return self._headers
@property
def status_line(self):
"""
Returns the string '<code> <message>'
:rtype: string
"""
return "{0} {1}".format(self.status, self.message)
@property
def last_modified(self):
"""
        Returns a datetime object representing the *Last-Modified* header
:rtype: class:`datetime`
"""
return self._headers.last_modified
@property
def date(self):
"""
        Returns a datetime object representing the *Date* header
:rtype: class:`datetime`
"""
return self._headers.date
@property
def expires(self):
"""
        Returns a datetime object representing the *Expires* header
:rtype: class:`datetime`
"""
return self._headers.expires
@property
def content_length(self):
"""
Returns the content-length of the actual response
:rtype: int
"""
return self._headers.content_length
@property
def content_is_text(self):
"""
Returns ``True`` if the "Content-Type" header is set to text
:rtype: boolean
"""
return self._headers.content_is_text
@property
def content_is_xml(self):
"""
Returns ``True`` if the "Content-Type" header is set to XML
:rtype: boolean
"""
return self._headers.content_is_xml
@property
def content_is_xhtml(self):
"""
Returns True if the "Content-Type" header is set to XHTML
:rtype: boolean
"""
return self._headers.content_is_xhtml
```
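A quick sketch of the status predicates defined above; the status code and message are arbitrary and no HTTP traffic is involved:

```python
# Illustrative only: classify a response by its status code.
r = Response(404, message='Not Found')
print(r.status_line)       # 404 Not Found
print(r.is_client_error)   # True
print(r.is_error)          # True
print(r.is_success)        # False
```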
#### File: http/tests/test_request.py
```python
from unittest2 import TestCase
from http import Request, Headers, Url
from datetime import datetime
class TestClient(TestCase):
def test_base(self):
request = Request('GET', 'http://github.com')
self.assertTrue(request)
self.assertEqual(request.method, 'GET')
def test_method(self):
for method in ['GET', 'get', 'gEt']:
request = Request(method, 'http')
self.assertEqual(request.method, method)
request = Request('GET', 'http')
request.method = 'POST'
self.assertEqual(request.method, 'POST')
request.method = 'post'
self.assertEqual(request.method, 'post')
def test_headers(self):
request = Request('GET', 'http', {'Connection': 'keep-alive'})
self.assertIsInstance(request.headers, Headers)
headers = Headers({'Connection': 'keep-alive'})
request = Request('GET', 'http', headers)
self.assertTrue(request)
self.assertEqual(request.header('connection'), 'keep-alive')
def test_header(self):
request = Request('GET', 'http://lumberjaph.net')
request.header('If-Modified-Since',
'Wed, 08 Feb 2012 05:08:50 GMT')
self.assertEqual(request.header('If-Modified-Since'),
'Wed, 08 Feb 2012 05:08:50 GMT')
def test_url(self):
request = Request('GET', 'http')
self.assertIsInstance(request.url, Url)
def test_content(self):
request = Request('GET', 'http', content='foo')
self.assertEqual(request.content, 'foo')
def test_date_headers(self):
request = Request('GET', 'http')
request.if_modified_since = datetime(2011, 12, 12, 12, 0, 0)
self.assertEqual(request._headers.get('If-Modified-Since'), 'Mon, 12 Dec 2011 12:00:00 GMT')
``` |
{
"source": "JNRowe-retired/incoming",
"score": 4
} |
#### File: incoming/incoming/datatypes.py
```python
class Types(object):
'''
Base class for creating new datatypes for validation. When this class is
sub-classed, :meth:`validate` must be implemented in the sub-class and
    this method will be responsible for the actual validation.
'''
def __init__(self, required=None, error=None, *args, **kwargs):
'''
        :param bool required: whether this particular field is required. This
                              allows the required setting to be specified at
                              the field level.
:param str error: a generic error message that will be used by
:class:`incoming.PayloadValidator` in the returned
:class:`incoming.incoming.PayloadErrors` object when
the validation test fails.
'''
self.required = required
self.error = error or self._DEFAULT_ERROR
def validate(self, val, *args, **kwargs):
'''
This method must be overridden by the sub-class for implementing the
validation test. The overridden method can be a regular method or
``classmethod`` or ``staticmethod``. This method must only return a
:class:`bool` value.
:param val: the value that has to be validated.
:returns bool: if the implemented validation test passed or not.
'''
raise NotImplementedError('validate() method must be implemented in '
'the sub-class.')
def test(self, key, val, payload, errors):
'''
Responsible for running the validate method. This method hides the gory
checks of identifying if the validate method is a normal method or a
``staticmethod`` or a ``classmethod``. This method is more of a helper
for :class:`incoming.PayloadValidator`.
        :param str key: the key whose value is going to get validated.
:param val: the value on which the test is to be validated.
:param payload: the entire payload to which the key and val belong to.
:param errors: :class:`incoming.incoming.PayloadErrors` object.
:returns bool: if the validation passed, depends on validate.
'''
try:
method = getattr(self, 'validate')
except AttributeError:
try:
method = self.__class__.validate
except AttributeError:
                raise NotImplementedError('validate() must be implemented '
                                          'either as a method of the class or '
                                          'a staticmethod of the class.')
result = method(val, key=key, payload=payload, errors=errors)
# Check if the value returned by the validate method is a boolean
if not isinstance(result, bool):
raise TypeError('The value returned by validate() method must be '
'a bool value.')
if not result:
errors.prepend(key, self.error)
return False
return True
class Instance(Types):
'''
    Sub-class of :class:`Types` for type-instance checks. Validates if a value
    is of a given type or not.
    This class should not be used directly; it should be subclassed.
'''
type_ = None
@classmethod
def validate(cls, val, *args, **kwargs):
if cls.type_ is None:
raise NotImplementedError
else:
return isinstance(val, cls.type_)
class Integer(Instance):
'''
Sub-class of :class:`Types` class for Integer type. Validates if a value is
a :class:`int` value.
'''
_DEFAULT_ERROR = 'Invalid data. Expected an integer.'
type_ = int
class Float(Instance):
'''
Sub-class of :class:`Types` class for Float type. Validates if a value is
a :class:`float` value.
'''
_DEFAULT_ERROR = 'Invalid data. Expected a float.'
type_ = float
class Number(Instance):
'''
Sub-class of :class:`Types` class for Number type. Validates if a value is
a :class:`int` value or :class:`float` value.
'''
    _DEFAULT_ERROR = 'Invalid data. Expected an integer or a float.'
type_ = (int, float)
class String(Instance):
'''
Sub-class of :class:`Types` class for String type. Validates if a value is
a :class:`str` value or :class:`unicode` value.
'''
_DEFAULT_ERROR = 'Invalid data. Expected a string.'
type_ = basestring
class Array(Instance):
'''
Sub-class of :class:`Types` class for Array type. Validates if a value is
a :class:`list` object.
'''
_DEFAULT_ERROR = 'Invalid data. Expected an array.'
type_ = list
class Boolean(Instance):
'''
Sub-class of :class:`Types` class for Boolean type. Validates if a value is
a :class:`bool`.
'''
_DEFAULT_ERROR = 'Invalid data. Expected a boolean value.'
type_ = bool
class Function(Types):
'''
Sub-class of :class:`Types` class for Function type. This type allows using
functions for validation. Using :class:`incoming.datatypes.Function`,
validation tests can be written in a function or a regular method, a
``staticmethod`` or a ``classmethod`` on the sub-class of
:class:`incoming.PayloadValidator`.
'''
_DEFAULT_ERROR = 'Invalid data.'
def __init__(self, func, *args, **kwargs):
'''
        :param func: any callable that accepts ``val``, ``*args`` and
                     ``**kwargs`` and returns a :class:`bool` value, True
if ``val`` validates, False otherwise.
'''
if not callable(func) and not isinstance(func, str):
raise TypeError('function must be a callable or a string '
'representing method belonging to the validator '
'class.')
self.func = func
super(Function, self).__init__(*args, **kwargs)
def validate(self, val, *args, **kwargs):
result = self.func(val, *args, **kwargs)
if not isinstance(result, bool):
raise ValueError('Validation function does not return a bool.')
return result
class JSON(Types):
'''
Sub-class of :class:`Types` class for JSON type. This type allows writing
validation for nested JSON.
'''
_DEFAULT_ERROR = 'Invalid data. Expected JSON.'
def __init__(self, cls, *args, **kwargs):
'''
:param cls: sub-class of :class:`incoming.PayloadValidator` for
validating nested JSON. Class object can be accepted. In
case you want to have the class nested withing the parent
validator, you can pass the name of the class as a string
as well.
'''
self.cls = cls
super(JSON, self).__init__(*args, **kwargs)
def validate(self, val, *args, **kwargs):
if not isinstance(val, dict):
return False
obj = self.cls()
is_valid, result = obj.validate(val)
if not is_valid:
kwargs['errors'].append(kwargs['key'], result)
return False
return True
```
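A minimal sketch of how these types are meant to be composed. It assumes the package re-exports `PayloadValidator` at the top level and that `validate()` returns an `(is_valid, errors)` pair, as the nested `JSON` type and the tests below rely on; the field names and rules are made up:

```python
# Illustrative only: a validator built from the datatypes defined above.
from incoming import datatypes, PayloadValidator

class PersonValidator(PayloadValidator):
    name = datatypes.String()
    age = datatypes.Function(func='validate_age')

    def validate_age(self, val, *args, **kwargs):
        # Accept only non-negative integers.
        return isinstance(val, int) and val >= 0

is_valid, errors = PersonValidator().validate({'name': 'Ada', 'age': 36})
```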
#### File: incoming/tests/test_datatypes.py
```python
import unittest
from . import TestCase
from .. import datatypes
from ..incoming import PayloadErrors, PayloadValidator
class TestTypes(TestCase):
def test_init_raises_attribute_error(self):
self.assertRaises(AttributeError, datatypes.Types)
def test_validate_raises_not_implemented_error(self):
class SomeType(datatypes.Types):
_DEFAULT_ERROR = 'Some error message.'
self.assertRaises(NotImplementedError, SomeType().validate, 'test')
class TestInteger(TestCase):
def test_integer_validates(self):
self.assertTrue(datatypes.Integer.validate(2))
self.assertTrue(datatypes.Integer.validate(-2))
self.assertFalse(datatypes.Integer.validate(2.1))
class TestFloat(TestCase):
def test_float_validates(self):
self.assertFalse(datatypes.Float.validate(2))
self.assertFalse(datatypes.Float.validate(-2))
self.assertTrue(datatypes.Float.validate(2.1))
self.assertTrue(datatypes.Float.validate(-2.1))
class TestNumber(TestCase):
def test_number_validates(self):
self.assertTrue(datatypes.Number.validate(2))
self.assertTrue(datatypes.Number.validate(-2))
self.assertTrue(datatypes.Number.validate(2.1))
self.assertTrue(datatypes.Number.validate(-2.1))
class TestString(TestCase):
def test_string_validates(self):
self.assertTrue(datatypes.String.validate('Some string'))
self.assertTrue(datatypes.String.validate(u'Some string'))
self.assertTrue(datatypes.String.validate(r'Some string'))
self.assertFalse(datatypes.String.validate(1))
class TestArray(TestCase):
def test_array_validates(self):
self.assertTrue(datatypes.Array.validate(['item1', 'item2']))
self.assertFalse(datatypes.Array.validate({}))
class TestBoolean(TestCase):
def test_boolean_validates(self):
self.assertTrue(datatypes.Boolean.validate(True))
self.assertTrue(datatypes.Boolean.validate(False))
class TestFunction(TestCase):
def test_function_validates_with_function(self):
def test_func(val, *args, **kwargs):
if val < 18:
return False
return True
self.assertFalse(datatypes.Function(func=test_func).validate(10))
self.assertTrue(datatypes.Function(func=test_func).validate(18))
def test_function_validates_with_methods(self):
class PseudoNameSpace(object):
@staticmethod
def test_func_static(val, *args, **kwargs):
if val < 18:
return False
return True
@classmethod
def test_func_class(cls, val, *args, **kwargs):
if val < 18:
return False
return True
def test_func_regular(self, val, *arg, **kwargs):
if val < 18:
return False
return True
# staticmethod
self.assertFalse(datatypes.Function(
func=PseudoNameSpace.test_func_static).validate(10))
self.assertTrue(datatypes.Function(
func=PseudoNameSpace.test_func_static).validate(18))
# classmethod
self.assertFalse(datatypes.Function(
func=PseudoNameSpace.test_func_class).validate(10))
self.assertTrue(datatypes.Function(
func=PseudoNameSpace.test_func_class).validate(18))
# regular
self.assertFalse(datatypes.Function(
func=PseudoNameSpace().test_func_regular).validate(10))
self.assertTrue(datatypes.Function(
func=PseudoNameSpace().test_func_regular).validate(18))
class TestJSON(TestCase):
def test_json_validates_normal_types(self):
class CustomJSONValidator(PayloadValidator):
age = datatypes.Integer()
errors = PayloadErrors()
result = datatypes.JSON(CustomJSONValidator).validate(
dict(age=10),
key='nested',
errors=errors)
self.assertTrue(result)
self.assertFalse('nested' in errors)
self.assertTrue(len(errors.to_dict().keys()) == 0)
result = datatypes.JSON(CustomJSONValidator).validate(
dict(age='10'),
key='nested',
errors=errors)
self.assertFalse(result)
self.assertTrue('nested' in errors)
self.assertTrue(len(errors.to_dict().keys()) == 1)
def test_json_validates_function_types(self):
class CustomJSONValidator(PayloadValidator):
age = datatypes.Function(func='validate_age')
def validate_age(self, val, *args, **kwargs):
if not isinstance(val, int):
return False
if val < 18:
return False
return True
errors = PayloadErrors()
result = datatypes.JSON(CustomJSONValidator)
result.validate(
dict(age=20),
key='nested',
errors=errors)
self.assertTrue(result)
self.assertFalse('nested' in errors)
self.assertTrue(len(errors.to_dict().keys()) == 0)
errors = PayloadErrors()
result = datatypes.JSON(CustomJSONValidator).validate(
dict(age=10),
key='nested',
errors=errors)
self.assertFalse(result)
self.assertTrue('nested' in errors)
self.assertTrue(len(errors.to_dict().keys()) == 1)
``` |
{
"source": "JNRowe-retired/pecan",
"score": 2
} |
#### File: pecan/pecan/core.py
```python
try:
from simplejson import loads
except ImportError: # pragma: no cover
from json import loads # noqa
from threading import local
from itertools import chain
from mimetypes import guess_type, add_type
from os.path import splitext
import logging
import operator
import six
from webob import Request, Response, exc, acceptparse
from .compat import urlparse, unquote_plus, izip
from .secure import handle_security
from .templating import RendererFactory
from .routing import lookup_controller, NonCanonicalPath
from .util import _cfg, encode_if_needed
from .middleware.recursive import ForwardRequestException
# make sure that json is defined in mimetypes
add_type('application/json', '.json', True)
state = local()
logger = logging.getLogger(__name__)
def proxy(key):
class ObjectProxy(object):
def __getattr__(self, attr):
obj = getattr(state, key)
return getattr(obj, attr)
def __setattr__(self, attr, value):
obj = getattr(state, key)
return setattr(obj, attr, value)
def __delattr__(self, attr):
obj = getattr(state, key)
return delattr(obj, attr)
def __dir__(self):
obj = getattr(state, key)
return dir(obj)
return ObjectProxy()
request = proxy('request')
response = proxy('response')
def override_template(template, content_type=None):
'''
Call within a controller to override the template that is used in
your response.
:param template: a valid path to a template file, just as you would specify
in an ``@expose``.
    :param content_type: a valid MIME type to use for the response.
'''
request.pecan['override_template'] = template
if content_type:
request.pecan['override_content_type'] = content_type
def abort(status_code=None, detail='', headers=None, comment=None, **kw):
'''
Raise an HTTP status code, as specified. Useful for returning status
codes like 401 Unauthorized or 403 Forbidden.
:param status_code: The HTTP status code as an integer.
:param detail: The message to send along, as a string.
:param headers: A dictionary of headers to send along with the response.
:param comment: A comment to include in the response.
'''
raise exc.status_map[status_code](
detail=detail,
headers=headers,
comment=comment,
**kw
)
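# Illustrative usage of abort() (values are hypothetical):
#   abort(404, detail='No such resource')
#   abort(401, detail='Authentication required')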
def redirect(location=None, internal=False, code=None, headers={},
add_slash=False):
'''
Perform a redirect, either internal or external. An internal redirect
performs the redirect server-side, while the external redirect utilizes
an HTTP 302 status code.
:param location: The HTTP location to redirect to.
:param internal: A boolean indicating whether the redirect should be
internal.
:param code: The HTTP status code to use for the redirect. Defaults to 302.
:param headers: Any HTTP headers to send with the response, as a
dictionary.
'''
if add_slash:
if location is None:
split_url = list(urlparse.urlsplit(state.request.url))
new_proto = state.request.environ.get(
'HTTP_X_FORWARDED_PROTO', split_url[0]
)
split_url[0] = new_proto
else:
split_url = urlparse.urlsplit(location)
split_url[2] = split_url[2].rstrip('/') + '/'
location = urlparse.urlunsplit(split_url)
if not headers:
headers = {}
if internal:
if code is not None:
raise ValueError('Cannot specify a code for internal redirects')
request.environ['pecan.recursive.context'] = request.context
raise ForwardRequestException(location)
if code is None:
code = 302
raise exc.status_map[code](location=location, headers=headers)
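# Illustrative usage of redirect() (paths are hypothetical):
#   redirect('/login')                         # external 302 redirect
#   redirect('/internal/page', internal=True)  # server-side forward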
def render(template, namespace):
'''
Render the specified template using the Pecan rendering framework
with the specified template namespace as a dictionary. Useful in a
controller where you have no template specified in the ``@expose``.
:param template: The path to your template, as you would specify in
``@expose``.
:param namespace: The namespace to use for rendering the template, as a
dictionary.
'''
return state.app.render(template, namespace)
def load_app(config):
'''
Used to load a ``Pecan`` application and its environment based on passed
configuration.
    :param config: Can be a dictionary containing configuration, or a string
                   which represents a (relative) configuration filename
returns a pecan.Pecan object
'''
from .configuration import _runtime_conf, set_config
set_config(config, overwrite=True)
for package_name in getattr(_runtime_conf.app, 'modules', []):
module = __import__(package_name, fromlist=['app'])
if hasattr(module, 'app') and hasattr(module.app, 'setup_app'):
app = module.app.setup_app(_runtime_conf)
app.config = _runtime_conf
return app
raise RuntimeError(
'No app.setup_app found in any of the configured app.modules'
)
class Pecan(object):
'''
Base Pecan application object. Generally created using ``pecan.make_app``,
rather than being created manually.
Creates a Pecan application instance, which is a WSGI application.
:param root: A string representing a root controller object (e.g.,
"myapp.controller.root.RootController")
:param default_renderer: The default template rendering engine to use.
Defaults to mako.
:param template_path: A relative file system path (from the project root)
where template files live. Defaults to 'templates'.
:param hooks: A callable which returns a list of
:class:`pecan.hooks.PecanHook`s
:param custom_renderers: Custom renderer objects, as a dictionary keyed
by engine name.
:param extra_template_vars: Any variables to inject into the template
namespace automatically.
:param force_canonical: A boolean indicating if this project should
require canonical URLs.
:param guess_content_type_from_ext: A boolean indicating if this project
should use the extension in the URL for guessing
the content type to return.
'''
SIMPLEST_CONTENT_TYPES = (
['text/html'],
['text/plain']
)
def __init__(self, root, default_renderer='mako',
template_path='templates', hooks=lambda: [],
custom_renderers={}, extra_template_vars={},
force_canonical=True, guess_content_type_from_ext=True, **kw):
if isinstance(root, six.string_types):
root = self.__translate_root__(root)
self.root = root
self.renderers = RendererFactory(custom_renderers, extra_template_vars)
self.default_renderer = default_renderer
# pre-sort these so we don't have to do it per-request
if six.callable(hooks):
hooks = hooks()
self.hooks = list(sorted(
hooks,
key=operator.attrgetter('priority')
))
self.template_path = template_path
self.force_canonical = force_canonical
self.guess_content_type_from_ext = guess_content_type_from_ext
def __translate_root__(self, item):
'''
Creates a root controller instance from a string root, e.g.,
> __translate_root__("myproject.controllers.RootController")
myproject.controllers.RootController()
:param item: The string to the item
'''
if '.' in item:
parts = item.split('.')
name = '.'.join(parts[:-1])
fromlist = parts[-1:]
module = __import__(name, fromlist=fromlist)
kallable = getattr(module, parts[-1])
msg = "%s does not represent a callable class or function."
assert hasattr(kallable, '__call__'), msg % item
return kallable()
raise ImportError('No item named %s' % item)
def route(self, req, node, path):
'''
Looks up a controller from a node based upon the specified path.
:param node: The node, such as a root controller object.
:param path: The path to look up on this node.
'''
path = path.split('/')[1:]
try:
node, remainder = lookup_controller(node, path)
return node, remainder
except NonCanonicalPath as e:
if self.force_canonical and \
not _cfg(e.controller).get('accept_noncanonical', False):
if req.method == 'POST':
raise RuntimeError(
"You have POSTed to a URL '%s' which "
"requires a slash. Most browsers will not maintain "
"POST data when redirected. Please update your code "
"to POST to '%s/' or set force_canonical to False" %
(req.pecan['routing_path'],
req.pecan['routing_path'])
)
redirect(code=302, add_slash=True)
return e.controller, e.remainder
def determine_hooks(self, controller=None):
'''
Determines the hooks to be run, in which order.
:param controller: If specified, includes hooks for a specific
controller.
'''
controller_hooks = []
if controller:
controller_hooks = _cfg(controller).get('hooks', [])
if controller_hooks:
return list(
sorted(
chain(controller_hooks, self.hooks),
key=operator.attrgetter('priority')
)
)
return self.hooks
def handle_hooks(self, hook_type, *args):
'''
Processes hooks of the specified type.
:param hook_type: The type of hook, including ``before``, ``after``,
``on_error``, and ``on_route``.
:param \*args: Arguments to pass to the hooks.
'''
if hook_type in ['before', 'on_route']:
hooks = state.hooks
else:
hooks = reversed(state.hooks)
for hook in hooks:
result = getattr(hook, hook_type)(*args)
# on_error hooks can choose to return a Response, which will
# be used instead of the standard error pages.
if hook_type == 'on_error' and isinstance(result, Response):
return result
def get_args(self, pecan_state, all_params, remainder, argspec, im_self):
'''
Determines the arguments for a controller based upon parameters
passed the argument specification for the controller.
'''
args = []
kwargs = dict()
valid_args = argspec[0][1:]
def _decode(x):
return unquote_plus(x) if isinstance(x, six.string_types) \
else x
remainder = [_decode(x) for x in remainder]
if im_self is not None:
args.append(im_self)
# grab the routing args from nested REST controllers
if 'routing_args' in pecan_state:
remainder = pecan_state['routing_args'] + list(remainder)
del pecan_state['routing_args']
# handle positional arguments
if valid_args and remainder:
args.extend(remainder[:len(valid_args)])
remainder = remainder[len(valid_args):]
valid_args = valid_args[len(args):]
# handle wildcard arguments
if [i for i in remainder if i]:
if not argspec[1]:
abort(404)
args.extend(remainder)
# get the default positional arguments
if argspec[3]:
defaults = dict(izip(argspec[0][-len(argspec[3]):], argspec[3]))
else:
defaults = dict()
# handle positional GET/POST params
for name in valid_args:
if name in all_params:
args.append(all_params.pop(name))
elif name in defaults:
args.append(defaults[name])
else:
break
# handle wildcard GET/POST params
if argspec[2]:
for name, value in six.iteritems(all_params):
if name not in argspec[0]:
kwargs[encode_if_needed(name)] = value
return args, kwargs
def render(self, template, namespace):
renderer = self.renderers.get(
self.default_renderer,
self.template_path
)
if template == 'json':
renderer = self.renderers.get('json', self.template_path)
if ':' in template:
renderer = self.renderers.get(
template.split(':')[0],
self.template_path
)
template = template.split(':')[1]
return renderer.render(template, namespace)
def handle_request(self, req, resp):
'''
The main request handler for Pecan applications.
'''
# get a sorted list of hooks, by priority (no controller hooks yet)
state.hooks = self.hooks
pecan_state = req.pecan
# store the routing path for the current application to allow hooks to
# modify it
pecan_state['routing_path'] = path = req.encget('PATH_INFO')
# handle "on_route" hooks
self.handle_hooks('on_route', state)
# lookup the controller, respecting content-type as requested
# by the file extension on the URI
pecan_state['extension'] = None
# attempt to guess the content type based on the file extension
if self.guess_content_type_from_ext \
and not pecan_state['content_type'] \
and '.' in path:
new_path, extension = splitext(path)
# preface with a letter to ensure compat for 2.5
potential_type = guess_type('x' + extension)[0]
if potential_type is not None:
path = new_path
pecan_state['extension'] = extension
pecan_state['content_type'] = potential_type
controller, remainder = self.route(req, self.root, path)
cfg = _cfg(controller)
if cfg.get('generic_handler'):
raise exc.HTTPNotFound
# handle generic controllers
im_self = None
if cfg.get('generic'):
im_self = six.get_method_self(controller)
handlers = cfg['generic_handlers']
controller = handlers.get(req.method, handlers['DEFAULT'])
handle_security(controller, im_self)
cfg = _cfg(controller)
# add the controller to the state so that hooks can use it
state.controller = controller
# if unsure ask the controller for the default content type
content_types = cfg.get('content_types', {})
if not pecan_state['content_type']:
# attempt to find a best match based on accept headers (if they
# exist)
accept = getattr(req.accept, 'header_value', '*/*')
if accept == '*/*' or (
accept.startswith('text/html,') and
list(content_types.keys()) in self.SIMPLEST_CONTENT_TYPES):
pecan_state['content_type'] = cfg.get(
'content_type',
'text/html'
)
else:
best_default = acceptparse.MIMEAccept(
accept
).best_match(
content_types.keys()
)
if best_default is None:
msg = "Controller '%s' defined does not support " + \
"content_type '%s'. Supported type(s): %s"
logger.error(
msg % (
controller.__name__,
pecan_state['content_type'],
content_types.keys()
)
)
raise exc.HTTPNotAcceptable()
pecan_state['content_type'] = best_default
elif cfg.get('content_type') is not None and \
pecan_state['content_type'] not in content_types:
msg = "Controller '%s' defined does not support content_type " + \
"'%s'. Supported type(s): %s"
logger.error(
msg % (
controller.__name__,
pecan_state['content_type'],
content_types.keys()
)
)
raise exc.HTTPNotFound
# get a sorted list of hooks, by priority
state.hooks = self.determine_hooks(controller)
# handle "before" hooks
self.handle_hooks('before', state)
# fetch any parameters
if req.method == 'GET':
params = dict(req.GET)
else:
params = dict(req.params)
# fetch the arguments for the controller
args, kwargs = self.get_args(
pecan_state,
params,
remainder,
cfg['argspec'],
im_self
)
# get the result from the controller
result = controller(*args, **kwargs)
# a controller can return the response object which means they've taken
# care of filling it out
if result is response:
return
raw_namespace = result
# pull the template out based upon content type and handle overrides
template = content_types.get(pecan_state['content_type'])
# check if for controller override of template
template = pecan_state.get('override_template', template)
pecan_state['content_type'] = pecan_state.get(
'override_content_type',
pecan_state['content_type']
)
# if there is a template, render it
if template:
if template == 'json':
pecan_state['content_type'] = 'application/json'
result = self.render(template, result)
# If we are in a test request put the namespace where it can be
# accessed directly
if req.environ.get('paste.testing'):
testing_variables = req.environ['paste.testing_variables']
testing_variables['namespace'] = raw_namespace
testing_variables['template_name'] = template
testing_variables['controller_output'] = result
# set the body content
if isinstance(result, six.text_type):
resp.text = result
elif result:
resp.body = result
# set the content type
if pecan_state['content_type']:
resp.content_type = pecan_state['content_type']
def __call__(self, environ, start_response):
'''
Implements the WSGI specification for Pecan applications, utilizing
``WebOb``.
'''
# create the request and response object
state.request = req = Request(environ)
state.response = resp = Response()
state.hooks = []
state.app = self
state.controller = None
# handle the request
try:
# add context and environment to the request
req.context = environ.get('pecan.recursive.context', {})
req.pecan = dict(content_type=None)
self.handle_request(req, resp)
except Exception as e:
# if this is an HTTP Exception, set it as the response
if isinstance(e, exc.HTTPException):
state.response = e
environ['pecan.original_exception'] = e
# if this is not an internal redirect, run error hooks
on_error_result = None
if not isinstance(e, ForwardRequestException):
on_error_result = self.handle_hooks('on_error', state, e)
# if the on_error handler returned a Response, use it.
if isinstance(on_error_result, Response):
state.response = on_error_result
else:
if not isinstance(e, exc.HTTPException):
raise
finally:
# handle "after" hooks
self.handle_hooks('after', state)
# get the response
try:
return state.response(environ, start_response)
finally:
# clean up state
del state.hooks
del state.request
del state.response
del state.controller
``` |
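To tie the pieces together, a minimal application sketch. It assumes the usual `pecan.expose` decorator for registering controller methods (defined elsewhere in the package) and uses the stdlib WSGI server; everything here is illustrative rather than the project's canonical quick-start:

```python
# Illustrative only: a tiny Pecan application served with wsgiref.
from wsgiref.simple_server import make_server
from pecan import expose

class RootController(object):
    @expose('json')
    def index(self):
        return dict(hello='world')

app = Pecan(RootController())  # Pecan instances are WSGI applications
make_server('', 8080, app).serve_forever()
```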