max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
leetcode.com/python/389_Find_the_Difference.py | XSoyOscar/Algorithms | 713 | 11169570 | class Solution(object):
def findTheDifference(self, s, t):
"""
:type s: str
:type t: str
:rtype: str
"""
ans = 0
for char in s + t:
            ans ^= ord(char)
        return chr(ans)
sol = Solution()
out = sol.findTheDifference("abcd", "abcde")
print('Res: ',out) |
detection/object_detection/obj_4_efficientdet/WindowObj4EfficientdetDataParam.py | THEFASHIONGEEK/Monk_Gui | 129 | 11169593 | import os
import sys
import json
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
class WindowObj4EfficientdetDataParam(QtWidgets.QWidget):
backward_4_efficientdet = QtCore.pyqtSignal();
forward_valdata_param = QtCore.pyqtSignal();
def __init__(self):
super().__init__()
self.title = 'Efficient Detection - Training Data Param'
self.left = 10
self.top = 10
self.width = 800
self.height = 600
self.cfg_setup();
self.initUI();
def cfg_setup(self):
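        # Load cached parameters from obj_4_efficientdet.json if it exists;
        # otherwise build a default config and write it back out below.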
if(os.path.isfile("obj_4_efficientdet.json")):
with open('obj_4_efficientdet.json') as json_file:
self.system = json.load(json_file)
else:
self.system = {};
self.system["anno_type"] = "monk"
self.system["val_anno_type"] = "monk"
self.system["monk_root_dir"] = "Monk_Object_Detection/example_notebooks/sample_dataset/kangaroo/";
self.system["monk_img_dir"] = "Images";
self.system["monk_anno_file"] = "train_labels.csv";
self.system["voc_root_dir"] = "Monk_Object_Detection/example_notebooks/sample_dataset/kangaroo/";
self.system["voc_img_dir"] = "Images";
self.system["voc_anno_dir"] = "";
self.system["coco_root_dir"] = "Monk_Object_Detection/example_notebooks/sample_dataset/";
self.system["coco_coco_dir"] = "kangaroo";
self.system["coco_img_dir"] = "";
self.system["coco_set_dir"] = "Images"
self.system["val_data"] = "no";
self.system["val_monk_root_dir"] = "Monk_Object_Detection/example_notebooks/sample_dataset/kangaroo/";
self.system["val_monk_img_dir"] = "Images";
self.system["val_monk_anno_file"] = "train_labels.csv";
self.system["val_voc_root_dir"] = "Monk_Object_Detection/example_notebooks/sample_dataset/kangaroo/";
self.system["val_voc_img_dir"] = "Images";
self.system["val_voc_anno_dir"] = "";
self.system["val_coco_root_dir"] = "Monk_Object_Detection/example_notebooks/sample_dataset/";
self.system["val_coco_coco_dir"] = "kangaroo";
self.system["val_coco_img_dir"] = "";
self.system["val_coco_set_dir"] = "Images";
self.system["batch_size"] = "4";
self.system["image_size"] = "512";
self.system["use_gpu"] = "yes";
self.system["devices"] = "0";
self.system["es_min_delta"] = "0.0";
self.system["es_patience"] = "0";
self.system["lr"] = "0.001";
self.system["val_interval"] = "2";
self.system["epochs"] = "5";
self.system["output_model_dir"] = "trained";
with open('obj_4_efficientdet.json', 'w') as outfile:
json.dump(self.system, outfile)
def initUI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height);
# Backward
self.b1 = QPushButton('Back', self)
self.b1.move(500,550)
self.b1.clicked.connect(self.backward)
# Forward
self.b2 = QPushButton('Next', self)
self.b2.move(600,550)
self.b2.clicked.connect(self.forward);
# Quit
self.b3 = QPushButton('Quit', self)
self.b3.move(700,550)
self.b3.clicked.connect(self.close)
self.tb1 = QTextEdit(self)
self.tb1.move(20, 20)
self.tb1.resize(400, 450)
self.tb1.setText(self.monk_format());
self.tb1.setReadOnly(True)
self.r1 = QRadioButton("Monk format", self)
if self.system["anno_type"] == "monk":
self.r1.setChecked(True)
self.r1.move(430,20)
self.r1.toggled.connect(self.monk);
self.r2 = QRadioButton("VOC format", self)
if self.system["anno_type"] == "voc":
self.r2.setChecked(True)
self.r2.move(560,20)
self.r2.toggled.connect(self.voc);
self.r3 = QRadioButton("COCO format", self)
if self.system["anno_type"] == "coco":
self.r3.setChecked(True)
self.r3.move(670,20)
self.r3.toggled.connect(self.coco);
self.m = [];
self.m_l1 = QLabel(self);
self.m_l1.setText("1. root:");
self.m_l1.move(450, 70);
self.m.append(self.m_l1);
self.m_b1 = QPushButton('Select Folder', self)
self.m_b1.move(550,70)
self.m_b1.clicked.connect(self.select_root_dataset)
self.m.append(self.m_b1);
self.m_tb1 = QTextEdit(self)
self.m_tb1.move(450, 100)
self.m_tb1.resize(300, 50)
self.m_tb1.setText(self.system["monk_root_dir"]);
self.m_tb1.setReadOnly(True)
self.m.append(self.m_tb1);
self.m_l2 = QLabel(self);
self.m_l2.setText("2. img_dir:");
self.m_l2.move(450, 180);
self.m.append(self.m_l2);
self.m_b2 = QPushButton('Select Folder', self)
self.m_b2.move(550,180)
self.m_b2.clicked.connect(self.select_img_dir)
self.m.append(self.m_b2);
self.m_tb2 = QTextEdit(self)
self.m_tb2.move(450, 210)
self.m_tb2.resize(300, 50)
self.m_tb2.setText(self.system["monk_img_dir"]);
self.m_tb2.setReadOnly(True)
self.m.append(self.m_tb2);
self.m_l3 = QLabel(self);
self.m_l3.setText("3. anno_file:");
self.m_l3.move(450, 280);
self.m.append(self.m_l3);
self.m_b3 = QPushButton('Select File', self)
self.m_b3.move(550, 280)
self.m_b3.clicked.connect(self.select_anno_file)
self.m.append(self.m_b3);
self.m_tb3 = QTextEdit(self)
self.m_tb3.move(450, 310)
self.m_tb3.resize(300, 50)
self.m_tb3.setText(self.system["monk_anno_file"]);
self.m_tb3.setReadOnly(True)
self.m.append(self.m_tb3);
self.m_l4 = QLabel(self);
self.m_l4.setText("4. batch_size:");
self.m_l4.move(450, 380);
self.m.append(self.m_l4);
self.m_e4 = QLineEdit(self)
self.m_e4.move(550, 380);
self.m_e4.setText(self.system["batch_size"]);
self.m_e4.resize(200, 25);
self.m.append(self.m_e4);
self.v = [];
self.v_l1 = QLabel(self);
self.v_l1.setText("1. root:");
self.v_l1.move(450, 70);
self.v.append(self.v_l1);
self.v_b1 = QPushButton('Select Folder', self)
self.v_b1.move(550,70)
self.v_b1.clicked.connect(self.select_root_dataset)
self.v.append(self.v_b1);
self.v_tb1 = QTextEdit(self)
self.v_tb1.move(450, 100)
self.v_tb1.resize(300, 50)
self.v_tb1.setText(self.system["voc_root_dir"]);
self.v_tb1.setReadOnly(True)
self.v.append(self.v_tb1);
self.v_l2 = QLabel(self);
self.v_l2.setText("2. img_dir:");
self.v_l2.move(450, 180);
self.v.append(self.v_l2);
self.v_b2 = QPushButton('Select Folder', self)
self.v_b2.move(550,180)
self.v_b2.clicked.connect(self.select_img_dir)
self.v.append(self.v_b2);
self.v_tb2 = QTextEdit(self)
self.v_tb2.move(450, 210)
self.v_tb2.resize(300, 50)
self.v_tb2.setText(self.system["voc_img_dir"]);
self.v_tb2.setReadOnly(True)
self.v.append(self.v_tb2);
self.v_l3 = QLabel(self);
self.v_l3.setText("3. anno_dir:");
self.v_l3.move(450, 280);
self.v.append(self.v_l3);
self.v_b3 = QPushButton('Select Folder', self)
self.v_b3.move(550, 280)
self.v_b3.clicked.connect(self.select_anno_dir)
self.v.append(self.v_b3);
self.v_tb3 = QTextEdit(self)
self.v_tb3.move(450, 310)
self.v_tb3.resize(300, 50)
self.v_tb3.setText(self.system["voc_anno_dir"]);
self.v_tb3.setReadOnly(True)
self.v.append(self.v_tb3);
self.v_l4 = QLabel(self);
self.v_l4.setText("4. batch_size:");
self.v_l4.move(450, 380);
self.v.append(self.v_l4);
self.v_e4 = QLineEdit(self)
self.v_e4.move(550, 380);
self.v_e4.setText(self.system["batch_size"]);
self.v_e4.resize(200, 25);
self.v.append(self.v_e4);
self.c = [];
self.c_l1 = QLabel(self);
self.c_l1.setText("1. root:");
self.c_l1.move(450, 70);
self.c.append(self.c_l1);
self.c_b1 = QPushButton('Select Folder', self)
self.c_b1.move(550,70)
self.c_b1.clicked.connect(self.select_root_dataset)
self.c.append(self.c_b1);
self.c_tb1 = QTextEdit(self)
self.c_tb1.move(450, 100)
self.c_tb1.resize(300, 50)
self.c_tb1.setText(self.system["coco_root_dir"]);
self.c_tb1.setReadOnly(True)
self.c.append(self.c_tb1);
self.c_l2 = QLabel(self);
self.c_l2.setText("2. coco_dir:");
self.c_l2.move(450, 180);
self.c.append(self.c_l2);
self.c_b2 = QPushButton('Select Folder', self)
self.c_b2.move(550,180)
self.c_b2.clicked.connect(self.select_coco_dir)
self.c.append(self.c_b2);
self.c_b2_1 = QPushButton('Set Blank', self)
self.c_b2_1.move(650,180)
self.c_b2_1.clicked.connect(self.select_coco_dir_blank)
self.c.append(self.c_b2_1);
self.c_tb2 = QTextEdit(self)
self.c_tb2.move(450, 210)
self.c_tb2.resize(300, 50)
self.c_tb2.setText(self.system["coco_coco_dir"]);
self.c_tb2.setReadOnly(True)
self.c.append(self.c_tb2);
self.c_l3 = QLabel(self);
self.c_l3.setText("3. img_dir:");
self.c_l3.move(450, 280);
self.c.append(self.c_l3);
self.c_b3 = QPushButton('Select Folder', self)
self.c_b3.move(550,280)
self.c_b3.clicked.connect(self.select_img_dir)
self.c.append(self.c_b3);
self.c_b3_1 = QPushButton('Set Blank', self)
self.c_b3_1.move(650,280)
self.c_b3_1.clicked.connect(self.select_img_dir_blank)
self.c.append(self.c_b3_1);
self.c_tb3 = QTextEdit(self)
self.c_tb3.move(450, 310)
self.c_tb3.resize(300, 50)
self.c_tb3.setText(self.system["coco_img_dir"]);
self.c_tb3.setReadOnly(True)
self.c.append(self.c_tb3);
self.c_l5 = QLabel(self);
self.c_l5.setText("4. set_dir:");
self.c_l5.move(450, 380);
self.c.append(self.c_l5);
self.c_b5 = QPushButton('Select Folder', self)
self.c_b5.move(550,380)
self.c_b5.clicked.connect(self.select_set_dir)
self.c.append(self.c_b5);
self.c_b5_1 = QPushButton('Set Blank', self)
self.c_b5_1.move(650,380)
self.c_b5_1.clicked.connect(self.select_set_dir_blank)
self.c.append(self.c_b5_1);
self.c_tb5 = QTextEdit(self)
self.c_tb5.move(450, 410)
self.c_tb5.resize(300, 50)
self.c_tb5.setText(self.system["coco_set_dir"]);
self.c_tb5.setReadOnly(True)
self.c.append(self.c_tb5);
self.c_l4 = QLabel(self);
self.c_l4.setText("5. batch_size:");
self.c_l4.move(450, 480);
self.c.append(self.c_l4);
self.c_e4 = QLineEdit(self)
self.c_e4.move(550, 480);
self.c_e4.setText(self.system["batch_size"]);
self.c_e4.resize(200, 25);
self.c.append(self.c_e4);
if self.system["anno_type"] == "monk":
self.monk();
elif self.system["anno_type"] == "voc":
self.voc();
else:
self.coco();
def monk(self):
self.tb1.setText(self.monk_format());
for x in self.m:
x.show();
for x in self.v:
x.hide();
for x in self.c:
x.hide();
def voc(self):
self.tb1.setText(self.voc_format());
for x in self.m:
x.hide();
for x in self.v:
x.show();
for x in self.c:
x.hide();
def coco(self):
self.tb1.setText(self.coco_format());
for x in self.m:
x.hide();
for x in self.v:
x.hide();
for x in self.c:
x.show();
def select_root_dataset(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
folderName = QFileDialog.getExistingDirectory(self,"QFileDialog.getExistingDirectory()", os.getcwd())
if self.r1.isChecked():
self.m_b1.setText("Selected");
self.m_tb1.setText(folderName);
self.system["monk_root_dir"] = folderName;
if self.r2.isChecked():
self.v_b1.setText("Selected");
self.v_tb1.setText(folderName);
self.system["voc_root_dir"] = folderName;
if self.r3.isChecked():
self.c_b1.setText("Selected");
self.c_tb1.setText(folderName);
self.system["coco_root_dir"] = folderName;
def select_coco_dir(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
folderName = QFileDialog.getExistingDirectory(self,"QFileDialog.getExistingDirectory()", self.system["coco_root_dir"])
folderName = folderName.split("/")[-1];
self.c_b2.setText("Selected");
self.c_tb2.setText(folderName);
self.system["coco_coco_dir"] = folderName;
def select_coco_dir_blank(self):
self.c_tb2.setText("");
self.system["coco_coco_dir"] = "";
def select_img_dir(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
if self.r1.isChecked():
folderName = QFileDialog.getExistingDirectory(self,"QFileDialog.getExistingDirectory()", self.system["monk_root_dir"])
if self.r2.isChecked():
folderName = QFileDialog.getExistingDirectory(self,"QFileDialog.getExistingDirectory()", self.system["voc_root_dir"])
if self.r3.isChecked():
folderName = QFileDialog.getExistingDirectory(self,"QFileDialog.getExistingDirectory()",
self.system["coco_root_dir"] + "/" + self.system["coco_coco_dir"])
folderName = folderName.split("/")[-1];
if self.r1.isChecked():
self.m_b2.setText("Selected");
self.m_tb2.setText(folderName);
self.system["monk_img_dir"] = folderName;
if self.r2.isChecked():
self.v_b2.setText("Selected");
self.v_tb2.setText(folderName);
self.system["voc_img_dir"] = folderName;
if self.r3.isChecked():
self.c_b3.setText("Selected");
self.c_tb3.setText(folderName);
self.system["coco_img_dir"] = folderName;
def select_img_dir_blank(self):
self.c_tb3.setText("");
self.system["coco_img_dir"] = "";
def select_anno_file(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
fileName, _ = QFileDialog.getOpenFileName(self,"QFileDialog.getOpenFileName()", self.system["monk_root_dir"],
"Monk Project Files (*.csv);;All Files (*)", options=options)
if fileName:
fileName = fileName.split("/")[-1];
self.system["monk_anno_file"] = fileName;
self.m_b3.setText("Selected");
self.m_tb3.setText(fileName);
def select_anno_dir(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
folderName = QFileDialog.getExistingDirectory(self,"QFileDialog.getExistingDirectory()", self.system["voc_root_dir"])
folderName = folderName.split("/")[-1];
self.v_b3.setText("Selected");
self.v_tb3.setText(folderName);
self.system["voc_anno_dir"] = folderName;
def select_set_dir(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
folderName = QFileDialog.getExistingDirectory(self,"QFileDialog.getExistingDirectory()",
self.system["coco_root_dir"] + "/" + self.system["coco_coco_dir"] + "/" + self.system["coco_img_dir"])
folderName = folderName.split("/")[-1];
self.c_b5.setText("Selected");
self.c_tb5.setText(folderName);
self.system["coco_set_dir"] = folderName;
def select_set_dir_blank(self):
        self.c_tb5.setText("");
self.system["coco_set_dir"] = "";
def forward(self):
if self.r1.isChecked():
self.system["anno_type"] = "monk";
self.system["batch_size"] = self.m_e4.text();
if self.r2.isChecked():
self.system["anno_type"] = "voc";
self.system["batch_size"] = self.v_e4.text();
if self.r3.isChecked():
self.system["anno_type"] = "coco";
self.system["batch_size"] = self.c_e4.text();
with open('obj_4_efficientdet.json', 'w') as outfile:
json.dump(self.system, outfile)
self.forward_valdata_param.emit();
def backward(self):
if self.r1.isChecked():
self.system["anno_type"] = "monk";
self.system["batch_size"] = self.m_e4.text();
if self.r2.isChecked():
self.system["anno_type"] = "voc";
self.system["batch_size"] = self.v_e4.text();
if self.r3.isChecked():
self.system["anno_type"] = "coco";
self.system["batch_size"] = self.c_e4.text();
with open('obj_4_efficientdet.json', 'w') as outfile:
json.dump(self.system, outfile)
self.backward_4_efficientdet.emit();
def monk_format(self):
wr = "";
wr += "Monk Type Data Format\n"
wr += "Dataset Directory Structure\n\n";
wr += "Parent_Directory (root)\n";
wr += " |\n";
wr += " |-----------Images (img_dir)\n";
wr += " | |\n";
wr += " | |------------------img1.jpg\n";
wr += " | |------------------img2.jpg\n";
wr += " | |------------------.........(and so on)\n";
wr += " |\n";
wr += " |\n";
wr += " |-----------train_labels.csv (anno_file)\n\n";
wr += "Annotation file format\n";
wr += " | Id | Labels |\n";
wr += " | img1.jpg | x1 y1 x2 y2 label1 x1 y1 x2 y2 label2 |\n";
wr += " Labels: xmin ymin xmax ymax label\n";
wr += " xmin, ymin - top left corner of bounding box\n";
wr += " xmax, ymax - bottom right corner of bounding box\n";
return wr;
def voc_format(self):
wr = "";
wr += "VOC Type Data Format\n"
wr += "Dataset Directory Structure\n\n";
wr += "Parent_Directory (root)\n";
wr += " |\n";
wr += " |-----------Images (img_dir)\n";
wr += " | |\n";
wr += " | |------------------img1.jpg\n";
wr += " | |------------------img2.jpg\n";
wr += " | |------------------.........(and so on)\n";
wr += " |\n";
wr += " |\n";
wr += " |-----------Annotations (anno_dir)\n";
wr += " | |\n";
wr += " | |------------------img1.xml\n";
wr += " | |------------------img2.xml\n";
wr += " | |------------------.........(and so on)\n";
return wr;
def coco_format(self):
wr = "";
wr += "COCO Type Data Format\n"
wr += "Dataset Directory Structure\n\n";
wr += "Parent_Directory (root_dir)\n";
wr += " |\n";
wr += " |------kangaroo (coco_dir)\n";
wr += " | |\n";
wr += " | |---Images (set_dir)\n";
wr += " | |----|\n";
wr += " | |-------------------img1.jpg\n";
wr += " | |-------------------img2.jpg\n";
wr += " | |------------------.........(and so on)\n";
wr += " |\n";
wr += " |\n";
wr += " | |---annotations\n";
wr += " | |----|\n";
wr += " | |--------------------instances_Images.json\n";
wr += " | |--------------------classes.txt\n"
wr += "\n";
wr += "\n";
wr += " instances_Images.json -> In proper COCO format\n";
wr += " Note: Annotation file name too coincides against the set_dir\n";
wr += " classes.txt -> A list of classes in alphabetical order\n";
return wr;
'''
app = QApplication(sys.argv)
screen = WindowObj4EfficientdetDataParam()
screen.show()
sys.exit(app.exec_())
'''
|
deploy/scripts/aphros/vtk.py | cselab/aphros | 252 | 11169617 | #!/usr/bin/env python3
try:
import numpy as np
except ImportError:
pass
import re
import sys
import inspect
import os
def printerr(m):
sys.stderr.write(str(m) + "\n")
def ReadVtkPoly(f, verbose=False):
"""
Reads vtk points, polygons and fields from legacy VTK file.
f: `str` or file-like
Path to legacy VTK file or file-like object.
Returns:
points: `numpy.ndarray`, (num_points, 3)
Points (vertices).
poly: `list` [`list` [ `int` ]], (num_cells, ...)
Polygons as lists of indices in `points`.
    cell_fields: `dict` [`str`, `numpy.ndarray`], (num_cells,)
        Cell fields indexed by name. Each field has shape (num_cells,).
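    Example (hypothetical file name, for illustration only):
        points, poly, cell_fields = ReadVtkPoly("poly_0000.vtk", verbose=True)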
"""
def Assert(cond, msg=""):
if not cond:
caller = inspect.getframeinfo(inspect.stack()[1][0])
lines = "\n".join(caller[3]).strip()
filename = os.path.basename(caller.filename)
lineno = caller.lineno
printerr("\n{:}:{:} {:}".format(filename, lineno, lines))
printerr("Failing at iteration {:} in state s={:}".format(
lnum + 1, s))
if msg: printerr(str(msg))
printerr("Current input line:\n{:}".format(l.strip()))
printerr("Next line would be:\n{:}".format(f.readline().strip()))
exit(1)
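    # Line-driven state machine: class S enumerates the parser states in the
    # order the sections of a legacy VTK polydata file are expected to appear.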
class S:
header, comment, binary, dataset, points, \
polygons, cell_data, cell_scalars, cell_field = range(9)
points = None
poly = None
dim = 3
num_points = None
num_poly = None
cell_fields = dict()
cell_field_name = None
binary = False
path = None
if type(f) is str:
path = f
f = open(path, 'rb')
else:
pass # expect file-like
s = S.header
if f:
for lnum, l in enumerate(f):
l = str(l)
if not l.strip():
continue
if s == S.header: # check header
Assert("# vtk" in l)
s = S.comment
elif s == S.comment: # skip comment
s = S.binary
elif s == S.binary:
Assert("ASCII" in l or "BINARY" in l)
binary = "BINARY" in l
s = S.dataset
elif s == S.dataset:
Assert("DATASET POLYDATA" in l)
s = S.points
elif s == S.points:
Assert("POINTS" in l)
dtype = np.float64 if "double" in l else np.float32
num_points = int(re.findall("\D*(\d*)\D*", l)[0])
points = np.empty((num_points, dim))
if binary:
dt = np.dtype('>f4')
bytes = f.read(3 * num_points * dt.itemsize)
points = np.frombuffer(bytes, dtype=dt)
points = points.astype(np.float)
f.readline()
else:
points = np.fromfile(f,
dtype=np.float,
count=num_points * 3,
sep=' ')
points = points.reshape((num_points, 3))
Assert(points.shape[0] == num_points)
Assert(points.shape[1] == 3)
if verbose: printerr("Read {:} points".format(points.shape[0]))
s = S.polygons
elif s == S.polygons:
Assert("POLYGONS" in l)
m = re.findall("\D*(\d*)\s*(\d*)", l)[0]
num_poly = int(m[0])
num_ints = int(m[1])
if binary:
dt = np.dtype('>i')
bytes = f.read(num_ints * dt.itemsize)
ints = np.frombuffer(bytes, dtype=dt)
ints = ints.astype(np.int)
f.readline()
else:
ints = np.fromfile(f,
dtype=np.int,
count=num_ints,
sep=' ')
i = 0
poly = []
for ip in range(num_poly):
n = ints[i]
i += 1
poly.append(ints[i:i + n])
i += n
Assert(i == num_ints)
Assert(len(poly) == num_poly)
if verbose: printerr("Read {:} polygons".format(len(poly)))
s = S.cell_data
elif s == S.cell_data:
if "CELL_DATA" in l:
n = int(re.findall("\D*(\d*)", l)[0])
Assert(n == num_poly)
s = S.cell_scalars
elif "POINT_DATA" in l:
pass
elif s == S.cell_scalars: # read cell field
if "SCALARS" in l:
cell_field_name = re.findall("SCALARS\s*(\S+)", l)[0]
s = S.cell_field
else:
s = S.cell_data
elif s == S.cell_field:
Assert("LOOKUP_TABLE" in l)
if binary:
dt = np.dtype('>f4')
bytes = f.read(num_poly * dt.itemsize)
u = np.frombuffer(bytes, dtype=dt)
u = u.astype(np.float)
f.readline()
else:
u = np.fromfile(f, dtype=np.float, count=num_poly, sep=' ')
Assert(u.shape[0] == num_poly, ["u.shape=", u.shape])
if verbose:
printerr("Read cell field '{:}'".format(cell_field_name))
cell_fields[cell_field_name] = u
s = S.cell_scalars
if path:
f.close()
return points, poly, cell_fields
|
src/sage/symbolic/complexity_measures.py | bopopescu/sage | 1,742 | 11169637 | """
Complexity Measures
Some measures of symbolic expression complexity. Each complexity
measure is expected to take a symbolic expression as an argument, and
return a number.
"""
def string_length(expr):
"""
Returns the length of ``expr`` after converting it to a string.
INPUT:
- ``expr`` -- the expression whose complexity we want to measure.
OUTPUT:
A real number representing the complexity of ``expr``.
RATIONALE:
If the expression is longer on-screen, then a human would probably
consider it more complex.
EXAMPLES:
This expression has three characters, ``x``, ``^``, and ``2``::
sage: from sage.symbolic.complexity_measures import string_length
sage: f = x^2
sage: string_length(f)
3
"""
return len(str(expr))
|
joints_detectors/hrnet/pose_estimation/utilitys.py | rcourivaud/video-to-pose3D | 574 | 11169649 | import torch
import torchvision.transforms as transforms
import cv2
import numpy as np
from lib.utils.transforms import *
joint_pairs = [[0, 1], [1, 3], [0, 2], [2, 4],
[5, 6], [5, 7], [7, 9], [6, 8], [8, 10],
[5, 11], [6, 12], [11, 12],
[11, 13], [12, 14], [13, 15], [14, 16]]
colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \
[0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \
[170, 0, 255], [255, 0, 255]]
def plot_keypoint(image, coordinates, confidence, keypoint_thresh):
# USE cv2
joint_visible = confidence[:, :, 0] > keypoint_thresh
for i in range(coordinates.shape[0]):
pts = coordinates[i]
for color_i, jp in zip(colors, joint_pairs):
if joint_visible[i, jp[0]] and joint_visible[i, jp[1]]:
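                # pts[jp, 0] holds the x coordinates of the joint pair, pts[jp, 1] the y coordinates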
pt0 = pts[jp, 0];
pt1 = pts[jp, 1]
pt0_0, pt0_1, pt1_0, pt1_1 = int(pt0[0]), int(pt0[1]), int(pt1[0]), int(pt1[1])
cv2.line(image, (pt0_0, pt1_0), (pt0_1, pt1_1), color_i, 6)
# cv2.circle(image,(pt0_0, pt0_1), 2, color_i, thickness=-1)
# cv2.circle(image,(pt1_0, pt1_1), 2, color_i, thickness=-1)
return image
def upscale_bbox_fn(bbox, img, scale=1.25):
new_bbox = []
x0 = bbox[0]
y0 = bbox[1]
x1 = bbox[2]
y1 = bbox[3]
w = (x1 - x0) / 2
h = (y1 - y0) / 2
center = [x0 + w, y0 + h]
new_x0 = max(center[0] - w * scale, 0)
new_y0 = max(center[1] - h * scale, 0)
new_x1 = min(center[0] + w * scale, img.shape[1])
new_y1 = min(center[1] + h * scale, img.shape[0])
new_bbox = [new_x0, new_y0, new_x1, new_y1]
return new_bbox
def detector_to_simple_pose(img, class_IDs, scores, bounding_boxs, output_shape=(256, 192), scale=1.25):
L = class_IDs.shape[1]
thr = 0.5
upscale_bbox = []
for i in range(L):
if class_IDs[0][i].asscalar() != 0:
continue
if scores[0][i].asscalar() < thr:
continue
bbox = bounding_boxs[0][i]
upscale_bbox.append(upscale_bbox_fn(bbox.asnumpy().tolist(), img, scale=scale))
if len(upscale_bbox) > 0:
pose_input = crop_resize_normalize(img, upscale_bbox, output_shape)
pose_input = pose_input.as_in_context(ctx)
else:
pose_input = None
return pose_input, upscale_bbox
def _box2cs(box, image_width, image_height):
x, y, w, h = box[:4]
return _xywh2cs(x, y, w, h, image_width, image_height)
def _xywh2cs(x, y, w, h, image_width, image_height):
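    # Convert an (x, y, w, h) box into the center/scale representation used by the
    # pose pipeline: the box is padded to the target aspect ratio and the scale is
    # expressed in units of pixel_std (200 px), then enlarged by 25%.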
center = np.zeros((2), dtype=np.float32)
center[0] = x + w * 0.5
center[1] = y + h * 0.5
aspect_ratio = image_width * 1.0 / image_height
pixel_std = 200
if w > aspect_ratio * h:
h = w * 1.0 / aspect_ratio
elif w < aspect_ratio * h:
w = h * aspect_ratio
scale = np.array(
[w * 1.0 / pixel_std, h * 1.0 / pixel_std],
dtype=np.float32)
if center[0] != -1:
scale = scale * 1.25
return center, scale
def preprocess(image, bboxs, scores, cfg, thred_score=0.8):
if type(image) == str:
data_numpy = cv2.imread(image, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
else:
data_numpy = image
inputs = []
centers = []
scales = []
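    # Limit processing to at most five boxes (and no more than the number of
    # detections scoring above thred_score).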
score_num = np.sum(scores > thred_score)
max_box = min(5, score_num)
for bbox in bboxs[:max_box]:
x1, y1, x2, y2 = bbox
box = [x1, y1, x2 - x1, y2 - y1]
        # Crop the box from the image --> return center, scale
c, s = _box2cs(box, data_numpy.shape[0], data_numpy.shape[1])
centers.append(c)
scales.append(s)
r = 0
trans = get_affine_transform(c, s, r, cfg.MODEL.IMAGE_SIZE)
input = cv2.warpAffine(
data_numpy,
trans,
(int(cfg.MODEL.IMAGE_SIZE[0]), int(cfg.MODEL.IMAGE_SIZE[1])),
flags=cv2.INTER_LINEAR)
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
input = transform(input).unsqueeze(0)
inputs.append(input)
inputs = torch.cat(inputs)
return inputs, data_numpy, centers, scales
|
lib/hachoir/metadata/csv.py | 0x20Man/Watcher3 | 320 | 11169658 | from hachoir.parser import createParser
from hachoir.core.tools import makePrintable
from hachoir.metadata import extractMetadata
from hachoir.core.i18n import initLocale
from sys import argv, stderr, exit
from os import walk
from os.path import join as path_join
from fnmatch import fnmatch
import codecs
OUTPUT_FILENAME = "metadata.csv"
class Extractor:
def __init__(self, directory, fields):
self.directory = directory
self.fields = fields
self.charset = "UTF-8"
self.total = 0
self.invalid = 0
def main(self):
output = codecs.open(OUTPUT_FILENAME, "w", self.charset)
for filename in self.findFiles(self.directory, '*.doc'):
self.total += 1
line = self.processFile(filename)
if line:
print(line, file=output)
else:
self.invalid += 1
output.close()
self.summary()
def summary(self):
print(file=stderr)
print("Valid files: %s" % (self.total - self.invalid), file=stderr)
print("Invalid files: %s" % self.invalid, file=stderr)
print("Total files: %s" % self.total, file=stderr)
print(file=stderr)
print("Result written into %s" % OUTPUT_FILENAME, file=stderr)
def findFiles(self, directory, pattern):
for dirpath, dirnames, filenames in walk(directory):
for filename in filenames:
if not fnmatch(filename.lower(), pattern):
continue
yield path_join(dirpath, filename)
def processFile(self, filename):
print("[%s] Process file %s..." % (self.total, filename))
parser = createParser(filename)
if not parser:
print("Unable to parse file", file=stderr)
return None
try:
metadata = extractMetadata(parser)
except Exception as err:
print("Metadata extraction error: %s" % str(err), file=stderr)
return None
if not metadata:
print("Unable to extract metadata", file=stderr)
return None
filename = makePrintable(filename, self.charset)
line = [filename]
for field in self.fields:
value = metadata.getText(field, '')
value = makePrintable(value, self.charset)
line.append(value)
return '; '.join(line)
def main():
initLocale()
if len(argv) != 3:
print("usage: %s directory fields" % argv[0], file=stderr)
print(file=stderr)
print("eg. %s . title,creation_date" % argv[0], file=stderr)
exit(1)
directory = argv[1]
fields = [field.strip() for field in argv[2].split(",")]
Extractor(directory, fields).main()
|
tests/test_token.py | federicoemartinez/Flask-HTTPAuth | 1,082 | 11169667 | import base64
import unittest
from flask import Flask
from flask_httpauth import HTTPTokenAuth
class HTTPAuthTestCase(unittest.TestCase):
def setUp(self):
app = Flask(__name__)
app.config['SECRET_KEY'] = 'my secret'
token_auth = HTTPTokenAuth('MyToken')
token_auth2 = HTTPTokenAuth('Token', realm='foo')
token_auth3 = HTTPTokenAuth(header='X-API-Key')
@token_auth.verify_token
def verify_token(token):
if token == 'this-is-the-token!':
return 'user'
@token_auth3.verify_token
def verify_token3(token):
if token == 'this-is-the-token!':
return 'user'
@token_auth.error_handler
def error_handler():
return 'error', 401, {'WWW-Authenticate': 'MyToken realm="Foo"'}
@app.route('/')
def index():
return 'index'
@app.route('/protected')
@token_auth.login_required
def token_auth_route():
return 'token_auth:' + token_auth.current_user()
@app.route('/protected-optional')
@token_auth.login_required(optional=True)
def token_auth_optional_route():
return 'token_auth:' + str(token_auth.current_user())
@app.route('/protected2')
@token_auth2.login_required
def token_auth_route2():
return 'token_auth2'
@app.route('/protected3')
@token_auth3.login_required
def token_auth_route3():
return 'token_auth3:' + token_auth3.current_user()
self.app = app
self.token_auth = token_auth
self.client = app.test_client()
def test_token_auth_prompt(self):
response = self.client.get('/protected')
self.assertEqual(response.status_code, 401)
self.assertTrue('WWW-Authenticate' in response.headers)
self.assertEqual(response.headers['WWW-Authenticate'],
'MyToken realm="Foo"')
def test_token_auth_ignore_options(self):
response = self.client.options('/protected')
self.assertEqual(response.status_code, 200)
self.assertTrue('WWW-Authenticate' not in response.headers)
def test_token_auth_login_valid(self):
response = self.client.get(
'/protected', headers={'Authorization':
'MyToken this-is-the-token!'})
self.assertEqual(response.data.decode('utf-8'), 'token_auth:user')
def test_token_auth_login_valid_different_case(self):
response = self.client.get(
'/protected', headers={'Authorization':
'mytoken this-is-the-token!'})
self.assertEqual(response.data.decode('utf-8'), 'token_auth:user')
def test_token_auth_login_optional(self):
response = self.client.get('/protected-optional')
self.assertEqual(response.data.decode('utf-8'), 'token_auth:None')
def test_token_auth_login_invalid_token(self):
response = self.client.get(
'/protected', headers={'Authorization':
'MyToken this-is-not-the-token!'})
self.assertEqual(response.status_code, 401)
self.assertTrue('WWW-Authenticate' in response.headers)
self.assertEqual(response.headers['WWW-Authenticate'],
'MyToken realm="Foo"')
def test_token_auth_login_invalid_scheme(self):
response = self.client.get(
'/protected', headers={'Authorization': 'Foo this-is-the-token!'})
self.assertEqual(response.status_code, 401)
self.assertTrue('WWW-Authenticate' in response.headers)
self.assertEqual(response.headers['WWW-Authenticate'],
'MyToken realm="Foo"')
def test_token_auth_login_invalid_header(self):
response = self.client.get(
'/protected', headers={'Authorization': 'this-is-a-bad-header'})
self.assertEqual(response.status_code, 401)
self.assertTrue('WWW-Authenticate' in response.headers)
self.assertEqual(response.headers['WWW-Authenticate'],
'MyToken realm="Foo"')
def test_token_auth_login_invalid_no_callback(self):
response = self.client.get(
'/protected2', headers={'Authorization':
'Token this-is-the-token!'})
self.assertEqual(response.status_code, 401)
self.assertTrue('WWW-Authenticate' in response.headers)
self.assertEqual(response.headers['WWW-Authenticate'],
'Token realm="foo"')
def test_token_auth_custom_header_valid_token(self):
response = self.client.get(
'/protected3', headers={'X-API-Key': 'this-is-the-token!'})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data.decode('utf-8'), 'token_auth3:user')
def test_token_auth_custom_header_invalid_token(self):
response = self.client.get(
'/protected3', headers={'X-API-Key': 'invalid-token-should-fail'})
self.assertEqual(response.status_code, 401)
self.assertTrue('WWW-Authenticate' in response.headers)
def test_token_auth_custom_header_invalid_header(self):
response = self.client.get(
'/protected3', headers={'API-Key': 'this-is-the-token!'})
self.assertEqual(response.status_code, 401)
self.assertTrue('WWW-Authenticate' in response.headers)
self.assertEqual(response.headers['WWW-Authenticate'],
'Bearer realm="Authentication Required"')
def test_token_auth_header_precedence(self):
basic_creds = base64.b64encode(b'susan:bye').decode('utf-8')
response = self.client.get(
'/protected3', headers={'Authorization': 'Basic ' + basic_creds,
'X-API-Key': 'this-is-the-token!'})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data.decode('utf-8'), 'token_auth3:user')
|
LeetCode/python3/914.py | ZintrulCre/LeetCode_Archiver | 279 | 11169672 | class Solution:
def hasGroupsSizeX(self, deck):
"""
:type deck: List[int]
:rtype: bool
"""
from fractions import gcd
from collections import Counter
from functools import reduce
vals = Counter(deck).values()
g = reduce(gcd, vals)
return g >= 2
|
tests/tgan/test_launcher.py | HDI-Project/TGAN | 131 | 11169721 | <filename>tests/tgan/test_launcher.py
'''
from unittest import TestCase, skip
from unittest.mock import MagicMock, patch
# from tgan import launcher
launcher = None
@skip()
class TestLauncher(TestCase):
@patch('tgan.launcher.subprocess.call', autospec=True)
@patch('tgan.launcher.multiprocessing.current_process', autospec=True)
def test_worker(self, current_mock, call_mock):
""" """
# Setup
task_tuple = [
['path to executable', '--args']
]
current_mock.return_value = MagicMock(_identity=[1])
# Run
result = launcher.worker(task_tuple)
# Check
assert result is None
current_mock.assert_called_once_with()
call_mock.assert_called_once_with(['path to executable', '--args', '--gpu', '1'])
@patch('tgan.launcher.evaluate_classification', autospec=True)
@patch('tgan.launcher.npz_to_csv', autospec=True)
def test_evaluate_worker(self, npz_mock, evaluate_mock):
""" """
# Setup
task_tuple = (
'model_id',
'model_arg',
'epoch_id',
'epoch_t',
'working_dir',
'test_csv',
'continuous_cols'
)
evaluate_mock.return_value = 'score'
expected_result = ('model_id', 'epoch_id', 'score')
# Run
result = launcher.evaluate_worker(task_tuple)
# Check
assert result == expected_result
npz_mock.assert_called_once_with(
'working_dir/syntheticmodel_id_epoch_t.npz',
'working_dir/syntheticmodel_id_epoch_t.csv',
)
evaluate_mock.assert_called_once_with(
'working_dir/syntheticmodel_id_epoch_t.csv', 'test_csv', 'continuous_cols')
@patch('tgan.launcher.evaluate_classification', autospec=True)
@patch('tgan.launcher.npz_to_csv', autospec=True)
def test_evaluate_worker_returns_minus_1_on_error(self, npz_mock, evaluate_mock):
"""evaluate_worker returns -1 if there is an error during the scoring of model."""
# Setup
task_tuple = (
'model_id',
'model_arg',
'epoch_id',
'epoch_t',
'working_dir',
'test_csv',
'continuous_cols'
)
evaluate_mock.side_effect = Exception('Something failed')
expected_result = ('model_id', 'epoch_id', -1)
# Run
result = launcher.evaluate_worker(task_tuple)
# Check
assert result == expected_result
npz_mock.assert_called_once_with(
'working_dir/syntheticmodel_id_epoch_t.npz',
'working_dir/syntheticmodel_id_epoch_t.csv',
)
evaluate_mock.assert_called_once_with(
'working_dir/syntheticmodel_id_epoch_t.csv', 'test_csv', 'continuous_cols')
@patch('tgan.launcher.LOGGER.error', autospec=True)
@patch('tgan.launcher.os.mkdir', autospec=True)
def test_run_experiment_wrong_folder_return_none(self, mkdir_mock, log_mock):
""" """
# Setup
task = {
'name': 'name',
'epoch': '',
'steps_per_epoch': '',
'output_epoch': '',
'sample_rows': '',
'train_csv': '',
'continuous_cols': ''
}
mkdir_mock.side_effect = Exception('something went wrong')
# Run
result = launcher.run_experiment(task)
# Check
assert result is None
mkdir_mock.assert_called_once_with('expdir/name')
log_mock.assert_called_once_with('skip %s, folder exist.', 'name')
'''
|
pyx12/errh_xml.py | azoner/pyx12 | 120 | 11169751 | <reponame>azoner/pyx12
######################################################################
# Copyright (c)
# <NAME> <<EMAIL>>
# All rights reserved.
#
# This software is licensed as described in the file LICENSE.txt, which
# you should have received as part of this distribution.
#
######################################################################
"""
Capture X12 Errors
"""
import logging
import tempfile
import os
# Intrapackage imports
from .errors import EngineError
from .xmlwriter import XMLWriter
class err_handler(object):
"""
The interface to the error handling structures.
"""
def __init__(self, xml_out=None, basedir=None):
"""
@param xml_out: Output filename, if None, will dump to tempfile
@param basedir: working directory, where file will be created
"""
self.logger = logging.getLogger('pyx12.errh_xml')
if xml_out:
self.filename = xml_out
self.fd = open(xml_out, 'w')
else:
try:
(fdesc, self.filename) = tempfile.mkstemp('.xml', 'pyx12_')
self.fd = os.fdopen(fdesc, 'w+b')
except:
(fdesc, self.filename) = tempfile.mkstemp(suffix='.xml', prefix='pyx12_', dir=basedir)
self.fd = os.fdopen(fdesc, 'w+b')
self.cur_line = None
self.errors = []
if not self.fd:
raise EngineError('Could not open temp error xml file')
self.writer = XMLWriter(self.fd)
self.writer.push("x12err")
def __del__(self):
while len(self.writer) > 0:
self.writer.pop()
if not self.fd.closed:
self.fd.close()
def getFilename(self):
return self.filename
def handleErrors(self, err_list):
"""
@param err_list: list of errors to apply
"""
self.errors.extend(err_list)
#for (err_type, err_cde, err_str, err_val, src_line) in err_list:
# if err_type == 'isa':
# self.isa_error(err_cde, err_str)
# elif err_type == 'gs':
# self.gs_error(err_cde, err_str)
# elif err_type == 'st':
# self.st_error(err_cde, err_str)
# elif err_type == 'seg':
# self.seg_error(err_cde, err_str, err_val, src_line)
def getCurLine(self):
"""
@return: Current file line number
@rtype: int
"""
return self.cur_line
def Write(self, cur_line):
"""
Generate XML for the segment data and matching map node
"""
if len(self.errors) > 0:
self.writer.push("seg", attrs={'line': '%i' % (cur_line)})
for (err_type, err_cde, err_str, err_val, src_line) in self.errors:
self.writer.push("err", attrs={"code": err_cde})
#self.writer.elem(u"type", err_type)
#self.writer.elem(u"code", err_cde)
self.writer.elem("desc", err_str)
if err_val:
self.writer.elem("errval", err_val)
#self.writer.push(u"seg", {u'line': '%i'%(cur_line)})
#self.writer.elem(u'ele', seg_data.get_value('%02i' %
#(i+1)),
# attrs={u'id': child_node.id})
self.writer.pop() # end err
self.writer.pop() # end segment
self.errors = []
class ErrorErrhNull(Exception):
"""Class for errh_null errors."""
class errh_list(object):
"""
A null error object - used for testing.
Stores the current error in simple variables.
"""
def __init__(self):
self.logger = logging.getLogger('pyx12.errh_xml')
#self.id = 'ROOT'
self.errors = []
#self.cur_node = self
self.cur_line = 0
#self.err_cde = None
#self.err_str = None
def get_errors(self):
return self.errors
def reset(self):
self.errors = []
def get_cur_line(self):
"""
@return: Current file line number
@rtype: int
"""
return self.cur_line
def set_cur_line(self, cur_line):
"""
"""
self.cur_line = cur_line
# def get_id(self):
# """
# @return: Error node type
# @rtype: string
# """
# return self.id
def add_isa_loop(self, seg, src):
"""
"""
#raise ErrorErrhNull, 'add_isa loop'
pass
def add_gs_loop(self, seg, src):
"""
"""
pass
def add_st_loop(self, seg, src):
"""
"""
pass
def add_seg(self, map_node, seg, seg_count, cur_line, ls_id):
"""
"""
pass
def add_ele(self, map_node):
"""
"""
pass
def isa_error(self, err_cde, err_str):
"""
@param err_cde: ISA level error code
@type err_cde: string
@param err_str: Description of the error
@type err_str: string
"""
self.errors.append(('isa', err_cde, err_str, None, None))
sout = ''
sout += 'Line:%i ' % (self.cur_line)
sout += 'ISA:%s - %s' % (err_cde, err_str)
self.logger.error(sout)
def gs_error(self, err_cde, err_str):
"""
@param err_cde: GS level error code
@type err_cde: string
@param err_str: Description of the error
@type err_str: string
"""
self.errors.append(('gs', err_cde, err_str, None, None))
sout = ''
sout += 'Line:%i ' % (self.cur_line)
sout += 'GS:%s - %s' % (err_cde, err_str)
self.logger.error(sout)
def st_error(self, err_cde, err_str):
"""
@param err_cde: Segment level error code
@type err_cde: string
@param err_str: Description of the error
@type err_str: string
"""
self.errors.append(('st', err_cde, err_str, None, None))
sout = ''
sout += 'Line:%i ' % (self.cur_line)
sout += 'ST:%s - %s' % (err_cde, err_str)
self.logger.error(sout)
def seg_error(self, err_cde, err_str, err_value=None, src_line=None):
"""
@param err_cde: Segment level error code
@type err_cde: string
@param err_str: Description of the error
@type err_str: string
"""
self.errors.append(('seg', err_cde, err_str, err_value, src_line))
sout = ''
sout += 'Line:%i ' % (self.cur_line)
sout += 'SEG:%s - %s' % (err_cde, err_str)
if err_value:
sout += ' (%s)' % err_value
self.logger.error(sout)
def ele_error(self, err_cde, err_str, bad_value):
"""
@param err_cde: Element level error code
@type err_cde: string
@param err_str: Description of the error
@type err_str: string
"""
self.errors.append(('ele', err_cde, err_str, bad_value, None))
sout = ''
sout += 'Line:%i ' % (self.cur_line)
sout += 'ELE:%s - %s' % (err_cde, err_str)
if bad_value:
sout += ' (%s)' % (bad_value)
self.logger.error(sout)
def close_isa_loop(self, node, seg, src):
"""
"""
pass
def close_gs_loop(self, node, seg, src):
"""
"""
pass
def close_st_loop(self, node, seg, src):
"""
"""
pass
def find_node(self, atype):
"""
Find the last node of a type
"""
pass
def get_parent(self):
return None
# def get_first_child(self):
# """
# """
# if len(self.children) > 0:
# return self.children[0]
# else:
# return None
def get_next_sibling(self):
"""
"""
return None
def get_error_count(self):
"""
"""
return len(self.errors)
def is_closed(self):
"""
@rtype: boolean
"""
return True
|
utime/preprocessing/dataset_preparation/phys/phys.py | learning310/U-Time | 138 | 11169753 | <reponame>learning310/U-Time
import os
from glob import glob
from utime.preprocessing.dataset_preparation.utils import download_dataset
# Get path to current module file
_FILE_PATH = os.path.split(__file__)[0]
# Server base URL
_SERVER_URL = "https://physionet.org/files/challenge-2018/1.0.0"
_CHECKSUM_FILE = "{}/phys_checksums.txt".format(_FILE_PATH)
def phys_paths_func(file_name, server_url, out_dataset_folder):
"""
See utime/preprocessing/dataset_preparation/utils.py [download_dataset]
A callable of signature func(file_name, server_url, out_dataset_folder) which returns:
1) download_url (path to fetch file from on remote system)
2) out_file_path (path to store file on local system)
"""
download_url = server_url + "/{}".format(file_name)
out_subject_folder, file_name = file_name.replace("training/", "").split("/")
out_file_path = os.path.join(out_dataset_folder, out_subject_folder, file_name)
return download_url, out_file_path
def download_phys(out_dataset_folder, N_first=None):
""" Download the DCSM (255 records) dataset """
return download_dataset(
out_dataset_folder=out_dataset_folder,
server_url=_SERVER_URL,
checksums_path=_CHECKSUM_FILE,
paths_func=phys_paths_func,
N_first=N_first*3 if N_first else None # Three items per subject
)
def preprocess_phys_hypnograms(dataset_folder_path):
"""
Preprocesses files from the PHYS dataset.
OBS: Only processes the hypnogram (.arousal) files
Creates 1 new file in each PHYS subject dir (.ids format)
:param dataset_folder_path: path to PHYS file on local disk
:return: None
"""
import numpy as np
from wfdb.io import rdann
from utime.io.high_level_file_loaders import load_psg
from utime.bin.extract_hypno import to_ids
from utime.hypnogram import SparseHypnogram
from utime import Defaults
# Get list of subject folders
subject_folders = glob(os.path.join(dataset_folder_path, "tr*"))
LABEL_MAP = {
'N1': "N1",
'N2': "N2",
'N3': "N3",
'R': "REM",
'W': "W",
}
for i, folder in enumerate(subject_folders):
name = os.path.split(os.path.abspath(folder))[-1]
print(f"{i+1}/{len(subject_folders)}", name)
# Get sleep-stages
edf_file = folder + f"/{name}.mat"
org_hyp_file = folder + f"/{name}.arousal"
new_hyp_file = folder + f"/{name}.arousal.st"
out_path = new_hyp_file.replace(".arousal.st", "-HYP.ids")
if os.path.exists(out_path):
print("Exists, skipping...")
continue
if os.path.exists(org_hyp_file):
os.rename(org_hyp_file, new_hyp_file)
psg, header = load_psg(edf_file, load_channels=['C3-M2'])
hyp = rdann(new_hyp_file[:-3], "st")
sample_rate = header["sample_rate"]
psg_length_sec = len(psg)/sample_rate
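        # Keep only sleep-stage labels (drop '(' / ')' event-boundary markers) and
        # convert each annotation's sample index to seconds.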
pairs = zip(hyp.aux_note, hyp.sample)
stages = [s for s in pairs if not ("(" in s[0] or ")" in s[0])]
stages = [(s[0], int(s[1]/sample_rate)) for s in stages]
stages, starts = map(list, zip(*stages))
if starts[0] != 0:
i = [0] + starts
s = ["UNKNOWN"] + [LABEL_MAP[s] for s in stages]
else:
i, s = starts, stages
diff = psg_length_sec - i[-1]
assert diff >= 0
d = list(np.diff(i)) + [(diff//30) * 30]
SparseHypnogram(i, d, [Defaults.get_stage_string_to_class_int()[s_] for s_ in s], 30)
to_ids(i, d, s, out_path)
|
src/mceditlib/structure.py | elcarrion06/mcedit2 | 673 | 11169780 | """
structure
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from math import floor
from mceditlib import nbt
log = logging.getLogger(__name__)
def exportStructure(filename, dim, selection, author=None, excludedBlocks=None):
"""
Parameters
----------
filename : unicode
dim : mceditlib.worldeditor.WorldEditorDimension
selection : mceditlib.selection.SelectionBox
Returns
    -------
    None. The selected region is written to ``filename`` as an NBT structure file.
    """
excludedBlocks = set(excludedBlocks or [])
rootTag = nbt.TAG_Compound()
rootTag['author'] = nbt.TAG_String(author or "Anonymous")
rootTag['version'] = nbt.TAG_Int(1)
rootTag['size'] = nbt.TAG_List([nbt.TAG_Int(s) for s in selection.size])
entities = rootTag['entities'] = nbt.TAG_List(list_type=nbt.ID_COMPOUND)
blocks = rootTag['blocks'] = nbt.TAG_List(list_type=nbt.ID_COMPOUND)
palette = rootTag['palette'] = nbt.TAG_List(list_type=nbt.ID_COMPOUND)
ox, oy, oz = selection.origin
paletteIDs = {}
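    # Each unique block name+state gets one entry in the palette; block records then
    # store only the palette index and a position relative to the selection origin.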
for x, y, z in selection.positions:
block = dim.getBlock(x, y, z)
if block in excludedBlocks:
continue
paletteIdx = paletteIDs.get(block.nameAndState, None)
if paletteIdx is None:
paletteTag = nbt.TAG_Compound()
paletteTag['Name'] = nbt.TAG_String(block.internalName)
if len(block.stateDict):
paletteTag['Properties'] = nbt.TAG_Compound()
for k, v in block.stateDict.iteritems():
paletteTag['Properties'][k] = nbt.TAG_String(v)
paletteIdx = paletteIDs[block.nameAndState] = len(palette)
palette.append(paletteTag)
blockTag = nbt.TAG_Compound()
blockTag['state'] = nbt.TAG_Int(paletteIdx)
        blockTag['pos'] = nbt.TAG_List([nbt.TAG_Int(a) for a in (x - ox, y - oy, z - oz)])
tileEntity = dim.getTileEntity((x, y, z))
if tileEntity:
tileEntity = tileEntity.copyWithOffset(-selection.origin)
blockTag['nbt'] = tileEntity.rootTag
blocks.append(blockTag)
for entity in dim.getEntities(selection):
entity = entity.copyWithOffset(-selection.origin)
entityTag = nbt.TAG_Compound()
entityTag['pos'] = nbt.TAG_List([nbt.TAG_Double(a) for a in entity.Position])
entityTag['blockPos'] = nbt.TAG_List([nbt.TAG_Int(int(floor(a))) for a in entity.Position])
entityTag['nbt'] = entity.rootTag
entities.append(entityTag)
rootTag.save(filename) |
sen/tui/views/disk_usage.py | lachmanfrantisek/sen | 956 | 11169785 | """
TODO:
* nicer list
* summary
* clickable items
* enable deleting volumes
"""
import urwid
from sen.util import humanize_bytes, graceful_chain_get
from sen.tui.views.base import View
from sen.tui.widgets.list.util import SingleTextRow
from sen.tui.widgets.table import assemble_rows
from sen.tui.constants import MAIN_LIST_FOCUS
from sen.tui.widgets.util import SelectableText, get_map
from sen.tui.widgets.list.base import WidgetBase
class DfBufferView(WidgetBase, View):
def __init__(self, ui, buffer):
"""
:param ui:
:param buffer: Buffer instance, display help about this buffer
"""
self.ui = ui
self.buffer = buffer
self.walker = urwid.SimpleFocusListWalker([])
super().__init__(ui, self.walker)
def refresh(self, df=None, containers=None, images=None):
content = []
if df is None:
content += [
SingleTextRow("Data is being loaded, it may even take a couple minutes.",
maps={"normal": "main_list_white", "focus": MAIN_LIST_FOCUS}),
]
else:
if containers:
content += [
SingleTextRow("Containers",
maps={"normal": "main_list_white", "focus": MAIN_LIST_FOCUS}),
SingleTextRow("")
]
containers_content = [[
SelectableText("Name", maps=get_map("main_list_lg")),
SelectableText("Image Size", maps=get_map("main_list_lg")),
SelectableText("Writable Layer Size", maps=get_map("main_list_lg")),
]]
for c in containers:
containers_content.append(
[SelectableText(c.short_name),
SelectableText(humanize_bytes(c.size_root_fs or 0)),
SelectableText(humanize_bytes(c.size_rw_fs or 0)),
])
content.extend(assemble_rows(containers_content, dividechars=3))
content += [
SingleTextRow("")
]
if images:
content += [
SingleTextRow("Images",
maps={"normal": "main_list_white", "focus": MAIN_LIST_FOCUS}),
SingleTextRow("")
]
images_content = [[
SelectableText("Name", maps=get_map("main_list_lg")),
SelectableText("Size", maps=get_map("main_list_lg")),
SelectableText("Shared Size", maps=get_map("main_list_lg")),
SelectableText("Unique Size", maps=get_map("main_list_lg"))
]]
for i in images:
images_content.append([
SelectableText(i.short_name, maps=get_map("main_list_dg")),
SelectableText(
humanize_bytes(i.total_size or 0),
maps=get_map("main_list_dg")),
SelectableText(
humanize_bytes(i.shared_size or 0),
maps=get_map("main_list_dg")),
SelectableText(
humanize_bytes(i.unique_size or 0),
maps=get_map("main_list_dg"))
])
content.extend(assemble_rows(images_content, dividechars=3))
content += [
SingleTextRow("")
]
volumes = graceful_chain_get(df, "Volumes")
if volumes:
content += [
SingleTextRow("Volumes",
maps={"normal": "main_list_white", "focus": MAIN_LIST_FOCUS}),
SingleTextRow("")
]
volumes_content = [[
SelectableText("Name", maps=get_map("main_list_lg")),
SelectableText("Links", maps=get_map("main_list_lg")),
SelectableText("Size", maps=get_map("main_list_lg")),
]]
for v in volumes:
v_name = graceful_chain_get(v, "Name", default="")
v_size = graceful_chain_get(v, "UsageData", "Size", default=0)
v_links = graceful_chain_get(v, "UsageData", "RefCount", default=0)
volumes_content.append([
SelectableText(v_name, maps=get_map("main_list_dg")),
SelectableText("%s" % v_links, maps=get_map("main_list_dg")),
SelectableText(
humanize_bytes(v_size),
maps=get_map("main_list_dg")),
])
content.extend(assemble_rows(volumes_content, dividechars=3))
self.set_body(content)
self.set_focus(0)
|
tutorial/ctc_loss.py | sailist/ASRFrame | 223 | 11169807 | <filename>tutorial/ctc_loss.py
import numpy as np
import keras.backend as K
import tensorflow as tf
a = [1,2,3,1,2,4,6,6,6,6]
b = [3,1,2,3,5,1,6,6,6,6]
c = [2,1,0,2,3,4,6,6,6,6]
y_true = np.stack([a,b,c])
y_pred = np.random.rand(3,15,7).astype(np.float32)
input_length = np.stack([[7],[8],[9]])
label_length = np.stack([[4],[4],[4]])
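# Shapes: y_true is (batch, max_label_len) integer labels, padded past label_length;
# y_pred is (batch, timesteps, num_classes) per-frame class scores (random values here);
# input_length and label_length give the valid timesteps / labels per sample, shape (batch, 1).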
result = K.ctc_batch_cost(y_true,y_pred,input_length,label_length)
print(K.eval(result)) |
astrality/tests/conftest.py | JakobGM/Astrality | 111 | 11169851 | """Application wide fixtures."""
import os
from pathlib import Path
import shutil
import pytest
import astrality
from astrality.actions import ActionBlock
from astrality.config import GlobalModulesConfig, user_configuration
from astrality.context import Context
from astrality.module import Module, ModuleManager
@pytest.fixture
def conf_path():
"""Return str path to configuration directory."""
conf_path = Path(__file__).parents[1] / 'config'
return conf_path
@pytest.fixture
def conf_file_path(conf_path):
"""Return path to example configuration."""
return conf_path / 'astrality.yml'
@pytest.fixture(scope='session', autouse=True)
def conf():
"""Return the configuration object for the example configuration."""
this_test_file = os.path.abspath(__file__)
conf_path = Path(this_test_file).parents[1] / 'config'
return user_configuration(conf_path)[0]
@pytest.fixture(scope='session', autouse=True)
def context():
"""Return the context object for the example configuration."""
this_test_file = os.path.abspath(__file__)
conf_path = Path(this_test_file).parents[1] / 'config'
return user_configuration(conf_path)[2]
@pytest.fixture(scope='session', autouse=True)
def modules():
"""Return the modules object for the example configuration."""
this_test_file = os.path.abspath(__file__)
conf_path = Path(this_test_file).parents[1] / 'config'
return user_configuration(conf_path)[1]
@pytest.fixture
def test_config_directory():
"""Return path to test config directory."""
return Path(__file__).parent / 'test_config'
@pytest.yield_fixture
def temp_directory(tmpdir):
"""Return path to temporary directory, and cleanup afterwards."""
return Path(tmpdir).resolve()
@pytest.fixture
def context_directory(test_config_directory):
"""Return path to directory containing several context files."""
return test_config_directory / 'context'
@pytest.fixture
def template_directory(test_config_directory):
"""Return path to directory containing several templates"""
return test_config_directory / 'templates'
@pytest.fixture
def module_factory(test_config_directory):
"""Return Module factory for testing."""
def _module_factory(
name='test',
on_startup=None,
on_modified=None,
on_exit=None,
path=None,
module_directory=test_config_directory / 'test_modules' /
'using_all_actions',
replacer=lambda x: x,
context_store={},
) -> Module:
"""Return module with specified action blocks and config."""
module = Module(
name=name,
module_config={},
module_directory=module_directory,
replacer=replacer,
context_store=context_store,
)
if on_startup:
module.action_blocks['on_startup'] = on_startup
if on_exit:
module.action_blocks['on_exit'] = on_exit
if on_modified:
module.action_blocks['on_modified'][path] = on_modified
return module
return _module_factory
@pytest.fixture
def module_manager_factory():
"""Return ModuleManager factory for testing."""
def _module_manager_factory(
*modules,
context=Context(),
) -> ModuleManager:
"""Return ModuleManager object with given modules and context."""
module_manager = ModuleManager(
context=context,
)
module_manager.modules = {
module.name: module
for module
in modules
}
# Insert correct context for all actions
for module in modules:
for block in module.all_action_blocks():
for action_type in ActionBlock.action_types:
for actions in getattr(block, f'_{action_type}_actions'):
actions.context_store = context
return module_manager
return _module_manager_factory
@pytest.fixture
def create_temp_files(tmpdir):
"""Return temp file factory function."""
temp_dir = Path(tmpdir)
def _create_temp_files(number):
"""Create `number` tempfiles in seperate directories and yield paths."""
for _number in range(number):
temp_file = temp_dir / str(_number) / f'file{_number}.temp'
temp_file.parent.mkdir(parents=True)
temp_file.touch()
yield temp_file
return _create_temp_files
@pytest.fixture
def action_block_factory(test_config_directory):
"""Return action block factory function for testing."""
def _action_block_factory(
import_context={},
compile={},
copy={},
run={},
stow={},
symlink={},
directory=test_config_directory,
replacer=lambda x: x,
context_store=Context(),
):
"""Return module with given parameters."""
config = {
'import_context': import_context,
'compile': compile,
'copy': copy,
'run': run,
'stow': stow,
'symlink': symlink,
}
return ActionBlock(
action_block=config,
directory=directory,
replacer=replacer,
context_store=context_store,
global_modules_config=GlobalModulesConfig(
config={},
config_directory=test_config_directory,
),
module_name='test',
)
return _action_block_factory
@pytest.yield_fixture(autouse=True)
def patch_xdg_directory_standard(tmpdir, monkeypatch, request):
"""During testing, the XDG directory standard is monkeypatched."""
if 'dont_patch_xdg' in request.keywords:
yield
return
data_dir = Path(tmpdir).parent / '.local' / 'share' / 'astrality'
# Clear data directory before the test
if data_dir.exists():
shutil.rmtree(str(data_dir))
data_dir.mkdir(parents=True)
monkeypatch.setattr(
astrality.xdg.XDG,
'data_home',
data_dir,
)
yield data_dir
# Delete directory for next test
if data_dir.exists():
shutil.rmtree(str(data_dir))
@pytest.fixture(autouse=True)
def patch_astrality_config_home(monkeypatch):
"""Patch $ASTRALITY_CONFIG_HOME."""
example_config = Path(__file__).parents[2] / 'config'
monkeypatch.setitem(
os.environ,
'ASTRALITY_CONFIG_HOME',
str(example_config),
)
|
airflow/providers/amazon/aws/operators/emr_containers.py | ChaseKnowlden/airflow | 15,947 | 11169867 | <gh_stars>1000+
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Any, Optional
from uuid import uuid4
from airflow.exceptions import AirflowException
try:
from functools import cached_property
except ImportError:
from cached_property import cached_property
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.emr_containers import EMRContainerHook
class EMRContainerOperator(BaseOperator):
"""
An operator that submits jobs to EMR on EKS virtual clusters.
:param name: The name of the job run.
:type name: str
:param virtual_cluster_id: The EMR on EKS virtual cluster ID
:type virtual_cluster_id: str
:param execution_role_arn: The IAM role ARN associated with the job run.
:type execution_role_arn: str
:param release_label: The Amazon EMR release version to use for the job run.
:type release_label: str
:param job_driver: Job configuration details, e.g. the Spark job parameters.
:type job_driver: dict
:param configuration_overrides: The configuration overrides for the job run,
specifically either application configuration or monitoring configuration.
:type configuration_overrides: dict
:param client_request_token: The client idempotency token of the job run request.
Use this if you want to specify a unique ID to prevent two jobs from getting started.
If no token is provided, a UUIDv4 token will be generated for you.
:type client_request_token: str
:param aws_conn_id: The Airflow connection used for AWS credentials.
:type aws_conn_id: str
:param poll_interval: Time (in seconds) to wait between two consecutive calls to check query status on EMR
:type poll_interval: int
:param max_tries: Maximum number of times to wait for the job run to finish.
Defaults to None, which will poll until the job is *not* in a pending, submitted, or running state.
:type max_tries: int
"""
template_fields = ["name", "virtual_cluster_id", "execution_role_arn", "release_label", "job_driver"]
ui_color = "#f9c915"
def __init__( # pylint: disable=too-many-arguments
self,
*,
name: str,
virtual_cluster_id: str,
execution_role_arn: str,
release_label: str,
job_driver: dict,
configuration_overrides: Optional[dict] = None,
client_request_token: Optional[str] = None,
aws_conn_id: str = "aws_default",
poll_interval: int = 30,
max_tries: Optional[int] = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.name = name
self.virtual_cluster_id = virtual_cluster_id
self.execution_role_arn = execution_role_arn
self.release_label = release_label
self.job_driver = job_driver
self.configuration_overrides = configuration_overrides or {}
self.aws_conn_id = aws_conn_id
self.client_request_token = client_request_token or str(uuid4())
self.poll_interval = poll_interval
self.max_tries = max_tries
self.job_id = None
@cached_property
def hook(self) -> EMRContainerHook:
"""Create and return an EMRContainerHook."""
return EMRContainerHook(
self.aws_conn_id,
virtual_cluster_id=self.virtual_cluster_id,
)
def execute(self, context: dict) -> Optional[str]:
"""Run job on EMR Containers"""
self.job_id = self.hook.submit_job(
self.name,
self.execution_role_arn,
self.release_label,
self.job_driver,
self.configuration_overrides,
self.client_request_token,
)
query_status = self.hook.poll_query_status(self.job_id, self.max_tries, self.poll_interval)
if query_status in EMRContainerHook.FAILURE_STATES:
error_message = self.hook.get_job_failure_reason(self.job_id)
raise AirflowException(
f"EMR Containers job failed. Final state is {query_status}. "
f"query_execution_id is {self.job_id}. Error: {error_message}"
)
elif not query_status or query_status in EMRContainerHook.INTERMEDIATE_STATES:
raise AirflowException(
f"Final state of EMR Containers job is {query_status}. "
f"Max tries of poll status exceeded, query_execution_id is {self.job_id}."
)
return self.job_id
def on_kill(self) -> None:
"""Cancel the submitted job run"""
if self.job_id:
self.log.info("Stopping job run with jobId - %s", self.job_id)
response = self.hook.stop_query(self.job_id)
http_status_code = None
try:
http_status_code = response["ResponseMetadata"]["HTTPStatusCode"]
except Exception as ex:
self.log.error("Exception while cancelling query: %s", ex)
finally:
if http_status_code is None or http_status_code != 200:
self.log.error("Unable to request query cancel on EMR. Exiting")
else:
self.log.info(
"Polling EMR for query with id %s to reach final state",
self.job_id,
)
self.hook.poll_query_status(self.job_id)
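# Minimal usage sketch (not part of the original module): all values below are
# placeholders, and the ``job_driver`` payload assumes the EMR on EKS StartJobRun
# API shape.
#
# job_starter = EMRContainerOperator(
#     task_id="start_job",
#     name="sample-job",
#     virtual_cluster_id="vc-0123456789abcdef",
#     execution_role_arn="arn:aws:iam::123456789012:role/emr-eks-job-role",
#     release_label="emr-6.3.0-latest",
#     job_driver={
#         "sparkSubmitJobDriver": {
#             "entryPoint": "s3://my-bucket/scripts/pi.py",
#             "sparkSubmitParameters": "--conf spark.executor.instances=2",
#         }
#     },
# )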
|
neupy/layers/activations.py | FrostByte266/neupy | 801 | 11169884
import numpy as np
import tensorflow as tf
from neupy import init
from neupy.utils import asfloat, as_tuple, tf_utils
from neupy.exceptions import LayerConnectionError, WeightInitializationError
from neupy.core.properties import (
NumberProperty, TypedListProperty,
ParameterProperty, IntProperty,
)
from .base import BaseLayer
__all__ = (
'Linear', 'Sigmoid', 'Tanh', 'Softmax',
'Relu', 'LeakyRelu', 'Elu', 'PRelu',
'Softplus', 'HardSigmoid',
)
class Linear(BaseLayer):
"""
    Layer with linear activation function. It applies a linear transformation
    when the ``n_units`` parameter is specified and acts as an identity
    when it is not specified.
Parameters
----------
n_units : int or None
        Number of units in the layer. It also corresponds to the number of
        output features that will be produced per sample after passing it
        through this layer. The ``None`` value means that the layer will not
        have parameters and will only apply the activation function to the
        input, without a linear transformation.
Defaults to ``None``.
    weight : array-like, Tensorflow variable, scalar or Initializer
        Defines layer's weights. Default initialization methods
        you can find :ref:`here <init-methods>`.
        Defaults to :class:`HeNormal() <neupy.init.HeNormal>`.
    bias : 1D array-like, Tensorflow variable, scalar, Initializer or None
        Defines layer's bias. Default initialization methods you can find
        :ref:`here <init-methods>`. Defaults to
        :class:`Constant(0) <neupy.init.Constant>`.
        The ``None`` value excludes bias from the calculations and
        does not add it to the parameters list.
{BaseLayer.name}
Methods
-------
{BaseLayer.Methods}
activation_function(input)
Applies activation function to the input.
Attributes
----------
{BaseLayer.Attributes}
Examples
--------
Linear Regression
>>> from neupy.layers import *
>>> network = Input(10) >> Linear(5)
"""
n_units = IntProperty(minval=1, allow_none=True)
weight = ParameterProperty()
bias = ParameterProperty(allow_none=True)
def __init__(self, n_units=None, weight=init.HeNormal(), bias=0,
name=None):
super(Linear, self).__init__(name=name)
self.n_units = n_units
self.weight = weight
self.bias = bias
def get_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape)
if self.n_units is None:
return input_shape
if input_shape and input_shape.ndims != 2:
raise LayerConnectionError(
"Input shape expected to have 2 dimensions, got {} instead. "
"Shape: {}".format(input_shape.ndims, input_shape))
n_samples = input_shape[0]
return tf.TensorShape((n_samples, self.n_units))
def create_variables(self, input_shape):
if self.n_units is None:
return
input_shape = tf.TensorShape(input_shape)
self.input_shape = input_shape
_, n_input_features = input_shape
if n_input_features.value is None:
raise WeightInitializationError(
"Cannot create variables for the layer `{}`, because "
"number of input features is unknown. Input shape: {}"
"Layer: {}".format(self.name, input_shape, self))
self.weight = self.variable(
value=self.weight, name='weight',
shape=as_tuple(n_input_features, self.n_units))
if self.bias is not None:
self.bias = self.variable(
value=self.bias, name='bias',
shape=as_tuple(self.n_units))
def output(self, input, **kwargs):
input = tf.convert_to_tensor(input, dtype=tf.float32)
if self.n_units is None:
return self.activation_function(input)
if self.bias is None:
output = tf.matmul(input, self.weight)
return self.activation_function(output)
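        # Affine transformation: output = input @ weight + bias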
output = tf.matmul(input, self.weight) + self.bias
return self.activation_function(output)
def activation_function(self, input_value):
return input_value
def __repr__(self):
if self.n_units is None:
return self._repr_arguments(name=self.name)
return self._repr_arguments(
self.n_units,
name=self.name,
weight=self.weight,
bias=self.bias,
)
class Sigmoid(Linear):
"""
    Layer with the sigmoid used as an activation function. It applies a
    linear transformation when the ``n_units`` parameter is specified and
    the sigmoid function after the transformation. When ``n_units`` is not
    specified, only the sigmoid function will be applied to the input.
Parameters
----------
{Linear.Parameters}
Methods
-------
{Linear.Methods}
Attributes
----------
{Linear.Attributes}
Examples
--------
Logistic Regression (LR)
>>> from neupy.layers import *
>>> network = Input(10) >> Sigmoid(1)
Feedforward Neural Networks (FNN)
>>> from neupy.layers import *
>>> network = Input(10) >> Sigmoid(5) >> Sigmoid(1)
Convolutional Neural Networks (CNN) for Semantic Segmentation
Sigmoid layer can be used in order to normalize probabilities
per pixel in semantic classification task with two classes.
In the example below, we have as input 32x32 image that predicts
one of the two classes. Sigmoid normalizes raw predictions per pixel
to the valid probabilities.
>>> from neupy.layers import *
>>> network = Input((32, 32, 1)) >> Sigmoid()
"""
def activation_function(self, input_value):
return tf.nn.sigmoid(input_value)
class HardSigmoid(Linear):
"""
    Layer with the hard sigmoid used as an activation function. It applies a
    linear transformation when the ``n_units`` parameter is specified and the
    hard sigmoid function after the transformation. When ``n_units`` is
    not specified, only the hard sigmoid function will be applied to the input.
Parameters
----------
{Linear.Parameters}
Methods
-------
{Linear.Methods}
Attributes
----------
{Linear.Attributes}
Examples
--------
Feedforward Neural Networks (FNN)
>>> from neupy.layers import *
>>> network = Input(10) >> HardSigmoid(5)
"""
def activation_function(self, input_value):
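        # Piecewise-linear approximation of the sigmoid: clip(0.2 * x + 0.5, 0, 1).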
input_value = (0.2 * input_value) + 0.5
return tf.clip_by_value(input_value, 0., 1.)
class Tanh(Linear):
"""
Layer with the hyperbolic tangent used as an activation function.
    It applies a linear transformation when the ``n_units`` parameter is
    specified and the ``tanh`` function after the transformation. When
    ``n_units`` is not specified, only the ``tanh`` function will be applied
    to the input.
Parameters
----------
{Linear.Parameters}
Methods
-------
{Linear.Methods}
Attributes
----------
{Linear.Attributes}
Examples
--------
Feedforward Neural Networks (FNN)
>>> from neupy.layers import *
>>> network = Input(10) >> Tanh(5)
"""
def activation_function(self, input_value):
return tf.nn.tanh(input_value)
class Relu(Linear):
"""
Layer with the rectifier (ReLu) used as an activation function.
    It applies a linear transformation when the ``n_units`` parameter is
    specified and the ``relu`` function after the transformation. When
    ``n_units`` is not specified, only the ``relu`` function will be applied
    to the input.
Parameters
----------
{Linear.n_units}
alpha : float
        The alpha parameter defines the decreasing rate
        for negative values. If ``alpha``
        is a non-zero value then the layer behaves like a
        leaky ReLu. Defaults to ``0``.
    weight : array-like, Tensorflow variable, scalar or Initializer
Defines layer's weights. Default initialization methods
you can find :ref:`here <init-methods>`.
Defaults to :class:`HeNormal(gain=2) <neupy.init.HeNormal>`.
{Linear.bias}
{BaseLayer.name}
Methods
-------
{Linear.Methods}
Attributes
----------
{Linear.Attributes}
Examples
--------
Feedforward Neural Networks (FNN)
>>> from neupy.layers import *
>>> network = Input(10) >> Relu(20) >> Relu(1)
Convolutional Neural Networks (CNN)
>>> from neupy.layers import *
>>> network = join(
... Input((32, 32, 3)),
... Convolution((3, 3, 16)) >> Relu(),
... Convolution((3, 3, 32)) >> Relu(),
... Reshape(),
... Softmax(10),
... )
"""
alpha = NumberProperty(minval=0)
def __init__(self, n_units=None, alpha=0, weight=init.HeNormal(gain=2),
bias=init.Constant(value=0), name=None):
self.alpha = alpha
super(Relu, self).__init__(
n_units=n_units, weight=weight, bias=bias, name=name)
def activation_function(self, input_value):
if self.alpha == 0:
return tf.nn.relu(input_value)
return tf.nn.leaky_relu(input_value, asfloat(self.alpha))
def __repr__(self):
if self.n_units is None:
return self._repr_arguments(name=self.name, alpha=self.alpha)
return self._repr_arguments(
self.n_units,
name=self.name,
alpha=self.alpha,
weight=self.weight,
bias=self.bias,
)
class LeakyRelu(Linear):
"""
Layer with the leaky rectifier (Leaky ReLu) used as an activation
    function. It applies a linear transformation when the ``n_units``
    parameter is specified and the leaky relu function after the
    transformation. When ``n_units`` is not specified, only the leaky relu
    function will be applied to the input.
Parameters
----------
{Linear.Parameters}
Methods
-------
{Linear.Methods}
Attributes
----------
{Linear.Attributes}
Notes
-----
    Does the same as ``Relu(input_size, alpha=0.01)``.
Examples
--------
Feedforward Neural Networks (FNN)
>>> from neupy.layers import *
>>> network = Input(10) >> LeakyRelu(20) >> LeakyRelu(1)
"""
def activation_function(self, input_value):
return tf.nn.leaky_relu(input_value, alpha=asfloat(0.01))
class Softplus(Linear):
"""
    Layer with the softplus used as an activation function. It applies a linear
    transformation when the ``n_units`` parameter is specified and the softplus
    function after the transformation. When ``n_units`` is not specified,
    only the softplus function will be applied to the input.
Parameters
----------
{Linear.Parameters}
Methods
-------
{Linear.Methods}
Attributes
----------
{Linear.Attributes}
Examples
--------
Feedforward Neural Networks (FNN)
>>> from neupy.layers import *
>>> network = Input(10) >> Softplus(4)
"""
def activation_function(self, input_value):
return tf.nn.softplus(input_value)
class Softmax(Linear):
"""
    Layer with the softmax activation function. It applies a linear
    transformation when the ``n_units`` parameter is specified and the softmax
    function after the transformation. When ``n_units`` is not specified,
    only the softmax function will be applied to the input.
Parameters
----------
{Linear.Parameters}
Methods
-------
{Linear.Methods}
Attributes
----------
{Linear.Attributes}
Examples
--------
Feedforward Neural Networks (FNN)
>>> from neupy.layers import *
>>> network = Input(10) >> Relu(20) >> Softmax(10)
Convolutional Neural Networks (CNN) for Semantic Segmentation
Softmax layer can be used in order to normalize probabilities
per pixel. In the example below, we have as input 32x32 image
with raw prediction per each pixel for 10 different classes.
Softmax normalizes raw predictions per pixel to the probability
distribution.
>>> from neupy.layers import *
>>> network = Input((32, 32, 10)) >> Softmax()
"""
def activation_function(self, input_value):
return tf.nn.softmax(input_value)
class Elu(Linear):
"""
Layer with the exponential linear unit (ELU) used as an activation
    function. It applies a linear transformation when the ``n_units``
    parameter is specified and the elu function after the transformation.
    When ``n_units`` is not specified, only the elu function will be
    applied to the input.
Parameters
----------
{Linear.Parameters}
Methods
-------
{Linear.Methods}
Attributes
----------
{Linear.Attributes}
Examples
--------
Feedforward Neural Networks (FNN)
>>> from neupy.layers import *
>>> network = Input(10) >> Elu(5) >> Elu(1)
References
----------
.. [1] http://arxiv.org/pdf/1511.07289v3.pdf
"""
def activation_function(self, input_value):
return tf.nn.elu(input_value)
class PRelu(Linear):
"""
Layer with the parametrized ReLu used as an activation function.
    The layer learns an additional parameter ``alpha`` during training.
    It applies a linear transformation when the ``n_units`` parameter is
    specified and the parametrized relu function after the transformation.
    When ``n_units`` is not specified, only the parametrized relu function
    will be applied to the input.
Parameters
----------
alpha_axes : int or tuple
        Axes along which separate ``alpha`` parameters will be learned.
        A single integer value is treated the same as a tuple with one value.
Defaults to ``-1``.
    alpha : array-like, Tensorflow variable, scalar or Initializer
Separate alpha parameter per each non-shared axis for the ReLu.
Scalar value means that each element in the tensor will be
equal to the specified value. Default initialization methods you
can find :ref:`here <init-methods>`.
Defaults to ``Constant(value=0.25)``.
{Linear.Parameters}
Methods
-------
{Linear.Methods}
Attributes
----------
{Linear.Attributes}
Examples
--------
Feedforward Neural Networks (FNN)
>>> from neupy.layers import *
>>> network = Input(10) >> PRelu(20) >> PRelu(1)
Convolutional Neural Networks (CNN)
>>> from neupy.layers import *
>>> network = join(
... Input((32, 32, 3)),
... Convolution((3, 3, 16)) >> PRelu(),
... Convolution((3, 3, 32)) >> PRelu(),
... Reshape(),
... Softmax(10),
... )
References
----------
.. [1] Delving Deep into Rectifiers: Surpassing Human-Level
Performance on ImageNet Classification.
https://arxiv.org/pdf/1502.01852v1.pdf
"""
alpha_axes = TypedListProperty()
alpha = ParameterProperty()
def __init__(self, n_units=None, alpha_axes=-1, alpha=0.25,
weight=init.HeNormal(gain=2), bias=0, name=None):
self.alpha = alpha
self.alpha_axes = as_tuple(alpha_axes)
if 0 in self.alpha_axes:
raise ValueError("Cannot specify alpha for 0-axis")
super(PRelu, self).__init__(
n_units=n_units, weight=weight, bias=bias, name=name)
def get_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape)
if input_shape and max(self.alpha_axes) >= input_shape.ndims:
max_axis_index = input_shape.ndims - 1
raise LayerConnectionError(
"Cannot specify alpha for the axis #{}. Maximum "
"available axis is {} (0-based indices)."
"".format(max(self.alpha_axes), max_axis_index))
return super(PRelu, self).get_output_shape(input_shape)
def create_variables(self, input_shape):
super(PRelu, self).create_variables(input_shape)
output_shape = self.get_output_shape(input_shape)
self.alpha = self.variable(
value=self.alpha, name='alpha',
shape=[output_shape[axis] for axis in self.alpha_axes])
def activation_function(self, input):
input = tf.convert_to_tensor(input, dtype=tf.float32)
ndim = input.shape.ndims
dimensions = np.arange(ndim)
alpha_axes = dimensions[list(self.alpha_axes)]
alpha = tf_utils.dimshuffle(self.alpha, ndim, alpha_axes)
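        # PReLU: f(x) = x for x > 0 and alpha * x for x <= 0, with alpha
        # broadcast across the axes that do not carry their own parameter.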
return tf.maximum(0.0, input) + alpha * tf.minimum(0.0, input)
def __repr__(self):
if self.n_units is None:
return self._repr_arguments(
name=self.name,
alpha_axes=self.alpha_axes,
alpha=self.alpha)
return self._repr_arguments(
self.n_units,
name=self.name,
alpha_axes=self.alpha_axes,
alpha=self.alpha,
weight=self.weight,
bias=self.bias)
|
loaner/web_app/backend/lib/sync_users_test.py | gng-demo/travisfix | 175 | 11169893
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for backend.lib.sync_users."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mock
from loaner.web_app import constants
from loaner.web_app.backend.clients import directory
from loaner.web_app.backend.lib import sync_users
from loaner.web_app.backend.models import user_model
from loaner.web_app.backend.testing import loanertest
class SyncUsersTest(loanertest.EndpointsTestCase):
def setUp(self):
super(SyncUsersTest, self).setUp()
user_model.Role.create(
name='technician',
associated_group='technicians@{}'.format(loanertest.USER_DOMAIN))
patcher = mock.patch.object(directory, 'DirectoryApiClient')
mock_directory = patcher.start()
self.addCleanup(patcher.stop)
mock_users_for_group = mock_directory.return_value.get_all_users_in_group
def group_client_side_effect(*args, **kwargs): # pylint: disable=unused-argument
if args[0] == constants.SUPERADMINS_GROUP:
return [
'need-superadmin@{}'.format(loanertest.USER_DOMAIN),
'keep-superadmin@{}'.format(loanertest.USER_DOMAIN),
]
elif args[0] == 'technicians@{}'.format(loanertest.USER_DOMAIN):
return [
'need-technician@{}'.format(loanertest.USER_DOMAIN),
'keep-technician@{}'.format(loanertest.USER_DOMAIN),
]
mock_users_for_group.side_effect = group_client_side_effect
def test_sync_user_roles__standard_user(self):
user_model.User.get_user('standard-user@{}'.format(loanertest.USER_DOMAIN))
sync_users.sync_user_roles()
self.make_assertions(
'standard-user@{}'.format(loanertest.USER_DOMAIN), [], False)
def test_sync_user_roles__superadmin(self):
user_model.User.get_user(
'need-superadmin@{}'.format(loanertest.USER_DOMAIN))
user_model.User.get_user(
'remove-superadmin@{}'.format(loanertest.USER_DOMAIN)
).update(superadmin=True)
user_model.User.get_user(
'keep-superadmin@{}'.format(loanertest.USER_DOMAIN)
).update(superadmin=True)
sync_users.sync_user_roles()
self.make_assertions(
'need-superadmin@{}'.format(loanertest.USER_DOMAIN), [], True)
self.make_assertions(
'remove-superadmin@{}'.format(loanertest.USER_DOMAIN), [], False)
self.make_assertions(
'keep-superadmin@{}'.format(loanertest.USER_DOMAIN), [], True)
  def test_sync_user_roles__role(self):
user_model.User.get_user(
'need-technician@{}'.format(loanertest.USER_DOMAIN))
user_model.User.get_user(
'remove-technician@{}'.format(loanertest.USER_DOMAIN)
).update(roles=['technician'])
user_model.User.get_user(
'keep-technician@{}'.format(loanertest.USER_DOMAIN)
).update(roles=['technician'])
sync_users.sync_user_roles()
self.make_assertions(
'need-technician@{}'.format(loanertest.USER_DOMAIN), ['technician'],
False)
self.make_assertions(
        'remove-technician@{}'.format(loanertest.USER_DOMAIN), [], False)
self.make_assertions(
        'keep-technician@{}'.format(loanertest.USER_DOMAIN), ['technician'],
False)
def make_assertions(self, user_id, roles, superadmin):
"""Asserts that users have correct roles/superadmin.
Args:
user_id: str, user email ID.
roles: list|str|, roles user should have.
superadmin: bool, if the user should be superadmin.
"""
user = user_model.User.get_user(user_id)
self.assertCountEqual(user.role_names, roles)
self.assertEqual(user.superadmin, superadmin)
if __name__ == '__main__':
loanertest.main()
|
AIDog/serving/test_client_v1.py | BaranovArtyom/aiexamples | 119 | 11169926
#!/usr/bin/env python
import argparse
import requests
import json
import tensorflow as tf
import numpy as np
def read_tensor_from_image_file(file_name,
input_height=299,
input_width=299,
input_mean=0,
input_std=255):
input_name = "file_reader"
output_name = "normalized"
file_reader = tf.read_file(file_name, input_name)
if file_name.endswith(".png"):
image_reader = tf.image.decode_png(
file_reader, channels=3, name="png_reader")
elif file_name.endswith(".gif"):
image_reader = tf.squeeze(
tf.image.decode_gif(file_reader, name="gif_reader"))
elif file_name.endswith(".bmp"):
image_reader = tf.image.decode_bmp(file_reader, name="bmp_reader")
else:
image_reader = tf.image.decode_jpeg(
file_reader, channels=3, name="jpeg_reader")
float_caster = tf.cast(image_reader, tf.float32)
dims_expander = tf.expand_dims(float_caster, 0)
resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])
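    # Normalize pixel values: (resized - input_mean) / input_std.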
normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
sess = tf.Session()
result = sess.run(normalized)
return result
def load_labels(label_file):
label = []
proto_as_ascii_lines = tf.gfile.GFile(label_file).readlines()
for l in proto_as_ascii_lines:
label.append(l.rstrip())
return label
if __name__ == "__main__":
file_name = "../TestImages/n02085620-Chihuahua/n02085620_11140.jpg"
label_file = "dog_labels_inception_v3.txt"
input_height = 299
input_width = 299
input_mean = 0
input_std = 255
model_name = "default"
enable_ssl = False
parser = argparse.ArgumentParser()
parser.add_argument("--image", help="image to be processed")
parser.add_argument("--labels", help="name of file containing labels")
parser.add_argument("--input_height", type=int, help="input height")
parser.add_argument("--input_width", type=int, help="input width")
parser.add_argument("--input_mean", type=int, help="input mean")
parser.add_argument("--input_std", type=int, help="input std")
parser.add_argument("--model_name", help="name of predict model")
parser.add_argument("--enable_ssl", type=bool, help="if use https")
args = parser.parse_args()
if args.image:
file_name = args.image
if args.labels:
label_file = args.labels
if args.input_height:
input_height = args.input_height
if args.input_width:
input_width = args.input_width
if args.input_mean:
input_mean = args.input_mean
if args.input_std:
input_std = args.input_std
if args.model_name:
model_name = args.model_name
if args.enable_ssl:
enable_ssl = args.enable_ssl
t = read_tensor_from_image_file(
file_name,
input_height=input_height,
input_width=input_width,
input_mean=input_mean,
input_std=input_std)
if enable_ssl :
endpoint = "https://ilego.club:8500"
else:
endpoint = "http://ilego.club:8500"
print(t.shape)
json_data = {"model_name": model_name, "data": {"image": t.tolist()}}
result = requests.post(endpoint, json=json_data)
res = np.array(json.loads(result.text)["prediction"][0])
print(res)
indexes = np.argsort(-res)
labels = load_labels(label_file)
top_k = 3
for i in range(top_k):
idx = indexes[i]
print(labels[idx], res[idx])
|
tests/utils/test_tree.py | graingert/py-backwards | 338 | 11169927
from typed_ast import ast3 as ast
from astunparse import unparse
from py_backwards.utils.snippet import snippet
from py_backwards.utils.tree import (get_parent, get_node_position,
find, insert_at, replace_at)
def test_get_parent(as_ast):
@as_ast
def tree():
x = 1
assignment = tree.body[0].body[0]
assert get_parent(tree, assignment) == tree.body[0]
class TestGetNodePosition:
def test_from_body(self, as_ast):
@as_ast
def tree():
x = 1
print(10)
call = tree.body[0].body[1].value
position = get_node_position(tree, call)
assert position.index == 1
assert position.parent == tree.body[0]
assert position.attribute == 'body'
def test_from_orelse(self, as_ast):
@as_ast
def tree():
if True:
print(0)
else:
print(1)
call = tree.body[0].body[0].orelse[0].value
position = get_node_position(tree, call)
assert position.index == 0
assert position.parent == tree.body[0].body[0]
assert position.attribute == 'orelse'
def test_find(as_ast):
@as_ast
def tree():
print('hi there')
print(10)
calls = list(find(tree, ast.Call))
assert len(calls) == 2
@snippet
def to_insert():
print(10)
def test_insert_at(as_ast, as_str):
def fn():
print('hi there')
tree = as_ast(fn)
insert_at(0, tree.body[0], to_insert.get_body())
def fn():
print(10)
print('hi there')
expected_code = as_str(fn)
assert unparse(tree).strip() == expected_code
def test_replace_at(as_ast, as_str):
def fn():
print('hi there')
tree = as_ast(fn)
replace_at(0, tree.body[0], to_insert.get_body())
def fn():
print(10)
expected_code = as_str(fn)
assert unparse(tree).strip() == expected_code
|
python/phonenumbers/data/region_FM.py | rodgar-nvkz/python-phonenumbers | 2,424 | 11169933
"""Auto-generated file, do not edit by hand. FM metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_FM = PhoneMetadata(id='FM', country_code=691, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='(?:[39]\\d\\d|820)\\d{4}', possible_length=(7,)),
fixed_line=PhoneNumberDesc(national_number_pattern='31(?:00[67]|208|309)\\d\\d|(?:3(?:[2357]0[1-9]|602|804|905)|(?:820|9[2-6]\\d)\\d)\\d{3}', example_number='3201234', possible_length=(7,)),
mobile=PhoneNumberDesc(national_number_pattern='31(?:00[67]|208|309)\\d\\d|(?:3(?:[2357]0[1-9]|602|804|905)|(?:820|9[2-7]\\d)\\d)\\d{3}', example_number='3501234', possible_length=(7,)),
number_format=[NumberFormat(pattern='(\\d{3})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['[389]'])])
|
2020/06/19/Intro to Flask Blueprints/flask_blueprint_example/myapp/site/routes.py | kenjitagawa/youtube_video_code | 492 | 11169962
from flask import Blueprint
site = Blueprint('site', __name__)
@site.route('/')
def index():
return '<h1>Welcome to the home page!</h1>'
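# Note (not part of the original file): a blueprint only takes effect once it is
# registered on the Flask application, e.g. in an app factory. The import path below
# is assumed from this example project's layout:
#
# from flask import Flask
# from myapp.site.routes import site
#
# app = Flask(__name__)
# app.register_blueprint(site)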
|
tests/test_audio_tag.py | TUT-ARG/sed_eval | 113 | 11169990
"""
Unit tests for audio tag metrics
"""
import nose.tools
import sed_eval
import os
import numpy
import dcase_util
def test_direct_use():
reference_tag_list = dcase_util.containers.MetaDataContainer([
{
'filename': 'test1.wav',
'tags': 'cat,dog'
},
{
'filename': 'test2.wav',
'tags': 'dog'
},
{
'filename': 'test3.wav',
'tags': 'bird,cat'
},
{
'filename': 'test4.wav',
'tags': 'cat'
},
{
'filename': 'test5.wav',
'tags': 'bird,speech'
},
{
'filename': 'test6.wav',
'tags': 'dog,speech'
},
{
'filename': 'test7.wav',
'tags': 'speech'
},
])
estimated_tag_probabilities = dcase_util.containers.ProbabilityContainer([
{
'filename': 'test1.wav',
'label': 'bird',
'probability': 0.2
},
{
'filename': 'test1.wav',
'label': 'cat',
'probability': 0.99
},
{
'filename': 'test1.wav',
'label': 'dog',
'probability': 0.88
},
{
'filename': 'test1.wav',
'label': 'speech',
'probability': 0.01
},
{
'filename': 'test2.wav',
'label': 'bird',
'probability': 0.1
},
{
'filename': 'test2.wav',
'label': 'cat',
'probability': 0.3
},
{
'filename': 'test2.wav',
'label': 'dog',
'probability': 0.8
},
{
'filename': 'test2.wav',
'label': 'speech',
'probability': 0.1
},
{
'filename': 'test3.wav',
'label': 'bird',
'probability': 0.7
},
{
'filename': 'test3.wav',
'label': 'cat',
'probability': 0.6
},
{
'filename': 'test3.wav',
'label': 'dog',
'probability': 0.4
},
{
'filename': 'test3.wav',
'label': 'speech',
'probability': 0.3
},
{
'filename': 'test4.wav',
'label': 'bird',
'probability': 0.323
},
{
'filename': 'test4.wav',
'label': 'cat',
'probability': 0.6
},
{
'filename': 'test4.wav',
'label': 'dog',
'probability': 0.56
},
{
'filename': 'test4.wav',
'label': 'speech',
'probability': 0.4
},
{
'filename': 'test5.wav',
'label': 'bird',
'probability': 0.8
},
{
'filename': 'test5.wav',
'label': 'cat',
'probability': 0.7
},
{
'filename': 'test5.wav',
'label': 'dog',
'probability': 0.45
},
{
'filename': 'test5.wav',
'label': 'speech',
'probability': 0.43
},
{
'filename': 'test6.wav',
'label': 'bird',
'probability': 0.9
},
{
'filename': 'test6.wav',
'label': 'cat',
'probability': 0.53
},
{
'filename': 'test6.wav',
'label': 'dog',
'probability': 0.83
},
{
'filename': 'test6.wav',
'label': 'speech',
'probability': 0.95
},
{
'filename': 'test7.wav',
'label': 'bird',
'probability': 0.2
},
{
'filename': 'test7.wav',
'label': 'cat',
'probability': 0.2
},
{
'filename': 'test7.wav',
'label': 'dog',
'probability': 0.89
},
{
'filename': 'test7.wav',
'label': 'speech',
'probability': 0.45
},
])
estimated_tag_list = dcase_util.containers.MetaDataContainer()
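    # Binarize the per-file tag probabilities with a 0.5 threshold to build
    # the estimated tag list.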
for file in estimated_tag_probabilities.unique_files:
k = estimated_tag_probabilities.filter(filename=file)
tags = []
for item in k:
if item.probability > 0.5:
tags.append(item.label)
estimated_tag_list.append(
{
'filename': file,
'tags': tags
}
)
tag_evaluator = sed_eval.audio_tag.AudioTaggingMetrics(
tags=reference_tag_list.unique_tags
)
tag_evaluator.evaluate(
reference_tag_list=reference_tag_list,
estimated_tag_list=estimated_tag_list,
estimated_tag_probabilities=estimated_tag_probabilities
)
results = tag_evaluator.results()
nose.tools.eq_(results['overall']['count']['Nref'], 11)
nose.tools.eq_(results['overall']['count']['Nsys'], 14)
nose.tools.assert_almost_equals(results['overall']['eer']['eer'], 0.181818181818181)
nose.tools.assert_almost_equals(results['overall']['f_measure']['f_measure'], 0.72)
nose.tools.assert_almost_equals(results['overall']['f_measure']['precision'], 0.6428571428571429)
nose.tools.assert_almost_equals(results['overall']['f_measure']['recall'], 0.8181818181818182)
nose.tools.eq_(results['class_wise_average']['eer']['eer'], 0.175)
nose.tools.assert_almost_equals(results['class_wise_average']['f_measure']['f_measure'], 0.7)
nose.tools.assert_almost_equals(results['class_wise_average']['f_measure']['precision'], 0.7166666666666667)
nose.tools.assert_almost_equals(results['class_wise_average']['f_measure']['recall'], 0.8333333333333333)
print(tag_evaluator)
def test_direct_use2():
reference_tag_list = [
{
'filename': 'test1.wav',
'tags': 'cat,dog'
},
{
'filename': 'test2.wav',
'tags': 'dog'
},
{
'filename': 'test3.wav',
'tags': 'bird,cat'
},
{
'filename': 'test4.wav',
'tags': 'cat'
},
{
'filename': 'test5.wav',
'tags': 'bird,speech'
},
{
'filename': 'test6.wav',
'tags': 'dog,speech'
},
{
'filename': 'test7.wav',
'tags': 'speech'
},
]
estimated_tag_probabilities = [
{
'filename': 'test1.wav',
'label': 'bird',
'probability': 0.2
},
{
'filename': 'test1.wav',
'label': 'cat',
'probability': 0.99
},
{
'filename': 'test1.wav',
'label': 'dog',
'probability': 0.88
},
{
'filename': 'test1.wav',
'label': 'speech',
'probability': 0.01
},
{
'filename': 'test2.wav',
'label': 'bird',
'probability': 0.1
},
{
'filename': 'test2.wav',
'label': 'cat',
'probability': 0.3
},
{
'filename': 'test2.wav',
'label': 'dog',
'probability': 0.8
},
{
'filename': 'test2.wav',
'label': 'speech',
'probability': 0.1
},
{
'filename': 'test3.wav',
'label': 'bird',
'probability': 0.7
},
{
'filename': 'test3.wav',
'label': 'cat',
'probability': 0.6
},
{
'filename': 'test3.wav',
'label': 'dog',
'probability': 0.4
},
{
'filename': 'test3.wav',
'label': 'speech',
'probability': 0.3
},
{
'filename': 'test4.wav',
'label': 'bird',
'probability': 0.323
},
{
'filename': 'test4.wav',
'label': 'cat',
'probability': 0.6
},
{
'filename': 'test4.wav',
'label': 'dog',
'probability': 0.56
},
{
'filename': 'test4.wav',
'label': 'speech',
'probability': 0.4
},
{
'filename': 'test5.wav',
'label': 'bird',
'probability': 0.8
},
{
'filename': 'test5.wav',
'label': 'cat',
'probability': 0.7
},
{
'filename': 'test5.wav',
'label': 'dog',
'probability': 0.45
},
{
'filename': 'test5.wav',
'label': 'speech',
'probability': 0.43
},
{
'filename': 'test6.wav',
'label': 'bird',
'probability': 0.9
},
{
'filename': 'test6.wav',
'label': 'cat',
'probability': 0.53
},
{
'filename': 'test6.wav',
'label': 'dog',
'probability': 0.83
},
{
'filename': 'test6.wav',
'label': 'speech',
'probability': 0.95
},
{
'filename': 'test7.wav',
'label': 'bird',
'probability': 0.2
},
{
'filename': 'test7.wav',
'label': 'cat',
'probability': 0.2
},
{
'filename': 'test7.wav',
'label': 'dog',
'probability': 0.89
},
{
'filename': 'test7.wav',
'label': 'speech',
'probability': 0.45
},
]
tag_evaluator = sed_eval.audio_tag.AudioTaggingMetrics(
tags=['cat', 'dog', 'bird', 'speech']
)
tag_evaluator.evaluate(
reference_tag_list=reference_tag_list,
estimated_tag_probabilities=estimated_tag_probabilities
)
results = tag_evaluator.results()
nose.tools.assert_almost_equals(results['overall']['eer']['eer'], 0.181818181818181)
nose.tools.eq_(results['class_wise_average']['eer']['eer'], 0.175)
tag_evaluator.reset()
results = tag_evaluator.results()
nose.tools.eq_(results['overall']['count']['Nsys'], 0)
nose.tools.eq_(results['overall']['count']['Nref'], 0)
@nose.tools.raises(ValueError)
def test_parameters_1():
reference_tag_list = dcase_util.containers.MetaDataContainer([
{
'filename': 'test1.wav',
'tags': 'cat,dog'
},
{
'filename': 'test2.wav',
'tags': 'dog'
},
{
'filename': 'test3.wav',
'tags': 'bird,cat'
},
{
'filename': 'test4.wav',
'tags': 'cat'
},
{
'filename': 'test5.wav',
'tags': 'bird,speech'
},
{
'filename': 'test6.wav',
'tags': 'dog,speech'
},
{
'filename': 'test7.wav',
'tags': 'speech'
},
])
tag_evaluator = sed_eval.audio_tag.AudioTaggingMetrics(
tags=reference_tag_list.unique_tags
)
tag_evaluator.evaluate(
reference_tag_list=reference_tag_list
)
|
util/clustergen/cluster.py | mfkiwl/snitch | 105 | 11169996
#!/usr/bin/env python3
# Copyright 2020 ETH Zurich and University of Bologna.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
from dataclasses import dataclass
from enum import Enum
from jsonschema import ValidationError, RefResolver, Draft7Validator, validators
from mako.lookup import TemplateLookup
from math import ceil, log2
import json
import re
import logging as log
import pathlib
# Fill in default values for config values which do not have a user-defined value.
def extend_with_default(validator_class):
validate_properties = validator_class.VALIDATORS["properties"]
def set_defaults(validator, properties, instance, schema):
for property, subschema in properties.items():
if "default" in subschema:
instance.setdefault(property, subschema["default"])
for error in validate_properties(
validator,
properties,
instance,
schema,
):
yield error
return validators.extend(
validator_class,
{"properties": set_defaults},
)
DefaultValidatingDraft7Validator = extend_with_default(Draft7Validator)
class Generator(object):
DISCLAIMER = """// AUTOMATICALLY GENERATED by clustergen.py; edit the script or configuration
// instead."""
file_path = pathlib.Path(__file__).parent
snitch_cluster_folder = file_path / "../../hw/ip/snitch_cluster"
templates = TemplateLookup(directories=[snitch_cluster_folder],
output_encoding="utf-8")
"""
    Generator class which contains common components used to generate different systems.
@root_schema: Schema object to which the generator corresponds.
"""
def __init__(self, root_schema):
# Load the cluster schema.
absolute_path_to_schema_dir = self.file_path / "../../docs/schema"
root_schema_filename = root_schema
self.root_schema = read_schema(absolute_path_to_schema_dir /
root_schema_filename)
store_set = dict()
# iterate over schema directory and generate a mapping from remote URLs
# to local URIs.
for path in absolute_path_to_schema_dir.iterdir():
schema = read_schema(path)
store_set[schema["$id"]] = schema
# Instantiate a custom resolver with the update store set.
self.resolver = RefResolver.from_schema(self.root_schema,
store=store_set)
def validate(self, cfg):
# Validate the schema. This can fail.
try:
DefaultValidatingDraft7Validator(
self.root_schema, resolver=self.resolver).validate(cfg)
except ValidationError as e:
print(e)
exit(e)
@dataclass
class RiscvISA:
"""Contain a valid base ISA string"""
i: bool = False
e: bool = False
m: bool = False
a: bool = False
f: bool = False
d: bool = False
isa_string = re.compile(r"^rv32(i|e)([m|a|f|d]*)$")
def parse_isa_string(s):
"""Construct an `RiscvISA` object from a string"""
s.lower()
isa = RiscvISA()
m = RiscvISA.isa_string.match(s)
if m:
setattr(isa, m.group(1), True)
if m.group(2):
[setattr(isa, t, True) for t in m.group(2)]
else:
raise ValueError("Illegal ISA string.")
return isa
class PMA(Enum):
# Region supports atomics
ATOMIC = 1
# Region is cached
CACHED = 2
# Region is execute
EXECUTE = 3
# Region is non-idempotent
NON_IDEMPOTENT = 4
class PMACfg(object):
def __init__(self):
self.regions = list()
def add_region(self, pma, base, mask):
self.regions.append((pma, base, mask))
def add_region_length(self, pma, base, length, addr_width):
# The base *must* be aligned to the length, i.e. only one region is added.
is_length_power_of_two = (length != 0) and (length & (length-1) == 0)
# Python uses arbitrary-precision integers --> we can support any address width
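        # Clearing the low log2(length) address bits yields a mask that matches
        # exactly the aligned region [base, base + length).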
mask_addr_space = ((1 << addr_width) - 1)
mask = mask_addr_space & ~(length - 1)
is_mask_aligned = ((~mask & base) == 0)
if (not is_length_power_of_two) or (mask == 0) or (not is_mask_aligned):
exit("Cacheable regions must have a power-of-two length aligned to their base.")
else:
self.add_region(pma, base, mask)
class SnitchCluster(Generator):
"""
Instance of a Snitch cluster.
"""
files = {
'cfg': "src/snitch_cfg.sv.tpl",
'wrapper': "src/snitch_cluster_wrapper.sv.tpl"
}
def __init__(self, cfg, pma_cfg):
"""
        Initialize with a given configuration. The constructor checks conformance
to the cluster schema and constructs a `cfg` object.
"""
super().__init__("snitch_cluster.schema.json")
self.mems = set()
self.mems_desc = dict()
self.validate(cfg)
self.cfg = cfg
# Perform configuration validation.
if self.cfg_validate():
exit("Failed parameter validation.")
self.cfg['pkg_name'] = "{}_pkg".format(self.cfg['name'])
self.calc_cache_sizes()
self.parse_pma_cfg(pma_cfg)
self.parse_cores()
def l1_region(self):
"""Return L1 Region as tuple. Base and length."""
return (self.cfg['cluster_base_addr'], self.cfg['tcdm']['size'])
def render_wrapper(self):
"""Render the cluster wrapper"""
cfg_template = self.templates.get_template(self.files['wrapper'])
return cfg_template.render_unicode(cfg=self.cfg,
to_sv_hex=to_sv_hex,
disclaimer=self.DISCLAIMER)
def add_mem(self,
words,
width,
byte_enable=True,
desc=None,
speed_optimized=True,
density_optimized=False,
dual_port=False):
mem = (
width, # width
words, # words
8, # byte_width
2 if dual_port else 1, # ports
1, # latency
byte_enable, # byte_enable
speed_optimized, # speed optimized
density_optimized, # density optimized
dual_port
)
self.mems.add(mem)
if mem in self.mems_desc:
self.mems_desc[mem] += [desc]
else:
self.mems_desc[mem] = [desc or ""]
def memory_cfg(self):
# Add TCDMs
self.add_mem(self.cfg['tcdm']['depth'],
self.cfg['data_width'],
desc='tcdm')
# Add instruction caches
for i, h in enumerate(self.cfg['hives']):
self.add_mem(h['icache']['depth'],
h['icache']['cacheline'],
desc='icache data (hive {})'.format(i),
byte_enable=True)
self.add_mem(h['icache']['depth'],
self.tag_width,
desc='icache tag (hive {})'.format(i),
byte_enable=False)
cfg = list()
for mem in self.mems:
cfg.append({
'description': self.mems_desc[mem],
'width': mem[0],
'words': mem[1],
'byte_width': mem[2],
'ports': mem[3],
'latency': mem[4],
'byte_enable': mem[5],
'speed_optimized': mem[6],
'density_optimized': mem[7],
'dual_port': mem[8],
})
return json.dumps(cfg, sort_keys=True, indent=4)
def calc_cache_sizes(self):
# Calculate TCDM parameters
tcdm_bytes = self.cfg['data_width'] // 8
self.cfg['tcdm']['depth'] = self.cfg['tcdm']['size'] * 1024 // (
self.cfg['tcdm']['banks'] * tcdm_bytes)
# Calc icache parameters
for i, hive in enumerate(self.cfg['hives']):
cl_bytes = self.cfg['hives'][i]['icache']['cacheline'] // 8
self.cfg['hives'][i]['icache']['depth'] = self.cfg['hives'][i][
'icache']['size'] * 1024 // self.cfg['hives'][i]['icache'][
'sets'] // cl_bytes
# tag width
self.tag_width = self.cfg['addr_width'] - clog2(
hive['icache']['cacheline'] // 8) - clog2(hive['icache']['depth']) + 3
def parse_pma_cfg(self, pma_cfg):
self.cfg['pmas'] = dict()
# print(pma_cfg.regions)
self.cfg['pmas']['cached'] = list()
for pma in pma_cfg.regions:
if pma[0] == PMA.CACHED:
self.cfg['pmas']['cached'].append((pma[1], pma[2]))
def parse_isect_ssr(self, ssr, core):
ssr.update({'isect_master': False, 'isect_master_idx': False, 'isect_slave': False})
if core['ssr_intersection']:
ssr_isect_triple = core['ssr_intersection_triple']
if ssr['reg_idx'] in ssr_isect_triple[0:2]:
if not ssr['indirection']:
raise ValueError('An intersection master SSR must be indirection-capable')
ssr['isect_master'] = True
ssr['isect_master_idx'] = (ssr['reg_idx'] == ssr_isect_triple[1])
if ssr['reg_idx'] == ssr_isect_triple[2]:
ssr['indirection'] = True # Required for indirector generation, but not functional
ssr['isect_slave'] = True
def parse_cores(self):
"""Parse cores struct"""
def gen_mask(c, s):
return "{}'b{}".format(c, ''.join(reversed(s)))
cores = list()
for i, core_list in enumerate(self.cfg['hives']):
for core in core_list['cores']:
core['hive'] = i
core['isa_parsed'] = parse_isa_string(
core['isa'])
# Enforce consistent config if no SSRs
if not core['xssr'] or 'ssrs' not in core or not len(core['ssrs']):
core['xssr'] = False
core['ssrs'] = []
# Assign SSR register indices and intersection roles
next_free_reg = 0
for ssr in core['ssrs']:
if ssr['reg_idx'] in (None, next_free_reg):
ssr['reg_idx'] = next_free_reg
next_free_reg += 1
self.parse_isect_ssr(ssr, core)
# Sort SSRs by register indices (required by decoding logic)
core['ssrs'].sort(key=lambda x: x['reg_idx'])
# Minimum 1 element to avoid illegal ranges (Xssr prevents generation)
core['num_ssrs'] = max(len(core['ssrs']), 1)
cores.append(dict(core))
self.cfg['nr_hives'] = len(self.cfg['hives'])
self.cfg['nr_cores'] = len(cores)
self.cfg['num_ssrs_max'] = max(len(core['ssrs']) for core in cores)
self.cfg['cores'] = cores
def cfg_validate(self):
        """Perform more advanced validation, i.e., sanity check parameters."""
        failed = True
if int(self.cfg['addr_width']) < 30:
log.error("`addr_width` must be greater or equal to 30.")
elif not ((int(self.cfg['data_width']) == 32) or
(int(self.cfg['data_width']) == 64)):
log.error("`data_width` must be 32 or 64 bit")
elif int(self.cfg['dma_data_width']) <= 0:
log.error("`dma_data_width` must be set")
elif int(self.cfg['dma_data_width']) % int(
self.cfg['data_width']) != 0:
log.error(
"DMA port {} has to be multiple of {} (bank width)".format(
self.cfg['dma_data_width'], self.cfg['data_width']))
elif is_pow2(self.cfg['dma_data_width']):
log.error("`dma_data_width` must be a power of two")
# elif cfg.en_rvd and not cfg.en_rvf:
# log.error("RVD needs RVF")
# elif cfg.en_rvd and not cfg.data_width == 64:
# log.error("RVD needs 64 bit data buses")
elif (self.cfg['tcdm']['size'] % self.cfg['tcdm']['banks']) != 0:
log.error(
"The total size of the TCDM must be divisible by the requested amount of banks."
)
elif is_pow2(self.cfg['tcdm']['size']):
log.error("The TCDM size must be a power of two.")
elif is_pow2(self.cfg['tcdm']['banks']):
log.error("The amount of banks must be a power of two.")
else:
failed = False
# Warnings
if (int(self.cfg['dma_data_width']) != 512):
log.warn("Design was never tested with this configuration")
return failed
class SnitchClusterTB(Generator):
"""
A very simplistic system, which instantiates a single cluster and
surrounding DRAM to test and simulate this system. This can also serve as a
starting point on how to use the `snitchgen` library to generate more
complex systems.
"""
def __init__(self, cfg):
super().__init__("snitch_cluster_tb.schema.json")
# Validate the schema.
self.validate(cfg)
# from here we know that we have a valid object.
# and construct a new SnitchClusterTB object.
self.cfg = cfg
pma_cfg = PMACfg()
# For this example system make the entire dram cacheable.
pma_cfg.add_region_length(PMA.CACHED, self.cfg['dram']['address'],
self.cfg['dram']['length'],
self.cfg['cluster']['addr_width'])
self.cfg['cluster']['tie_ports'] = True
# Store Snitch cluster config in separate variable
self.cluster = SnitchCluster(cfg["cluster"], pma_cfg)
def render_wrapper(self):
return self.cluster.render_wrapper()
def render_linker_script(self):
"""Generate a linker script for the cluster testbench"""
cfg_template = self.templates.get_template("test/link.ld.tpl")
return cfg_template.render_unicode(cfg=self.cfg,
l1_region=self.cluster.l1_region())
def render_bootdata(self):
"""Generate a C file with boot information for the cluster testbench"""
cfg_template = self.templates.get_template("test/bootdata.cc.tpl")
return cfg_template.render_unicode(cfg=self.cfg)
def render_deps(self, dep_name):
return self.cluster.render_deps(dep_name)
def read_schema(path):
"""Read a single schema file and return the parsed JSON content.
    Aborts if the JSON file could not be decoded."""
with open(path, "r") as f:
try:
schema = json.load(f)
except json.decoder.JSONDecodeError as e:
exit("Invalid schema file: {}".format(e))
return schema
def clog2(x):
"""Returns the ceiled integer logarithm dualis."""
return int(ceil(log2(x)))
def is_pow2(x):
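    # Note: despite its name, this returns True when x is *not* a power of two;
    # callers use it to flag invalid configuration values.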
return 2**clog2(x) != x
def to_sv_hex(x, length=None):
return "{}'h{}".format(length or "", hex(x)[2:])
|
pypower/loadcase.py | Bengt/PYPOWER | 221 | 11170021
# Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Loads a PYPOWER case dictionary.
"""
import sys
from os.path import basename, splitext, exists
from copy import deepcopy
from numpy import array, zeros, ones, c_
from scipy.io import loadmat
from pypower._compat import PY2
from pypower.idx_gen import PMIN, MU_PMAX, MU_PMIN, MU_QMAX, MU_QMIN, APF
from pypower.idx_brch import PF, QF, PT, QT, MU_SF, MU_ST, BR_STATUS
if not PY2:
basestring = str
def loadcase(casefile,
return_as_obj=True, expect_gencost=True, expect_areas=True):
"""Returns the individual data matrices or an dict containing them
as values.
Here C{casefile} is either a dict containing the keys C{baseMVA}, C{bus},
C{gen}, C{branch}, C{areas}, C{gencost}, or a string containing the name
of the file. If C{casefile} contains the extension '.mat' or '.py', then
the explicit file is searched. If C{casefile} containts no extension, then
L{loadcase} looks for a '.mat' file first, then for a '.py' file. If the
file does not exist or doesn't define all matrices, the function returns
an exit code as follows:
0. all variables successfully defined
1. input argument is not a string or dict
2. specified extension-less file name does not exist
3. specified .mat file does not exist
4. specified .py file does not exist
5. specified file fails to define all matrices or contains syntax
error
If the input data is not a dict containing a 'version' key, it is
assumed to be a PYPOWER case file in version 1 format, and will be
converted to version 2 format.
@author: <NAME> (PSERC Cornell & Universidad
Autonoma de Manizales)
@author: <NAME> (PSERC Cornell)
"""
if return_as_obj == True:
expect_gencost = False
expect_areas = False
info = 0
# read data into case object
if isinstance(casefile, basestring):
# check for explicit extension
if casefile.endswith(('.py', '.mat')):
rootname, extension = splitext(casefile)
fname = basename(rootname)
else:
# set extension if not specified explicitly
rootname = casefile
if exists(casefile + '.mat'):
extension = '.mat'
elif exists(casefile + '.py'):
extension = '.py'
else:
info = 2
fname = basename(rootname)
lasterr = ''
## attempt to read file
if info == 0:
if extension == '.mat': ## from MAT file
try:
d = loadmat(rootname + extension, struct_as_record=True)
if 'ppc' in d or 'mpc' in d: ## it's a MAT/PYPOWER dict
if 'ppc' in d:
struct = d['ppc']
else:
struct = d['mpc']
val = struct[0, 0]
s = {}
for a in val.dtype.names:
s[a] = val[a]
else: ## individual data matrices
d['version'] = '1'
s = {}
for k, v in d.items():
s[k] = v
s['baseMVA'] = s['baseMVA'][0] # convert array to float
except IOError as e:
info = 3
lasterr = str(e)
elif extension == '.py': ## from Python file
try:
if PY2:
execfile(rootname + extension)
else:
exec(compile(open(rootname + extension).read(),
rootname + extension, 'exec'))
try: ## assume it returns an object
s = eval(fname)()
except ValueError as e:
info = 4
lasterr = str(e)
## if not try individual data matrices
if info == 0 and not isinstance(s, dict):
s = {}
s['version'] = '1'
if expect_gencost:
try:
s['baseMVA'], s['bus'], s['gen'], s['branch'], \
s['areas'], s['gencost'] = eval(fname)()
except IOError as e:
info = 4
lasterr = str(e)
else:
if return_as_obj:
try:
s['baseMVA'], s['bus'], s['gen'], \
s['branch'], s['areas'], \
s['gencost'] = eval(fname)()
except ValueError as e:
try:
s['baseMVA'], s['bus'], s['gen'], \
s['branch'] = eval(fname)()
except ValueError as e:
info = 4
lasterr = str(e)
else:
try:
s['baseMVA'], s['bus'], s['gen'], \
s['branch'] = eval(fname)()
except ValueError as e:
info = 4
lasterr = str(e)
except IOError as e:
info = 4
lasterr = str(e)
if info == 4 and exists(rootname + '.py'):
info = 5
err5 = lasterr
elif isinstance(casefile, dict):
s = deepcopy(casefile)
else:
info = 1
# check contents of dict
if info == 0:
# check for required keys
if (s['baseMVA'] is None or s['bus'] is None \
or s['gen'] is None or s['branch'] is None) or \
(expect_gencost and s['gencost'] is None) or \
(expect_areas and s['areas'] is None):
info = 5 ## missing some expected fields
err5 = 'missing data'
else:
## remove empty areas if not needed
if hasattr(s, 'areas') and (len(s['areas']) == 0) and (not expect_areas):
del s['areas']
## all fields present, copy to ppc
ppc = deepcopy(s)
if not hasattr(ppc, 'version'): ## hmm, struct with no 'version' field
if ppc['gen'].shape[1] < 21: ## version 2 has 21 or 25 cols
ppc['version'] = '1'
else:
ppc['version'] = '2'
if (ppc['version'] == '1'):
# convert from version 1 to version 2
ppc['gen'], ppc['branch'] = ppc_1to2(ppc['gen'], ppc['branch']);
ppc['version'] = '2'
if info == 0: # no errors
if return_as_obj:
return ppc
else:
result = [ppc['baseMVA'], ppc['bus'], ppc['gen'], ppc['branch']]
if expect_gencost:
if expect_areas:
result.extend([ppc['areas'], ppc['gencost']])
else:
result.extend([ppc['gencost']])
return result
else: # error encountered
if info == 1:
sys.stderr.write('Input arg should be a case or a string '
'containing a filename\n')
elif info == 2:
sys.stderr.write('Specified case not a valid file\n')
elif info == 3:
sys.stderr.write('Specified MAT file does not exist\n')
elif info == 4:
sys.stderr.write('Specified Python file does not exist\n')
elif info == 5:
sys.stderr.write('Syntax error or undefined data '
'matrix(ices) in the file\n')
else:
sys.stderr.write('Unknown error encountered loading case.\n')
sys.stderr.write(lasterr + '\n')
return info
def ppc_1to2(gen, branch):
##----- gen -----
## use the version 1 values for column names
if gen.shape[1] >= APF:
sys.stderr.write('ppc_1to2: gen matrix appears to already be in '
'version 2 format\n')
return gen, branch
shift = MU_PMAX - PMIN - 1
tmp = array([MU_PMAX, MU_PMIN, MU_QMAX, MU_QMIN]) - shift
mu_Pmax, mu_Pmin, mu_Qmax, mu_Qmin = tmp
## add extra columns to gen
tmp = zeros((gen.shape[0], shift))
if gen.shape[1] >= mu_Qmin:
gen = c_[ gen[:, 0:PMIN + 1], tmp, gen[:, mu_Pmax:mu_Qmin] ]
else:
gen = c_[ gen[:, 0:PMIN + 1], tmp ]
##----- branch -----
## use the version 1 values for column names
shift = PF - BR_STATUS - 1
tmp = array([PF, QF, PT, QT, MU_SF, MU_ST]) - shift
Pf, Qf, Pt, Qt, mu_Sf, mu_St = tmp
## add extra columns to branch
tmp = ones((branch.shape[0], 1)) * array([-360, 360])
tmp2 = zeros((branch.shape[0], 2))
if branch.shape[1] >= mu_St - 1:
branch = c_[ branch[:, 0:BR_STATUS + 1], tmp, branch[:, PF - 1:MU_ST + 1], tmp2 ]
elif branch.shape[1] >= QT - 1:
branch = c_[ branch[:, 0:BR_STATUS + 1], tmp, branch[:, PF - 1:QT + 1] ]
else:
branch = c_[ branch[:, 0:BR_STATUS + 1], tmp ]
return gen, branch
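# Minimal usage sketch (illustrative only; assumes a PYPOWER case module such as
# ``pypower.case9`` is available):
#
# from pypower.case9 import case9
# ppc = loadcase(case9())
# baseMVA, bus, gen, branch = loadcase(case9(), return_as_obj=False,
#                                      expect_gencost=False, expect_areas=False)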
|
cfgov/mega_menu/tests/test_models.py | Colin-Seifer/consumerfinance.gov | 156 | 11170082
from django.db import IntegrityError
from django.test import TestCase
from mega_menu.models import Menu
class MenuTests(TestCase):
def test_str(self):
self.assertEqual(str(Menu("en")), "English")
self.assertEqual(str(Menu("es")), "Spanish")
def test_unique_for_language(self):
Menu.objects.create(language="en")
with self.assertRaises(IntegrityError):
Menu.objects.create(language="en")
|
fletcher/string_mixin.py | sthagen/fletcher | 225 | 11170097
import numpy as np
import pyarrow as pa
import pyarrow.compute as pc
from pandas.core.arrays import ExtensionArray
try:
# Only available in pandas 1.2+
from pandas.core.strings.object_array import ObjectStringArrayMixin
class _IntermediateExtensionArray(ExtensionArray, ObjectStringArrayMixin):
pass
except ImportError:
class _IntermediateExtensionArray(ExtensionArray): # type: ignore
pass
class StringSupportingExtensionArray(_IntermediateExtensionArray):
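    # Each _str_* method below dispatches to a pyarrow.compute kernel when the
    # installed pyarrow version provides it and falls back to the object-path
    # implementation inherited from pandas otherwise.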
def _str_contains(self, pat, case=True, flags=0, na=np.nan, regex=True):
if not regex and case and hasattr(pc, "match_substring"):
return type(self)(pc.match_substring(self.data, pat), dtype=pa.bool_())
else:
return super()._str_contains(pat, case, flags, na, regex)
def _str_map(self, *args, **kwargs):
return type(self)(super()._str_map(*args, **kwargs))
def _str_startswith(self, pat, na=None):
# TODO: This is currently not implemented in Arrow but only directly in the fr_strx accessor.
return super()._str_startswith(pat, na)
def _str_endswith(self, pat, na=None):
# TODO: This is currently not implemented in Arrow but only directly in the fr_strx accessor.
return super()._str_endswith(pat, na)
def _str_isalnum(self):
if hasattr(pc, "utf8_is_alnum"):
return type(self)(pc.utf8_is_alnum(self.data))
else:
return super()._str_isalnum()
def _str_isalpha(self):
if hasattr(pc, "utf8_is_alpha"):
return type(self)(pc.utf8_is_alpha(self.data))
else:
return super()._str_isalpha()
def _str_isdecimal(self):
if hasattr(pc, "utf8_is_decimal"):
return type(self)(pc.utf8_is_decimal(self.data))
else:
return super()._str_isdecimal()
def _str_isdigit(self):
if hasattr(pc, "utf8_is_digit"):
return type(self)(pc.utf8_is_digit(self.data))
else:
return super()._str_isdigit()
def _str_islower(self):
if hasattr(pc, "utf8_is_lower"):
return type(self)(pc.utf8_is_lower(self.data))
else:
return super()._str_islower()
def _str_isnumeric(self):
if hasattr(pc, "utf8_is_numeric"):
return type(self)(pc.utf8_is_numeric(self.data))
else:
return super()._str_isnumeric()
def _str_isspace(self):
if hasattr(pc, "utf8_is_space"):
return type(self)(pc.utf8_is_space(self.data))
else:
return super()._str_isspace()
def _str_istitle(self):
if hasattr(pc, "utf8_is_title"):
return type(self)(pc.utf8_is_title(self.data))
else:
return super()._str_istitle()
def _str_isupper(self):
if hasattr(pc, "utf8_is_upper"):
return type(self)(pc.utf8_is_upper(self.data))
else:
return super()._str_isupper()
|
build.py | ferrandinand/bombardier | 3,557 | 11170121 | import argparse
import os
import subprocess
platforms = [
("darwin", "amd64"),
("darwin", "arm64"),
("freebsd", "386"),
("freebsd", "amd64"),
("freebsd", "arm"),
("linux", "386"),
("linux", "amd64"),
("linux", "arm"),
("linux", "arm64"),
("netbsd", "386"),
("netbsd", "amd64"),
("netbsd", "arm"),
("openbsd", "386"),
("openbsd", "amd64"),
("openbsd", "arm"),
("openbsd", "arm64"),
("windows", "386"),
("windows", "amd64"),
]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Auxilary build script.")
parser.add_argument("-v", "--version", default="unspecified",
type=str, help="string used as a version when building binaries")
args = parser.parse_args()
version = args.version
for (build_os, build_arch) in platforms:
ext = ""
if build_os == "windows":
ext = ".exe"
build_env = os.environ.copy()
build_env["GOOS"] = build_os
build_env["GOARCH"] = build_arch
subprocess.run(["go", "build", "-ldflags", "-X main.version=%s" %
version, "-o", "bombardier-%s-%s%s" % (build_os, build_arch, ext)], env=build_env)
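# For example, `python build.py -v 1.2.3` cross-compiles one binary per (GOOS, GOARCH)
# pair listed above, named like bombardier-linux-amd64 (with a .exe suffix on windows).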
|
modules/data.remote/inst/RpTools/RpTools/biophys_xarray.py | CFranc22/pecan | 151 | 11170133 | # -*- coding: utf-8 -*-
"""
Created on Mon May 11 14:34:08 2020
@author: <NAME> (<EMAIL>,
Finnish Meteorological Institute)
Olli's python implementation of ESA SNAP s2toolbox biophysical processor and
computation of vegetation indices.
See ATBD at https://step.esa.int/docs/extra/ATBD_S2ToolBox_L2B_V1.1.pdf
And java source code at
https://github.com/senbox-org/s2tbx/tree/master/s2tbx-biophysical/src/main/java/org/esa/s2tbx/biophysical
Caveats
Currently changes out-of-bounds inputs and outputs to nan (or to the min or max
value if the output is within tolerance). Maybe output flagging information as
well (i.e. different flags for input and output out of bounds).
Convex hull input checking is currently disabled. It is computationally slow and
its benefit is unclear. Better to filter out bad data based on L2A quality
info/classification and hope averaging removes some bad pixels.
"""
import requests
import io
import numpy as np
import xarray as xr
# url to Sentinel 2 Toolbox's auxdata
# This base_url points towards the original toolbox(not the one created by Olli)
base_url = "https://raw.githubusercontent.com/senbox-org/s2tbx/master/s2tbx-biophysical/src/main/resources/auxdata/2_1/{}/{}"
def get_fromurl(var, pattern):
"""
Fetches the contents of a text file from the base url and stores it in a ndarray.
Author: <NAME>
Parameters
----------
var (str) -- type of the product, one of FAPAR, FCOVER, LAI, LAI_Cab and LAI_Cw.
pattern (str) -- name of the file excluding the initial variable part.
Returns
-------
ndarray -- loaded with the contents of the text file.
"""
# attach variable and file name to the base url
res_url = base_url.format(var, str(var) + "%s" % str(pattern))
# make a GET request to the url to fetch the data.
res_url = requests.get(res_url)
# check the HTTP status code to see if any error has occured.
res_url.raise_for_status()
# store the contents of the url in an in-memory buffer and use it to load the ndarray.
return np.loadtxt(io.BytesIO(res_url.content), delimiter=",")
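# For example, the parameter-loading loop below calls
#   get_fromurl("FAPAR", "_Normalisation")
# which downloads .../auxdata/2_1/FAPAR/FAPAR_Normalisation and parses the
# comma-separated table into an ndarray.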
# Read SNAP Biophysical processor neural network parameters
nn_params = {}
for var in ["FAPAR", "FCOVER", "LAI", "LAI_Cab", "LAI_Cw"]:
norm_minmax = get_fromurl(var, "_Normalisation")
denorm_minmax = get_fromurl(var, "_Denormalisation")
layer1_weights = get_fromurl(var, "_Weights_Layer1_Neurons")
layer1_bias = get_fromurl(var, "_Weights_Layer1_Bias").reshape(-1, 1)
layer2_weights = get_fromurl(var, "_Weights_Layer2_Neurons").reshape(1, -1)
layer2_bias = get_fromurl(var, "_Weights_Layer2_Bias").reshape(1, -1)
extreme_cases = get_fromurl(var, "_ExtremeCases")
if var == "FCOVER":
nn_params[var] = {
"norm_minmax": norm_minmax,
"denorm_minmax": denorm_minmax,
"layer1_weights": layer1_weights,
"layer1_bias": layer1_bias,
"layer2_weights": layer2_weights,
"layer2_bias": layer2_bias,
"extreme_cases": extreme_cases,
}
else:
defdom_min = get_fromurl(var, "_DefinitionDomain_MinMax")[0, :].reshape(-1, 1)
defdom_max = get_fromurl(var, "_DefinitionDomain_MinMax")[1, :].reshape(-1, 1)
defdom_grid = get_fromurl(var, "_DefinitionDomain_Grid")
nn_params[var] = {
"norm_minmax": norm_minmax,
"denorm_minmax": denorm_minmax,
"layer1_weights": layer1_weights,
"layer1_bias": layer1_bias,
"layer2_weights": layer2_weights,
"layer2_bias": layer2_bias,
"defdom_min": defdom_min,
"defdom_max": defdom_max,
"defdom_grid": defdom_grid,
"extreme_cases": extreme_cases,
}
def _normalization(x, x_min, x_max):
x_norm = 2 * (x - x_min) / (x_max - x_min) - 1
return x_norm
def _denormalization(y_norm, y_min, y_max):
y = 0.5 * (y_norm + 1) * (y_max - y_min)
return y
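# These two helpers implement the affine scaling used around the SNAP neural network:
# _normalization maps each input to [-1, 1] via 2 * (x - x_min) / (x_max - x_min) - 1,
# and _denormalization rescales the network output back using the stored min/max.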
def _input_ouf_of_range(x, variable):
x_copy = x.copy()
x_bands = x_copy[:8, :]
# check min max domain
defdom_min = nn_params[variable]["defdom_min"][:, 0].reshape(-1, 1)
defdom_max = nn_params[variable]["defdom_max"][:, 0].reshape(-1, 1)
bad_input_mask = (x_bands < defdom_min) | (x_bands > defdom_max)
bad_vector = np.any(bad_input_mask, axis=0)
x_bands[:, bad_vector] = np.nan
# convex hull check, currently disabled due to time consumption vs benefit
# gridProject = lambda v: np.floor(10 * (v - defdom_min) / (defdom_max - defdom_min) + 1 ).astype(int)
# x_bands = gridProject(x_bands)
# isInGrid = lambda v: any((v == x).all() for x in nn_params[variable]['defdom_grid'])
# notInGrid = ~np.array([isInGrid(v) for v in x_bands.T])
# x[:,notInGrid | bad_vector] = np.nan
x_copy[:, bad_vector] = np.nan
return x_copy
def _output_ouf_of_range(output, variable):
new_output = np.copy(output)
tolerance = nn_params[variable]["extreme_cases"][0]
output_min = nn_params[variable]["extreme_cases"][1]
output_max = nn_params[variable]["extreme_cases"][2]
new_output[output < (output_min + tolerance)] = np.nan
new_output[(output > (output_min + tolerance)) & (output < output_min)] = output_min
new_output[(output < (output_max - tolerance)) & (output > output_max)] = output_max
new_output[output > (output_max - tolerance)] = np.nan
return new_output
def _compute_variable(x, variable):
x_norm = np.zeros_like(x)
x = _input_ouf_of_range(x, variable)
x_norm = _normalization(
x,
nn_params[variable]["norm_minmax"][:, 0].reshape(-1, 1),
nn_params[variable]["norm_minmax"][:, 1].reshape(-1, 1),
)
out_layer1 = np.tanh(
nn_params[variable]["layer1_weights"].dot(x_norm)
+ nn_params[variable]["layer1_bias"]
)
out_layer2 = (
nn_params[variable]["layer2_weights"].dot(out_layer1)
+ nn_params[variable]["layer2_bias"]
)
output = _denormalization(
out_layer2,
nn_params[variable]["denorm_minmax"][0],
nn_params[variable]["denorm_minmax"][1],
)[0]
output = _output_ouf_of_range(output, variable)
output = output.reshape(1, np.shape(x)[1])
return output
def run_snap_biophys(dataset, variable):
"""Compute specified variable using the SNAP algorithm.
See ATBD at https://step.esa.int/docs/extra/ATBD_S2ToolBox_L2B_V1.1.pdf
Parameters
----------
dataset : xr dataset
xarray dataset.
variable : str
Options 'FAPAR', 'FCOVER', 'LAI', 'LAI_Cab' or 'LAI_Cw'
Returns
-------
xarray dataset
Adds the specified variable array to dataset (variable name in
lowercase).
"""
# generate view angle bands/layers
vz = (
np.ones_like(dataset.band_data[:, 0, :, :]).T
* np.cos(np.radians(dataset.view_zenith)).values
)
vz = vz[..., np.newaxis]
vzarr = xr.DataArray(
vz,
coords=[dataset.y, dataset.x, dataset.time, ["view_zenith"]],
dims=["y", "x", "time", "band"],
)
sz = (
np.ones_like(dataset.band_data[:, 0, :, :]).T
* np.cos(np.radians(dataset.sun_zenith)).values
)
sz = sz[..., np.newaxis]
szarr = xr.DataArray(
sz,
coords=[dataset.y, dataset.x, dataset.time, ["sun_zenith"]],
dims=["y", "x", "time", "band"],
)
raz = (
np.ones_like(dataset.band_data[:, 0, :, :]).T
* np.cos(np.radians(dataset.sun_azimuth - dataset.view_azimuth)).values
)
raz = raz[..., np.newaxis]
razarr = xr.DataArray(
raz,
coords=[dataset.y, dataset.x, dataset.time, ["relative_azimuth"]],
dims=["y", "x", "time", "band"],
)
newarr = xr.concat([dataset.band_data, vzarr, szarr, razarr], dim="band")
newarr = newarr.stack(xy=("x", "y"))
arr = xr.apply_ufunc(
_compute_variable,
newarr,
input_core_dims=[["band", "xy"]],
output_core_dims=[["xy"]],
kwargs={"variable": variable},
vectorize=True,
).unstack()
return dataset.assign({variable.lower(): arr})
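# Minimal usage sketch (assuming `ds` is an xarray Dataset laid out as described in the
# docstring above, i.e. with `band_data` plus the view/sun zenith and azimuth variables;
# the variable choice is illustrative):
#   ds = run_snap_biophys(ds, "LAI")
#   lai = ds["lai"]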
|
public-engines/iris-species-engine/tests/training/test_trainer.py | tallandroid/incubator-marvin | 101 | 11170144 |
#!/usr/bin/env python
# coding=utf-8
try:
import mock
except ImportError:
import unittest.mock as mock
import pandas as pd
from marvin_iris_species_engine.training import Trainer
@mock.patch('marvin_iris_species_engine.training.trainer.svm.SVC')
@mock.patch('marvin_iris_species_engine.training.trainer.LogisticRegression')
@mock.patch('marvin_iris_species_engine.training.trainer.DecisionTreeClassifier')
@mock.patch('marvin_iris_species_engine.training.trainer.KNeighborsClassifier')
def test_execute(knn_mocked, dt_mocked, lr_mocked, svc_mocked, mocked_params):
data_petals_x = {
'PetalLengthCm': [5, 6],
'PetalWidthCm': [7, 8],
}
data_sepals_x = {
'SepalLengthCm': [1, 2],
'SepalWidthCm': [3, 4],
}
data_joined_x = {
'SepalLengthCm': [1, 2],
'SepalWidthCm': [3, 4],
'PetalLengthCm': [5, 6],
'PetalWidthCm': [7, 8],
}
data_petals_y = {'Species': ['species1']}
data_sepals_y = {'Species': ['species1']}
data_joined_y = {'Species': ['species1']}
train_x_p = pd.DataFrame(data=data_petals_x)
train_y_p = pd.DataFrame(data=data_petals_y)
test_x_p = pd.DataFrame(data=data_petals_x)
test_y_p = pd.DataFrame(data=data_petals_y)
train_x_s = pd.DataFrame(data=data_sepals_x)
train_y_s = pd.DataFrame(data=data_sepals_y)
test_x_s = pd.DataFrame(data=data_sepals_x)
test_y_s = pd.DataFrame(data=data_sepals_y)
train_X = pd.DataFrame(data=data_joined_x)
train_y = pd.DataFrame(data=data_joined_y)
test_X = pd.DataFrame(data=data_joined_x)
test_y = pd.DataFrame(data=data_joined_y)
data_source = {
'petals': {'train_X': train_x_p, 'train_y': train_y_p, 'test_X': test_x_p, 'test_y': test_y_p},
'sepals': {'train_X': train_x_s, 'train_y': train_y_s, 'test_X': test_x_s, 'test_y': test_y_s},
'joined': {'train_X': train_X, 'train_y': train_y, 'test_X': test_X, 'test_y': test_y}
}
ac = Trainer(dataset=data_source)
ac.execute(params=mocked_params)
knn_mocked.assert_called()
dt_mocked.assert_called()
lr_mocked.assert_called()
svc_mocked.assert_called()
|
src/poetry/repositories/__init__.py | pkoch/poetry | 7,258 | 11170162 | from __future__ import annotations
from poetry.repositories.pool import Pool
from poetry.repositories.repository import Repository
__all__ = ["Pool", "Repository"]
|
atlas/foundations_sdk/src/test/local_run/test_initialize_default_environment.py | DeepLearnI/atlas | 296 | 11170187 |
from foundations_spec import *
from foundations.local_run.initialize_default_environment import create_config_file
class TestInitializeDefaultEnvironment(Spec):
mock_open = let_patch_mock_with_conditional_return('builtins.open')
mock_mkdirs = let_patch_mock('os.makedirs')
mock_typed_config_klass = let_patch_mock_with_conditional_return('foundations_core_cli.typed_config_listing.TypedConfigListing')
mock_typed_config = let_mock()
@let
def mock_file(self):
mock_file = Mock()
mock_file.__enter__ = lambda *_: mock_file
mock_file.__exit__ = Mock()
mock_file.write = self._write_file_data
return mock_file
@set_up
def set_up(self):
self.mock_open.return_when(self.mock_file, 'config/execution/default.config.yaml', 'w+')
self.mock_typed_config_klass.return_when(self.mock_typed_config, 'execution')
self.mock_typed_config.config_path = ConditionalReturn()
self.mock_typed_config.config_path.return_when(None, 'default')
self._file_data = None
def test_create_default_config_creates_default_execution_config(self):
import yaml
create_config_file()
config = yaml.load(self._file_data)
self.assertEqual({'results_config': {}, 'cache_config': {}}, config)
def test_create_default_does_not_create_config_if_already_existing(self):
import yaml
self.mock_typed_config.config_path.clear()
self.mock_typed_config.config_path.return_when('config/execution/default.config.yaml', 'default')
self._file_data = "---\nhello: world\n"
create_config_file()
config = yaml.load(self._file_data)
self.assertEqual({'hello': 'world'}, config)
def test_ensure_directory_exists(self):
create_config_file()
self.mock_mkdirs.assert_called_with('config/execution', exist_ok=True)
def _write_file_data(self, data):
self._file_data = data
|
Python/Algorithms/DeepLearningAlgorithms/Layers.py | m-payal/AlgorithmsAndDataStructure | 195 | 11170221 | """
__FileCreationDate__ : //2020
__Author__ : CodePerfectPlus
__Package__ : Python 3
__GitHub__ : https://www.github.com/codeperfectplus
"""
|
tools/nntool/quantization/symmetric/kernels/softmax.py | 00-01/gap_sdk | 118 | 11170226 | # Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import numpy as np
from graph.types.activations import SoftMaxParameters
from quantization.kernels.kernel_base import KernelBase, params_type, qrec_type
from quantization.new_qrec import QRec
from utils.exp_17_15 import exp_fp_17_15
def softmax_func(arg, axis=None):
if axis is None:
axis = -1
v = arg - np.max(arg, axis=axis, keepdims=True)
exp_v = np.exp(v)
v = exp_v/np.sum(exp_v, axis=axis, keepdims=True)
if len(arg.shape) == 1:
v = v.flatten()
return v
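# For example, softmax_func(np.array([0.0, 0.0])) returns array([0.5, 0.5]); for 2-D
# input the normalisation is applied along `axis` (the last axis by default).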
@params_type(SoftMaxParameters)
@qrec_type('symmetric')
class SoftMaxSymmetric(KernelBase):
@classmethod
def execute(cls, params,
in_tensors,
qrec: QRec,
**kwargs):
old_err = np.seterr(over='raise')
in_tensor = qrec.prepare_inputs(
params, in_tensors, ktype="symmetric")[0]
# TODO - Implement properly quantized version
in_tensor = qrec.in_qs[0].dequantize(in_tensor)
in_tensor = qrec.out_qs[0].quantize(softmax_func(in_tensor, axis=params.axis))
np.seterr(**old_err)
return qrec.get_outputs(params, [in_tensor], ktype="symmetric")
@params_type(SoftMaxParameters)
@qrec_type('scaled')
class SoftMaxSymmetricMult(KernelBase):
@classmethod
def execute(cls, params,
in_tensors,
qrec: QRec,
**kwargs):
# in_tensor = in_tensors[0].flatten()
in_tensor = in_tensors[0].astype(np.int32)
max_val = np.max(in_tensor, axis=params.axis, keepdims=True)
norm = 15 + np.ceil(np.log2(qrec.in_qs[0].scale)).astype(np.int32)
exp = exp_fp_17_15((in_tensor.astype(np.int32) - max_val) << (norm))
sum_exp = np.sum(exp, axis=params.axis, keepdims=True)
inv_sum = (np.array([(1 << 15)-1], dtype=np.uint32) << 15)//sum_exp
res = np.abs((exp * inv_sum + (1 << 14)) >> 15)
iinfo = np.iinfo(np.int16)
res = np.clip(res, iinfo.min, iinfo.max).astype(
np.int16).reshape(params.out_dims[0].shape)
return qrec.get_outputs(params, [res], ktype="symmetric")
|
scripts/gdrive_scanner.py | alaaseeku/rusty-hog | 281 | 11170229 | # A python script to "scan" a GDrive folder containing docs and binaries.
# You will need the Google Python API libraries:
# pip install --upgrade google-api-python-client google-auth-httplib2 google-auth-oauthlib
from __future__ import print_function
import csv
import io
import json
import os.path
import random
import subprocess
import sys
import tempfile
import uuid
from multiprocessing import Pool
from tempfile import tempdir
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
import argparse
from googleapiclient.http import MediaIoBaseDownload
INSIGHTS_INSERT_KEY = os.environ["INSIGHTS_INSERT_KEY"]
INSIGHTS_ACCT_ID = os.environ["INSIGHTS_ACCT_ID"]
DUROC_HOG_PATH = os.environ["DUROC_HOG_PATH"]
ANKAMALI_HOG_PATH = os.environ["ANKAMALI_HOG_PATH"]
# If modifying these scopes, delete the file temptoken_scanner.json.
SCOPES = ['https://www.googleapis.com/auth/drive.readonly']
tempdir = tempfile.gettempdir()
creds = None
# The file temptoken_scanner.json stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('temptoken_scanner.json'):
creds = Credentials.from_authorized_user_file('temptoken_scanner.json', SCOPES)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'clientsecret.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('temptoken_scanner.json', 'w') as token:
token.write(creds.to_json())
SERVICE = build('drive', 'v3', credentials=creds)
def scan_ankamali(x):
filename = os.path.join(tempdir, str(uuid.uuid4()))
print(f"Fetching {x['id']}")
s = subprocess.run([ANKAMALI_HOG_PATH, "--outputfile", filename, x['id']],
capture_output=True)
return {"id": x['id'], "results": filename}
def scan_duroc(x):
results_filename = os.path.join(tempdir, str(uuid.uuid4()))
scan_target_filename = os.path.join(tempdir, x['name'])
print(f"Fetching {x['id']} {x['webContentLink']} {x['name']} and writing to {scan_target_filename}")
request = SERVICE.files().get_media(fileId=x['id'])
fh = io.BytesIO()
downloader = MediaIoBaseDownload(fh, request)
done = False
while done is False:
status, done = downloader.next_chunk()
with open(scan_target_filename, 'wb') as f:
f.write(fh.getbuffer())
f.close()
s = subprocess.run([DUROC_HOG_PATH, "-z", scan_target_filename],
capture_output=True)
print(s.stdout)
print(s.stderr)
# os.remove(scan_target_filename)
return {"id": x['id'], "results": results_filename, "name": x['name'], "link": x['webContentLink']}
def parseargs():
parser = argparse.ArgumentParser()
parser.add_argument("--driveid", help="GDrive id of drive to scan, defaults to user's drive")
parser.add_argument("-f", "--folder", help="Scan within specific folder ID")
parser.add_argument("-r", "--recursive", help="Scan files with parents")
parser.add_argument("--sample", help="Only scan a sample of available files", type=int)
parser.add_argument("--modifiedTime", help="Only scan files after a specific date (ISO format)", type=int)
parser.add_argument("--scope", help="GDrive scoping option", choices=['user', 'drive', 'domain'], default='user')
return parser.parse_args()
def main(args):
"""Shows basic usage of the Drive v3 API.
Prints the names and ids of the first 10 files the user has access to.
"""
# Call the Drive v3 API
file_get_kwargs = {
'pageSize': 100,
'fields': 'nextPageToken, files(contentHints/thumbnail,fileExtension,iconLink,id,name,size,thumbnailLink,webContentLink,webViewLink,mimeType,parents)',
'corpora': args.scope
}
if args.driveid:
file_get_kwargs['driveId'] = args.driveid
if args.folder:
file_get_kwargs['q'] = f"'{args.folder}' in parents"
page = 1
print(f"Fetching page {page}")
results = SERVICE.files().list(**file_get_kwargs).execute()
page += 1
files = results.get('files', [])
nextPageToken = results.get('nextPageToken', None)
while nextPageToken:
file_get_kwargs['pageToken'] = nextPageToken
print(f"Fetching page {page}")
results = SERVICE.files().list(**file_get_kwargs).execute()
page += 1
files += results.get('files', [])
nextPageToken = results.get('nextPageToken', None)
print("Completed fetching file-listing")
files = list(filter(lambda x: x['mimeType'] != 'application/vnd.google-apps.folder', files))
ankamali_hog_files = list(filter(lambda x: x['mimeType'] == 'application/vnd.google-apps.spreadsheet' or x['mimeType'] == 'application/vnd.google-apps.document', files))
mime_block_list = [
'application/vnd.google-apps.audio',
'application/vnd.google-apps.document',
'application/vnd.google-apps.drive',
'application/vnd.google-apps.drawing',
'application/vnd.google-apps.file',
'application/vnd.google-apps.folder',
'application/vnd.google-apps.form',
'application/vnd.google-apps.fusiontable',
'application/vnd.google-apps.map',
'application/vnd.google-apps.photo',
'application/vnd.google-apps.presentation',
'application/vnd.google-apps.script',
'application/vnd.google-apps.shortcut',
'application/vnd.google-apps.site',
'application/vnd.google-apps.spreadsheet',
'application/vnd.google-apps.unknown',
'application/vnd.google-apps.video'
]
duroc_hog_files = list(filter(lambda x: x['mimeType'] not in mime_block_list, files))
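    # Split the listing between the two scanners: ankamali_hog handles native Google
    # Docs/Sheets through the Drive API, while duroc_hog downloads every other
    # (non-Google-native) file and scans the raw bytes locally.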
if args.sample:
if len(ankamali_hog_files) > args.sample:
ankamali_hog_files = random.sample(ankamali_hog_files, args.sample)
if len(duroc_hog_files) > args.sample:
duroc_hog_files = random.sample(duroc_hog_files, args.sample)
output_ankamali = []
output_duroc = []
print("Starting the Rusty Hog scanning process...")
if len(ankamali_hog_files) > 0:
with Pool(4) as p:
output_ankamali.extend(p.map(scan_ankamali, ankamali_hog_files))
if len(duroc_hog_files) > 0:
with Pool(4) as p:
output_duroc.extend(p.map(scan_duroc, duroc_hog_files))
print("Complete! Dumping output to output_duroc.csv...")
with open('output_duroc.csv', 'w') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['id', 'reason', 'stringsFound', 'path', 'linenum', 'weblink'])
for result in output_duroc:
try:
print(f"Reading duroc hog results {result['results']}")
with open(result['results'], 'r') as f:
result_list = json.load(f)
for finding in result_list:
writer.writerow([result['id'],
finding['reason'],
str(finding['stringsFound']),
result['name'],
finding['linenum'],
result['link']])
except:
print("Unexpected error:", sys.exc_info()[0])
try:
os.remove(result['results'])
except:
print("Unexpected error:", sys.exc_info()[0])
print("Complete! Dumping output to output_ankamali.csv...")
with open('output_ankamali.csv', 'w') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['id', 'reason', 'stringsFound', 'path', 'date'])
for result in output_ankamali:
try:
with open(result['results'], 'r') as f:
result_list = json.load(f)
for finding in result_list:
writer.writerow([result['id'],
finding['reason'],
str(finding['stringsFound']),
finding['path'],
finding['date']])
except:
                print(f"Couldn't find ankamali hog output {result['results']}")
try:
os.remove(result['results'])
except:
pass
if __name__ == '__main__':
args = parseargs()
main(args) |
Installer/create_version_file.py | MrTimbones/ed-scout | 116 | 11170298 | import os
import re
from jinja2 import Environment, FileSystemLoader
def extract_version_parts(git_response):
regex = r"v(\d)\.(\d)\.(\d)(?:-(\d+)-([a-z0-9]+)(?:-([a-z0-9]+))?)?"
matches = re.finditer(regex, git_response, re.MULTILINE)
groups = list(matches)[0].groups()
if len(groups) > 3:
commits_since_tag = groups[3]
if groups[3]:
commits_since_tag = groups[3]
else:
commits_since_tag = '0'
commit_sha = groups[4]
else:
commits_since_tag = '0'
commit_sha = None
four_part_version = list(groups[0:3]) + [commits_since_tag]
version_info = {
'four_part_version': four_part_version,
'is_dirty': (len(groups) > 4)
}
return version_info
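# For example, a describe string such as "v1.5.1-4-gc25ef16-dirty" yields
# four_part_version == ['1', '5', '1', '4'] (major, minor, patch, commits since the tag),
# while a clean tag such as "v1.5.1" yields ['1', '5', '1', '0'].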
# The full version, including alpha/beta/rc tags.
release = os.popen('git describe --tags --dirty').read().strip()
print(release) # Returns something like v1.5.1-4-gc25ef16-dirty
release_parts = release.split('-')
basic_version = release_parts[0]
commits_since_tag = release_parts[1] if len(release_parts) > 1 else None
sha = release_parts[2] if len(release_parts) > 2 else None
dirty_flag = release_parts[3] if len(release_parts) > 3 else None
# Write the version used to display version info in the web gui and logs.
with open(os.path.join("../EDScoutWebUI", "version.py"), "w") as f:
f.write(f'release = "{release}"\n')
f.write(f'version = "{basic_version}"\n')
# record the version more simply here to aid the packaging process
with open("version.txt", "w") as f:
f.write(f'{release}')
env = Environment(
loader=FileSystemLoader('.'),
)
template = env.get_template('version_template.txt')
version_parts = extract_version_parts(release)
csv_version = ', '.join(version_parts['four_part_version']) # Something like 1,5,1,0
short_version = '.'.join(version_parts['four_part_version'][0:3]) # Something like 1.5.1
long_version = release # Something like v1.5.1-4-gc25ef16-dirty
rendered_verion_file = template.render(csv_version=csv_version, short_version=short_version, long_version=long_version)
# print(rendered_verion_file)
with open("version_for_installer.txt", "w") as f:
f.write(rendered_verion_file)
|
sponsors/migrations/0035_auto_20210826_1929.py | ewjoachim/pythondotorg | 911 | 11170340 | # Generated by Django 2.0.13 on 2021-08-26 19:29
from django.db import migrations, models
import sponsors.models
class Migration(migrations.Migration):
dependencies = [
('sponsors', '0034_contract_document_docx'),
]
operations = [
migrations.AlterField(
model_name='contract',
name='document',
field=models.FileField(blank=True, upload_to='sponsors/contracts/', verbose_name='Unsigned PDF'),
),
migrations.AlterField(
model_name='contract',
name='document_docx',
field=models.FileField(blank=True, upload_to='sponsors/contracts/docx/', verbose_name='Unsigned Docx'),
),
migrations.AlterField(
model_name='contract',
name='signed_document',
field=models.FileField(blank=True, upload_to=sponsors.models.signed_contract_random_path, verbose_name='Signed PDF'),
),
]
|
examples/newtons_cradle.py | conductiveIT/pymunk-1 | 670 | 11170389 | """A screensaver version of Newton's Cradle with an interactive mode.
"""
__docformat__ = "reStructuredText"
import os
import random
import sys
description = """
---- Newton's Cradle ----
A screensaver version of Newton's Cradle with an interactive mode
/s - Run in fullscreen screensaver mode
/p #### - Display a preview of the screensaver using a window handler
/i - Interactive mode
"""
if len(sys.argv) < 2:
print(description)
sys.exit()
is_interactive = False
display_flags = 0
if sys.argv[1] == "/p": # preview mode
os.environ["SDL_VIDEODRIVER"] = "windib"
os.environ["SDL_WINDOWID"] = sys.argv[2]
display_size = (100, 100)
is_interactive = False
### We must set OS env before the pygame imports..
import pygame
if sys.argv[1] == "/s": # fullscreen screensaver mode
display_size = (0, 0)
is_interactive = False
display_flags = (
display_flags | pygame.FULLSCREEN
) # FULLSCREEN) # | DOUBLEBUF | HWSURFACE )
elif sys.argv[1] == "/i": # interactive
display_size = (600, 600)
is_interactive = True
import pymunk as pm
from pymunk import Vec2d
def drawcircle(image, colour, origin, radius, width=0):
if width == 0:
pygame.draw.circle(image, colour, origin, int(radius))
else:
if radius > 65534 / 5:
radius = 65534 / 5
circle = pygame.Surface(
[radius * 2 + width, radius * 2 + width]
).convert_alpha()
circle.fill([0, 0, 0, 0])
pygame.draw.circle(
circle,
colour,
[circle.get_width() / 2, circle.get_height() / 2],
radius + (width / 2),
)
if int(radius - (width / 2)) > 0:
pygame.draw.circle(
circle,
[0, 0, 0, 0],
[circle.get_width() / 2, circle.get_height() / 2],
abs(int(radius - (width / 2))),
)
image.blit(
circle,
[
origin[0] - (circle.get_width() / 2),
origin[1] - (circle.get_height() / 2),
],
)
def reset_bodies(space):
for body in space.bodies:
body.position = Vec2d(*body.start_position)
body.force = 0, 0
body.torque = 0
body.velocity = 0, 0
body.angular_velocity = 0
color = pygame.Color(
random.randint(1, 255), random.randint(1, 255), random.randint(1, 255)
)
for shape in space.shapes:
shape.color = color
def main():
pygame.init()
screen = pygame.display.set_mode(display_size, display_flags)
width, height = screen.get_size()
def to_pygame(p):
"""Small hack to convert pymunk to pygame coordinates"""
return int(p.x), int(-p.y + height)
def from_pygame(p):
return to_pygame(p)
clock = pygame.time.Clock()
running = True
font = pygame.font.Font(None, 16)
### Physics stuff
space = pm.Space()
space.gravity = (0.0, -1900.0)
space.damping = 0.999 # to prevent it from blowing up.
mouse_body = pm.Body(body_type=pm.Body.KINEMATIC)
bodies = []
for x in range(-100, 150, 50):
x += width / 2
offset_y = height / 2
mass = 10
radius = 25
moment = pm.moment_for_circle(mass, 0, radius, (0, 0))
body = pm.Body(mass, moment)
body.position = (x, -125 + offset_y)
body.start_position = Vec2d(*body.position)
shape = pm.Circle(body, radius)
shape.elasticity = 0.9999999
space.add(body, shape)
bodies.append(body)
pj = pm.PinJoint(space.static_body, body, (x, 125 + offset_y), (0, 0))
space.add(pj)
reset_bodies(space)
selected = None
if not is_interactive:
pygame.time.set_timer(pygame.USEREVENT + 1, 70000) # apply force
pygame.time.set_timer(pygame.USEREVENT + 2, 120000) # reset
pygame.event.post(pygame.event.Event(pygame.USEREVENT + 1))
pygame.mouse.set_visible(False)
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
elif event.type == pygame.KEYDOWN and event.key == pygame.K_p:
pygame.image.save(screen, "newtons_cradle.png")
if event.type == pygame.USEREVENT + 1:
r = random.randint(1, 4)
for body in bodies[0:r]:
body.apply_impulse_at_local_point((-6000, 0))
if event.type == pygame.USEREVENT + 2:
reset_bodies(space)
elif (
event.type == pygame.KEYDOWN
and event.key == pygame.K_r
and is_interactive
):
reset_bodies(space)
elif (
event.type == pygame.KEYDOWN
and event.key == pygame.K_f
and is_interactive
):
r = random.randint(1, 4)
for body in bodies[0:r]:
body.apply_impulse_at_local_point((-6000, 0))
elif event.type == pygame.MOUSEBUTTONDOWN and is_interactive:
if selected != None:
space.remove(selected)
p = from_pygame(Vec2d(*event.pos))
hit = space.point_query_nearest(p, 0, pm.ShapeFilter())
if hit != None:
shape = hit.shape
rest_length = mouse_body.position.get_distance(shape.body.position)
ds = pm.DampedSpring(
mouse_body, shape.body, (0, 0), (0, 0), rest_length, 1000, 10
)
space.add(ds)
selected = ds
elif event.type == pygame.MOUSEBUTTONUP and is_interactive:
if selected != None:
space.remove(selected)
selected = None
elif event.type == pygame.KEYDOWN:
running = False
elif event.type == pygame.MOUSEBUTTONDOWN:
running = False
mpos = pygame.mouse.get_pos()
p = from_pygame(Vec2d(*mpos))
mouse_body.position = p
### Clear screen
screen.fill(pygame.Color("black"))
### Draw stuff
for c in space.constraints:
pv1 = c.a.position + c.anchor_a
pv2 = c.b.position + c.anchor_b
p1 = to_pygame(pv1)
p2 = to_pygame(pv2)
pygame.draw.aalines(screen, pygame.Color("lightgray"), False, [p1, p2])
for ball in space.shapes:
p = to_pygame(ball.body.position)
drawcircle(screen, ball.color, p, int(ball.radius), 0)
# pygame.draw.circle(screen, ball.color, p, int(ball.radius), 0)
### Update physics
fps = 50
iterations = 25
dt = 1.0 / float(fps) / float(iterations)
for x in range(iterations): # 10 iterations to get a more stable simulation
space.step(dt)
### Flip screen
if is_interactive:
screen.blit(
font.render(
"fps: " + str(clock.get_fps()), True, pygame.Color("white")
),
(0, 0),
)
screen.blit(
font.render(
"Press left mouse button and drag to interact",
True,
pygame.Color("darkgrey"),
),
(5, height - 35),
)
screen.blit(
font.render(
"Press R to reset, any other key to quit",
True,
pygame.Color("darkgrey"),
),
(5, height - 20),
)
pygame.display.flip()
clock.tick(fps)
if __name__ == "__main__":
sys.exit(main())
|
cherry/models/robotics.py | acse-yl27218/cherry | 160 | 11170391 | #!/usr/bin/env python3
import torch as th
import torch.nn as nn
from cherry.nn import RoboticsLinear
class RoboticsMLP(nn.Module):
"""
[[Source]](https://github.com/seba-1511/cherry/blob/master/cherry/models/robotics.py)
**Description**
A multi-layer perceptron with proper initialization for robotic control.
**Credit**
Adapted from <NAME>'s implementation.
**Arguments**
* **inputs_size** (int) - Size of input.
* **output_size** (int) - Size of output.
* **layer_sizes** (list, *optional*, default=None) - A list of ints,
each indicating the size of a hidden layer.
(Defaults to two hidden layers of 64 units.)
**Example**
~~~python
target_qf = ch.models.robotics.RoboticsMLP(23,
34,
layer_sizes=[32, 32])
~~~
"""
def __init__(self, input_size, output_size, layer_sizes=None):
super(RoboticsMLP, self).__init__()
if layer_sizes is None:
layer_sizes = [64, 64]
if len(layer_sizes) > 0:
layers = [RoboticsLinear(input_size, layer_sizes[0]),
nn.Tanh()]
for in_, out_ in zip(layer_sizes[:-1], layer_sizes[1:]):
layers.append(RoboticsLinear(in_, out_))
layers.append(nn.Tanh())
layers.append(RoboticsLinear(layer_sizes[-1], output_size))
else:
layers = [RoboticsLinear(input_size, output_size)]
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
class RoboticsActor(RoboticsMLP):
"""
[[Source]](https://github.com/seba-1511/cherry/blob/master/cherry/models/robotics.py)
**Description**
A multi-layer perceptron with initialization designed for choosing
actions in continuous robotic environments.
**Credit**
    Adapted from <NAME>'s implementation.
**Arguments**
    * **input_size** (int) - Size of input.
    * **output_size** (int) - Size of the action space.
* **layer_sizes** (list, *optional*, default=None) - A list of ints,
each indicating the size of a hidden layer.
(Defaults to two hidden layers of 64 units.)
**Example**
~~~python
policy_mean = ch.models.robotics.Actor(28,
8,
layer_sizes=[64, 32, 16])
~~~
"""
def __init__(self, input_size, output_size, layer_sizes=None):
super(RoboticsMLP, self).__init__()
if layer_sizes is None:
layer_sizes = [64, 64]
if len(layer_sizes) > 0:
layers = [RoboticsLinear(input_size, layer_sizes[0]),
nn.Tanh()]
for in_, out_ in zip(layer_sizes[:-1], layer_sizes[1:]):
layers.append(RoboticsLinear(in_, out_))
layers.append(nn.Tanh())
layers.append(RoboticsLinear(layer_sizes[-1],
output_size,
gain=1.0))
else:
layers = [RoboticsLinear(input_size, output_size, gain=1.0)]
self.layers = nn.Sequential(*layers)
class LinearValue(nn.Module):
"""
[[Source]](https://github.com/seba-1511/cherry/blob/master/cherry/models/robotics.py)
**Description**
A linear state-value function, whose parameters are found by minimizing
least-squares.
**Credit**
Adapted from Tristan Deleu's implementation.
**References**
1. Duan et al. 2016. “Benchmarking Deep Reinforcement Learning for Continuous Control.”
2. [https://github.com/tristandeleu/pytorch-maml-rl](https://github.com/tristandeleu/pytorch-maml-rl)
**Arguments**
    * **input_size** (int) - Size of input.
* **reg** (float, *optional*, default=1e-5) - Regularization coefficient.
**Example**
~~~python
states = replay.state()
rewards = replay.reward()
dones = replay.done()
returns = ch.td.discount(gamma, rewards, dones)
baseline = LinearValue(input_size)
baseline.fit(states, returns)
next_values = baseline(replay.next_states())
~~~
"""
def __init__(self, input_size, reg=1e-5):
super(LinearValue, self).__init__()
self.linear = nn.Linear(2 * input_size + 4, 1, bias=False)
self.reg = reg
def _features(self, states):
length = states.size(0)
ones = th.ones(length, 1).to(states.device)
al = th.arange(length, dtype=th.float32, device=states.device).view(-1, 1) / 100.0
return th.cat([states, states**2, al, al**2, al**3, ones], dim=1)
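    # _features augments each state with its elementwise square, a cubic polynomial in
    # the (scaled) timestep and a bias term, matching the linear baseline of Duan et al. 2016.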
def fit(self, states, returns):
features = self._features(states)
reg = self.reg * th.eye(features.size(1))
reg = reg.to(states.device)
A = features.t() @ features + reg
b = features.t() @ returns
if hasattr(th, 'lstsq'): # Required for torch < 1.3.0
coeffs, _ = th.lstsq(b, A)
else:
coeffs, _ = th.gels(b, A)
self.linear.weight.data = coeffs.data.t()
def forward(self, states):
features = self._features(states)
return self.linear(features)
|
train_folder.py | dibyanshu0525/scenescoop | 120 | 11170412 | # This is a script to train a bunch of videos and leave it running for ever
import os
from glob import glob
import argparse
from argparse import Namespace
from os import getcwd, path
from scenescoop import main as scenescoop
def start(options):
# get all the videos in the input folder
videos = glob(options.input+'/*.*')
# run scenescoop on every video
for video in videos:
name = video.split('/')[-1]
args = Namespace(video=video, name=name, input_data=None, api=True)
scenescoop(args)
# once ready, move to the ready folder
os.rename(video, options.move + '/' + name)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Train a bunch of videos and leave it running for ever')
parser.add_argument('--input', type=str, help='Source folder containg the videos')
parser.add_argument('--move', type=str, help='Ready Folder. Where the input videos will move once they are done.')
args = parser.parse_args()
start(args)
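# Example invocation: python train_folder.py --input ./incoming --move ./processed
# (scenescoop runs on every video found in --input; each file is then moved to --move).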
|
tests/seahub/utils/test_normalize_file_path.py | weimens/seahub | 420 | 11170424 |
import posixpath
from random import randint
from tests.common.utils import randstring
from seahub.test_utils import BaseTestCase
from seahub.utils import normalize_file_path
class NormalizeDirPathTest(BaseTestCase):
def test_normalize_file_path(self):
slash = '/'
folder_1 = randstring(3)
folder_2 = randstring(3)
random_slash = ''
for i in range(1, randint(1, 10)):
random_slash += slash
posix_path = posixpath.join(folder_1, folder_2)
correct_path = slash + posix_path
path_without_slash = posix_path
path_starts_with_random_slash = random_slash + posix_path
path_ends_with_random_slash = posix_path + random_slash
path_with_slash = random_slash + posix_path + random_slash
assert normalize_file_path(path_without_slash) == correct_path
assert normalize_file_path(path_starts_with_random_slash) == correct_path
assert normalize_file_path(path_ends_with_random_slash) == correct_path
assert normalize_file_path(path_with_slash) == correct_path
|
marrow/mailer/manager/dynamic.py | cynepiaadmin/mailer | 166 | 11170437 |
# encoding: utf-8
import atexit
import threading
import weakref
import sys
import math
from functools import partial
from marrow.mailer.manager.futures import worker
from marrow.mailer.manager.util import TransportPool
try:
import queue
except ImportError:
import Queue as queue
try:
from concurrent import futures
except ImportError: # pragma: no cover
raise ImportError("You must install the futures package to use background delivery.")
__all__ = ['DynamicManager']
log = __import__('logging').getLogger(__name__)
def thread_worker(executor, jobs, timeout, maximum):
i = maximum + 1
try:
while i:
i -= 1
try:
work = jobs.get(True, timeout)
if work is None:
runner = executor()
if runner is None or runner._shutdown:
log.debug("Worker instructed to shut down.")
break
# Can't think of a test case for this; best to be safe.
del runner # pragma: no cover
continue # pragma: no cover
except queue.Empty: # pragma: no cover
log.debug("Worker death from starvation.")
break
else:
work.run()
else: # pragma: no cover
log.debug("Worker death from exhaustion.")
except: # pragma: no cover
log.critical("Unhandled exception in worker.", exc_info=True)
runner = executor()
if runner:
runner._threads.discard(threading.current_thread())
class WorkItem(object):
__slots__ = ('future', 'fn', 'args', 'kwargs')
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
def run(self):
if not self.future.set_running_or_notify_cancel():
return
try:
result = self.fn(*self.args, **self.kwargs)
except:
e = sys.exc_info()[1]
self.future.set_exception(e)
else:
self.future.set_result(result)
class ScalingPoolExecutor(futures.ThreadPoolExecutor):
def __init__(self, workers, divisor, timeout):
self._max_workers = workers
self.divisor = divisor
self.timeout = timeout
self._work_queue = queue.Queue()
self._threads = set()
self._shutdown = False
self._shutdown_lock = threading.Lock()
self._management_lock = threading.Lock()
atexit.register(self._atexit)
def shutdown(self, wait=True):
with self._shutdown_lock:
self._shutdown = True
for i in range(len(self._threads)):
self._work_queue.put(None)
if wait:
for thread in list(self._threads):
thread.join()
def _atexit(self): # pragma: no cover
self.shutdown(True)
def _spawn(self):
t = threading.Thread(target=thread_worker, args=(weakref.ref(self), self._work_queue, self.divisor, self.timeout))
t.daemon = True
t.start()
with self._management_lock:
self._threads.add(t)
def _adjust_thread_count(self):
pool = len(self._threads)
if pool < self._optimum_workers:
tospawn = int(self._optimum_workers - pool)
log.debug("Spawning %d thread%s." % (tospawn, tospawn != 1 and "s" or ""))
for i in range(tospawn):
self._spawn()
@property
def _optimum_workers(self):
return min(self._max_workers, math.ceil(self._work_queue.qsize() / float(self.divisor)))
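    # Example: with the default divisor of 10, a backlog of 35 queued messages asks for
    # ceil(35 / 10) == 4 worker threads, never exceeding the configured maximum.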
class DynamicManager(object):
__slots__ = ('workers', 'divisor', 'timeout', 'executor', 'transport')
name = "Dynamic"
Executor = ScalingPoolExecutor
def __init__(self, config, transport):
self.workers = int(config.get('workers', 10)) # Maximum number of threads to create.
self.divisor = int(config.get('divisor', 10)) # Estimate the number of required threads by dividing the queue size by this.
self.timeout = float(config.get('timeout', 60)) # Seconds before starvation.
self.executor = None
self.transport = TransportPool(transport)
super(DynamicManager, self).__init__()
def startup(self):
log.info("%s manager starting up.", self.name)
log.debug("Initializing transport queue.")
self.transport.startup()
workers = self.workers
log.debug("Starting thread pool with %d workers." % (workers, ))
self.executor = self.Executor(workers, self.divisor, self.timeout)
log.info("%s manager ready.", self.name)
def deliver(self, message):
# Return the Future object so the application can register callbacks.
# We pass the message so the executor can do what it needs to to make
# the message thread-local.
return self.executor.submit(partial(worker, self.transport), message)
def shutdown(self, wait=True):
log.info("%s manager stopping.", self.name)
log.debug("Stopping thread pool.")
self.executor.shutdown(wait=wait)
log.debug("Draining transport queue.")
self.transport.shutdown()
log.info("%s manager stopped.", self.name)
|
airmozilla/manage/views/email_sending.py | mozilla/airmozilla | 115 | 11170474 | from django.shortcuts import render
from django.conf import settings
from .decorators import superuser_required
from airmozilla.manage import forms
from airmozilla.manage import sending
@superuser_required
def home(request):
context = {
'EMAIL_BACKEND': settings.EMAIL_BACKEND,
'EMAIL_FILE_PATH': getattr(settings, 'EMAIL_FILE_PATH', None),
}
sent_email = None
if request.method == 'POST':
form = forms.EmailSendingForm(request.POST)
if form.is_valid():
to = form.cleaned_data['to']
subject = form.cleaned_data['subject']
html_body = form.cleaned_data['html_body']
sent_email = sending.email_sending_test(
subject,
html_body,
to,
request,
)
sent_html_body, = [
x[0] for x in sent_email.alternatives if x[1] == 'text/html'
]
context['sent_html_body'] = sent_html_body
else:
initial = {
'to': '<EMAIL>',
'subject': 'This is a Test Subject',
'html_body': (
"<p>Some paragraph here first.</p>\n\n"
)
}
form = forms.EmailSendingForm(initial=initial)
context['form'] = form
context['sent_email'] = sent_email
return render(request, 'manage/email_sending.html', context)
|
tests/test_distributions.py | determinant-io/aboleth | 117 | 11170520 |
"""Test distributions.py functionality."""
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from scipy.linalg import cho_solve
from scipy.stats import wishart
from aboleth.distributions import kl_sum, _chollogdet
from .conftest import SEED
def test_kl_normal_normal():
"""Test Normal/Normal KL."""
dim = (5, 10)
mu = np.zeros(dim, dtype=np.float32)
std = 1.0
q = tf.distributions.Normal(mu, std)
# Test 0 KL
p = tf.distributions.Normal(mu, std)
KL0 = kl_sum(q, p)
# Test diff var
std1 = 2.0
p = tf.distributions.Normal(mu, std1)
KL1 = kl_sum(q, p)
rKL1 = 0.5 * ((std / std1)**2 - 1 + np.log((std1 / std)**2)) * np.prod(dim)
# Test diff mu
mu1 = np.ones(dim, dtype=np.float32)
p = tf.distributions.Normal(mu1, std)
KL2 = kl_sum(q, p)
rKL2 = 0.5 * (np.sum((mu1 - mu)**2) / std**2)
tc = tf.test.TestCase()
with tc.test_session():
kl0 = KL0.eval()
assert np.isscalar(kl0)
assert kl0 == 0.
assert np.allclose(KL1.eval(), rKL1)
assert np.allclose(KL2.eval(), rKL2)
def test_kl_gaussian_normal(random):
"""Test Gaussian/Normal KL."""
dim = (5, 10)
Dim = (5, 10, 10)
mu0 = random.randn(*dim).astype(np.float32)
L0 = random_chol(Dim)
q = tfp.distributions.MultivariateNormalTriL(mu0, L0)
mu1 = random.randn(*dim).astype(np.float32)
std1 = 1.0
L1 = [(std1 * np.eye(dim[1])).astype(np.float32) for _ in range(dim[0])]
p = tf.distributions.Normal(mu1, std1)
KL = kl_sum(q, p)
KLr = KLdiv(mu0, L0, mu1, L1)
tc = tf.test.TestCase()
with tc.test_session():
kl = KL.eval()
assert np.isscalar(kl)
assert np.allclose(kl, KLr)
def test_kl_gaussian_gaussian(random):
"""Test Gaussian/Gaussian KL."""
dim = (5, 10)
Dim = (5, 10, 10)
mu0 = random.randn(*dim).astype(np.float32)
L0 = random_chol(Dim)
q = tfp.distributions.MultivariateNormalTriL(mu0, L0)
mu1 = random.randn(*dim).astype(np.float32)
L1 = random_chol(Dim)
p = tfp.distributions.MultivariateNormalTriL(mu1, L1)
KL = kl_sum(q, p)
KLr = KLdiv(mu0, L0, mu1, L1)
tc = tf.test.TestCase()
with tc.test_session():
assert np.allclose(KL.eval(), KLr)
def test_chollogdet():
"""Test log det with cholesky matrices."""
Dim = (5, 10, 10)
L = random_chol(Dim)
rlogdet = np.sum([logdet(l) for l in L])
tlogdet = _chollogdet(L)
L[0, 0, 0] = 1e-17 # Near zero to test numerics
L[1, 3, 3] = -1.
L[4, 5, 5] = -20.
nlogdet = _chollogdet(L)
tc = tf.test.TestCase()
with tc.test_session():
assert np.allclose(tlogdet.eval(), rlogdet)
assert not np.isnan(nlogdet.eval())
def random_chol(dim):
"""Generate random pos def matrices."""
D = dim[1]
n = dim[0]
np.random.seed(SEED)
C = wishart.rvs(df=D, scale=10 * np.eye(D), size=n)
np.random.seed(None)
L = np.array([np.linalg.cholesky(c).astype(np.float32) for c in C])
return L
def KLdiv(mu0, Lcov0, mu1, Lcov1):
"""Numpy KL calculation."""
tr, dist, ldet = 0., 0., 0.
D, n = mu0.shape
for m0, m1, L0, L1 in zip(mu0, mu1, Lcov0, Lcov1):
tr += np.trace(cho_solve((L1, True), L0.dot(L0.T)))
md = m1 - m0
dist += md.dot(cho_solve((L1, True), md))
ldet += logdet(L1) - logdet(L0)
KL = 0.5 * (tr + dist + ldet - D * n)
return KL
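# Closed form: KL(N0 || N1) = 0.5 * (tr(S1^-1 S0) + (m1 - m0)^T S1^-1 (m1 - m0)
# + ln(|S1| / |S0|) - dim), summed here over the independent Gaussians.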
def logdet(L):
"""Log Determinant from Cholesky."""
return 2. * np.log(L.diagonal()).sum()
|
developer/cgi/check_url_redirect.py | dlawde/homebrew-cask | 7,155 | 11170538 |
#!/usr/bin/python
from __future__ import print_function
import sys
import cgi
try: # Python 3
from urllib.request import Request, urlopen
except ImportError: # Python 2
from urllib2 import Request, urlopen
url = sys.argv[1]
headers = {'User-Agent': 'Mozilla'}
if len(sys.argv) > 2:
ua = sys.argv[2]
headers = {'User-Agent': ua}
try:
req = Request(url, None, headers)
response = urlopen(req)
redir = response.geturl()
except Exception:
import traceback
redir = 'generic exception: ' + traceback.format_exc()
print(redir)
|
lldb/test/API/lang/cpp/class-loading-via-member-typedef/TestClassLoadingViaMemberTypedef.py | LaudateCorpus1/llvm-project | 605 | 11170539 | """
Tests loading of classes when the loading is triggered via a typedef inside the
class (and not via the normal LLDB lookup that first resolves the surrounding
class).
"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@no_debug_info_test
def test(self):
self.build()
self.createTestTarget()
# Print the top-level typedef which triggers the loading of the class
# that the typedef is defined inside.
self.expect_expr(
"pull_in_classes",
result_type="StructWithMember::MemberTypedef",
result_value="0",
)
# Print the classes and check their types.
self.expect_expr(
"struct_to_print",
result_type="StructWithMember",
result_children=[
ValueCheck(
name="m",
type="StructWithNested::Nested<int>::OtherTypedef",
children=[ValueCheck(name="i", value="0", type="int")],
)
],
)
|
vultr/v1_backup.py | nickruhl/python-vultr | 117 | 11170549 |
'''Partial class to handle Vultr Backup API calls'''
from .utils import VultrBase
class VultrBackup(VultrBase):
'''Handles Vultr Backup API calls'''
def __init__(self, api_key):
VultrBase.__init__(self, api_key)
def list(self, params=None):
''' /v1/backup/list
GET - account
List all backups on the current account
Link: https://www.vultr.com/api/#backup_backup_list
'''
params = params if params else dict()
return self.request('/v1/backup/list', params, 'GET')
|
CSES_Problems/Repetitions/solution.py | gbrls/CompetitiveCode | 165 | 11170558 |
def repeat_counts(n):
if len(n)==1: # checking for the length of n. If it is 1 then returns 1
return 1
maxx=c=0 # initializing maxx and c as 0. maxx is used for maximum check and c is used for counting
for i in range(1,len(n)):
        if n[i-1]==n[i]: # the current character extends the run started by the previous one
            c+=1
        if n[i-1]!=n[i] or i==len(n)-1: # the run ended here, or this is the last character of the string
            c+=1 # add one because c has only counted the adjacent equal pairs inside the run
            if maxx<c: # update the maximum run length if this run is longer
                maxx=c
            c=0 # reset the counter so the next run starts from zero
return maxx
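# For example, repeat_counts("ATTCGGGA") returns 3 (the run of three G's).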
n = input()
print(repeat_counts(n)) # calls the repeat_counts(n) function and prints the returned value |
egs/csj/align1/local/gather_transcript.py | texpomru13/espnet | 5,053 | 11170598 |
#!/usr/bin/env python3
import sys
if __name__ == "__main__":
texts = {}
with open(sys.argv[1], "r", encoding="utf-8") as f:
line = f.readline()
while line:
entry = line.split(" ")
eid = entry[0]
trans = " ".join(entry[1:]).rstrip()
tid = eid.split("_")[0]
if tid in texts:
texts[tid] += trans
else:
texts[tid] = trans
line = f.readline()
with open(sys.argv[2], "w", encoding="utf-8") as fw:
for k, v in texts.items():
print("{} {}".format(k, v), file=fw)
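# The script merges utterance-level transcript lines (ids like "<talk-id>_<segment>")
# into a single transcript per talk id, reading from sys.argv[1] and writing to sys.argv[2].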
|
server/intrinsic/management/commands/intrinsic_generate_legend.py | paulu/opensurfaces | 137 | 11170620 | from django.core.management.base import BaseCommand
import numpy as np
from photos.utils import numpy_to_pil
from colormath.color_objects import LabColor
class Command(BaseCommand):
args = ''
help = ''
def handle(self, *args, **options):
height = 16
bar = np.zeros((256, height, 3))
for i in xrange(256):
t = i / 255.0
lab0 = np.array([40, 30, -70])
lab1 = np.array([70, 30, 70])
lab = t * lab0 + (1 - t) * lab1
rgb = np.clip(LabColor(*lab).convert_to('rgb').get_value_tuple(), 0, 255)
for j in xrange(height):
bar[i, j, :] = rgb / 255.0
image = numpy_to_pil(bar)
image.save('bar.png')
print "Saved to bar.png"
|
todo/parser/subparsers/list_todos.py | tomasdanjonsson/td-cli | 154 | 11170630 | from todo.constants import COMMANDS
from todo.parser.base import BaseParser, set_value
class ListTodosParser(BaseParser):
"""
usage: td [--completed] [--uncompleted] [--group GROUP] [--interactive]
td l [-c] [-u] [-g GROUP] [-i]
td ls [-c] [-u] [-g GROUP] [-i]
td list [-c] [-u] [-g GROUP] [-i]
list todos
optional arguments:
-h, --help show this help message and exit
--completed, -c filter by completed todos
--uncompleted, -u filter by uncompleted todos
--group GROUP, -g GROUP
filter by name of group
--interactive, -i toggle interactive mode
`td` is the shortcut to `td list`
"""
command = COMMANDS.LIST_TODOS
def _add_arguments(self):
self.parser.add_argument(
"--completed",
"-c",
dest="state",
nargs=0,
action=set_value(True),
help="filter by completed todos",
)
self.parser.add_argument(
"--uncompleted",
"-u",
dest="state",
nargs=0,
action=set_value(False),
help="filter by uncompleted todos",
)
self.parser.add_argument("--group", "-g", action="store", help="filter by name of group")
self.parser.add_argument(
"--interactive", "-i", action="store_true", help="toggle interactive mode"
)
self.parser.usage = "td [--completed] [--uncompleted] [--group GROUP] [--interactive]"
|
test/tools/pca_test.py | Edelweiss35/deep-machine-learning | 708 | 11170649 |
from __future__ import division
import numpy as np
import scipy as sp
from scipy.io import loadmat
from dml.tool import pca,featurenormal,projectData,recoverData,displayData
import matplotlib.pyplot as plt
data = loadmat("../data/face/ex7data1.mat")
X = data['X'].transpose()
plt.axis([0,8,0,8])
'''
simple test for PCA
'''
X_norm,mu,sigma=featurenormal(X)
plt.plot(X[0,:],X[1,:],'o')
#print mu,sigma,'======='
U,S = pca(X_norm)
#print U,S
tt=mu+U[:,0]
tp=mu+U[:,1]
plt.plot([mu[0],tt[0]] ,[mu[1],tt[1]])
plt.plot([mu[0],tp[0]],[mu[1],tp[1]])
plt.show()
plt.axis([-3,3,-3,3])
plt.plot(X_norm[0,:],X_norm[1,:],'o')
Z=projectData(X_norm, U, 1);
X_rec=recoverData(Z, U, 1);
plt.plot(X_rec[0,:],X_rec[1,:],'o')
for i in range(X.shape[1]):
plt.plot([X_norm[0,i],X_rec[0,i]],[X_norm[1,i],X_rec[1,i]],'r')
plt.show()
'''
face images dimension reduction
'''
data = loadmat("../data/face/ex7faces.mat")
X = data['X'].transpose()
X_norm,mu,sigma=featurenormal(X)
fig = plt.figure()
fig.add_subplot(1,2, 1)
plt.imshow(displayData(X_norm[:,:100]) , cmap='gray')
#PCA STEPs
[U, S] = pca(X_norm);
print S
K = 100;
Z = projectData(X_norm, U, K);
X_rec = recoverData(Z, U, K);
fig.add_subplot(1,2, 2)
plt.imshow(displayData(X_rec[:,:100]) , cmap='gray')
plt.show()
|
shipshape/cli/testdata/workspace1/bad.py | google/shipshape | 283 | 11170652 | class foo:
def bla(x):
pass
|
examples/csg.py | ssebs/pg | 151 | 11170675 |
import pg
class Window(pg.Window):
def setup(self):
self.wasd = pg.WASD(self, speed=5)
self.wasd.look_at((-2, 2, 2), (0, 0, 0))
self.context = pg.Context(pg.DirectionalLightProgram())
self.context.sampler = pg.Texture(0, 'examples/bronze.jpg')
self.context.use_texture = True
a = pg.Solid(pg.Cuboid(-1, 1, -1, 1, -1, 1))
b = pg.Solid(pg.Sphere(2, 1.35))
c = pg.Solid(pg.Cylinder((-1, 0, 0), (1, 0, 0), 0.5, 18))
d = pg.Solid(pg.Cylinder((0, -1, 0), (0, 1, 0), 0.5, 18))
e = pg.Solid(pg.Cylinder((0, 0, -1), (0, 0, 1), 0.5, 18))
solid = (a & b) - (c | d | e)
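        # Classic CSG demo shape: the intersection of a cube and a sphere, minus three
        # orthogonal cylinders along the x, y and z axes.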
self.mesh = solid.mesh()
def update(self, t, dt):
matrix = pg.Matrix()
matrix = self.wasd.get_matrix(matrix)
matrix = matrix.perspective(65, self.aspect, 0.01, 100)
self.context.matrix = matrix
self.context.camera_position = self.wasd.position
def draw(self):
self.clear()
self.mesh.draw(self.context)
if __name__ == "__main__":
pg.run(Window)
|
utils/utility.py | xhl1993/multi_pairs_martingle_bot | 351 | 11170687 |
"""
General utility functions.
Binance referral code: 10% fee rebate
https://www.binancezh.pro/cn/register?ref=AIR1GC70
Binance futures referral code: 10% fee rebate
https://www.binancezh.com/cn/futures/ref/51bitquant
if you don't have a binance account, you can use the invitation link to register one:
https://www.binancezh.com/cn/futures/ref/51bitquant
or use the invitation code: 51bitquant
Grid trading: suited to the highly volatile pairs of the crypto market and to spot trading; if you trade futures, take care to guard against liquidation in extreme market moves.
Server purchase link: https://www.ucloud.cn/site/global.html?invitation_code=C1x2EA81CD79B8C#dongjing
"""
import json
from pathlib import Path
from decimal import Decimal
def _get_trader_dir(temp_name: str):
"""
Get path where trader is running in.
"""
cwd = Path.cwd()
temp_path = cwd.joinpath(temp_name)
if temp_path.exists():
return cwd, temp_path
if not temp_path.exists():
temp_path.mkdir()
return cwd, temp_path
TRADER_DIR, TEMP_DIR = _get_trader_dir("trader")
def get_file_path(filename: str):
"""
Get path for temp file with filename.
"""
return TEMP_DIR.joinpath(filename)
def get_folder_path(folder_name: str):
"""
Get path for temp folder with folder name.
"""
folder_path = TEMP_DIR.joinpath(folder_name)
if not folder_path.exists():
folder_path.mkdir()
return folder_path
def load_json(filename: str):
"""
Load data from json file in temp path.
"""
filepath = get_file_path(filename)
if filepath.exists():
with open(filepath, mode="r", encoding="UTF-8") as f:
data = json.load(f)
return data
else:
save_json(filename, {})
return {}
def save_json(filename: str, data: dict):
"""
Save data into json file in temp path.
"""
filepath = get_file_path(filename)
with open(filepath, mode="w+", encoding="UTF-8") as f:
json.dump(
data,
f,
indent=4,
ensure_ascii=False
)
def round_to(value: float, target: float) -> float:
"""
Round price to price tick value.
"""
value = Decimal(str(value))
target = Decimal(str(target))
rounded = float(int(round(value / target)) * target)
return rounded
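# Minimal usage sketch (not part of the original module): the prices and tick sizes
# below are made-up values, used only to show how round_to() snaps a price to an
# exchange tick size via Decimal arithmetic.
if __name__ == '__main__':
    print(round_to(123.456789, 0.01))    # -> 123.46
    print(round_to(0.000123456, 1e-06))  # -> 0.000123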
|
settings.py | liaopeiyuan/ml-arsenal-public | 280 | 11170699 | from datetime import datetime
PATH= "kail"
"""
Local
"""
if PATH=='kail':
print("Using paths on kail-main")
CHECKPOINTS='/data/kaggle/salt/checkpoints'
DATA='/data/kaggle/salt/'
RESULT='/data/ml-arsenal/projects/TGS_salt'
CODE='/data/ml-arsenal'
CUDA_DEVICES='0,1'
MODE='gpu'
GRAPHICS=True
IDENTIFIER = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
if PATH=='kail0':
print("Using paths on kail-main w. GTX 1080 Ti")
CHECKPOINTS='/data/kaggle/salt/checkpoints'
DATA='/data/kaggle/salt/'
RESULT='/data/ml-arsenal/projects/TGS_salt'
CODE='/data/ml-arsenal'
CUDA_DEVICES='0'
MODE='gpu'
GRAPHICS=True
IDENTIFIER = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
if PATH=='kail1':
print("Using paths on kail-main w. GTX 1070")
CHECKPOINTS='/data/kaggle/salt/checkpoints'
DATA='/data/kaggle/salt/'
RESULT='/data/ml-arsenal/projects/TGS_salt'
CODE='/data/ml-arsenal'
CUDA_DEVICES='1'
MODE='gpu'
GRAPHICS=True
IDENTIFIER = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
if PATH=='local':
print("Using local paths on alexanderliao@alexanderliao-Thinkpad-P50.")
CHECKPOINTS='/home/alexanderliao/data/Kaggle/competitions/tgs-salt-identification-challenge/checkpoints'
DATA='/home/alexanderliao/data/Kaggle/competitions/tgs-salt-identification-challenge'
RESULT='/home/alexanderliao/data/GitHub/ml-arsenal/projects/TGS_salt'
CODE='/home/alexanderliao/data/GitHub/ml-arsenal'
CUDA_DEVICES='0'
MODE='gpu'
GRAPHICS=True
IDENTIFIER = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
if PATH=='gcp0':
print("Using GCP paths on liaop20@kaggle.")
CHECKPOINTS='/home/liaop20/data/salt/checkpoints'
DATA='/home/liaop20/data/salt'
RESULT='/home/liaop20/ml-arsenal/projects/TGS_salt'
CODE='/home/liaop20/ml-arsenal'
CUDA_DEVICES='0'
MODE='gpu'
GRAPHICS=False
IDENTIFIER = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
if PATH=='gcp1':
print("Using GCP paths on liaop20@kaggle.")
CHECKPOINTS='/home/liaop20/data/salt/checkpoints'
DATA='/home/liaop20/data/salt'
RESULT='/home/liaop20/ml-arsenal/projects/TGS_salt'
CODE='/home/liaop20/ml-arsenal'
CUDA_DEVICES='1'
MODE='gpu'
GRAPHICS=False
IDENTIFIER = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
if PATH=='gcp2':
print("Using GCP paths on liaop20@kaggle.")
CHECKPOINTS='/home/liaop20/data/salt/checkpoints'
DATA='/home/liaop20/data/salt'
RESULT='/home/liaop20/ml-arsenal/projects/TGS_salt'
CODE='/home/liaop20/ml-arsenal'
CUDA_DEVICES='2'
MODE='gpu'
GRAPHICS=False
IDENTIFIER = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
if PATH=='gcp3':
print("Using GCP paths on liaop20@kaggle.")
CHECKPOINTS='/home/liaop20/data/salt/checkpoints'
DATA='/home/liaop20/data/salt'
RESULT='/home/liaop20/ml-arsenal/projects/TGS_salt'
CODE='/home/liaop20/ml-arsenal'
CUDA_DEVICES='3'
MODE='gpu'
GRAPHICS=False
IDENTIFIER = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
if PATH=='gcp':
print("Using GCP paths on liaop20@kaggle.")
CHECKPOINTS='/home/liaop20/data/salt/checkpoints'
DATA='/home/liaop20/data/salt'
RESULT='/home/liaop20/ml-arsenal/projects/TGS_salt'
CODE='/home/liaop20/ml-arsenal'
CUDA_DEVICES='0,1,2,3'
MODE='gpu'
GRAPHICS=False
IDENTIFIER = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
if PATH=='aaron':
print("Using paths on Aaron's PC.")
CHECKPOINTS='/mydisk/Programming/Git/salt/checkpoints'
DATA='/mydisk/Programming/Git/salt'
RESULT='/mydisk/Programming/Git/ml-arsenal/projects/TGS_salt'
CODE='/mydisk/Programming/Git/ml-arsenal'
CUDA_DEVICES='0'
MODE='gpu'
GRAPHICS=False
IDENTIFIER = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
print('')
|
flaskshop/account/utils.py | maquinuz/flask-shop | 141 | 11170712 | from functools import wraps
import phonenumbers
from flask import flash, abort
from flask_login import current_user
from phonenumbers.phonenumberutil import is_possible_number
from wtforms import ValidationError
from flaskshop.constant import Permission
class PhoneNumber(phonenumbers.PhoneNumber):
"""
A extended version of phonenumbers.PhoneNumber that provides
some neat and more pythonic, easy to access methods. This makes using a
PhoneNumber instance much easier, especially in templates and such.
"""
format_map = {
"E164": phonenumbers.PhoneNumberFormat.E164,
"INTERNATIONAL": phonenumbers.PhoneNumberFormat.INTERNATIONAL,
"NATIONAL": phonenumbers.PhoneNumberFormat.NATIONAL,
"RFC3966": phonenumbers.PhoneNumberFormat.RFC3966,
}
@classmethod
def from_string(cls, phone_number, region=None):
phone_number_obj = cls()
if region is None:
region = None
phonenumbers.parse(
number=phone_number,
region=region,
keep_raw_input=True,
numobj=phone_number_obj,
)
return phone_number_obj
def __unicode__(self):
format_string = "E164"
fmt = self.format_map[format_string]
return self.format_as(fmt)
def is_valid(self):
"""
checks whether the number supplied is actually valid
"""
return phonenumbers.is_valid_number(self)
def format_as(self, format):
return phonenumbers.format_number(self, format)
@property
def as_international(self):
return self.format_as(phonenumbers.PhoneNumberFormat.INTERNATIONAL)
@property
def as_e164(self):
return self.format_as(phonenumbers.PhoneNumberFormat.E164)
@property
def as_national(self):
return self.format_as(phonenumbers.PhoneNumberFormat.NATIONAL)
@property
def as_rfc3966(self):
return self.format_as(phonenumbers.PhoneNumberFormat.RFC3966)
def __len__(self):
return len(self.__unicode__())
def __eq__(self, other):
"""
Override parent equality because we store only string representation
of phone number, so we must compare only this string representation
"""
if (
isinstance(other, PhoneNumber)
or isinstance(other, phonenumbers.PhoneNumber)
or isinstance(other, str)
):
format_string = "E164"
default_region = None
fmt = self.format_map[format_string]
if isinstance(other, str):
# convert string to phonenumbers.PhoneNumber
# instance
try:
other = phonenumbers.parse(other, region=default_region)
except phonenumbers.NumberParseException:
# Conversion is not possible, thus not equal
return False
other_string = phonenumbers.format_number(other, fmt)
return self.format_as(fmt) == other_string
else:
return False
def __hash__(self):
return hash(self.__unicode__())
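# Hedged usage sketch (not part of the original module); the number below is a dummy
# US example. It shows how PhoneNumber.from_string() parses an E.164 string and how
# the convenience properties expose the common formats.
#
#   number = PhoneNumber.from_string("+14155552671")
#   number.is_valid()           # True
#   number.as_e164              # "+14155552671"
#   number.as_international    # "+1 415-555-2671"
#   number == "+14155552671"    # True, via the custom __eq__ above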
def to_python(value):
if value in (None, ""): # None or ''
phone_number = value
elif value and isinstance(value, str):
try:
phone_number = PhoneNumber.from_string(phone_number=value)
except phonenumbers.NumberParseException:
# the string provided is not a valid PhoneNumber.
phone_number = PhoneNumber(raw_input=value)
elif isinstance(value, phonenumbers.PhoneNumber) and not isinstance(
value, PhoneNumber
):
phone_number = PhoneNumber()
phone_number.merge_from(value)
elif isinstance(value, PhoneNumber):
phone_number = value
else:
# TODO: this should somehow show that it has invalid data, but not
# completely die for bad data in the database.
# (Same for the NumberParseException above)
phone_number = None
return phone_number
def validate_possible_number(value):
phone_number = to_python(value)
if phone_number and not is_possible_number(phone_number):
raise ValidationError("The phone number entered is not valid.")
def permission_required(permission):
def decorator(f):
@wraps(f)
def _deco(*args, **kwargs):
if current_user.is_authenticated and current_user.can(permission):
return f(*args, **kwargs)
abort(403)
return _deco
return decorator
def admin_required(f):
return permission_required(Permission.ADMINISTER)(f)
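# Illustrative only (not from flask-shop): a hypothetical admin view protected by the
# decorators above; the blueprint name and route are assumptions made for this sketch.
#
#   from flask import Blueprint
#
#   admin_bp = Blueprint("dashboard", __name__)
#
#   @admin_bp.route("/dashboard/orders")
#   @admin_required
#   def list_orders():
#       ...  # only users with Permission.ADMINISTER reach this point; others get a 403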
|
src/chap10-InfraAsCode/pulumi/__main__.py | VestiDev/python_devops_2019-book | 294 | 11170724
import json
import mimetypes
import os
from pulumi import export, FileAsset
from pulumi_aws import s3, route53, acm, cloudfront
import pulumi
config = pulumi.Config('proj1') # proj1 is project name defined in Pulumi.yaml
content_dir = config.require('local_webdir') # www-staging or www-prod
domain_name = config.require('domain_name') # staging.devops4all.dev or www.devops4all.dev
dns_zone_id = config.require('dns_zone_id')
web_bucket = s3.Bucket('s3-website-bucket', website={
"index_document": "index.html"
})
for file in os.listdir(content_dir):
filepath = os.path.join(content_dir, file)
mime_type, _ = mimetypes.guess_type(filepath)
obj = s3.BucketObject(file,
bucket=web_bucket.id,
source=FileAsset(filepath),
content_type=mime_type)
def public_read_policy_for_bucket(bucket_name):
return json.dumps({
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": "*",
"Action": [
"s3:GetObject"
],
"Resource": [
f"arn:aws:s3:::{bucket_name}/*",
]
}]
})
web_bucket_id = web_bucket.id
web_bucket_policy = s3.BucketPolicy("bucket-policy",
bucket=web_bucket_id,
policy=web_bucket_id.apply(public_read_policy_for_bucket))
# Split a domain name into its subdomain and parent domain names.
# e.g. "www.example.com" => "www", "example.com".
def get_domain_and_subdomain(domain):
names = domain.split(".")
if len(names) < 3:
return('', domain)
subdomain = names[0]
parent_domain = ".".join(names[1:])
return (subdomain, parent_domain)
(subdomain, parent_domain) = get_domain_and_subdomain(domain_name)
# zone = route53.Zone("route53_zone", name=parent_domain)
# create ACM certificate
cert = acm.Certificate('certificate', domain_name=domain_name, validation_method='DNS')
domain_validation_options = cert.domain_validation_options[0]
# Create a DNS record to prove that we _own_ the domain we're requesting a certificate for.
cert_validation_dns_record = route53.Record(
'cert-validation-record',
name=domain_validation_options['resourceRecordName'],
zone_id=dns_zone_id,
type=domain_validation_options['resourceRecordType'],
records=[domain_validation_options['resourceRecordValue']],
ttl=600)
# This is a _special_ resource that waits for ACM to complete validation via the DNS record
# checking for a status of "ISSUED" on the certificate itself. No actual resources are
# created (or updated or deleted).
cert_validation_completion = acm.CertificateValidation('cert-validation-completion',
certificate_arn=cert.arn,
validation_record_fqdns=[cert_validation_dns_record.fqdn])
cert_arn = cert_validation_completion.certificate_arn
# Create S3 bucket that will contain the CDN's request logs.
log_bucket = s3.Bucket('cdn-log-bucket', acl='private')
# Create CloudFront distribution pointing to web S3 bucket
cloudfront_distro = cloudfront.Distribution ( 'cloudfront-distro',
enabled=True,
aliases=[ domain_name ],
origins=[
{
'originId': web_bucket.arn,
'domainName': web_bucket.website_endpoint,
'customOriginConfig': {
'originProtocolPolicy': "http-only",
'httpPort': 80,
'httpsPort': 443,
'originSslProtocols': ["TLSv1.2"],
},
},
],
default_root_object="index.html",
default_cache_behavior={
'targetOriginId': web_bucket.arn,
'viewerProtocolPolicy': "redirect-to-https",
'allowedMethods': ["GET", "HEAD", "OPTIONS"],
'cachedMethods': ["GET", "HEAD", "OPTIONS"],
'forwardedValues': {
'cookies': { 'forward': "none" },
'queryString': False,
},
'minTtl': 0,
'defaultTtl': 600,
'maxTtl': 600,
},
price_class="PriceClass_100",
custom_error_responses=[
{ 'errorCode': 404, 'responseCode': 404, 'responsePagePath': "/404.html" },
],
restrictions={
'geoRestriction': {
'restrictionType': "none",
},
},
viewer_certificate={
'acmCertificateArn': cert_arn,
'sslSupportMethod': "sni-only",
},
logging_config={
'bucket': log_bucket.bucket_domain_name,
'includeCookies': False,
'prefix': domain_name,
})
# Create DNS record for the deployed site pointing to CloudFront DNS name
site_dns_record = route53.Record(
'site-dns-record',
name=subdomain,
zone_id=dns_zone_id,
type="A",
aliases=[
{
'name': cloudfront_distro.domain_name,
'zoneId': cloudfront_distro.hosted_zone_id,
'evaluateTargetHealth': True
}
])
#export('domain_validation_options', domain_validation_options)
export('web_bucket_id', web_bucket.id)
export('log_bucket_id', log_bucket.id)
export('website_url', web_bucket.website_endpoint)
export('cloudfront_domain', cloudfront_distro.domain_name) |
components/isceobj/Pause/__init__.py | vincentschut/isce2 | 1,133 | 11170769 | #!/usr/bin/env python3
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright 2012 California Institute of Technology. ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# United States Government Sponsorship acknowledged. This software is subject to
# U.S. export control laws and regulations and has been classified as 'EAR99 NLR'
# (No [Export] License Required except when exporting to an embargoed country,
# end user, or in support of a prohibited end use). By downloading this software,
# the user agrees to comply with all applicable U.S. export laws and regulations.
# The user has the responsibility to obtain export licenses, or other export
# authority as may be required before exporting this software to any 'EAR99'
# embargoed foreign country or citizen of those countries.
#
# Author: <NAME>
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## pause is an input() wrapper
def pause(cont="go",ex="exit",ignore=False, message="", bell=True):
"""pause function. Pauses execution awaiting input.
Takes up to three optional arguments to set the action strings:
cont = first positional or named arg whose value is a string that causes execution
to continue.
Default cont="go"
ex = second positional or named arg whose value is a string that causes execution
to stop.
Default ex="exit"
    ignore = third positional or named arg whose value causes the pause to be ignored or
             paid attention to.
             Default False
    message = an optional one-time message to send to the user
bell = True: ring the bell when pause is reached.
"""
if not ignore:
x = ""
if message or bell:
message += chr(7)*bell
print(message)
while x != cont:
try:
                x = input(
"Type %s to continue; %s to exit: " % (cont, ex)
)
except KeyboardInterrupt:
return None
if x == ex:
# return the "INTERUPT" system error.
import errno
import sys
return sys.exit(errno.EINTR)
pass
pass
return None
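# Small illustration (not part of ISCE): pausing between hypothetical processing steps
# so the operator can type "go" to continue or "exit" to abort; run_step() is a dummy
# placeholder used only for this sketch.
#
#   for step in ("preprocess", "resample", "unwrap"):
#       run_step(step)
#       pause(message="finished step: %s" % step)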
|
wsgi.py | Yamtt/zmirror | 2,550 | 11170775
#!/usr/bin/env python3
# coding=utf-8
import sys
import os
sys.path.insert(0, os.path.dirname(__file__))
if os.path.dirname(__file__) != '':
os.chdir(os.path.dirname(__file__))
from zmirror.zmirror import app as application
__author__ = 'Aploium <<EMAIL>>'
def main():
from zmirror.zmirror import my_host_port, built_in_server_host, \
built_in_server_debug, built_in_server_extra_params, warnprint, \
errprint
    warnprint("You are running zmirror directly, which is NOT recommended for a PRODUCTION environment.\n"
              "Please deploy it using Apache. You can find a deployment tutorial here:\n"
              "https://github.com/aploium/zmirror/wiki/%E9%83%A8%E7%BD%B2%E6%94%AF%E6%8C%81HTTPS%E5%92%8CHTTP2.0%E7%9A%84%E9%95%9C%E5%83%8F")
if my_host_port is None:
my_host_port = 80
try:
application.run(
port=my_host_port,
            # If multi-process mode is enabled in the config file, turn multi-threading off;
            # otherwise multi-threading is enabled by default
            threaded="processes" not in built_in_server_extra_params,
            # If you want to expose this program directly to the public network,
            # add the following two settings to the end of config.py
            # !!WARNING!! Never modify config_default.py, otherwise the program can no longer be upgraded via git pull
            #
            # built_in_server_host='0.0.0.0'
            # built_in_server_debug=False
            #
            # ps: the settings must start at the beginning of the line, with no leading spaces
            # !!WARNING!! Never modify this file either, otherwise the program can no longer be upgraded via git pull
            debug=built_in_server_debug,  # debug mode is enabled by default
            # By default only local access is allowed; if you want to allow access from
            # the public network, modify the config file as described in the comments above
host=built_in_server_host,
**built_in_server_extra_params # extra params
)
except OSError as e:
if e.errno in (98, 10013): # Address already in use, 98 for linux, 10013 for win
errprint("Port {port} was occupied by other program, please close it.\n"
"You can see which process is using your port by the following command:\n"
" Linux: netstat -apn |grep \":{port}\"\n"
" Windows: netstat -ano |find \":{port}\"\n\n"
"Or change zmirror\'s port: change(add, if not exist) the `my_host_port` setting in `config.py`\n"
"eg: my_host_port=81".format(port=my_host_port))
exit()
else:
raise
if __name__ == '__main__':
main()
|
examples/excalibur_detector_modules.py | acolinisi/h5py | 1,657 | 11170789
'''Virtual datasets: The 'Excalibur' use case
https://support.hdfgroup.org/HDF5/docNewFeatures/VDS/HDF5-VDS-requirements-use-cases-2014-12-10.pdf
'''
import h5py
raw_files = ["stripe_%d.h5" % stripe for stripe in range(1,7)]# get these names
in_key = 'data' # where is the data at the input?
outfile = 'full_detector.h5'
out_key = 'full_frame'
in_sh = h5py.File(raw_files[0], 'r')[in_key].shape # get the input shape
dtype = h5py.File(raw_files[0], 'r')[in_key].dtype # get the datatype
# now generate the output shape
vertical_gap = 10 # pixels spacing in the vertical
nfiles = len(raw_files)
nframes = in_sh[0]
width = in_sh[2]
height = (in_sh[1]*nfiles) + (vertical_gap*(nfiles-1))
out_sh = (nframes, height, width)
# Virtual target is a representation of the output dataset
layout = h5py.VirtualLayout(shape=out_sh, dtype=dtype)
offset = 0 # initial offset
for i in range(nfiles):
print("frame_number is: %s" % str(i)) # for feedback
vsource = h5py.VirtualSource(raw_files[i], in_key, shape=in_sh) #a representation of the input dataset
layout[:, offset:(offset + in_sh[1]), :] = vsource
offset += in_sh[1]+vertical_gap # increment the offset
# Create an output file.
with h5py.File(outfile, 'w', libver='latest') as f:
f.create_virtual_dataset(out_key, layout, fillvalue=0x1)
|
moto/elasticache/responses.py | symroe/moto | 5,460 | 11170816
from moto.core.responses import BaseResponse
from .exceptions import PasswordTooShort, PasswordRequired
from .models import elasticache_backends
class ElastiCacheResponse(BaseResponse):
"""Handler for ElastiCache requests and responses."""
@property
def elasticache_backend(self):
"""Return backend instance specific for this region."""
return elasticache_backends[self.region]
def create_user(self):
params = self._get_params()
user_id = params.get("UserId")
user_name = params.get("UserName")
engine = params.get("Engine")
passwords = params.get("Passwords", [])
no_password_required = self._get_bool_param("NoPasswordRequired", False)
password_required = not no_password_required
if password_required and not passwords:
raise PasswordRequired
if any([len(p) < 16 for p in passwords]):
raise PasswordTooShort
access_string = params.get("AccessString")
user = self.elasticache_backend.create_user(
user_id=user_id,
user_name=user_name,
engine=engine,
passwords=passwords,
access_string=access_string,
no_password_required=no_password_required,
)
template = self.response_template(CREATE_USER_TEMPLATE)
return template.render(user=user)
def delete_user(self):
params = self._get_params()
user_id = params.get("UserId")
user = self.elasticache_backend.delete_user(user_id=user_id)
template = self.response_template(DELETE_USER_TEMPLATE)
return template.render(user=user)
def describe_users(self):
params = self._get_params()
user_id = params.get("UserId")
users = self.elasticache_backend.describe_users(user_id=user_id)
template = self.response_template(DESCRIBE_USERS_TEMPLATE)
return template.render(users=users)
USER_TEMPLATE = """<UserId>{{ user.id }}</UserId>
<UserName>{{ user.name }}</UserName>
<Status>{{ user.status }}</Status>
<Engine>{{ user.engine }}</Engine>
<MinimumEngineVersion>{{ user.minimum_engine_version }}</MinimumEngineVersion>
<AccessString>{{ user.access_string }}</AccessString>
<UserGroupIds>
{% for usergroupid in user.usergroupids %}
<member>{{ usergroupid }}</member>
{% endfor %}
</UserGroupIds>
<Authentication>
{% if user.no_password_required %}
<Type>no-password</Type>
{% else %}
<Type>password</Type>
<PasswordCount>{{ user.passwords|length }}</PasswordCount>
{% endif %}
</Authentication>
<ARN>{{ user.arn }}</ARN>"""
CREATE_USER_TEMPLATE = (
"""<CreateUserResponse xmlns="http://elasticache.amazonaws.com/doc/2015-02-02/">
<ResponseMetadata>
<RequestId>1549581b-12b7-11e3-895e-1334aEXAMPLE</RequestId>
</ResponseMetadata>
<CreateUserResult>
"""
+ USER_TEMPLATE
+ """
</CreateUserResult>
</CreateUserResponse>"""
)
DELETE_USER_TEMPLATE = (
"""<DeleteUserResponse xmlns="http://elasticache.amazonaws.com/doc/2015-02-02/">
<ResponseMetadata>
<RequestId>1549581b-12b7-11e3-895e-1334aEXAMPLE</RequestId>
</ResponseMetadata>
<DeleteUserResult>
"""
+ USER_TEMPLATE
+ """
</DeleteUserResult>
</DeleteUserResponse>"""
)
DESCRIBE_USERS_TEMPLATE = (
"""<DescribeUsersResponse xmlns="http://elasticache.amazonaws.com/doc/2015-02-02/">
<ResponseMetadata>
<RequestId>1549581b-12b7-11e3-895e-1334aEXAMPLE</RequestId>
</ResponseMetadata>
<DescribeUsersResult>
<Users>
{% for user in users %}
<member>
"""
+ USER_TEMPLATE
+ """
</member>
{% endfor %}
</Users>
<Marker></Marker>
</DescribeUsersResult>
</DescribeUsersResponse>"""
)
|
example.py | wangxiaoying/python-flamegraph | 404 | 11170831
"""
Example usage of flamegraph.
To view a flamegraph run these commands:
$ python example.py
$ flamegraph.pl perf.log > perf.svg
$ inkview perf.svg
"""
import time
import sys
import flamegraph
def foo():
time.sleep(.1)
bar()
def bar():
time.sleep(.05)
if __name__ == "__main__":
flamegraph.start_profile_thread(fd=open("./perf.log", "w"))
N = 10
for x in xrange(N):
print "{}/{}".format(x, N)
foo()
|
docs3/source/conf.py | jmscslgroup/rosbagpy | 107 | 11170881
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import sphinx_rtd_theme
sys.path.insert(0, os.path.abspath('../..'))
from pathlib import Path
import warnings
from datetime import datetime
sys.setrecursionlimit(1500)
HERE = Path(__file__).parent
sys.path[:0] = [str(HERE.parent), str(HERE / 'extensions')]
import sphinx_bootstrap_theme
from recommonmark.parser import CommonMarkParser
def setup(app):
app.add_css_file("style.css") # also can be a full URL
app.add_css_file("animate.min.css") # also can be a full URL
app.add_css_file("animate.css") # also can be a full URL
app.add_css_file("font-awesome.css") # also can be a full URL
app.add_css_file("font-awesome.min.css") # also can be a full URL
app.add_css_file("venobox.css") # also can be a full URL
app.add_css_file("fontfamily.css") # also can be a full URL
app.add_css_file("ionicons.min.css") # also can be a full URL
app.add_js_file("wow.min.js")
app.add_js_file("wow.js")
app.add_js_file("hoverIntent.js")
app.add_js_file("jquery.easing.min.js")
app.add_js_file("jquery.min.js")
app.add_js_file("superfish.min.js")
app.add_js_file("validate.js")
app.add_js_file("venobox.js")
app.add_js_file("venobox.min.js")
app.add_js_file("main.js")
app.add_config_value('markdown_parser_config', {
'auto_toc_tree_section': 'Content',
'enable_auto_doc_ref': True,
'enable_auto_toc_tree': True,
'enable_eval_rst': True,
'enable_inline_math': True,
'enable_math': True,
}, True)
# -- Project information -----------------------------------------------------
project = 'bagpy: Reading rosbag files simplified'
copyright = '2020, <NAME>'
author = '<NAME>'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.3.12'
# The full version, including alpha/beta/rc tags.
release = u'beta'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.coverage',
'sphinx.ext.mathjax', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', 'sphinx.ext.autosummary', 'sphinx_autodoc_typehints', # needs to be after napoleon
'sphinx_rtd_theme', 'm2r2', 'ytsphinx.youtube']
# Generate the API documentation when building
autosummary_generate = True
autodoc_member_order = 'bysource'
# autodoc_default_flags = ['members']
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_use_rtype = True # having a separate entry generally helps readability
napoleon_use_param = True
napoleon_custom_sections = [('Params', 'Parameters')]
todo_include_todos = False
api_dir = HERE / 'api' # function_images
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# (Optional) Logo. Should be small enough to fit the navbar (ideally 24x24).
# Path should be relative to the ``_static`` files directory.
html_logo = "favicon.png"
html_favicon = "favicon.ico"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
# Navigation bar title. (Default: ``project`` value)
#'navbar_title': "Bagpy",
# Tab name for entire site. (Default: "Site")
#'navbar_site_name': "Site",
# Tab name for the current pages TOC. (Default: "Page")
'navbar_pagenav_name': "Index",
# A list of tuples containing pages or urls to link to.
# Valid tuples should be in the following forms:
# (name, page) # a link to a page
# (name, "/aa/bb", 1) # a link to an arbitrary relative url
# (name, "http://example.com", True) # arbitrary absolute url
# Note the "1" or "True" value above as the third argument to indicate
# an arbitrary url.
# 'navbar_links': [
# ("Examples", "examples"),
# ("Link", "http://example.com", True),
# ],
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': 2,
# Include hidden TOCs in Site navbar?
#
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
#
# Values: "true" (default) or "false"
'globaltoc_includehidden': "true",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
'navbar_class': "navbar",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
'navbar_fixed_top': "true",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': "nav",
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing (default) or the name of a valid theme such
# such as "cosmo" or "sandstone".
#
# Example themes:
# * flatly
# * sandstone (v3 only)
# * united
# * yeti (v3 only)
'bootswatch_theme': "flatly",
# Choose Bootstrap version.
# Values: "3" (default) or "2" (in quotes)
'bootstrap_version': "3",
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
#html_sidebars = {
# '**': [
# 'relations.html', # needs 'show_related': True theme option to display
# 'searchbox.html',
# ]
# }
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'bagpydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'bagpydoc.tex', u'bagpydoc Documentation',
u'<NAME>', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'bagpydoc', u'bagpydoc Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'bagpydoc', u'bagpy Documentation',
author, 'bagpy', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
|
bsonvspb/test.py | yutiansut/opentick | 147 | 11170901 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''Python client for opentick.'''
import time
import json
from bson import BSON
import message_pb2
value = [99999999, 1.22222, 1.3222222, 1.422222]
values = []
for x in range(10):
values.append(value)
msg = {'0': 'test', '1': 1, '2': values}
print('json')
now = time.time()
for x in range(100000):
body = json.dumps(msg)
print('body size ', len(body))
print('serialize ', time.time() - now)
now = time.time()
for x in range(100000):
msg = json.loads(body)
print('deserialize', time.time() - now)
print('bson')
now = time.time()
for x in range(100000):
body = BSON.encode(msg)
print('body size ', len(body))
print('serialize ', time.time() - now)
now = time.time()
for x in range(100000):
msg = BSON(body).decode()
print('deserialize', time.time() - now)
m = message_pb2.Message()
m.cmd = 'test'
m.prepared = 1
value = message_pb2.Fields()
value.values.extend([message_pb2.Field(n=1), message_pb2.Field(d=1.2)])
value.values.extend([message_pb2.Field(d=1.3), message_pb2.Field(d=1.4)])
values = []
for x in range(10):
values.append(value)
m.values.extend(values)
print('pb:')
now = time.time()
for x in range(100000):
body = m.SerializeToString()
print('body size ', len(body))
print('serialize ', time.time() - now)
now = time.time()
for x in range(100000):
y = message_pb2.Message()
y.ParseFromString(body)
print('deserialize', time.time() - now)
|
test/run/t275.py | timmartin/skulpt | 2,671 | 11170905 | class X:
pass
x = X()
print x.__class__
print str(x.__class__)
print repr(x.__class__)
|
locations/spiders/aurecongroup.py | nbeecher/alltheplaces | 297 | 11170918 | # -*- coding: utf-8 -*-
import scrapy
from locations.items import GeojsonPointItem
class AureconGroupSpider(scrapy.Spider):
name = "aurecongroup"
allowed_domains = ["www.aurecon.com"]
download_delay = 0.1
start_urls = (
"https://www.aurecongroup.com/locations",
)
def parse(self, response):
for location in response.xpath('.//h4'):
addr = location.xpath('.//following-sibling::div')[0].xpath('.//div/span/following-sibling::div')[0]
addr = ' '.join([addr.xpath('.//span/text()').extract()[i].replace('\t', '').replace('\n', '').replace('\r', '') for i in range(2)])
            coordinates = location.xpath('.//following-sibling::div//a[@target="_blank"]/@href').extract_first()  # may be None; guarded below
properties = {
'ref': location.xpath('.//following-sibling::div//span[@itemprop="telephone"]/text()').extract_first().strip(),
'brand': 'Aurecon Group',
'city': location.xpath('.//strong/text()').extract_first().replace('\t', '').replace('\n', '').replace('\r', ''),
'addr_full': addr,
'phone': location.xpath('.//following-sibling::div//span[@itemprop="telephone"]/text()').extract_first().strip(),
}
if coordinates:
coordinates = (coordinates.split('=')[1]).split(',')
properties['lat'] = float(coordinates[0])
properties['lon'] = float(coordinates[1])
yield GeojsonPointItem(**properties)
|
kansha/security.py | AnomalistDesignLLC/kansha | 161 | 11170923
# -*- coding:utf-8 -*-
#--
# Copyright (c) 2012-2014 Net-ng.
# All rights reserved.
#
# This software is licensed under the BSD License, as described in
# the file LICENSE.txt, which you should have received as part of
# this distribution.
#--
from base64 import b64encode, b64decode
from Crypto import Random
from peak.rules import when
from Crypto.Cipher import Blowfish
from nagare import security
from nagare.security import form_auth, common
from .card import Card # Do not remove
from .board import Board # Do not remove
from .user.usermanager import UserManager
from .column import CardsCounter, Column # Do not remove
from .board import COMMENTS_PUBLIC, COMMENTS_MEMBERS
class Unauthorized(Exception):
pass
class Authentication(form_auth.Authentication):
KEY = '<KEY>'
def _create_user(self, username):
if username is not None:
return UserManager.get_app_user(username)
def check_password(self, username, _, password):
user = UserManager.get_by_username(username)
if not user or not user.email:
return False
return user.check_password(password)
def denies(self, detail):
raise Unauthorized()
def cookie_decode(self, cookie):
try:
a_iv, token = cookie.split(':')
iv = b64decode(a_iv)
cipher = Blowfish.new(self.KEY, Blowfish.MODE_CBC, iv)
cookie = cipher.decrypt(b64decode(token)).replace('@','')
except ValueError:
return (None, None)
return super(Authentication, self).cookie_decode(cookie)
def cookie_encode(self, *ids):
cookie = super(Authentication, self).cookie_encode(*ids)
bs = Blowfish.block_size
iv = Random.new().read(bs)
cipher = Blowfish.new(self.KEY, Blowfish.MODE_CBC, iv)
# pad cookie to block size.
# Cookie is ascii base64 + ':', so we can safely pad with '@'.
missing_bytes = bs - (len(cookie) % bs)
cookie += '@' * missing_bytes
return '%s:%s' % (
b64encode(iv),
b64encode(cipher.encrypt(cookie))
)
class Rules(common.Rules):
@when(common.Rules.has_permission, "user is None")
def _(self, user, perm, subject):
"""Default security, if user is not logged return False"""
return False
@when(common.Rules.has_permission, "user and perm == 'view' and isinstance(subject, Board)")
@when(common.Rules.has_permission, "user is None and perm == 'view' and isinstance(subject, Board)")
def _(self, user, perm, board):
"""Test if user can see the board."""
return board.is_open or (user is not None and board.has_member(user) and not board.archived)
@when(common.Rules.has_permission, "user and perm == 'manage' and isinstance(subject, Board)")
def _(self, user, perm, board):
"""Test if users is one of the board's managers"""
return board.has_manager(user)
@when(common.Rules.has_permission, "user and (perm == 'edit') and isinstance(subject, Board)")
def _(self, user, perm, board):
"""Test if users is one of the board's members"""
return board.has_member(user)
@when(common.Rules.has_permission, "user and (perm == 'leave') and isinstance(subject, Board)")
def _(self, user, perm, board):
"""Test if users is one of the board's members"""
return board.has_member(user)
@when(common.Rules.has_permission, "user and (perm == 'edit') and isinstance(subject, Column)")
def _(self, user, perm, column):
return security.has_permissions('edit', column.board)
@when(common.Rules.has_permission, "user and (perm == 'edit') and isinstance(subject, Card)")
def _(self, user, perm, card):
return card.can_edit(user)
@when(common.Rules.has_permission, "user and (perm == 'create_board')")
def _(self, user, perm, subject):
"""If user is logged, he is allowed to create a board"""
return True
@when(common.Rules.has_permission, "user and (perm == 'edit') and isinstance(subject, CardsCounter)")
def _(self, user, perm, CardsCounter):
return security.has_permissions('edit', CardsCounter.column)
class SecurityManager(Authentication, Rules):
def __init__(self, crypto_key):
Authentication.__init__(self)
Rules.__init__(self)
self.KEY = crypto_key
|
semtorch/models/modules/basic.py | WaterKnight1998/SemTorch | 145 | 11170933 | """Basic Module for Semantic Segmentation"""
import torch
import torch.nn as nn
from collections import OrderedDict
__all__ = ['_ConvBNPReLU', '_ConvBN', '_BNPReLU', '_ConvBNReLU', '_DepthwiseConv', 'InvertedResidual',
'SeparableConv2d']
_USE_FIXED_PAD = False
def _pytorch_padding(kernel_size, stride=1, dilation=1, **_):
if _USE_FIXED_PAD:
return 0 # FIXME remove once verified
else:
padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
# FIXME remove once verified
fp = _fixed_padding(kernel_size, dilation)
assert all(padding == p for p in fp)
return padding
def _fixed_padding(kernel_size, dilation):
kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
return [pad_beg, pad_end, pad_beg, pad_end]
class SeparableConv2d(nn.Module):
def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, relu_first=True,
bias=False, norm_layer=nn.BatchNorm2d):
super().__init__()
depthwise = nn.Conv2d(inplanes, inplanes, kernel_size,
stride=stride, padding=dilation,
dilation=dilation, groups=inplanes, bias=bias)
bn_depth = norm_layer(inplanes)
pointwise = nn.Conv2d(inplanes, planes, 1, bias=bias)
bn_point = norm_layer(planes)
if relu_first:
self.block = nn.Sequential(OrderedDict([('relu', nn.ReLU()),
('depthwise', depthwise),
('bn_depth', bn_depth),
('pointwise', pointwise),
('bn_point', bn_point)
]))
else:
self.block = nn.Sequential(OrderedDict([('depthwise', depthwise),
('bn_depth', bn_depth),
('relu1', nn.ReLU(inplace=True)),
('pointwise', pointwise),
('bn_point', bn_point),
('relu2', nn.ReLU(inplace=True))
]))
def forward(self, x):
return self.block(x)
class _ConvBNReLU(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
dilation=1, groups=1, relu6=False, norm_layer=nn.BatchNorm2d):
super(_ConvBNReLU, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias=False)
self.bn = norm_layer(out_channels)
self.relu = nn.ReLU6(True) if relu6 else nn.ReLU(True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class _ConvBNPReLU(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
dilation=1, groups=1, norm_layer=nn.BatchNorm2d):
super(_ConvBNPReLU, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias=False)
self.bn = norm_layer(out_channels)
self.prelu = nn.PReLU(out_channels)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.prelu(x)
return x
class _ConvBN(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
dilation=1, groups=1, norm_layer=nn.BatchNorm2d, **kwargs):
super(_ConvBN, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias=False)
self.bn = norm_layer(out_channels)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x
class _BNPReLU(nn.Module):
def __init__(self, out_channels, norm_layer=nn.BatchNorm2d):
super(_BNPReLU, self).__init__()
self.bn = norm_layer(out_channels)
self.prelu = nn.PReLU(out_channels)
def forward(self, x):
x = self.bn(x)
x = self.prelu(x)
return x
# -----------------------------------------------------------------
# For MobileNet
# -----------------------------------------------------------------
class _DepthwiseConv(nn.Module):
"""conv_dw in MobileNet"""
def __init__(self, in_channels, out_channels, stride, norm_layer=nn.BatchNorm2d, **kwargs):
super(_DepthwiseConv, self).__init__()
self.conv = nn.Sequential(
_ConvBNReLU(in_channels, in_channels, 3, stride, 1, groups=in_channels, norm_layer=norm_layer),
_ConvBNReLU(in_channels, out_channels, 1, norm_layer=norm_layer))
def forward(self, x):
return self.conv(x)
# -----------------------------------------------------------------
# For MobileNetV2
# -----------------------------------------------------------------
class InvertedResidual(nn.Module):
def __init__(self, in_channels, out_channels, stride, expand_ratio, dilation=1, norm_layer=nn.BatchNorm2d):
super(InvertedResidual, self).__init__()
assert stride in [1, 2]
self.use_res_connect = stride == 1 and in_channels == out_channels
layers = list()
inter_channels = int(round(in_channels * expand_ratio))
if expand_ratio != 1:
# pw
layers.append(_ConvBNReLU(in_channels, inter_channels, 1, relu6=True, norm_layer=norm_layer))
layers.extend([
# dw
_ConvBNReLU(inter_channels, inter_channels, 3, stride, dilation, dilation,
groups=inter_channels, relu6=True, norm_layer=norm_layer),
# pw-linear
nn.Conv2d(inter_channels, out_channels, 1, bias=False),
norm_layer(out_channels)])
self.conv = nn.Sequential(*layers)
def forward(self, x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
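# Quick shape-check sketch (not part of the original module); the tensor sizes are
# arbitrary. With stride 1 and matching channel counts the block adds the residual
# connection, so the output keeps the input shape.
#
#   x = torch.randn(2, 32, 64, 64)
#   block = InvertedResidual(32, 32, stride=1, expand_ratio=6)
#   block(x).shape   # torch.Size([2, 32, 64, 64])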
|
test/unit/awsume/awsumepy/lib/test_aws_files.py | ignatenkobrain/awsume | 654 | 11170937
import os
import json
import pytest
import argparse
from io import StringIO
from pathlib import Path
from unittest.mock import patch, MagicMock, mock_open
from awsume.awsumepy.lib import constants
from awsume.awsumepy.lib import aws_files
def test_get_aws_files():
args = argparse.Namespace(config_file=None, credentials_file=None)
config = {}
config_file, credentials_file = aws_files.get_aws_files(args, config)
assert config_file == str(Path(constants.DEFAULT_CONFIG_FILE))
assert credentials_file == str(Path(constants.DEFAULT_CREDENTIALS_FILE))
def test_get_aws_files_args():
args = argparse.Namespace(config_file='my/config/file', credentials_file='my/credentials/file')
config = {}
config_file, credentials_file = aws_files.get_aws_files(args, config)
assert config_file == str(Path('my/config/file'))
assert credentials_file == str(Path('my/credentials/file'))
def test_get_aws_files_config():
args = argparse.Namespace(config_file=None, credentials_file=None)
config = {
'config-file': 'my/config/file',
'credentials-file': 'my/credentials/file',
}
config_file, credentials_file = aws_files.get_aws_files(args, config)
assert config_file == str(Path('my/config/file'))
assert credentials_file == str(Path('my/credentials/file'))
@patch.dict('os.environ', {'AWS_CONFIG_FILE': 'my/config/file', 'AWS_SHARED_CREDENTIALS_FILE': 'my/credentials/file'}, clear=True)
def test_get_aws_files_environment():
args = argparse.Namespace(config_file=None, credentials_file=None)
config = {}
config_file, credentials_file = aws_files.get_aws_files(args, config)
assert config_file == str(Path('my/config/file'))
assert credentials_file == str(Path('my/credentials/file'))
@patch('builtins.open')
@patch('configparser.ConfigParser')
def test_add_section(ConfigParser: MagicMock, open: MagicMock):
parser = MagicMock()
ConfigParser.return_value = parser
parser.has_section.return_value = True
aws_files.add_section('section-name', {'key': 'value', 'key2': 'value2'}, 'file-name', overwrite=True)
parser.read.assert_called_once_with('file-name')
parser.remove_section.assert_called_once_with('section-name')
parser.add_section.assert_called_once_with('section-name')
assert parser.set.call_count == 3
parser.set.assert_any_call('section-name', 'manager', 'awsume')
parser.write.assert_called_once()
open.assert_called_once()
@patch.object(aws_files, 'safe_print')
@patch('builtins.open')
@patch('configparser.ConfigParser')
def test_add_section_no_overwrite(ConfigParser: MagicMock, open: MagicMock, safe_print: MagicMock):
parser = MagicMock()
ConfigParser.return_value = parser
parser.has_section.return_value = True
aws_files.add_section('section-name', {'key': 'value', 'key2': 'value2'}, 'file-name', overwrite=False)
parser.read.assert_called_once_with('file-name')
parser.remove_section.assert_not_called()
parser.add_section.assert_not_called()
parser.set.assert_not_called()
@patch.object(aws_files, 'safe_print')
@patch('builtins.open')
@patch('configparser.ConfigParser')
def test_add_section_new_section(ConfigParser: MagicMock, open: MagicMock, safe_print: MagicMock):
parser = MagicMock()
ConfigParser.return_value = parser
parser.has_section.return_value = False
aws_files.add_section('section-name', {'key': 'value', 'key2': 'value2'}, 'file-name')
parser.read.assert_called_once_with('file-name')
parser.remove_section.assert_not_called()
parser.add_section.assert_called_once_with('section-name')
assert parser.set.call_count == 3
parser.set.assert_any_call('section-name', 'manager', 'awsume')
parser.write.assert_called_once()
open.assert_called_once()
@patch('builtins.open')
@patch('configparser.ConfigParser')
def test_delete_section(ConfigParser: MagicMock, open: MagicMock):
parser = MagicMock()
ConfigParser.return_value = parser
parser.has_section.return_value = True
aws_files.delete_section('section-name', 'file-name')
parser.read.assert_called_once_with('file-name')
parser.remove_section.assert_called_once_with('section-name')
parser.write.assert_called_once()
open.assert_called_once()
@patch('builtins.open')
@patch('configparser.ConfigParser')
def test_delete_section_no_section(ConfigParser: MagicMock, open: MagicMock):
parser = MagicMock()
ConfigParser.return_value = parser
parser.has_section.return_value = False
aws_files.delete_section('section-name', 'file-name')
parser.read.assert_called_once_with('file-name')
parser.remove_section.assert_not_called()
myfile = """
[default]
region = us-east-1
mfa_serial = arn:aws:iam::123123123123:mfa/admin
"""
@patch('builtins.open')
def test_read_aws_file(open: MagicMock):
open.return_value = StringIO(myfile)
result = aws_files.read_aws_file('my/file/')
assert result == {
'default': {
'region': 'us-east-1',
'mfa_serial': 'arn:aws:iam::123123123123:mfa/admin',
},
}
|
plugins/services/api.py | ajenti/ajen | 3,777 | 11170950 | from jadi import interface
class Service():
"""
Basic class to store service informations.
"""
def __init__(self, manager):
self.id = None
self.name = None
self.manager = manager
self.state = None
self.running = None
class ServiceOperationError(Exception):
"""
Exception class for services.
"""
def __init__(self, inner):
self.inner = inner
def __unicode__(self):
return '[ServiceOperationError %s]' % self.inner
@interface
class ServiceManager():
"""
Abstract interface for all managers.
"""
id = None
name = None
def list(self):
raise NotImplementedError
def get_service(self, _id):
raise NotImplementedError
def start(self, _id):
raise NotImplementedError
def stop(self, _id):
raise NotImplementedError
def restart(self, _id):
raise NotImplementedError
def kill(self, _id):
raise NotImplementedError
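# Hedged sketch (not part of Ajenti): roughly how a concrete backend could implement the
# ServiceManager interface. The @component registration and the 'mydaemon' details are
# assumptions made only for illustration.
#
#   from jadi import component
#
#   @component(ServiceManager)
#   class DummyServiceManager(ServiceManager):
#       id = 'dummy'
#       name = 'Dummy'
#
#       def list(self):
#           svc = Service(self)
#           svc.id, svc.name = 'mydaemon', 'My Daemon'
#           svc.state, svc.running = 'running', True
#           yield svc
#
#       def get_service(self, _id):
#           return next(s for s in self.list() if s.id == _id)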
|
test/unit/conftest.py | jurecuhalev/snowflake-connector-python | 311 | 11170956
#
# Copyright (c) 2012-2021 Snowflake Computing Inc. All rights reserved.
#
import pytest
from snowflake.connector.telemetry_oob import TelemetryService
@pytest.fixture(autouse=True, scope="session")
def disable_oob_telemetry():
oob_telemetry_service = TelemetryService.get_instance()
original_state = oob_telemetry_service.enabled
oob_telemetry_service.disable()
yield None
if original_state:
oob_telemetry_service.enable()
|
libsaas/services/uservoice/articles.py | MidtownFellowship/libsaas | 155 | 11170964 | from libsaas import http, parsers
from libsaas.services import base
from . import resource
class ArticlesBase(resource.UserVoiceResource):
path = 'articles'
def wrap_object(self, obj):
return {'article': obj}
class Articles(ArticlesBase):
@base.apimethod
def search(self, page=None, per_page=None, query=None):
"""
Search for articles.
:var page: Where should paging start. If left as `None`, the first page
is returned.
:vartype page: int
:var per_page: How many objects sould be returned. If left as `None`,
10 objects are returned.
:vartype per_page: int
:var query: Search string.
:vartype query: str
"""
params = base.get_params(None, locals())
url = '{0}/{1}'.format(self.get_url(), 'search')
return http.Request('GET', url, params), parsers.parse_json
class Article(ArticlesBase):
@base.apimethod
def useful(self):
"""
Mark the article as useful.
"""
url = '{0}/{1}'.format(self.get_url(), 'useful')
return http.Request('POST', url), parsers.parse_json
|
Chapter_10/ch10_ex2.py | pauldevos/Mastering-Object-Oriented-Python-Second-Edition | 108 | 11170991
#!/usr/bin/env python3.7
"""
Mastering Object-Oriented Python 2e
Code Examples for Mastering Object-Oriented Python 2nd Edition
Chapter 10. Example 2. YAML. Base Definitions
"""
# Persistence Classes
# ========================================
from typing import List, Optional, Dict, Any
# Example 2: Cards
# ###################
from enum import Enum
class Suit(str, Enum):
Clubs = "♣"
Diamonds = "♦"
Hearts = "♥"
Spades = "♠"
class Card:
def __init__(self, rank: str, suit: Suit, hard: Optional[int]=None, soft: Optional[int]=None) -> None:
self.rank = rank
self.suit = suit
self.hard = hard or int(rank)
self.soft = soft or int(rank)
def __str__(self) -> str:
return f"{self.rank!s}{self.suit.value!s}"
class AceCard(Card):
def __init__(self, rank: str, suit: Suit) -> None:
super().__init__(rank, suit, 1, 11)
class FaceCard(Card):
def __init__(self, rank: str, suit: Suit) -> None:
super().__init__(rank, suit, 10, 10)
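# Hedged demo (not from the book's example file): a few cards built from the classes
# above, showing the string form and the hard/soft totals each class assigns.
#
#   cards = [AceCard('A', Suit.Spades), Card('2', Suit.Hearts), FaceCard('K', Suit.Clubs)]
#   [str(c) for c in cards]            # ['A♠', '2♥', 'K♣']
#   [(c.hard, c.soft) for c in cards]  # [(1, 11), (2, 2), (10, 10)]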
__test__ = {name: value for name, value in locals().items() if name.startswith("test_")}
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=False)
|
contrib/tvmop/opdef.py | mchoi8739/incubator-mxnet | 211 | 11170996 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
import tvm
import inspect
from tvm import autotvm
from itertools import product
__OP_DEF__ = []
class OpDef:
"""Specify the properties of an operator and
construct the value combination of the arguments
e.g., ldtype=["float32", "int32"], rdtype=["float16", "int16"],
then the argument combination is
[
{"ldtype": "float32", "rdtype": "float16"},
{"ldtype": "float32", "rdtype": "int16"},
{"ldtype": "int32", "rdtype": "float16"},
{"ldtype": "int32", "rdtype": "int16"},
]
Parameters
----------
func : function
The function to define the operator (in tvm compute and schedule).
It will get the argument combination extracted by this class.
name : str
function name.
target : str
{"cpu", "gpu", "cuda"}
auto_broadcast : bool
auto_broadcast=True allows one to implement broadcast computation
without considering whether dimension size equals to one.
TVM maps buffer[i][j][k] -> buffer[i][0][k] if dimension i's shape equals 1.
"""
def __init__(self, func, name, target, auto_broadcast, **kwargs):
# construct the value combination of the arguments
# e.g., ldtype=["float32", "int32"], rdtype=["float16", "int16"]
# arg_combination = [
# {"ldtype": "float32", "rdtype": "float16"},
# {"ldtype": "float32", "rdtype": "int16"},
# {"ldtype": "int32", "rdtype": "float16"},
# {"ldtype": "int32", "rdtype": "int16"},
# ]
self.attrs = kwargs.pop('attrs', [])
self.attrs_valid = kwargs.pop('attrs_valid', lambda **kwargs: True)
args = [k for k in kwargs]
values = [kwargs[k] if isinstance(kwargs[k], (list, tuple)) else [kwargs[k]]
for k in args]
cart_product = product(*values)
self.arg_combination = [{k: v for k, v in zip(args, comb_values)}
for comb_values in cart_product]
self.func = func
self.name = name
self.target = target
self.auto_broadcast = auto_broadcast
self.dispatchable = 'fallback' in inspect.signature(self.func).parameters
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs)
def invoke_all(self):
for each_kwargs in self.arg_combination:
if self.attrs_valid(**each_kwargs):
name = self.name \
+ ''.join(["{}_{}".format(key, each_kwargs[key]) for key in self.attrs])
if self.dispatchable is False:
sch, args = self.func(**each_kwargs)
yield sch, args, name
else:
# register dispatch schedules
config_space = autotvm.ConfigSpace()
with autotvm.task.ApplyConfig(config_space):
sch, args = self.func(fallback=False, **each_kwargs)
for i in range(len(config_space)):
config_entity = config_space.get(i)
with autotvm.task.ApplyConfig(config_entity):
sch, args = self.func(fallback=False, **each_kwargs)
subname = name + "index_" + str(i)
yield sch, args, subname
# register fallback schedule
config_space = autotvm.ConfigSpace()
with autotvm.task.ApplyConfig(config_space):
sch, args = self.func(fallback=True, **each_kwargs)
subname = name + "fallback"
yield sch, args, subname
def get_op_name(self, name, args):
return name + ''.join(["%s_%d" % (arg.dtype, len(arg.shape)) for arg in args if hasattr(arg, 'shape')])
def get_config_spaces(self):
for each_kwargs in self.arg_combination:
if self.attrs_valid(**each_kwargs) and self.dispatchable is True:
name = self.name \
+ ''.join(["{}_{}".format(key, each_kwargs[key]) for key in self.attrs])
config_space = autotvm.ConfigSpace()
with autotvm.task.ApplyConfig(config_space):
self.func(fallback=False, **each_kwargs)
yield config_space, name
def get_binds(self, args):
if self.auto_broadcast:
return {arg: tvm.decl_buffer(arg.shape, arg.dtype, buffer_type="auto_broadcast")
for arg in args}
return None
def defop(name, target=None, auto_broadcast=False, **kwargs):
"""Decorator to define a tvm operator.
Parameters
----------
name : str
function name
target : str
{"cpu", "gpu", "cuda"}
auto_broadcast : bool
auto_broadcast=True allows one to implement broadcast computation
without considering whether dimension size equals to one.
TVM maps buffer[i][j][k] -> buffer[i][0][k] if dimension i's shape equals 1.
Returns
-------
fdef : function
A wrapped operator definition function, which returns (schedule, [tensors])
"""
assert name is not None and len(name) > 0
target = "cpu" if target is None else target
def _defop(func):
opdef = OpDef(func, name, target, auto_broadcast, **kwargs)
__OP_DEF__.append(opdef)
return opdef
return _defop
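# Illustrative usage sketch (not part of the original module): the operator name,
# dtypes and compute body below are hypothetical, but show how `defop` expands the
# keyword lists into one registration per argument combination.
#
#   @defop(name="vadd", target="cpu", auto_broadcast=True,
#          dtype=["float32", "int32"], ndim=[1, 2])
#   def vadd(dtype, ndim):
#       A = tvm.placeholder([tvm.var() for _ in range(ndim)], name='A', dtype=dtype)
#       B = tvm.placeholder([tvm.var() for _ in range(ndim)], name='B', dtype=dtype)
#       C = tvm.compute(A.shape, lambda *idx: A[idx] + B[idx], name='C')
#       s = tvm.create_schedule(C.op)
#       return s, [A, B, C]
#
# This would register four variants (2 dtypes x 2 ndims) in __OP_DEF__.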
|
tests/test_praw_scrapers/test_live_scrapers/test_Livestream.py | JosephLai241/Reddit-Scraper | 318 | 11170997 | <filename>tests/test_praw_scrapers/test_live_scrapers/test_Livestream.py
"""
Testing `Livestream.py`.
"""
import argparse
import os
import praw
import types
from dotenv import load_dotenv
from urs.praw_scrapers.live_scrapers import Livestream
from urs.utils.Global import date
class MakeArgs():
"""
    Making dummy args to test Livestream.py methods.
"""
@staticmethod
def parser_for_testing():
parser = argparse.ArgumentParser()
return parser
@staticmethod
def make_scraper_args():
parser = MakeArgs.parser_for_testing()
parser.add_argument("--live-subreddit")
parser.add_argument("--live-redditor")
parser.add_argument("--stream-submissions", action = "store_true")
return parser
class Login():
"""
Create a Reddit object with PRAW API credentials.
"""
@staticmethod
def create_reddit_object():
load_dotenv()
return praw.Reddit(
client_id = os.getenv("CLIENT_ID"),
client_secret = os.getenv("CLIENT_SECRET"),
user_agent = os.getenv("USER_AGENT"),
username = os.getenv("REDDIT_USERNAME"),
password = os.getenv("<PASSWORD>")
)
class TestSaveStreamCreateSkeletonMethod():
"""
Testing SaveStream class _create_skeleton() method.
"""
def test_create_skeleton_method_live_subreddit_default_streaming_comments_args(self):
parser = MakeArgs.make_scraper_args()
args = parser.parse_args("--live-subreddit askreddit".split())
skeleton = Livestream.SaveStream._create_skeleton(args)
assert skeleton["livestream_settings"]["subreddit"] == "askreddit"
assert skeleton["livestream_settings"]["included_reddit_objects"] == "comments"
assert skeleton["data"] == []
def test_create_skeleton_method_live_subreddit_streaming_submissions_args(self):
parser = MakeArgs.make_scraper_args()
args = parser.parse_args("--live-subreddit askreddit --stream-submissions".split())
skeleton = Livestream.SaveStream._create_skeleton(args)
assert skeleton["livestream_settings"]["subreddit"] == "askreddit"
assert skeleton["livestream_settings"]["included_reddit_objects"] == "submissions"
assert skeleton["data"] == []
def test_create_skeleton_method_live_redditor_default_streaming_comments_args(self):
parser = MakeArgs.make_scraper_args()
args = parser.parse_args("--live-redditor spez".split())
skeleton = Livestream.SaveStream._create_skeleton(args)
assert skeleton["livestream_settings"]["redditor"] == "spez"
assert skeleton["livestream_settings"]["included_reddit_objects"] == "comments"
assert skeleton["data"] == []
def test_create_skeleton_method_live_redditor_streaming_submissions_args(self):
parser = MakeArgs.make_scraper_args()
args = parser.parse_args("--live-redditor spez --stream-submissions".split())
skeleton = Livestream.SaveStream._create_skeleton(args)
assert skeleton["livestream_settings"]["redditor"] == "spez"
assert skeleton["livestream_settings"]["included_reddit_objects"] == "submissions"
assert skeleton["data"] == []
class TestSaveStreamMakeLivestreamDirMethod():
"""
Testing SaveStream class _make_livestream_dir() method.
"""
def test_make_livestream_dir_method_subreddits_subdirectory(self):
test_split_stream_info = ["r"]
stream_directory = Livestream.SaveStream._make_livestream_dir(test_split_stream_info)
assert stream_directory == f"../scrapes/{date}/livestream/subreddits"
def test_make_livestream_dir_method_redditors_subdirectory(self):
test_split_stream_info = ["u"]
stream_directory = Livestream.SaveStream._make_livestream_dir(test_split_stream_info)
assert stream_directory == f"../scrapes/{date}/livestream/redditors"
class TestSaveStreamGetTempFilenameMethod():
"""
Testing SaveStream class _get_temp_filename() method.
"""
def test_get_temp_filename_method_with_subreddit(self):
test_stream_info = "in r/askreddit"
stream_path = Livestream.SaveStream._get_temp_filename(test_stream_info)
assert stream_path == f"../scrapes/{date}/livestream/subreddits/askreddit.json"
def test_get_temp_filename_method_with_redditor(self):
test_stream_info = "by u/spez"
stream_path = Livestream.SaveStream._get_temp_filename(test_stream_info)
assert stream_path == f"../scrapes/{date}/livestream/redditors/spez.json"
class TestSaveStreamCreateTempFileMethod():
"""
Testing SaveStream class _create_temp_file() method.
"""
def test_create_temp_file_method(self):
test_skeleton = {
"test": 1
}
test_stream_path = "../scrapes/livestream/subreddits/askreddit.json"
if not os.path.isdir("../scrapes/livestream/subreddits"):
os.makedirs("../scrapes/livestream/subreddits")
Livestream.SaveStream._create_temp_file(test_skeleton, test_stream_path)
assert os.path.isfile(test_stream_path)
class TestSaveStreamRenameMethod():
"""
Testing SaveStream class _rename() method.
"""
def test_rename_method_with_subreddit(self):
test_duration = "00:00:15"
test_object_info = "comments"
test_start_stream = "18:06:06"
test_stream_path = f"../scrapes/{date}/livestream/subreddits/askreddit.json"
with open(test_stream_path, "w", encoding = "utf-8") as _:
pass
Livestream.SaveStream._rename(test_duration, test_object_info, test_start_stream, test_stream_path)
renamed_file = f"../scrapes/{date}/livestream/subreddits/askreddit-comments-18_06_06-00_00_15.json"
assert os.path.isfile(renamed_file)
def test_rename_method_with_redditor(self):
test_duration = "00:00:15"
test_object_info = "submissions"
test_start_stream = "18:06:06"
test_stream_path = f"../scrapes/{date}/livestream/redditors/spez.json"
with open(test_stream_path, "w", encoding = "utf-8") as _:
pass
Livestream.SaveStream._rename(test_duration, test_object_info, test_start_stream, test_stream_path)
renamed_file = f"../scrapes/{date}/livestream/redditors/spez-submissions-18_06_06-00_00_15.json"
assert os.path.isfile(renamed_file)
class TestSaveStreamWriteMethod():
"""
Testing SaveStream class write() method.
"""
def test_write_method(self):
pass
class TestLivestreamSetInfoAndObjectMethod():
"""
Testing Livestream class _set_info_and_object() method.
"""
def test_set_info_and_object_live_subreddit(self):
reddit = Login.create_reddit_object()
parser = MakeArgs.make_scraper_args()
args = parser.parse_args("--live-subreddit askreddit".split())
reddit_object, stream_info = Livestream.Livestream._set_info_and_object(args, reddit)
assert isinstance(reddit_object, praw.models.Subreddit)
assert stream_info == "in r/askreddit"
def test_set_info_and_object_live_redditor(self):
reddit = Login.create_reddit_object()
parser = MakeArgs.make_scraper_args()
args = parser.parse_args("--live-redditor spez".split())
reddit_object, stream_info = Livestream.Livestream._set_info_and_object(args, reddit)
assert isinstance(reddit_object, praw.models.Redditor)
assert stream_info == "by u/spez"
class TestLivestreamStreamSwitchMethod():
"""
Testing Livestream class _stream_switch() method.
"""
def test_stream_switch_method_default_stream_comments(self):
reddit = Login.create_reddit_object()
subreddit = reddit.subreddit("askreddit")
parser = MakeArgs.make_scraper_args()
args = parser.parse_args("--live-subreddit askreddit".split())
generator, object_info = Livestream.Livestream._stream_switch(args, subreddit)
assert isinstance(generator, types.GeneratorType)
assert object_info == "comments"
def test_stream_switch_method_stream_submissions(self):
reddit = Login.create_reddit_object()
subreddit = reddit.subreddit("askreddit")
parser = MakeArgs.make_scraper_args()
args = parser.parse_args("--live-subreddit askreddit --stream-submissions".split())
generator, object_info = Livestream.Livestream._stream_switch(args, subreddit)
assert isinstance(generator, types.GeneratorType)
assert object_info == "submissions"
class TestLivestreamNoSaveStreamMethod():
"""
Testing livestream class _no_save_stream() method.
"""
def test_no_save_stream_method(self):
pass
class TestLivestreamStreamMethod():
"""
Testing Livestream class stream() method.
"""
def test_stream_method_live_subreddit(self):
pass
def test_stream_method_live_redditor(self):
pass
|
tests/system/scripts/pinger.py | fquesnel/marathon | 3,556 | 11171014 | <filename>tests/system/scripts/pinger.py
#!/usr/bin/env python
""" This app "pinger" responses to /ping with pongs and will
response to /relay by pinging another app and respond with it's response
"""
import sys
import logging
import os
import platform
# Ensure compatibility with Python 2 and 3.
# See https://github.com/JioCloud/python-six/blob/master/six.py for details.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY2:
from SimpleHTTPServer import SimpleHTTPRequestHandler
from SocketServer import TCPServer as HTTPServer
from urllib2 import Request, urlopen
    from urlparse import urlparse  # import the function, mirroring the PY3 branch below
else:
from http.server import SimpleHTTPRequestHandler
from http.server import HTTPServer
from urllib.request import Request, urlopen
from urllib.parse import urlparse
if PY2:
byte_type = unicode # NOQA
def response_status(response):
return response.getcode()
else:
byte_type = bytes
def response_status(response):
return response.getcode()
def make_handler():
"""
Factory method that creates a handler class.
"""
class Handler(SimpleHTTPRequestHandler):
def handle_ping(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
marathonId = os.getenv("MARATHON_APP_ID", "NO_MARATHON_APP_ID_SET")
msg = "Pong {}".format(marathonId)
self.wfile.write(byte_type(msg, "UTF-8"))
return
def handle_relay(self):
"""
            provided a URL such as localhost:7777 or app.marathon.mesos:7777, relay will
            ping that URL's /ping endpoint (e.g. http://localhost:7777/ping) and pass the response back.
It is used for network testing in a cluster.
"""
query = urlparse(self.path).query
query_components = dict(qc.split("=") for qc in query.split("&"))
logging.info(query_components)
full_url = 'http://{}/ping'.format(query_components['url'])
url_req = Request(full_url, headers={"User-Agent": "Mozilla/5.0"})
response = urlopen(url_req)
res = response.read()
status = response_status(response)
logging.debug("Relay request is %s, %s", res, status)
self.send_response(status)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(res)
marathonId = os.getenv("MARATHON_APP_ID", "NO_MARATHON_APP_ID_SET")
msg = "\nRelay from {}".format(marathonId)
self.wfile.write(byte_type(msg, "UTF-8"))
return
def do_GET(self):
try:
logging.debug("Got GET request")
if self.path == '/ping':
return self.handle_ping()
elif self.path.startswith('/relay-ping'):
return self.handle_relay()
else:
return self.handle_ping()
except Exception:
logging.exception('Could not handle GET request')
raise
def do_POST(self):
try:
logging.debug("Got POST request")
return self.handle_ping()
except Exception:
logging.exception('Could not handle POST request')
raise
return Handler
if __name__ == "__main__":
logging.basicConfig(
format='%(asctime)s %(levelname)-8s: %(message)s',
level=logging.DEBUG)
logging.info(platform.python_version())
logging.debug(sys.argv)
port = int(sys.argv[1])
taskId = os.getenv("MESOS_TASK_ID", "<UNKNOWN>")
HTTPServer.allow_reuse_address = True
httpd = HTTPServer(("", port), make_handler())
msg = "AppMock[%s]: has taken the stage at port %d. "
logging.info(msg, taskId, port)
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
logging.info("Shutting down.")
httpd.shutdown()
httpd.socket.close()
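# Illustrative request sketch (not part of the original script): with the server
# started on port 7777, the relay path can be exercised with e.g.
#   curl "http://localhost:7777/relay-ping?url=otherapp.marathon.mesos:7777"
# which makes this instance ping the other app's /ping endpoint and echo its reply.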
|
core/dbt/events/stubs.py | f1fe/dbt | 3,156 | 11171036 | from typing import (
Any,
List,
NamedTuple,
Optional,
Dict,
)
# N.B.:
# These stubs were autogenerated by stubgen and then hacked
# to pieces to ensure we had something other than "Any" types
# where using external classes to instantiate event subclasses
# in events/types.py.
#
# This goes away when we turn mypy on for everything.
#
# Don't trust them too much at all!
class _ReferenceKey(NamedTuple):
database: Any
schema: Any
identifier: Any
class _CachedRelation:
referenced_by: Any
inner: Any
class BaseRelation:
path: Any
type: Optional[Any]
quote_character: str
include_policy: Any
quote_policy: Any
dbt_created: bool
class InformationSchema(BaseRelation):
information_schema_view: Optional[str]
class CompiledNode():
compiled_sql: Optional[str]
extra_ctes_injected: bool
extra_ctes: List[Any]
relation_name: Optional[str]
class CompiledModelNode(CompiledNode):
resource_type: Any
class ParsedModelNode():
resource_type: Any
class ParsedHookNode():
resource_type: Any
index: Optional[int]
class RunResult():
status: str
timing: List[Any]
thread_id: str
execution_time: float
adapter_response: Dict[str, Any]
message: Optional[str]
failures: Optional[int]
node: Any
|
algorithms/appo/policy_manager.py | magicly/sample-factory | 320 | 11171056 | import random
import numpy as np
class PolicyManager:
"""
    This class currently implements the simplest mapping between agents in the envs and their associated policies.
We just pick a random policy from the population for every agent at the beginning of the episode.
Methods of this class can potentially be overloaded to provide a more clever mapping, e.g. we can minimize the
number of different policies per rollout worker thus minimizing the amount of communication required.
"""
def __init__(self, cfg, num_agents):
self.rng = np.random.RandomState(seed=random.randint(0, 2**32 - 1))
self.num_agents = num_agents
self.num_policies = cfg.num_policies
self.mix_policies_in_one_env = cfg.pbt_mix_policies_in_one_env
self.resample_env_policy_every = 10 # episodes
self.env_policies = dict()
self.env_policy_requests = dict()
def get_policy_for_agent(self, agent_idx, env_idx):
num_requests = self.env_policy_requests.get(env_idx, 0)
if num_requests % (self.num_agents * self.resample_env_policy_every) == 0:
if self.mix_policies_in_one_env:
self.env_policies[env_idx] = [self._sample_policy() for _ in range(self.num_agents)]
else:
policy = self._sample_policy()
self.env_policies[env_idx] = [policy] * self.num_agents
self.env_policy_requests[env_idx] = num_requests + 1
return self.env_policies[env_idx][agent_idx]
def _sample_policy(self):
return self.rng.randint(0, self.num_policies)
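# Illustrative usage sketch (not part of the original module); `cfg` is faked with
# SimpleNamespace and only carries the two fields read in __init__:
#
#   from types import SimpleNamespace
#   cfg = SimpleNamespace(num_policies=4, pbt_mix_policies_in_one_env=False)
#   manager = PolicyManager(cfg, num_agents=2)
#   manager.get_policy_for_agent(agent_idx=0, env_idx=0)  # same policy for all agents in env 0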
|
src/deutschland/strahlenschutz/__init__.py | andreasbossard/deutschland | 445 | 11171059 | <reponame>andreasbossard/deutschland
# flake8: noqa
"""
ODL-Info API
    Data on radioactive contamination levels in Germany  # noqa: E501
The version of the OpenAPI document: 0.0.1
Generated by: https://openapi-generator.tech
"""
__version__ = "1.0.0"
# import ApiClient
from deutschland.strahlenschutz.api_client import ApiClient
# import Configuration
from deutschland.strahlenschutz.configuration import Configuration
# import exceptions
from deutschland.strahlenschutz.exceptions import (
ApiAttributeError,
ApiException,
ApiKeyError,
ApiTypeError,
ApiValueError,
OpenApiException,
)
|
plenum/server/consensus/primary_selector.py | IDunion/indy-plenum | 148 | 11171067 | from abc import ABCMeta, abstractmethod
from typing import List
from common.exceptions import LogicError
from plenum.server.batch_handlers.node_reg_handler import NodeRegHandler
from stp_core.common.log import getlogger
logger = getlogger()
class PrimariesSelector(metaclass=ABCMeta):
@abstractmethod
def select_master_primary(self, view_no: int) -> str:
pass
@abstractmethod
def select_primaries(self, view_no: int) -> List[str]:
pass
class RoundRobinConstantNodesPrimariesSelector(PrimariesSelector):
def __init__(self, validators: List[str]) -> None:
self.validators = validators
def select_master_primary(self, view_no: int) -> str:
return self.validators[view_no % len(self.validators)]
def select_primaries(self, view_no: int) -> List[str]:
master_primary = self.select_master_primary(view_no)
return [master_primary] + self._select_backup_primaries(view_no, master_primary)
def _select_backup_primaries(self, view_no: int, master_primary) -> List[str]:
N = len(self.validators)
F = (N - 1) // 3
return self.select_backup_primaries_round_robin(view_no, self.validators, F, master_primary)
@staticmethod
def select_backup_primaries_round_robin(view_no: int, validators: List[str], backup_instance_count: int,
master_primary: str):
primaries = []
i = 1
while len(primaries) < backup_instance_count:
backup_primary = validators[(view_no + i) % len(validators)]
if backup_primary != master_primary:
primaries.append(backup_primary)
i += 1
return primaries
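    # Worked example (illustrative values): with validators ['Alpha', 'Beta', 'Gamma', 'Delta']
    # and view_no=2, select_master_primary() returns 'Gamma' (index 2 % 4); with
    # backup_instance_count=1 the loop starts at view_no + 1 and yields ['Delta'],
    # skipping any candidate equal to the master primary.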
class RoundRobinNodeRegPrimariesSelector(PrimariesSelector):
def __init__(self, node_reg_handler: NodeRegHandler) -> None:
self.node_reg_handler = node_reg_handler
def select_master_primary(self, view_no: int) -> str:
# use committed node reg at the beginning of view to make sure that N-F nodes selected the same Primary at view change start
return self._do_select_master_primary(view_no,
self.node_reg_handler.committed_node_reg_at_beginning_of_view)
def select_primaries(self, view_no: int) -> List[str]:
# use uncommitted_node_reg_at_beginning_of_view to have correct primaries in audit
master_primary = self._do_select_master_primary(view_no,
self.node_reg_handler.uncommitted_node_reg_at_beginning_of_view)
return [master_primary] + self._select_backup_primaries(view_no, master_primary)
def _do_select_master_primary(self, view_no: int, node_reg) -> str:
# Get a list of nodes to be used for selection as the one at the beginning of last view
# to guarantee that same primaries will be selected on all nodes once view change is started.
# Remark: It's possible that there is no nodeReg for some views if no txns have been ordered there
view_no_for_selection = view_no - 1 if view_no > 1 else 0
while view_no_for_selection > 0 and view_no_for_selection not in node_reg:
view_no_for_selection -= 1
if view_no_for_selection not in node_reg:
raise LogicError("Can not find view_no {} in node_reg_at_beginning_of_view {}".format(view_no,
node_reg))
node_reg_to_use = node_reg[view_no_for_selection]
return node_reg_to_use[view_no % len(node_reg_to_use)]
def _select_backup_primaries(self, view_no: int, master_primary) -> List[str]:
N = len(self.node_reg_handler.active_node_reg)
F = (N - 1) // 3
return RoundRobinConstantNodesPrimariesSelector.select_backup_primaries_round_robin(view_no,
self.node_reg_handler.active_node_reg,
F,
master_primary)
|
dnachisel/builtin_specifications/codon_optimization/MaximizeCAI.py | simone-pignotti/DnaChisel | 124 | 11171070 | <reponame>simone-pignotti/DnaChisel
import numpy as np
from .BaseCodonOptimizationClass import BaseCodonOptimizationClass
from ...Specification.SpecEvaluation import SpecEvaluation
class MaximizeCAI(BaseCodonOptimizationClass):
"""Codon-optimize a coding sequence for a given species. Maximizes the CAI.
To be precise, the score computed by this specification is N*log(CAI) where
    N is the number of codons. Maximizing this score also maximizes the Codon
    Adaptation Index (CAI). For a sequence with N codons, the CAI is the geometric
    mean of the Relative Codon Adaptiveness (RCA) of the different codons. The RCA
    of a codon is (f_i/fmax_i), where f_i is the frequency of that codon in the
codon usage table, and fmax is the maximal frequency of the synonymous
codons.
So N*log(CAI) = sum_i ( log(f_i) - log(fmax_i) )
This score is between -inf. and 0 (0 meaning a perfectly optimal sequence).
Parameters
----------
species
Species for which the sequence will be codon-optimized.
Either a TaxID (this requires a web connection as the corresponding table
will be downloaded from the internet) or the name of the species to
codon-optimize for (the name must be supported by ``python_codon_tables``
e.g. ``e_coli``, ``s_cerevisiae``, ``h_sapiens``, ``c_elegans``,
``b_subtilis``, ``d_melanogaster``).
Note that a ``codon_usage_table`` can be provided instead, or even in
addition, for species whose codon usage table cannot be auto-imported.
location
Either a DnaChisel Location or a tuple of the form (start, end, strand)
or just (start, end), with strand defaulting to +1, indicating the
position of the gene to codon-optimize. If not provided, the whole
sequence is considered as the gene. The location should have a length
that is a multiple of 3. The location strand is either 1 if the gene is
encoded on the (+) strand, or -1 for antisense.
codon_usage_table
A dict of the form ``{'*': {"TGA": 0.112, "TAA": 0.68}, 'K': ...}``
giving the RSCU table (relative usage of each codon). Only provide if
no ``species`` parameter was provided.
boost
Score multiplicator (=weight) for when the specification is used as an
optimization objective alongside competing objectives.
Examples
--------
>>> objective = MaximizeCAI(
>>> species = "E. coli",
    >>>     location = (150, 300, -1), # gene coordinates and strand
>>> )
"""
shorthand_name = "use_best_codon"
def __init__(
self, species=None, location=None, codon_usage_table=None, boost=1.0
):
BaseCodonOptimizationClass.__init__(
self,
species=species,
location=location,
codon_usage_table=codon_usage_table,
boost=boost,
)
self.codons_translations = self.get_codons_translations()
if "log_best_frequencies" not in self.codon_usage_table:
self.codon_usage_table["log_best_frequencies"] = {
aa: np.log(max(aa_data.values()))
for aa, aa_data in self.codon_usage_table.items()
if len(aa) == 1
}
if "log_codons_frequencies" not in self.codon_usage_table:
self.codon_usage_table["log_codons_frequencies"] = {
codon: np.log(frequency or 0.001)
for aa, frequencies in self.codon_usage_table.items()
for codon, frequency in frequencies.items()
if len(aa) == 1
}
def evaluate(self, problem):
"""Evaluate!"""
codons = self.get_codons(problem)
ct = self.codons_translations
if len(codons) == 1:
# We are evaluating a single codon. Easy!
codon = codons[0]
freq = self.codon_usage_table["log_codons_frequencies"][codon]
optimal = self.codon_usage_table["log_best_frequencies"][ct[codon]]
score = freq - optimal
return SpecEvaluation(
self,
problem,
score=freq - optimal,
locations=[] if (freq == optimal) else [self.location],
message="Codon opt. on window %s scored %.02E"
% (self.location, score),
)
current_usage = [
self.codon_usage_table["log_codons_frequencies"][codon]
for codon in codons
]
optimal_usage = [
self.codon_usage_table["log_best_frequencies"][ct[codon]]
for codon in codons
]
non_optimality = np.array(optimal_usage) - np.array(current_usage)
nonoptimal_indices = np.nonzero(non_optimality)[0]
locations = self.codons_indices_to_locations(nonoptimal_indices)
score = -non_optimality.sum()
return SpecEvaluation(
self,
problem,
score=score,
locations=locations,
message="Codon opt. on window %s scored %.02E"
% (self.location, score),
)
def label_parameters(self):
return ["(custom table)" if self.species is None else self.species]
def short_label(self):
result = "best-codon-optimize"
if self.species is not None:
result += " (%s)" % self.species
return result
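# Illustrative numeric sketch (not part of the original module): the score above is
# N*log(CAI) = sum_i (log f_i - log fmax_i). With made-up frequencies for two codons:
#
#   import numpy as np
#   f = np.array([0.40, 0.10])      # observed codon frequencies (hypothetical)
#   fmax = np.array([0.40, 0.50])   # best synonymous-codon frequencies (hypothetical)
#   score = np.sum(np.log(f) - np.log(fmax))   # = log(0.10 / 0.50) ~ -1.61
#   cai = np.exp(score / len(f))               # geometric-mean form, ~ 0.45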
|
graph__networkx__d3__dot_graphviz/custom_graph.py | DazEB2/SimplePyScripts | 117 | 11171072 | <filename>graph__networkx__d3__dot_graphviz/custom_graph.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# SOURCE: https://networkx.readthedocs.io/en/stable/examples/drawing/weighted_graph.html
import matplotlib.pyplot as plt
import networkx as nx
G = nx.Graph()
G.add_edge('a', 'b')
G.add_edge('a', 'c')
G.add_edge('c', 'd')
G.add_edge('c', 'e')
G.add_edge('c', 'f')
G.add_edge('a', 'd')
pos = nx.spring_layout(G) # positions for all nodes
# nodes
nx.draw_networkx_nodes(G, pos, node_size=700)
# edges
nx.draw_networkx_edges(G, pos, edgelist=G.edges(), width=6)
# labels
nx.draw_networkx_labels(G, pos, font_size=20, font_family='sans-serif')
plt.axis('off')
plt.savefig("custom_graph.png") # save as png
plt.show() # display
|
Lib/test/test_compiler/testcorpus/02_expr_attr.py | diogommartins/cinder | 1,886 | 11171074 | <gh_stars>1000+
a.b
a.b.c.d
|
tests/test_utils.py | edouard-lopez/colorful | 517 | 11171077 | # -*- coding: utf-8 -*-
"""
colorful
~~~~~~~~
Terminal string styling done right, in Python.
:copyright: (c) 2017 by <NAME> <<EMAIL>>
:license: MIT, see LICENSE for more details.
"""
import os
import pytest
# do not overwrite module
os.environ['COLORFUL_NO_MODULE_OVERWRITE'] = '1'
import colorful.utils as utils # noqa
@pytest.mark.parametrize('hex_value', [
'#FFFFFF',
'#0000FF',
'#FF0000',
'#00FF00',
'#808080',
'#FFFF00',
'#00FFFF',
'#EF8BA0',
'#012345',
'#999FFF',
'#05fecA'
])
def test_hex_to_rgb_conversion(hex_value):
"""
Test the conversion from a RGB hex value to a RGB channel triplet
"""
red, green, blue = utils.hex_to_rgb(hex_value)
assert '#{:02X}{:02X}{:02X}'.format(red, green, blue).lower() == hex_value.lower()
@pytest.mark.parametrize('hex_error_value', [
'#FFFFF',
'#FFFFFFF',
'#FFFFFG',
'#fffffG',
])
def test_hex_to_rgb_error(hex_error_value):
"""
Test if the given hex values are invalid
"""
with pytest.raises(ValueError):
utils.hex_to_rgb(hex_error_value)
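# Illustrative expectation (not part of the original tests): hex_to_rgb splits a
# "#RRGGBB" string into three 0-255 channel integers, e.g.
#   utils.hex_to_rgb("#FF8000")  # -> (255, 128, 0)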
|
social/apps/flask_app/template_filters.py | raccoongang/python-social-auth | 1,987 | 11171079 | from social_flask.template_filters import backends, login_redirect
|
plugins/normalise/setup.py | eesprit/alerta-contrib | 114 | 11171087 |
from setuptools import setup, find_packages
version = '5.3.1'
setup(
name="alerta-normalise",
version=version,
description='Alerta plugin for alert normalisation',
url='https://github.com/alerta/alerta-contrib',
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
packages=find_packages(),
py_modules=['alerta_normalise'],
include_package_data=True,
zip_safe=True,
entry_points={
'alerta.plugins': [
'normalise = alerta_normalise:NormaliseAlert'
]
}
)
|
asv/benchmarks/benchmarks.py | karban8/tardis | 176 | 11171091 | # Write the benchmarking functions here.
# See "Writing benchmarks" in the asv docs for more information.
import numpy as np
from tardis.tests import montecarlo_test_wrappers as montecarlo
LINE_SIZE = 10000000
class TimeSuite:
"""
    An asv benchmark suite timing the TARDIS montecarlo test wrappers
    (binary search and distance computations) over a large line list.
"""
def setup(self):
self.line = np.arange(LINE_SIZE, 1, -1).astype(np.float64)
def time_binarysearch(self):
for _ in range(LINE_SIZE):
montecarlo.binary_search_wrapper(
self.line, np.random.random() * LINE_SIZE, 0, LINE_SIZE - 1
)
def time_compute_distance2outer(self):
for _ in range(1000000):
montecarlo.compute_distance2outer_wrapper(0.0, 0.5, 1.0)
montecarlo.compute_distance2outer_wrapper(1.0, 0.5, 1.0)
montecarlo.compute_distance2outer_wrapper(0.3, 1.0, 1.0)
montecarlo.compute_distance2outer_wrapper(0.3, -1.0, 1.0)
montecarlo.compute_distance2outer_wrapper(0.5, 0.0, 1.0)
def time_compute_distance2inner(self):
for _ in range(1000000):
montecarlo.compute_distance2inner_wrapper(1.5, -1.0, 1.0)
montecarlo.compute_distance2inner_wrapper(0.0, 0.0, 0.0)
montecarlo.compute_distance2inner_wrapper(1.2, -0.7, 1.0)
def time_compute_distance2line(self):
for _ in range(1000000):
montecarlo.compute_distance2line_wrapper(
2.20866912e15,
-0.251699059004,
1.05581082105e15,
1.06020910733e15,
1693440.0,
5.90513983371e-07,
1.0602263591e15,
1.06011723237e15,
2,
)
montecarlo.compute_distance2line_wrapper(
2.23434667994e15,
-0.291130548401,
1.05581082105e15,
1.06733618121e15,
1693440.0,
5.90513983371e-07,
1.06738407486e15,
1.06732933961e15,
3,
)
def time_compute_distance2electron(self):
for _ in range(1000000):
montecarlo.compute_distance2electron_wrapper(0.0, 0.0, 2.0, 2.0)
|
capstone/capweb/templatetags/api_url.py | rachelaus/capstone | 134 | 11171095 | from django import template
from capapi import api_reverse
register = template.Library()
@register.simple_tag()
def api_url(url_name, *args, **kwargs):
""" Like the {% url %} tag, but output includes the full domain. """
return api_reverse(url_name, args=args, kwargs=kwargs) |
tests/unit/test_utils.py | 0xflotus/xfer | 244 | 11171142 | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# ==============================================================================
from unittest import TestCase
import mxnet as mx
from xfer import utils
class UtilsTestCase(TestCase):
def setUp(self):
pass
def test_serialize_ctx_fn(self):
op = utils.serialize_ctx_fn(mx.cpu)
assert op == 'cpu'
op = utils.serialize_ctx_fn(mx.gpu)
assert op == 'gpu'
with self.assertRaises(ValueError):
utils.serialize_ctx_fn('cpu')
def test_deserialize_ctx_fn(self):
op = utils.deserialize_ctx_fn('cpu')
assert op == mx.cpu
assert op == mx.context.cpu
op = utils.deserialize_ctx_fn('gpu')
assert op == mx.gpu
assert op == mx.context.gpu
with self.assertRaises(ValueError):
utils.deserialize_ctx_fn(mx.cpu)
with self.assertRaises(ValueError):
utils.deserialize_ctx_fn(5)
def test_assert_repurposer_file_exists(self):
with self.assertRaises(NameError):
utils._assert_repurposer_file_exists(['madeupfile'])
with self.assertRaises(NameError):
utils._assert_repurposer_file_exists([3])
|
conf/config.py | bopopescu/pspider | 168 | 11171163 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Created on : 2019-03-12 15:44
# @Author : zpy
# @Software: PyCharm
dev = True
if dev:
from conf.dev_config import *
else:
from conf.product_config import * |
bootcamp/messenger/tests.py | elviva404/bootcamp | 115 | 11171173 | <reponame>elviva404/bootcamp
from django.test import TestCase, Client
from django.contrib.auth.models import User
class MessengerViewsTest(TestCase):
def setUp(self):
self.client = Client()
User.objects.create_user(
username='test_user',
email='<EMAIL>',
password='<PASSWORD>'
)
User.objects.create_user(
username='test_user_1',
email='<EMAIL>',
password='<PASSWORD>'
)
self.client.login(username='test_user', password='<PASSWORD>')
def test_inbox(self):
response = self.client.get('/messages/')
self.assertEqual(response.status_code, 200)
def test_messages(self):
response = self.client.get('/messages/no_user/')
self.assertEqual(response.status_code, 200)
response = self.client.get('/messages/test_user/')
self.assertEqual(response.status_code, 200)
def test_new_message(self):
response = self.client.get('/messages/new/')
self.assertEqual(response.status_code, 200)
response = self.client.post('/messages/new/', {
'to': 'test_user_1',
'message': 'test message'
})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, '/messages/test_user_1/')
|
pyjswidgets/pyjamas/ui/TextBoxBase.ie6.py | takipsizad/pyjs | 739 | 11171218 | <gh_stars>100-1000
class TextBoxBase:
def getCursorPos(self):
JS("""
try {
var elem = this['getElement']();
var tr = elem['document']['selection']['createRange']();
if (tr['parentElement']()['uniqueID'] != elem['uniqueID'])
return -1;
return -tr['move']("character", -65535);
}
catch (e) {
return 0;
}
""")
def getSelectionLength(self):
JS("""
try {
var elem = this['getElement']();
var tr = elem['document']['selection']['createRange']();
if (tr['parentElement']()['uniqueID'] != elem['uniqueID'])
return 0;
return tr['text']['length'];
}
catch (e) {
return 0;
}
""")
def setSelectionRange(self, pos, length):
JS("""
try {
var elem = this['getElement']();
var tr = elem['createTextRange']();
tr['collapse'](true);
tr['moveStart']('character', @{{pos}});
tr['moveEnd']('character', @{{length}});
tr['select']();
}
catch (e) {
}
""")
|
software/glasgow/applet/interface/jtag_probe/__init__.py | whitequark/glasgow | 280 | 11171246 | # Ref: IEEE Std 1149.1-2001
# Accession: G00018
# Transport layers
# ----------------
#
# The industry has defined a number of custom JTAG transport layers, such as cJTAG, Spy-Bi-Wire,
# and so on. As long as these comprise a straightforward serialization of the four JTAG signals,
# it is possible to reuse most of this applet by defining a TransportLayerProbeAdapter, with
# the same interface as JTAGProbeAdapter.
#
# Sideband signals
# ----------------
#
# Devices using JTAG for programming and debugging (as opposed to boundary scan) often define
# a number of sideband input or output signals, such as a reset signal or a program success signal.
# The probe driver allows setting or retrieving the state of up to 8 auxiliary signals provided
# by the probe adapter, synchronized to the normal JTAG command stream.
#
# By convention, aux[0:1] are {TRST#.Z, TRST#.O} if the probe adapter provides TRST#.
import struct
import logging
import asyncio
import argparse
from amaranth import *
from amaranth.lib.cdc import FFSynchronizer
from ....support.bits import *
from ....support.logging import *
from ....support.arepl import *
from ....gateware.pads import *
from ....database.jedec import *
from ....arch.jtag import *
from ... import *
class JTAGProbeBus(Elaboratable):
def __init__(self, pads):
self._pads = pads
self.tck = Signal(reset=1)
self.tms = Signal(reset=1)
self.tdo = Signal(reset=1)
self.tdi = Signal(reset=1)
self.trst_z = Signal(reset=0)
self.trst_o = Signal(reset=0)
def elaborate(self, platform):
m = Module()
pads = self._pads
m.d.comb += [
pads.tck_t.oe.eq(1),
pads.tck_t.o.eq(self.tck),
pads.tms_t.oe.eq(1),
pads.tms_t.o.eq(self.tms),
pads.tdi_t.oe.eq(1),
pads.tdi_t.o.eq(self.tdi),
]
m.submodules += [
FFSynchronizer(pads.tdo_t.i, self.tdo),
]
if hasattr(pads, "trst_t"):
m.d.sync += [
pads.trst_t.oe.eq(~self.trst_z),
pads.trst_t.o.eq(~self.trst_o)
]
return m
BIT_AUX_TRST_Z = 0b01
BIT_AUX_TRST_O = 0b10
class JTAGProbeAdapter(Elaboratable):
def __init__(self, bus, period_cyc):
self.bus = bus
self._period_cyc = period_cyc
self.stb = Signal()
self.rdy = Signal()
self.tms = Signal()
self.tdo = Signal()
self.tdi = Signal()
self.aux_i = C(0)
self.aux_o = Cat(bus.trst_z, bus.trst_o)
def elaborate(self, platform):
m = Module()
half_cyc = int(self._period_cyc // 2)
timer = Signal(range(half_cyc+1))
with m.FSM() as fsm:
with m.State("TCK-H"):
m.d.comb += self.bus.tck.eq(1)
with m.If(timer != 0):
m.d.sync += timer.eq(timer - 1)
with m.Else():
with m.If(self.stb):
m.d.sync += [
timer .eq(half_cyc - 1),
self.bus.tms .eq(self.tms),
self.bus.tdi .eq(self.tdi),
]
m.next = "TCK-L"
with m.Else():
m.d.comb += self.rdy.eq(1)
with m.State("TCK-L"):
m.d.comb += self.bus.tck.eq(0)
with m.If(timer != 0):
m.d.sync += timer.eq(timer - 1)
with m.Else():
m.d.sync += [
timer .eq(half_cyc - 1),
self.tdo.eq(self.bus.tdo),
]
m.next = "TCK-H"
return m
CMD_MASK = 0b11110000
CMD_SHIFT_TMS = 0b00000000
CMD_SHIFT_TDIO = 0b00010000
CMD_GET_AUX = 0b10000000
CMD_SET_AUX = 0b10010000
# CMD_SHIFT_{TMS,TDIO}
BIT_DATA_OUT = 0b0001
BIT_DATA_IN = 0b0010
BIT_LAST = 0b0100
# CMD_SHIFT_TMS
BIT_TDI = 0b1000
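# Illustrative sketch (not part of the original applet): a host-side CMD_SHIFT_TDIO
# request is a single command byte ORed with the BIT_* flags, followed by a
# little-endian 16-bit bit count, e.g.
#
#   cmd = CMD_SHIFT_TDIO | BIT_DATA_OUT | BIT_DATA_IN | BIT_LAST
#   header = struct.pack("<BH", cmd, 24)   # shift 24 bits and capture TDO
#
# which matches how JTAGProbeInterface.shift_tdio() below builds its requests.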
class JTAGProbeDriver(Elaboratable):
def __init__(self, adapter, out_fifo, in_fifo):
self.adapter = adapter
self._out_fifo = out_fifo
self._in_fifo = in_fifo
def elaborate(self, platform):
m = Module()
cmd = Signal(8)
count = Signal(16)
bitno = Signal(3)
align = Signal(3)
shreg_o = Signal(8)
shreg_i = Signal(8)
with m.FSM() as fsm:
with m.State("RECV-COMMAND"):
m.d.comb += self._in_fifo.flush.eq(1)
with m.If(self._out_fifo.readable):
m.d.comb += self._out_fifo.re.eq(1)
m.d.sync += cmd.eq(self._out_fifo.dout)
m.next = "COMMAND"
with m.State("COMMAND"):
with m.If(((cmd & CMD_MASK) == CMD_SHIFT_TMS) |
((cmd & CMD_MASK) == CMD_SHIFT_TDIO)):
m.next = "RECV-COUNT-1"
with m.Elif((cmd & CMD_MASK) == CMD_GET_AUX):
m.next = "SEND-AUX"
with m.Elif((cmd & CMD_MASK) == CMD_SET_AUX):
m.next = "RECV-AUX"
with m.State("SEND-AUX"):
with m.If(self._in_fifo.writable):
m.d.comb += [
self._in_fifo.we.eq(1),
self._in_fifo.din.eq(self.adapter.aux_i),
]
m.next = "RECV-COMMAND"
with m.State("RECV-AUX"):
with m.If(self._out_fifo.readable):
m.d.comb += self._out_fifo.re.eq(1)
m.d.sync += self.adapter.aux_o.eq(self._out_fifo.dout)
m.next = "RECV-COMMAND"
with m.State("RECV-COUNT-1"):
with m.If(self._out_fifo.readable):
m.d.comb += self._out_fifo.re.eq(1)
m.d.sync += count[0:8].eq(self._out_fifo.dout)
m.next = "RECV-COUNT-2"
with m.State("RECV-COUNT-2"):
with m.If(self._out_fifo.readable):
m.d.comb += self._out_fifo.re.eq(1),
m.d.sync += count[8:16].eq(self._out_fifo.dout)
m.next = "RECV-BITS"
with m.State("RECV-BITS"):
with m.If(count == 0):
m.next = "RECV-COMMAND"
with m.Else():
with m.If(count > 8):
m.d.sync += bitno.eq(0)
with m.Else():
m.d.sync += [
align.eq(8 - count[:3]),
bitno.eq(8 - count[:3]),
]
with m.If(cmd & BIT_DATA_OUT):
with m.If(self._out_fifo.readable):
m.d.comb += self._out_fifo.re.eq(1)
m.d.sync += shreg_o.eq(self._out_fifo.dout)
m.next = "SHIFT-SETUP"
with m.Else():
m.d.sync += shreg_o.eq(0b11111111)
m.next = "SHIFT-SETUP"
with m.State("SHIFT-SETUP"):
m.d.sync += self.adapter.stb.eq(1)
with m.If((cmd & CMD_MASK) == CMD_SHIFT_TMS):
m.d.sync += self.adapter.tms.eq(shreg_o[0])
m.d.sync += self.adapter.tdi.eq((cmd & BIT_TDI) != 0)
with m.Else():
m.d.sync += self.adapter.tms.eq(0)
with m.If(cmd & BIT_LAST):
m.d.sync += self.adapter.tms.eq(count == 1)
m.d.sync += self.adapter.tdi.eq(shreg_o[0])
m.d.sync += [
shreg_o.eq(Cat(shreg_o[1:], 1)),
count.eq(count - 1),
bitno.eq(bitno + 1),
]
m.next = "SHIFT-CAPTURE"
with m.State("SHIFT-CAPTURE"):
m.d.sync += self.adapter.stb.eq(0)
with m.If(self.adapter.rdy):
m.d.sync += shreg_i.eq(Cat(shreg_i[1:], self.adapter.tdo))
with m.If(bitno == 0):
m.next = "SEND-BITS"
with m.Else():
m.next = "SHIFT-SETUP"
with m.State("SEND-BITS"):
with m.If(cmd & BIT_DATA_IN):
with m.If(self._in_fifo.writable):
m.d.comb += self._in_fifo.we.eq(1),
with m.If(count == 0):
m.d.comb += self._in_fifo.din.eq(shreg_i >> align)
with m.Else():
m.d.comb += self._in_fifo.din.eq(shreg_i)
m.next = "RECV-BITS"
with m.Else():
m.next = "RECV-BITS"
return m
class JTAGProbeSubtarget(Elaboratable):
def __init__(self, pads, out_fifo, in_fifo, period_cyc):
self._pads = pads
self._out_fifo = out_fifo
self._in_fifo = in_fifo
self._period_cyc = period_cyc
def elaborate(self, platform):
m = Module()
m.submodules.bus = JTAGProbeBus(self._pads)
m.submodules.adapter = JTAGProbeAdapter(m.submodules.bus, self._period_cyc)
m.submodules.driver = JTAGProbeDriver(m.submodules.adapter, self._out_fifo, self._in_fifo)
return m
class JTAGProbeError(GlasgowAppletError):
pass
class JTAGProbeStateTransitionError(JTAGProbeError):
def __init__(self, message, old_state, new_state):
super().__init__(message.format(old_state, new_state))
self.old_state = old_state
self.new_state = new_state
class JTAGProbeInterface:
scan_ir_max_length = 128
scan_dr_max_length = 1024
def __init__(self, interface, logger, has_trst=False, __name__=__name__):
self.lower = interface
self._logger = logger
self._level = logging.DEBUG if self._logger.name == __name__ else logging.TRACE
self.has_trst = has_trst
self._state = "Unknown"
self._current_ir = None
def _log_l(self, message, *args):
self._logger.log(self._level, "JTAG-L: " + message, *args)
def _log_h(self, message, *args):
self._logger.log(self._level, "JTAG-H: " + message, *args)
# Low-level operations
async def flush(self):
self._log_l("flush")
await self.lower.flush()
async def set_aux(self, value):
self._log_l("set aux=%s", format(value, "08b"))
await self.lower.write(struct.pack("<BB",
CMD_SET_AUX, value))
async def get_aux(self):
await self.lower.write(struct.pack("<B",
CMD_GET_AUX))
value, = await self.lower.read(1)
self._log_l("get aux=%s", format(value, "08b"))
return value
async def set_trst(self, active):
if not self.has_trst:
raise JTAGProbeError("cannot set TRST#: adapter does not provide TRST#")
if active is None:
self._log_l("set trst=z")
await self.set_aux(BIT_AUX_TRST_Z)
else:
self._log_l("set trst=%d", active)
await self.set_aux(BIT_AUX_TRST_O if active else 0)
async def shift_tms(self, tms_bits, tdi=False):
tms_bits = bits(tms_bits)
self._log_l("shift tms=<%s>", dump_bin(tms_bits))
await self.lower.write(struct.pack("<BH",
CMD_SHIFT_TMS|BIT_DATA_OUT|(BIT_TDI if tdi else 0), len(tms_bits)))
await self.lower.write(tms_bits)
def _shift_last(self, last):
if last:
if self._state == "Shift-IR":
self._log_l("state Shift-IR → Exit1-IR")
self._state = "Exit1-IR"
elif self._state == "Shift-DR":
self._log_l("state Shift-DR → Exit1-DR")
self._state = "Exit1-DR"
@staticmethod
def _chunk_count(count, last, chunk_size=0xffff):
assert count >= 0
while count > chunk_size:
yield chunk_size, False
count -= chunk_size
yield count, last
@staticmethod
def _chunk_bits(bits, last, chunk_size=0xffff):
offset = 0
while len(bits) - offset > chunk_size:
yield bits[offset:offset + chunk_size], False
offset += chunk_size
yield bits[offset:], last
async def _shift_dummy(self, count, last=False):
for count, chunk_last in self._chunk_count(count, last):
await self.lower.write(struct.pack("<BH",
CMD_SHIFT_TDIO|(BIT_LAST if chunk_last else 0), count))
async def shift_tdio(self, tdi_bits, *, prefix=0, suffix=0, last=True):
assert self._state in ("Shift-IR", "Shift-DR")
tdi_bits = bits(tdi_bits)
tdo_bits = bits()
self._log_l("shift tdio-i=%d,<%s>,%d", prefix, dump_bin(tdi_bits), suffix)
await self._shift_dummy(prefix)
for tdi_bits, chunk_last in self._chunk_bits(tdi_bits, last and suffix == 0):
await self.lower.write(struct.pack("<BH",
CMD_SHIFT_TDIO|BIT_DATA_IN|BIT_DATA_OUT|(BIT_LAST if chunk_last else 0),
len(tdi_bits)))
tdi_bytes = bytes(tdi_bits)
await self.lower.write(tdi_bytes)
tdo_bytes = await self.lower.read(len(tdi_bytes))
tdo_bits += bits(tdo_bytes, len(tdi_bits))
await self._shift_dummy(suffix, last)
self._log_l("shift tdio-o=%d,<%s>,%d", prefix, dump_bin(tdo_bits), suffix)
self._shift_last(last)
return tdo_bits
async def shift_tdi(self, tdi_bits, *, prefix=0, suffix=0, last=True):
assert self._state in ("Shift-IR", "Shift-DR")
tdi_bits = bits(tdi_bits)
self._log_l("shift tdi=%d,<%s>,%d", prefix, dump_bin(tdi_bits), suffix)
await self._shift_dummy(prefix)
for tdi_bits, chunk_last in self._chunk_bits(tdi_bits, last and suffix == 0):
await self.lower.write(struct.pack("<BH",
CMD_SHIFT_TDIO|BIT_DATA_OUT|(BIT_LAST if chunk_last else 0),
len(tdi_bits)))
tdi_bytes = bytes(tdi_bits)
await self.lower.write(tdi_bytes)
await self._shift_dummy(suffix, last)
self._shift_last(last)
async def shift_tdo(self, count, *, prefix=0, suffix=0, last=True):
assert self._state in ("Shift-IR", "Shift-DR")
tdo_bits = bits()
await self._shift_dummy(prefix)
for count, chunk_last in self._chunk_count(count, last and suffix == 0):
await self.lower.write(struct.pack("<BH",
CMD_SHIFT_TDIO|BIT_DATA_IN|(BIT_LAST if chunk_last else 0),
count))
tdo_bytes = await self.lower.read((count + 7) // 8)
tdo_bits += bits(tdo_bytes, count)
await self._shift_dummy(suffix, last)
self._log_l("shift tdo=%d,<%s>,%d", prefix, dump_bin(tdo_bits), suffix)
self._shift_last(last)
return tdo_bits
async def pulse_tck(self, count):
assert self._state in ("Run-Test/Idle", "Pause-IR", "Pause-DR")
self._log_l("pulse tck count=%d", count)
for count, last in self._chunk_count(count, last=True):
await self.lower.write(struct.pack("<BH",
CMD_SHIFT_TDIO, count))
# State machine transitions
def _state_error(self, new_state):
raise JTAGProbeStateTransitionError("cannot transition from state {} to {}",
self._state, new_state)
async def enter_test_logic_reset(self, force=True):
if force:
self._log_l("state * → Test-Logic-Reset")
elif self._state != "Test-Logic-Reset":
self._log_l("state %s → Test-Logic-Reset", self._state)
else:
return
await self.shift_tms((1,1,1,1,1))
self._state = "Test-Logic-Reset"
async def enter_run_test_idle(self):
if self._state == "Run-Test/Idle": return
self._log_l("state %s → Run-Test/Idle", self._state)
if self._state == "Test-Logic-Reset":
await self.shift_tms((0,))
elif self._state in ("Exit1-IR", "Exit1-DR"):
await self.shift_tms((1,0))
elif self._state in ("Pause-IR", "Pause-DR"):
await self.shift_tms((1,1,0))
elif self._state in ("Update-IR", "Update-DR"):
await self.shift_tms((0,))
else:
self._state_error("Run-Test/Idle")
self._state = "Run-Test/Idle"
async def enter_shift_ir(self):
if self._state == "Shift-IR": return
self._log_l("state %s → Shift-IR", self._state)
if self._state == "Test-Logic-Reset":
await self.shift_tms((0,1,1,0,0))
elif self._state in ("Run-Test/Idle", "Update-IR", "Update-DR"):
await self.shift_tms((1,1,0,0))
elif self._state in ("Pause-DR"):
await self.shift_tms((1,1,1,1,0,0))
elif self._state in ("Pause-IR"):
await self.shift_tms((1,0))
else:
self._state_error("Shift-IR")
self._state = "Shift-IR"
async def enter_pause_ir(self):
if self._state == "Pause-IR": return
self._log_l("state %s → Pause-IR", self._state)
if self._state == "Exit1-IR":
await self.shift_tms((0,))
else:
self._state_error("Pause-IR")
self._state = "Pause-IR"
async def enter_update_ir(self):
if self._state == "Update-IR": return
self._log_l("state %s → Update-IR", self._state)
if self._state == "Shift-IR":
await self.shift_tms((1,1))
elif self._state == "Exit1-IR":
await self.shift_tms((1,))
else:
self._state_error("Update-IR")
self._state = "Update-IR"
async def enter_shift_dr(self):
if self._state == "Shift-DR": return
self._log_l("state %s → Shift-DR", self._state)
if self._state == "Test-Logic-Reset":
await self.shift_tms((0,1,0,0))
elif self._state in ("Run-Test/Idle", "Update-IR", "Update-DR"):
await self.shift_tms((1,0,0))
elif self._state in ("Pause-IR"):
await self.shift_tms((1,1,1,0,0))
elif self._state in ("Pause-DR"):
await self.shift_tms((1,0))
else:
self._state_error("Shift-DR")
self._state = "Shift-DR"
async def enter_pause_dr(self):
if self._state == "Pause-DR": return
self._log_l("state %s → Pause-DR", self._state)
if self._state == "Exit1-DR":
await self.shift_tms((0,))
else:
self._state_error("Pause-DR")
self._state = "Pause-DR"
async def enter_update_dr(self):
if self._state == "Update-DR": return
self._log_l("state %s → Update-DR", self._state)
if self._state == "Shift-DR":
await self.shift_tms((1,1))
elif self._state == "Exit1-DR":
await self.shift_tms((1,))
else:
self._state_error("Update-DR")
self._state = "Update-DR"
# High-level register manipulation
async def pulse_trst(self):
self._log_h("pulse trst")
await self.set_trst(True)
# IEEE 1149.1 3.6.1 (d): "To ensure deterministic operation of the test logic, TMS should
# be held at 1 while the signal applied at TRST* changes from [active] to [inactive]."
await self.shift_tms((1,))
await self.set_trst(False)
self._current_ir = None
async def test_reset(self):
self._log_h("test reset")
await self.enter_test_logic_reset()
await self.enter_run_test_idle()
self._current_ir = None
async def run_test_idle(self, count):
self._log_h("run-test/idle count=%d", count)
await self.enter_run_test_idle()
await self.pulse_tck(count)
async def exchange_ir(self, data, *, prefix=0, suffix=0):
data = bits(data)
self._current_ir = (prefix, data, suffix)
self._log_h("exchange ir-i=%d,<%s>,%d", prefix, dump_bin(data), suffix)
await self.enter_shift_ir()
data = await self.shift_tdio(data, prefix=prefix, suffix=suffix)
await self.enter_update_ir()
self._log_h("exchange ir-o=%d,<%s>,%d", prefix, dump_bin(data), suffix)
return data
async def read_ir(self, count, *, prefix=0, suffix=0):
self._current_ir = (prefix, bits((1,)) * count, suffix)
await self.enter_shift_ir()
data = await self.shift_tdo(count, prefix=prefix, suffix=suffix)
await self.enter_update_ir()
self._log_h("read ir=%d,<%s>,%d", prefix, dump_bin(data), suffix)
return data
async def write_ir(self, data, *, prefix=0, suffix=0, elide=True):
data = bits(data)
if (prefix, data, suffix) == self._current_ir and elide:
self._log_h("write ir (elided)")
return
self._current_ir = (prefix, data, suffix)
self._log_h("write ir=%d,<%s>,%d", prefix, dump_bin(data), suffix)
await self.enter_shift_ir()
await self.shift_tdi(data, prefix=prefix, suffix=suffix)
await self.enter_update_ir()
async def exchange_dr(self, data, *, prefix=0, suffix=0):
self._log_h("exchange dr-i=%d,<%s>,%d", prefix, dump_bin(data), suffix)
await self.enter_shift_dr()
data = await self.shift_tdio(data, prefix=prefix, suffix=suffix)
await self.enter_update_dr()
self._log_h("exchange dr-o=%d,<%s>,%d", prefix, dump_bin(data), suffix)
return data
async def read_dr(self, count, *, prefix=0, suffix=0):
await self.enter_shift_dr()
data = await self.shift_tdo(count, prefix=prefix, suffix=suffix)
await self.enter_update_dr()
self._log_h("read dr=%d,<%s>,%d", prefix, dump_bin(data), suffix)
return data
async def write_dr(self, data, *, prefix=0, suffix=0):
data = bits(data)
self._log_h("write dr=%d,<%s>,%d", prefix, dump_bin(data), suffix)
await self.enter_shift_dr()
await self.shift_tdi(data, prefix=prefix, suffix=suffix)
await self.enter_update_dr()
# Shift chain introspection
async def _scan_xr(self, xr, *, max_length=None, check=True, idempotent=True):
assert xr in ("ir", "dr")
if idempotent:
self._log_h("scan %s idempotent", xr)
else:
self._log_h("scan %s", xr)
if max_length is None:
if xr == "ir":
max_length = self.scan_ir_max_length
if xr == "dr":
max_length = self.scan_dr_max_length
if xr == "ir":
await self.enter_shift_ir()
if xr == "dr":
await self.enter_shift_dr()
# Add 1 so that registers of exactly `max_length` could be scanned successfully.
data_0 = await self.shift_tdio((0,) * (max_length + 1), last=False)
data_1 = await self.shift_tdio((1,) * (max_length + 1), last=not idempotent)
try:
value = None
for length in range(max_length + 1):
if data_1[length] == 1:
if data_0[length:].to_int() == 0:
value = data_0[:length]
break
if value is None:
self._log_h("scan %s overlong", xr)
if check:
raise JTAGProbeError("{} shift chain is too long".format(xr.upper()))
elif len(value) == 0:
self._log_h("scan %s empty", xr)
if check:
raise JTAGProbeError("{} shift chain is empty".format(xr.upper()))
else:
self._log_h("scan %s length=%d data=<%s>",
xr, length, dump_bin(data_0[:length]))
return value
finally:
if idempotent:
if value is None or length == 0:
# Idempotent scan requested, but isn't possible: finish shifting.
await self.shift_tdi((1,), last=True)
else:
# Idempotent scan is possible: shift scanned data back.
await self.shift_tdi(value, last=True)
await self.enter_run_test_idle()
async def scan_ir(self, *, max_length=None, check=True):
return await self._scan_xr("ir", max_length=max_length, check=check, idempotent=False)
async def scan_dr(self, *, max_length=None, check=True):
return await self._scan_xr("dr", max_length=max_length, check=check, idempotent=True)
async def scan_ir_length(self, *, max_length=None):
return len(await self.scan_ir(max_length=max_length))
async def scan_dr_length(self, *, max_length=None):
return len(await self.scan_dr(max_length=max_length))
async def scan_reset_dr_ir(self):
"""Capture IR values and IDCODE/BYPASS DR values using Test-Logic-Reset."""
await self.test_reset()
# Scan DR chain first, since scanning IR chain will latch BYPASS into every IR.
dr_value = await self._scan_xr("dr", idempotent=False)
ir_value = await self._scan_xr("ir", idempotent=False)
return (dr_value, ir_value)
# Blind interrogation
def interrogate_dr(self, dr_value, *, check=True):
"""Split DR value captured after TAP reset into IDCODE/BYPASS chunks."""
idcodes = []
offset = 0
while offset < len(dr_value):
if dr_value[offset]:
if len(dr_value) - offset >= 32:
dr_chunk = dr_value[offset:offset + 32]
idcode = int(dr_chunk)
if dr_chunk[1:12] == bits("00001111111"):
self._log_h("invalid dr idcode=%08x", idcode)
if check:
raise JTAGProbeError("TAP #{} has invalid DR IDCODE={:08x}"
.format(len(idcodes), idcode))
return
else:
self._log_h("found dr idcode=%08x (tap #%d)", idcode, len(idcodes))
idcodes.append(idcode)
offset += 32
else:
self._log_h("truncated dr idcode=<%s>", dump_bin(dr_value[offset:]))
if check:
raise JTAGProbeError("TAP #{} has truncated DR IDCODE=<{}>"
.format(len(idcodes), dump_bin(dr_value[offset:])))
return
else:
self._log_h("found dr bypass (tap #%d)", len(idcodes))
idcodes.append(None)
offset += 1
return idcodes
def interrogate_ir(self, ir_value, tap_count, *, ir_lengths=None, check=True):
"""Split IR value captured after TAP reset to determine IR boundaries."""
assert tap_count > 0
# Each captured IR value in a chain must start with <10>. However, the rest of captured
# IR bits has unspecified value, which may include <10>.
ir_starts = []
while True:
ir_start = ir_value.find((1,0), start=ir_starts[-1] + 1 if ir_starts else 0)
if ir_start == -1:
break
ir_starts.append(ir_start)
# There must be at least as many captured IRs in the chain as there are IDCODE/BYPASS DRs.
if tap_count > len(ir_starts):
self._log_h("invalid ir taps=%d starts=%d", tap_count, len(ir_starts))
if check:
raise JTAGProbeError("IR capture has fewer <10> transitions than TAPs")
return
# The chain must start with a valid captured IR value.
if ir_starts[0] != 0:
self._log_h("invalid ir starts[0]=%d", ir_starts[0])
if check:
raise JTAGProbeError("IR capture does not start with <10> transition")
return
# If IR lengths are specified explicitly, use them but validate first.
if ir_lengths is not None:
if len(ir_lengths) != tap_count:
self._log_h("invalid ir taps=%d user-lengths=%d", tap_count, len(ir_lengths))
if check:
raise JTAGProbeError("IR length count differs from TAP count")
return
if sum(ir_lengths) != len(ir_value):
self._log_h("invalid ir total-length=%d user-total-length=%d",
sum(ir_lengths), len(ir_value))
if check:
raise JTAGProbeError("IR capture length differs from sum of IR lengths")
return
ir_offset = 0
for tap_index, ir_length in enumerate(ir_lengths):
if (ir_offset + ir_length not in ir_starts and
ir_offset + ir_length != len(ir_value)):
self._log_h("misaligned ir (tap #%d)", tap_index)
if check:
raise JTAGProbeError("IR length for TAP #{:d} misaligns next TAP"
.format(tap_index))
return
self._log_h("explicit ir length=%d (tap #%d)", ir_length, tap_index)
ir_offset += ir_length
return list(ir_lengths)
# If there's only one device in the chain, then the entire captured IR belongs to it.
elif tap_count == 1:
ir_length = len(ir_value)
self._log_h("found ir length=%d (single tap)", ir_length)
return [ir_length]
# If there are no more captured IRs than devices in the chain, then IR lengths can be
# determined unambiguously.
elif tap_count == len(ir_starts):
ir_layout = []
for ir_start0, ir_start1 in zip(ir_starts, ir_starts[1:] + [len(ir_value)]):
ir_length = ir_start1 - ir_start0
self._log_h("found ir length=%d (tap #%d)", ir_length, len(ir_layout))
ir_layout.append(ir_length)
return ir_layout
# Otherwise IR lengths are ambiguous.
else:
ir_chunks = []
for ir_start0, ir_start1 in zip(ir_starts, ir_starts[1:] + [len(ir_value)]):
ir_chunks.append(ir_start1 - ir_start0)
self._log_h("ambiguous ir taps=%d chunks=[%s]",
tap_count, ",".join("{:d}".format(chunk) for chunk in ir_chunks))
if check:
raise JTAGProbeError("IR capture insufficiently constrains IR lengths")
return
async def select_tap(self, index, *, ir_lengths=None):
dr_value, ir_value = await self.scan_reset_dr_ir()
idcodes = self.interrogate_dr(dr_value)
ir_layout = self.interrogate_ir(ir_value, tap_count=len(idcodes), ir_lengths=ir_lengths)
return TAPInterface.from_layout(self, ir_layout, index=index)
class TAPInterface:
@classmethod
def from_layout(cls, lower, ir_layout, *, index):
if index not in range(len(ir_layout)):
raise JTAGProbeError("TAP #{:d} is not a part of {:d}-TAP chain"
.format(index, len(ir_layout)))
return cls(lower, ir_length=ir_layout[index],
ir_prefix=sum(ir_layout[:index]), ir_suffix=sum(ir_layout[index + 1:]),
dr_prefix=len(ir_layout[:index]), dr_suffix=len(ir_layout[index + 1:]))
def __init__(self, lower, *, ir_length, ir_prefix=0, ir_suffix=0, dr_prefix=0, dr_suffix=0):
self.lower = lower
self.ir_length = ir_length
self._ir_prefix = ir_prefix
self._ir_suffix = ir_suffix
self._dr_prefix = dr_prefix
self._dr_suffix = dr_suffix
async def test_reset(self):
await self.lower.test_reset()
async def run_test_idle(self, count):
await self.lower.run_test_idle(count)
async def exchange_ir(self, data):
data = bits(data)
assert len(data) == self.ir_length
return await self.lower.exchange_ir(data,
prefix=self._ir_prefix, suffix=self._ir_suffix)
async def read_ir(self):
return await self.lower.read_ir(self.ir_length,
prefix=self._ir_prefix, suffix=self._ir_suffix)
async def write_ir(self, data, *, elide=True):
data = bits(data)
assert len(data) == self.ir_length
await self.lower.write_ir(data, elide=elide,
prefix=self._ir_prefix, suffix=self._ir_suffix)
async def exchange_dr(self, data):
return await self.lower.exchange_dr(data,
prefix=self._dr_prefix, suffix=self._dr_suffix)
async def read_dr(self, length):
return await self.lower.read_dr(length,
prefix=self._dr_prefix, suffix=self._dr_suffix)
async def write_dr(self, data):
await self.lower.write_dr(data,
prefix=self._dr_prefix, suffix=self._dr_suffix)
async def scan_dr(self, *, check=True, max_length=None):
if max_length is not None:
max_length = self._dr_prefix + max_length + self._dr_suffix
data = await self.lower.scan_dr(check=check, max_length=max_length)
if data is None:
return data
if check and len(data) == self._dr_prefix + self._dr_suffix:
raise JTAGProbeError("DR shift chain is empty")
assert len(data) > self._dr_prefix + self._dr_suffix
if self._dr_suffix:
return data[self._dr_prefix:-self._dr_suffix]
else:
return data[self._dr_prefix:]
async def scan_dr_length(self, *, max_length=None):
if max_length is not None:
max_length = self._dr_prefix + max_length + self._dr_suffix
length = await self.lower.scan_dr_length(max_length=max_length)
if length == self._dr_prefix + self._dr_suffix:
raise JTAGProbeError("DR shift chain is empty")
assert length > self._dr_prefix + self._dr_suffix
return length - self._dr_prefix - self._dr_suffix
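# A rough usage sketch (illustrative only; the TAP index and IR value are
# hypothetical, and `jtag_iface` is a JTAGProbeInterface obtained from the applet):
#
#     tap = await jtag_iface.select_tap(0)
#     await tap.write_ir(bits("0010"))   # padded to account for the other TAPs
#     value = await tap.scan_dr()        # bypass bits of the other TAPs are stripped
#
# The ir_prefix/ir_suffix and dr_prefix/dr_suffix bookkeeping above is what makes
# this per-TAP view of the chain possible.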
class JTAGProbeApplet(GlasgowApplet, name="jtag-probe"):
logger = logging.getLogger(__name__)
help = "test integrated circuits via IEEE 1149.1 JTAG"
description = """
Identify, test and debug integrated circuits and board assemblies via IEEE 1149.1 JTAG.
"""
__pins = ("tck", "tms", "tdi", "tdo", "trst")
@classmethod
def add_build_arguments(cls, parser, access):
super().add_build_arguments(parser, access)
for pin in ("tck", "tms", "tdi", "tdo"):
access.add_pin_argument(parser, pin, default=True)
access.add_pin_argument(parser, "trst")
parser.add_argument(
"-f", "--frequency", metavar="FREQ", type=int, default=100,
help="set TCK frequency to FREQ kHz (default: %(default)s)")
def build(self, target, args):
self.mux_interface = iface = target.multiplexer.claim_interface(self, args)
iface.add_subtarget(JTAGProbeSubtarget(
pads=iface.get_pads(args, pins=self.__pins),
out_fifo=iface.get_out_fifo(),
in_fifo=iface.get_in_fifo(auto_flush=False),
period_cyc=target.sys_clk_freq // (args.frequency * 1000),
))
@classmethod
def add_run_arguments(cls, parser, access):
super().add_run_arguments(parser, access)
parser.add_argument(
"--scan-ir-max-length", metavar="LENGTH", type=int,
default=JTAGProbeInterface.scan_ir_max_length,
help="give up scanning IRs longer than LENGTH bits (default: %(default)s)")
parser.add_argument(
"--scan-dr-max-length", metavar="LENGTH", type=int,
default=JTAGProbeInterface.scan_dr_max_length,
help="give up scanning DRs longer than LENGTH bits (default: %(default)s)")
def ir_lengths(args):
lengths = []
for arg in args.split(","):
try:
length = int(arg, 10)
if length >= 2:
lengths.append(length)
continue
except ValueError:
pass
raise argparse.ArgumentTypeError("{!r} is not a valid IR length"
.format(arg))
return lengths
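        # For example, a hypothetical `--ir-lengths 4,8` parses to [4, 8]; values
        # below 2 are rejected, since IEEE 1149.1 requires IRs of at least two bits.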
parser.add_argument(
"--ir-lengths", metavar="IR-LENGTH,...", default=None, type=ir_lengths,
help="set IR lengths of each TAP to corresponding IR-LENGTH (default: autodetect)")
async def run(self, device, args):
iface = await device.demultiplexer.claim_interface(self, self.mux_interface, args)
jtag_iface = JTAGProbeInterface(iface, self.logger, has_trst=args.pin_trst is not None)
jtag_iface.scan_ir_max_length = args.scan_ir_max_length
jtag_iface.scan_dr_max_length = args.scan_dr_max_length
return jtag_iface
@classmethod
def add_run_tap_arguments(cls, parser):
parser.add_argument(
"--tap-index", metavar="INDEX", type=int,
help="select TAP #INDEX for communication (default: select only TAP)")
async def run_tap(self, cls, device, args):
jtag_iface = await self.run_lower(cls, device, args)
dr_value, ir_value = await jtag_iface.scan_reset_dr_ir()
idcodes = jtag_iface.interrogate_dr(dr_value)
ir_layout = jtag_iface.interrogate_ir(ir_value,
tap_count=len(idcodes), ir_lengths=args.ir_lengths)
tap_index = args.tap_index
if tap_index is None:
if len(idcodes) > 1:
raise JTAGProbeError("multiple TAPs found; use explicit --tap-index")
else:
tap_index = 0
return TAPInterface.from_layout(jtag_iface, ir_layout, index=tap_index)
@classmethod
def add_interact_arguments(cls, parser):
p_operation = parser.add_subparsers(dest="operation", metavar="OPERATION", required=True)
p_scan = p_operation.add_parser(
"scan", help="scan JTAG chain and attempt to identify devices",
formatter_class=parser.formatter_class,
description="""
Reset the JTAG TAPs and shift IDCODE or BYPASS register values out to determine
the count and (if available) identity of the devices in the scan chain.
""")
p_enumerate_ir = p_operation.add_parser(
"enumerate-ir", help="(DANGEROUS) use heuristics to enumerate JTAG IR values",
formatter_class=parser.formatter_class,
description="""
THIS COMMAND CAN PERMANENTLY DAMAGE DEVICE UNDER TEST.
IEEE 1149.1 requires every unimplemented IR value to select the BYPASS DR.
By selecting every possible IR value and measuring DR lengths, it is possible to
discover IR values that definitively correspond to non-BYPASS DRs.
Due to the design of JTAG state machine, measuring DR length requires going
through Capture-DR and Update-DR states for instructions that may have
IRREVERSIBLE or UNDEFINED behavior. Although this command updates the DR with
the data just captured from it, IEEE 1149.1 does not require this operation
to be idempotent. Additionally, many devices are not strictly compliant and
in practice may perform IRREVERSIBLE or UNDEFINED actions during operations
that IEEE 1149.1 requires to be benign, such as selecting an unimplemented
instruction, or shifting into DR. USE THIS COMMAND AT YOUR OWN RISK.
DR length measurement can have one of the following four results:
* DR[n], n > 1: non-BYPASS n-bit DR.
* DR[1]: (likely) BYPASS or (less likely) non-BYPASS 1-bit DR.
This result is not shown because most IR values correspond to DR[1].
* DR[0]: TDI connected directly to TDO.
This is not allowed by IEEE 1149.1, but is very common in practice.
* DR[?]: (commonly) no connection to TDO or (less commonly) complex logic
connected between TDI and TDO that is active during Shift-DR.
This is not allowed by IEEE 1149.1, but is common in practice.
""")
p_enumerate_ir.add_argument(
"tap_indexes", metavar="INDEX", type=int, nargs="+",
help="enumerate IR values for TAP(s) #INDEX")
async def interact(self, device, args, jtag_iface):
dr_value, ir_value = await jtag_iface.scan_reset_dr_ir()
self.logger.info("shifted %d-bit DR=<%s>", len(dr_value), dump_bin(dr_value))
self.logger.info("shifted %d-bit IR=<%s>", len(ir_value), dump_bin(ir_value))
idcodes = jtag_iface.interrogate_dr(dr_value)
if len(idcodes) == 0:
self.logger.warning("DR interrogation discovered no TAPs")
return
self.logger.info("discovered %d TAPs", len(idcodes))
if args.operation == "scan":
ir_layout = jtag_iface.interrogate_ir(ir_value,
tap_count=len(idcodes), ir_lengths=args.ir_lengths, check=False)
if not ir_layout:
self.logger.warning("IR interrogation failed")
ir_layout = ["?" for _ in idcodes]
for tap_index, (idcode_value, ir_length) in enumerate(zip(idcodes, ir_layout)):
if idcode_value is None:
self.logger.info("TAP #%d: IR[%s] BYPASS",
tap_index, ir_length)
else:
idcode = DR_IDCODE.from_int(idcode_value)
mfg_name = jedec_mfg_name_from_bank_num(idcode.mfg_id >> 7,
idcode.mfg_id & 0x7f)
if mfg_name is None:
mfg_name = "unknown"
self.logger.info("TAP #%d: IR[%s] IDCODE=%#010x",
tap_index, ir_length, idcode_value)
self.logger.info("manufacturer=%#05x (%s) part=%#06x version=%#03x",
idcode.mfg_id, mfg_name, idcode.part_id, idcode.version)
if args.operation == "enumerate-ir":
ir_layout = jtag_iface.interrogate_ir(ir_value,
tap_count=len(idcodes), ir_lengths=args.ir_lengths)
for tap_index in args.tap_indexes:
ir_length = ir_layout[tap_index]
self.logger.info("TAP #%d: IR[%d]", tap_index, ir_length)
tap_iface = TAPInterface.from_layout(jtag_iface, ir_layout, index=tap_index)
for ir_value in range(0, (1 << ir_length)):
ir_value = bits(ir_value & (1 << bit) for bit in range(ir_length))
await tap_iface.test_reset()
await tap_iface.write_ir(ir_value)
dr_value = await tap_iface.scan_dr(check=False)
if dr_value is None:
dr_length = "?"
level = logging.WARN
else:
dr_length = len(dr_value)
if dr_length == 0:
level = logging.WARN
elif dr_length == 1:
level = logging.DEBUG
else:
level = logging.INFO
self.logger.log(level, " IR=%s DR[%s]", ir_value, dr_length)
@classmethod
def add_repl_arguments(cls, parser):
# Inheriting from the JTAG probe applet does not inherit the REPL.
if cls is not JTAGProbeApplet:
return super().add_repl_arguments(parser)
parser.add_argument(
"--tap-index", metavar="INDEX", type=int,
help="select TAP #INDEX instead of the full chain")
async def repl(self, device, args, jtag_iface):
# See explanation in add_repl_arguments().
if type(self) is not JTAGProbeApplet:
return await super().repl(device, args, jtag_iface)
if args.tap_index is None:
iface = jtag_iface
self.logger.info("dropping to REPL for JTAG chain; "
"use 'help(iface)' to see available APIs")
else:
iface = await jtag_iface.select_tap(args.tap_index, ir_lengths=args.ir_lengths)
self.logger.info("dropping to REPL for TAP #%d; "
"use 'help(iface)' to see available APIs",
args.tap_index)
await AsyncInteractiveConsole(
locals={"iface":iface},
run_callback=jtag_iface.flush
).interact()
# -------------------------------------------------------------------------------------------------
import unittest
class JTAGInterrogationTestCase(unittest.TestCase):
def setUp(self):
self.iface = JTAGProbeInterface(interface=None, logger=JTAGProbeApplet.logger)
def test_dr_empty(self):
self.assertEqual(self.iface.interrogate_dr(bits("")), [])
def test_dr_bypass(self):
self.assertEqual(self.iface.interrogate_dr(bits("0")), [None])
def test_dr_idcode(self):
dr = bits("00111011101000000000010001110111")
self.assertEqual(self.iface.interrogate_dr(dr), [0x3ba00477])
def test_dr_truncated(self):
dr = bits("0011101110100000000001000111011")
with self.assertRaisesRegex(JTAGProbeError,
r"^TAP #0 has truncated DR IDCODE=<1101110001000000000010111011100>$"):
self.iface.interrogate_dr(dr)
self.assertEqual(self.iface.interrogate_dr(dr, check=False), None)
def test_dr_bypass_idcode(self):
dr = bits("001110111010000000000100011101110")
self.assertEqual(self.iface.interrogate_dr(dr), [None, 0x3ba00477])
def test_dr_idcode_bypass(self):
dr = bits("000111011101000000000010001110111")
self.assertEqual(self.iface.interrogate_dr(dr), [0x3ba00477, None])
def test_dr_invalid(self):
dr = bits("00000000000000000000000011111111")
with self.assertRaisesRegex(JTAGProbeError,
r"^TAP #0 has invalid DR IDCODE=000000ff$"):
self.iface.interrogate_dr(dr)
self.assertEqual(self.iface.interrogate_dr(dr, check=False), None)
def test_ir_1tap_0start(self):
ir = bits("0100")
with self.assertRaisesRegex(JTAGProbeError,
r"^IR capture does not start with <10> transition$"):
self.iface.interrogate_ir(ir, 1)
self.assertEqual(self.iface.interrogate_ir(ir, 1, check=False),
None)
def test_ir_1tap_0start_1length(self):
ir = bits("0100")
with self.assertRaisesRegex(JTAGProbeError,
r"^IR capture does not start with <10> transition$"):
self.iface.interrogate_ir(ir, 1, ir_lengths=[4])
self.assertEqual(self.iface.interrogate_ir(ir, 1, ir_lengths=[4], check=False),
None)
def test_ir_1tap_1start(self):
ir = bits("0001")
self.assertEqual(self.iface.interrogate_ir(ir, 1),
[4])
def test_ir_1tap_2start(self):
ir = bits("0101")
self.assertEqual(self.iface.interrogate_ir(ir, 1),
[4])
def test_ir_1tap_2start_1length(self):
ir = bits("0101")
self.assertEqual(self.iface.interrogate_ir(ir, 1, ir_lengths=[4]),
[4])
def test_ir_1tap_2start_1length_over(self):
ir = bits("0101")
with self.assertRaisesRegex(JTAGProbeError,
r"^IR capture length differs from sum of IR lengths$"):
self.iface.interrogate_ir(ir, 1, ir_lengths=[5])
self.assertEqual(self.iface.interrogate_ir(ir, 1, ir_lengths=[5], check=False),
None)
def test_ir_2tap_1start(self):
ir = bits("0001")
with self.assertRaisesRegex(JTAGProbeError,
r"^IR capture has fewer <10> transitions than TAPs$"):
self.iface.interrogate_ir(ir, 2)
self.assertEqual(self.iface.interrogate_ir(ir, 2, check=False),
None)
def test_ir_2tap_1start_2length(self):
ir = bits("0001")
with self.assertRaisesRegex(JTAGProbeError,
r"^IR capture has fewer <10> transitions than TAPs$"):
self.iface.interrogate_ir(ir, 2, ir_lengths=[2, 2])
self.assertEqual(self.iface.interrogate_ir(ir, 2, ir_lengths=[2, 2], check=False),
None)
def test_ir_2tap_2start(self):
ir = bits("01001")
self.assertEqual(self.iface.interrogate_ir(ir, 2),
[3, 2])
def test_ir_2tap_3start(self):
ir = bits("01001001")
with self.assertRaisesRegex(JTAGProbeError,
r"^IR capture insufficiently constrains IR lengths$"):
self.iface.interrogate_ir(ir, 2)
self.assertEqual(self.iface.interrogate_ir(ir, 2, check=False),
None)
def test_ir_2tap_3start_1length(self):
ir = bits("01001001")
with self.assertRaisesRegex(JTAGProbeError,
r"^IR length count differs from TAP count$"):
self.iface.interrogate_ir(ir, 3, ir_lengths=[1])
self.assertEqual(self.iface.interrogate_ir(ir, 3, ir_lengths=[1], check=False),
None)
def test_ir_2tap_3start_2length(self):
ir = bits("01001001")
self.assertEqual(self.iface.interrogate_ir(ir, 2, ir_lengths=[6, 2]),
[6, 2])
self.assertEqual(self.iface.interrogate_ir(ir, 2, ir_lengths=[3, 5]),
[3, 5])
class JTAGProbeAppletTestCase(GlasgowAppletTestCase, applet=JTAGProbeApplet):
@synthesis_test
def test_build(self):
self.assertBuilds()
|
scripts/examples/Arduino/Portenta-H7/04-Image-Filters/cartoon_filter.py | jiskra/openmv | 1,761 | 11171263 | # Cartoon Filter
#
# This example shows off a simple cartoon filter on images. The cartoon
# filter works by joining similar pixel areas of an image and replacing
# the pixels in those areas with the area mean.
import sensor, image, time
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE) # or RGB565...
sensor.set_framesize(sensor.QVGA) # or QQVGA...
sensor.skip_frames(time = 2000)
clock = time.clock()
while(True):
clock.tick()
# seed_threshold controls the maximum area growth of a colored
# region. Making this larger will merge more pixels.
# floating_threshold controls the maximum pixel-to-pixel difference
    # when growing a region. Setting this very high will quickly combine
    # all pixels in the image. You should keep this small.
    # cartoon() will grow regions while both thresholds are satisfied...
    img = sensor.snapshot().cartoon(seed_threshold=0.05, floating_threshold=0.05)
print(clock.fps())
|
tests/modelwrapper_test.py | llv22/baal_tf2.4_mac | 575 | 11171281 |
import math
import unittest
from unittest.mock import Mock
import numpy as np
import pytest
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
from baal.modelwrapper import ModelWrapper, mc_inference
from baal.utils.metrics import ClassificationReport
class DummyDataset(Dataset):
def __init__(self, n_in=1):
self.n_in = n_in
def __len__(self):
return 20
def __getitem__(self, item):
x = torch.from_numpy(np.ones([3, 10, 10]) * item / 255.).float()
if self.n_in > 1:
x = [x] * self.n_in
return x, (torch.FloatTensor([item % 2]))
class DummyModel(nn.Module):
def __init__(self):
super(DummyModel, self).__init__()
self.conv = nn.Conv2d(3, 8, kernel_size=10)
self.relu = nn.ReLU()
self.dropout = nn.Dropout()
self.linear = nn.Linear(8, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
x = self.conv(x)
x = self.relu(x)
x = x.view(x.shape[0], -1)
x = self.dropout(x)
x = self.linear(x)
x = self.sigmoid(x)
return x
class ModelWrapperMultiOutMultiInTest(unittest.TestCase):
def setUp(self):
class MultiOutModel(nn.Module):
def __init__(self):
super().__init__()
self.model = DummyModel()
def forward(self, x):
return [self.model(x)] * 2
self._crit = nn.MSELoss()
self.criterion = lambda x, y: self._crit(x[0], y) + self._crit(x[1], y)
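        # The lambda above sums the MSE of both (identical) heads that MultiOutModel
        # returns against the same target.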
self.model = MultiOutModel()
self.wrapper = ModelWrapper(self.model, self.criterion)
self.optim = torch.optim.SGD(self.wrapper.get_params(), 0.01)
self.dataset = DummyDataset()
def test_train_on_batch(self):
self.wrapper.train()
old_param = list(map(lambda x: x.clone(), self.model.parameters()))
input, target = [torch.stack(v) for v in zip(*(self.dataset[0], self.dataset[1]))]
self.wrapper.train_on_batch(input, target, self.optim)
new_param = list(map(lambda x: x.clone(), self.model.parameters()))
assert any([not torch.allclose(i, j) for i, j in zip(old_param, new_param)])
def test_test_on_batch(self):
self.wrapper.eval()
input, target = [torch.stack(v) for v in zip(*(self.dataset[0], self.dataset[1]))]
preds = torch.stack(
[self.wrapper.test_on_batch(input, target, cuda=False) for _ in range(10)]
).view(10, -1)
# Same loss
assert torch.allclose(torch.mean(preds, 0), preds[0])
preds = torch.stack(
[
self.wrapper.test_on_batch(
input, target, cuda=False, average_predictions=10
)
for _ in range(10)
]
).view(10, -1)
assert torch.allclose(torch.mean(preds, 0), preds[0])
def test_predict_on_batch(self):
self.wrapper.eval()
input = torch.stack((self.dataset[0][0], self.dataset[1][0]))
# iteration == 1
pred = self.wrapper.predict_on_batch(input, 1, False)
assert pred[0].size() == (2, 1, 1)
# iterations > 1
pred = self.wrapper.predict_on_batch(input, 10, False)
assert pred[0].size() == (2, 1, 10)
# iteration == 1
self.wrapper = ModelWrapper(self.model, self.criterion, replicate_in_memory=False)
pred = self.wrapper.predict_on_batch(input, 1, False)
assert pred[0].size() == (2, 1, 1)
# iterations > 1
pred = self.wrapper.predict_on_batch(input, 10, False)
assert pred[0].size() == (2, 1, 10)
def test_out_of_mem_raises_error(self):
self.wrapper.eval()
input = torch.stack((self.dataset[0][0], self.dataset[1][0]))
with pytest.raises(RuntimeError) as e_info:
self.wrapper.predict_on_batch(input, 0, False)
assert 'CUDA ran out of memory while BaaL tried to replicate data' in str(e_info.value)
def test_raising_type_errors(self):
iterations = math.inf
self.wrapper.eval()
input = torch.stack((self.dataset[0][0], self.dataset[1][0]))
with pytest.raises(TypeError):
self.wrapper.predict_on_batch(input, iterations, False)
def test_using_cuda_raises_error_while_testing(self):
        '''CUDA is not available in the test environment'''
self.wrapper.eval()
input = torch.stack((self.dataset[0][0], self.dataset[1][0]))
with pytest.raises(Exception):
self.wrapper.predict_on_batch(input, 1, True)
def test_train(self):
history = self.wrapper.train_on_dataset(self.dataset, self.optim, 10, 2, use_cuda=False,
workers=0)
assert len(history) == 2
def test_test(self):
l = self.wrapper.test_on_dataset(self.dataset, 10, use_cuda=False, workers=0)
assert np.isfinite(l)
l = self.wrapper.test_on_dataset(
self.dataset, 10, use_cuda=False, workers=0, average_predictions=10
)
assert np.isfinite(l)
def test_predict(self):
l = self.wrapper.predict_on_dataset(self.dataset, 10, 20, use_cuda=False,
workers=0)
self.wrapper.eval()
assert np.allclose(
self.wrapper.predict_on_batch(self.dataset[0][0].unsqueeze(0), 20)[0].detach().numpy(),
l[0][0])
assert np.allclose(
self.wrapper.predict_on_batch(self.dataset[19][0].unsqueeze(0), 20)[0][
0].detach().numpy(),
l[0][19])
assert l[0].shape == (len(self.dataset), 1, 20)
# Test generators
l_gen = self.wrapper.predict_on_dataset_generator(self.dataset, 10, 20, use_cuda=False,
workers=0)
assert np.allclose(next(l_gen)[0][0], l[0][0])
for last in l_gen:
pass # Get last item
assert np.allclose(last[0][-1], l[0][-1])
# Test Half
l_gen = self.wrapper.predict_on_dataset_generator(self.dataset, 10, 20, use_cuda=False,
workers=0, half=True)
l = self.wrapper.predict_on_dataset(self.dataset, 10, 20, use_cuda=False, workers=0,
half=True)
assert next(l_gen)[0].dtype == np.float16
assert l[0].dtype == np.float16
data_s = []
l_gen = self.wrapper.predict_on_dataset_generator(data_s, 10, 20, use_cuda=False,
workers=0, half=True)
assert len(list(l_gen)) == 0
class ModelWrapperTest(unittest.TestCase):
def setUp(self):
# self.model = nn.Sequential(
# nn.Linear(10, 8), nn.ReLU(), nn.Dropout(), nn.Linear(8, 1), nn.Sigmoid()
# )
self.model = DummyModel()
self.criterion = nn.BCEWithLogitsLoss()
self.wrapper = ModelWrapper(self.model, self.criterion)
self.optim = torch.optim.SGD(self.wrapper.get_params(), 0.01)
self.dataset = DummyDataset()
def test_train_on_batch(self):
self.wrapper.train()
old_param = list(map(lambda x: x.clone(), self.model.parameters()))
input, target = torch.randn([1, 3, 10, 10]), torch.randn(1, 1)
self.wrapper.train_on_batch(input, target, self.optim)
new_param = list(map(lambda x: x.clone(), self.model.parameters()))
assert any([not torch.allclose(i, j) for i, j in zip(old_param, new_param)])
# test reset weights properties
linear_weights = list(self.wrapper.model.named_children())[3][1].weight.clone()
conv_weights = list(self.wrapper.model.named_children())[0][1].weight.clone()
self.wrapper.reset_fcs()
linear_new_weights = list(self.wrapper.model.named_children())[3][1].weight.clone()
conv_new_weights = list(self.wrapper.model.named_children())[0][1].weight.clone()
assert all([not torch.allclose(i, j) for i, j in zip(linear_new_weights, linear_weights)])
assert all([torch.allclose(i, j) for i, j in zip(conv_new_weights, conv_weights)])
self.wrapper.reset_all()
conv_next_new_weights = list(self.wrapper.model.named_children())[0][1].weight.clone()
assert all(
[not torch.allclose(i, j) for i, j in zip(conv_new_weights, conv_next_new_weights)])
def test_test_on_batch(self):
self.wrapper.eval()
input, target = torch.randn([1, 3, 10, 10]), torch.randn(1, 1)
preds = torch.stack(
[self.wrapper.test_on_batch(input, target, cuda=False) for _ in range(10)]
).view(10, -1)
# Same loss
assert torch.allclose(torch.mean(preds, 0), preds[0])
preds = torch.stack(
[
self.wrapper.test_on_batch(
input, target, cuda=False, average_predictions=10
)
for _ in range(10)
]
).view(10, -1)
assert torch.allclose(torch.mean(preds, 0), preds[0])
def test_predict_on_batch(self):
self.wrapper.eval()
input = torch.randn([2, 3, 10, 10])
# iteration == 1
pred = self.wrapper.predict_on_batch(input, 1, False)
assert pred.size() == (2, 1, 1)
# iterations > 1
pred = self.wrapper.predict_on_batch(input, 10, False)
assert pred.size() == (2, 1, 10)
# iteration == 1
self.wrapper = ModelWrapper(self.model, self.criterion, replicate_in_memory=False)
pred = self.wrapper.predict_on_batch(input, 1, False)
assert pred.size() == (2, 1, 1)
# iterations > 1
pred = self.wrapper.predict_on_batch(input, 10, False)
assert pred.size() == (2, 1, 10)
def test_train(self):
history = self.wrapper.train_on_dataset(self.dataset, self.optim, 10, 2, use_cuda=False,
workers=0)
assert len(history) == 2
def test_test(self):
l = self.wrapper.test_on_dataset(self.dataset, 10, use_cuda=False, workers=0)
assert np.isfinite(l)
l = self.wrapper.test_on_dataset(
self.dataset, 10, use_cuda=False, workers=0, average_predictions=10
)
assert np.isfinite(l)
def test_predict(self):
l = self.wrapper.predict_on_dataset(self.dataset, 10, 20, use_cuda=False,
workers=0)
self.wrapper.eval()
assert np.allclose(
self.wrapper.predict_on_batch(self.dataset[0][0].unsqueeze(0), 20)[0].detach().numpy(),
l[0])
assert np.allclose(
self.wrapper.predict_on_batch(self.dataset[19][0].unsqueeze(0), 20)[0].detach().numpy(),
l[19])
assert l.shape == (len(self.dataset), 1, 20)
# Test generators
l_gen = self.wrapper.predict_on_dataset_generator(self.dataset, 10, 20, use_cuda=False,
workers=0)
assert np.allclose(next(l_gen)[0], l[0])
for last in l_gen:
pass # Get last item
assert np.allclose(last[-1], l[-1])
# Test Half
l_gen = self.wrapper.predict_on_dataset_generator(self.dataset, 10, 20, use_cuda=False,
workers=0, half=True)
l = self.wrapper.predict_on_dataset(self.dataset, 10, 20, use_cuda=False, workers=0,
half=True)
assert next(l_gen).dtype == np.float16
assert l.dtype == np.float16
def test_states(self):
input = torch.randn([1, 3, 10, 10])
def pred_with_dropout(replicate_in_memory):
self.wrapper = ModelWrapper(self.model, self.criterion,
replicate_in_memory=replicate_in_memory)
self.wrapper.train()
            # Dropout makes the predictions change
preds = torch.stack(
[
self.wrapper.predict_on_batch(input, iterations=1, cuda=False)
for _ in range(10)
]
).view(10, -1)
assert not torch.allclose(torch.mean(preds, 0), preds[0])
pred_with_dropout(replicate_in_memory=True)
pred_with_dropout(replicate_in_memory=False)
def pred_without_dropout(replicate_in_memory):
self.wrapper = ModelWrapper(self.model, self.criterion,
replicate_in_memory=replicate_in_memory)
# Dropout is not active in eval
self.wrapper.eval()
preds = torch.stack(
[
self.wrapper.predict_on_batch(input, iterations=1, cuda=False)
for _ in range(10)
]
).view(10, -1)
assert torch.allclose(torch.mean(preds, 0), preds[0])
pred_without_dropout(replicate_in_memory=True)
pred_without_dropout(replicate_in_memory=False)
def test_add_metric(self):
self.wrapper.add_metric('cls_report', lambda: ClassificationReport(2))
assert 'test_cls_report' in self.wrapper.metrics
assert 'train_cls_report' in self.wrapper.metrics
self.wrapper.train_on_dataset(self.dataset, self.optim, 32, 2, False)
self.wrapper.test_on_dataset(self.dataset, 32, False)
assert (self.wrapper.metrics['train_cls_report'].value['accuracy'] != 0).any()
assert (self.wrapper.metrics['test_cls_report'].value['accuracy'] != 0).any()
def test_train_and_test(self):
res = self.wrapper.train_and_test_on_datasets(self.dataset, self.dataset, self.optim,
32, 5, False, return_best_weights=False)
assert len(res) == 5
res = self.wrapper.train_and_test_on_datasets(self.dataset, self.dataset, self.optim,
32, 5, False, return_best_weights=True)
assert len(res) == 2
assert len(res[0]) == 5
assert isinstance(res[1], dict)
mock = Mock()
mock.side_effect = (((np.linspace(0, 50) - 10) / 10) ** 2).tolist()
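        # The mocked test losses above trace a parabola with its minimum near the
        # 10th value, so with patience=1 early stopping should halt well before all
        # 50 epochs run (hence the `len(res[0]) < 50` assertion below).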
self.wrapper.test_on_dataset = mock
res = self.wrapper.train_and_test_on_datasets(self.dataset, self.dataset,
self.optim, 32, 50,
False, return_best_weights=True, patience=1)
assert len(res) == 2
assert len(res[0]) < 50
mock = Mock()
mock.side_effect = (((np.linspace(0, 50) - 10) / 10) ** 2).tolist()
self.wrapper.test_on_dataset = mock
res = self.wrapper.train_and_test_on_datasets(self.dataset, self.dataset,
self.optim, 32, 50,
False, return_best_weights=True, patience=1,
min_epoch_for_es=20)
assert len(res) == 2
assert len(res[0]) < 50 and len(res[0]) > 20
def test_multi_input_model():
class MultiInModel(nn.Module):
def __init__(self):
super().__init__()
self.model = DummyModel()
def forward(self, x):
# We get two inputs
x1, x2 = x
# We merge those inputs
return self.model(x1) + self.model(x2)
model = MultiInModel()
wrapper = ModelWrapper(model, None)
dataset = DummyDataset(n_in=2)
assert len(dataset[0]) == 2
b = next(iter(DataLoader(dataset, 15, False)))[0]
l = wrapper.predict_on_batch(b, iterations=10, cuda=False)
assert l.shape[0] == 15 and l.shape[-1] == 10
if __name__ == '__main__':
pytest.main()
|
sonarqube/tests/test_check.py | vbarbaresi/integrations-core | 663 | 11171296 | # (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
import pytest
from .common import PROJECT
from .metrics import WEB_METRICS
pytestmark = [pytest.mark.integration, pytest.mark.usefixtures('dd_environment')]
def test_check(aggregator, dd_run_check, sonarqube_check, web_instance):
check = sonarqube_check(web_instance)
dd_run_check(check)
global_tags = ['endpoint:{}'.format(web_instance['web_endpoint'])]
global_tags.extend(web_instance['tags'])
project_tag = 'project:{}'.format(PROJECT)
for metric in WEB_METRICS:
tags = [project_tag]
tags.extend(global_tags)
aggregator.assert_metric(metric, tags=tags)
aggregator.assert_service_check('sonarqube.api_access', status=check.OK, tags=global_tags)
def test_version_metadata(datadog_agent, dd_run_check, sonarqube_check, web_instance):
check = sonarqube_check(web_instance)
check.check_id = 'test:123'
version_data = [str(part) for part in os.environ['SONARQUBE_VERSION'].split('.')]
version_parts = {'version.{}'.format(name): part for name, part in zip(('major', 'minor', 'patch'), version_data)}
version_parts['version.scheme'] = 'semver'
dd_run_check(check)
datadog_agent.assert_metadata('test:123', version_parts)
|
bh_modules/rubykeywords.py | jfcherng-sublime/ST-BracketHighlighter | 1,047 | 11171297 | """
BracketHighlighter.
Copyright (c) 2013 - 2016 <NAME> <<EMAIL>>
License: MIT
"""
import re
RE_DEF = re.compile(r"\s*(?:(?:private|public|protected)\s+)?(def).*?")
RE_KEYWORD = re.compile(r"(\s*\b)[\w\W]*")
SPECIAL_KEYWORDS = ('do',)
NORMAL_KEYWORDS = ('for', 'until', 'unless', 'while', 'class', 'module', 'if', 'begin', 'case')
def post_match(view, name, style, first, second, center, bfr, threshold):
"""Strip whitespace from being targeted with highlight."""
if first is not None:
# Strip whitespace from the beginning of first bracket
open_bracket = bfr[first.begin:first.end]
if open_bracket not in SPECIAL_KEYWORDS:
open_bracket_stripped = open_bracket.strip()
if open_bracket_stripped not in NORMAL_KEYWORDS:
m = RE_DEF.match(open_bracket)
if m:
first = first.move(first.begin + m.start(1), first.begin + m.end(1))
else:
m = RE_KEYWORD.match(open_bracket)
if m:
first = first.move(first.begin + m.end(1), first.end)
return first, second, style
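# Illustrative note (not executed by the plugin): RE_DEF.match("  private def foo")
# captures "def" in group 1, so post_match() narrows the highlight from the whole
# "  private def" capture down to just the "def" keyword, while RE_KEYWORD only
# captures the leading whitespace of captures such as "  while" so it can be
# trimmed off before highlighting.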
|
libs/ConfigHelpers.py | JanSkalny/RootTheBox | 635 | 11171302 | import logging
import imghdr
import hashlib
from base64 import b64decode
from tornado.options import options
from datetime import datetime
from past.builtins import basestring
from libs.XSSImageCheck import is_xss_image
from libs.ValidationError import ValidationError
def save_config():
logging.info("Saving current config to: %s" % options.config)
with open(options.config, "w") as fp:
fp.write("##########################")
fp.write(" Root the Box Config File ")
fp.write("##########################\n")
fp.write(
"# Documentation: %s\n"
% "https://github.com/moloch--/RootTheBox/wiki/Configuration-File-Details"
)
fp.write("# Last updated: %s\n" % datetime.now())
for group in options.groups():
# Shitty work around for Tornado 4.1
if "rootthebox.py" in group.lower() or group == "":
continue
fp.write("\n# [ %s ]\n" % group.title())
opt = list(options.group_dict(group).items())
for key, value in opt:
try:
# python2
value_type = basestring
except NameError:
# python 3
value_type = str
if isinstance(value, value_type):
# Str/Unicode needs to have quotes
fp.write('%s = "%s"\n' % (key, value))
else:
# Int/Bool/List use __str__
fp.write("%s = %s\n" % (key, value))
def save_config_image(b64_data):
image_data = bytearray(b64decode(b64_data))
if len(image_data) < (2048 * 2048):
ext = imghdr.what("", h=image_data)
file_name = "/story/%s.%s" % (hashlib.sha1(image_data).hexdigest(), ext)
if ext in ["png", "jpeg", "gif", "bmp"] and not is_xss_image(image_data):
with open("files" + file_name, "wb") as fp:
fp.write(image_data)
return file_name
else:
raise ValidationError(
"Invalid image format, avatar must be: .png .jpeg .gif or .bmp"
)
else:
raise ValidationError("The image is too large")
def create_demo_user():
from models.Team import Team
from models.User import User
from models.GameLevel import GameLevel
from models import dbsession
if Team.by_name("player") is None:
user = User()
user.handle = "player"
user.password = "<PASSWORD>"
user.name = "player"
user.email = "<EMAIL>"
team = Team()
team.name = "player"
team.motto = "Don't hate the player"
team.money = 0
team.game_levels.append(GameLevel.all()[0])
team.members.append(user)
dbsession.add(user)
dbsession.add(team)
dbsession.commit()
|
pymde/preprocess/test_data_matrix.py | kruus/pymde | 379 | 11171315 |
import numpy as np
import scipy.sparse as sp
import torch
from pymde import preprocess
import pymde.testing as testing
@testing.cpu_and_cuda
def test_all_distances_numpy(device):
del device
np.random.seed(0)
data_matrix = np.random.randn(4, 2)
graph = preprocess.data_matrix.distances(data_matrix)
assert graph.n_items == data_matrix.shape[0]
assert graph.n_edges == 6
testing.assert_all_equal(
graph.edges,
torch.tensor([[0, 1], [0, 2], [0, 3], [1, 2], [1, 3], [2, 3]]),
)
for e, d in zip(graph.edges, graph.distances):
e = e.cpu().numpy()
d = d.item()
true_distance = np.linalg.norm(data_matrix[e[0]] - data_matrix[e[1]])
testing.assert_allclose(true_distance, d)
@testing.cpu_and_cuda
def test_all_distances_torch(device):
np.random.seed(0)
data_matrix = torch.tensor(
np.random.randn(4, 2), dtype=torch.float, device=device
)
graph = preprocess.data_matrix.distances(data_matrix)
assert graph.n_items == data_matrix.shape[0]
assert graph.n_edges == 6
testing.assert_all_equal(
graph.edges,
torch.tensor([[0, 1], [0, 2], [0, 3], [1, 2], [1, 3], [2, 3]]),
)
for e, d in zip(graph.edges, graph.distances):
e = e
d = d
true_distance = (data_matrix[e[0]] - data_matrix[e[1]]).norm()
testing.assert_allclose(true_distance, d)
@testing.cpu_and_cuda
def test_all_distances_sparse(device):
del device
np.random.seed(0)
data_matrix = sp.csr_matrix(np.random.randn(4, 2))
graph = preprocess.data_matrix.distances(data_matrix)
data_matrix = data_matrix.todense()
assert graph.n_items == data_matrix.shape[0]
assert graph.n_edges == 6
testing.assert_all_equal(
graph.edges,
torch.tensor([[0, 1], [0, 2], [0, 3], [1, 2], [1, 3], [2, 3]]),
)
for e, d in zip(graph.edges, graph.distances):
e = e.cpu().numpy()
d = d.item()
true_distance = np.linalg.norm(data_matrix[e[0]] - data_matrix[e[1]])
testing.assert_allclose(true_distance, d)
@testing.cpu_and_cuda
def test_some_distances_numpy(device):
del device
np.random.seed(0)
max_distances = 50
retain_fraction = max_distances / int(500 * (499) / 2)
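    # int(500 * 499 / 2) above is the number of distinct item pairs among the 500
    # items, so this retain_fraction asks for roughly `max_distances` of them.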
data_matrix = np.random.randn(500, 2)
graph = preprocess.data_matrix.distances(
data_matrix, retain_fraction=retain_fraction
)
assert graph.n_items == data_matrix.shape[0]
assert graph.n_edges == max_distances
for e, d in zip(graph.edges, graph.distances):
e = e.cpu().numpy()
d = d.item()
true_distance = np.linalg.norm(data_matrix[e[0]] - data_matrix[e[1]])
testing.assert_allclose(true_distance, d)
@testing.cpu_and_cuda
def test_some_distances_torch(device):
np.random.seed(0)
max_distances = 50
retain_fraction = max_distances / int(500 * (499) / 2)
data_matrix = torch.tensor(
np.random.randn(500, 2), dtype=torch.float, device=device
)
graph = preprocess.data_matrix.distances(
data_matrix, retain_fraction=retain_fraction
)
data_matrix = data_matrix.cpu().numpy()
assert graph.n_items == data_matrix.shape[0]
assert graph.n_edges == max_distances
for e, d in zip(graph.edges, graph.distances):
e = e.cpu().numpy()
d = d.item()
true_distance = np.linalg.norm(data_matrix[e[0]] - data_matrix[e[1]])
testing.assert_allclose(true_distance, d)
@testing.cpu_and_cuda
def test_some_distances_sparse(device):
del device
np.random.seed(0)
max_distances = 50
retain_fraction = max_distances / int(500 * (499) / 2)
data_matrix = sp.csr_matrix(np.random.randn(500, 2))
graph = preprocess.data_matrix.distances(
data_matrix, retain_fraction=retain_fraction
)
data_matrix = data_matrix.todense()
assert graph.n_items == data_matrix.shape[0]
assert graph.n_edges == max_distances
for e, d in zip(graph.edges, graph.distances):
e = e.cpu().numpy()
d = d.item()
true_distance = np.linalg.norm(data_matrix[e[0]] - data_matrix[e[1]])
testing.assert_allclose(true_distance, d)
|