id | content
---|---
6294
|
from visual import *
print("""
Click to place spheres under falling string.
Right button drag or Ctrl-drag to rotate view.
Middle button drag or Alt-drag to zoom in or out.
On a two-button mouse, middle is left + right.
""")
# <NAME>
scene.title = "Drape"
restlength = 0.02
m = 0.010 * restlength
g = 9.8
dt = 0.002
k = 3
damp = (1-0)**dt  # per-step damping factor; (1 - 0)**dt == 1.0, i.e. effectively no damping with these settings
nspheres = 3
floor = 0
# Create the stringy thing:
band = curve( x = arange(-1,1,restlength),
y = 1,
radius = 0.02
)
band.p = band.pos * 0
scene.range = 1.5
scene.autoscale = 0
# Let the user position obstacles:
spheres = []
for i in range(nspheres):
s = sphere( pos = scene.mouse.getclick().pos, #(i*0.6 - 0.7,0.5 + i*0.1,0),
radius = 0.25,
color = (abs(sin(i)),cos(i)**2,(i%10)/10.0) )
spheres.append( s )
while True:
rate(1.0 / dt)
if scene.mouse.clicked:
i = len(spheres)
s = sphere( pos = scene.mouse.getclick().pos,
radius = 0.25,
color = (abs(sin(i)),cos(i)**2,(i%10)/10.0) )
spheres.append( s )
if floor:
below = less(band.pos[:,1],-1)
band.p[:,1] = where( below, 0, band.p[:,1] )
band.pos[:,1] = where( below, -1, band.pos[:,1] )
# need a more physical way to make 'damped springs' than this!
band.p = band.p * damp
#band.p[0] = 0 # nail down left endpoint
#band.p[-1] = 0 # nail down right endpoint
band.pos = band.pos + band.p/m*dt
#gravity
band.p[:,1] = band.p[:,1] - m * g * dt
# force[n] is the force on point n from point n+1 (to the right):
length = (band.pos[1:] - band.pos[:-1])
dist = sqrt(sum(length*length,-1))
force = k * ( dist - restlength )
force = length/dist[:,newaxis] * force[:,newaxis]
band.p[:-1] = band.p[:-1] + force*dt
band.p[1:] = band.p[1:] - force*dt
# color based on "stretch": blue -> white -> red
c = clip( dist/restlength * 0.5, 0, 2 )
# blue (compressed) -> white (relaxed) -> red (tension)
band.red[1:] = where( less(c,1), c, 1 )
band.green[1:] = where( less(c,1), c, 2-c )
band.blue[1:] = where( less(c,1), 1, 2-c )
for s in spheres:
dist = mag( band.pos - s.pos )[:,newaxis]
inside = less( dist, s.radius )
if sometrue(inside):
R = ( band.pos - s.pos ) / dist
surface = s.pos + (s.radius)*R
band.pos = surface*inside + band.pos*(1-inside)
pdotR = sum(asarray(band.p)*asarray(R),-1)
band.p = band.p - R*pdotR[:,newaxis]*inside
|
6303
|
import argparse
import numpy as np
import os
import sys
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from saliency.visualizer.smiles_visualizer import SmilesVisualizer
def visualize(dir_path):
parent_dir = os.path.dirname(dir_path)
saliency_vanilla = np.load(os.path.join(dir_path, "saliency_vanilla.npy"))
saliency_smooth = np.load(os.path.join(dir_path, "saliency_smooth.npy"))
saliency_bayes = np.load(os.path.join(dir_path, "saliency_bayes.npy"))
visualizer = SmilesVisualizer()
os.makedirs(os.path.join(parent_dir, "result_vanilla"), exist_ok=True)
os.makedirs(os.path.join(parent_dir, "result_smooth"), exist_ok=True)
os.makedirs(os.path.join(parent_dir, "result_bayes"), exist_ok=True)
test_idx = np.load(os.path.join(dir_path, "test_idx.npy"))
answer = np.load(os.path.join(dir_path, "answer.npy"))
output = np.load(os.path.join(dir_path, "output.npy"))
smiles_all = np.load(os.path.join(parent_dir, "smiles.npy"))
def calc_range(saliency):
vmax = float('-inf')
vmin = float('inf')
for v in saliency:
vmax = max(vmax, np.max(v))
vmin = min(vmin, np.min(v))
return vmin, vmax
v_range_vanilla = calc_range(saliency_vanilla)
v_range_smooth = calc_range(saliency_smooth)
v_range_bayes = calc_range(saliency_bayes)
def get_scaler(v_range):
def scaler(saliency_):
saliency = np.copy(saliency_)
minv, maxv = v_range
if maxv == minv:
saliency = np.zeros_like(saliency)
else:
pos = saliency >= 0.0
saliency[pos] = saliency[pos]/maxv
nega = saliency < 0.0
saliency[nega] = saliency[nega]/(np.abs(minv))
return saliency
return scaler
scaler_vanilla = get_scaler(v_range_vanilla)
scaler_smooth = get_scaler(v_range_smooth)
scaler_bayes = get_scaler(v_range_bayes)
def color(x):
if x > 0:
# Red for positive value
return 1., 1. - x, 1. - x
else:
# Blue for negative value
x *= -1
return 1. - x, 1. - x, 1.
for i, id in enumerate(test_idx):
smiles = smiles_all[id]
out = output[i]
ans = answer[i]
# legend = "t:{}, p:{}".format(ans, out)
legend = ''
ext = '.png' # '.svg'
# visualizer.visualize(
# saliency_vanilla[id], smiles, save_filepath=os.path.join(parent_dir, "result_vanilla", str(id) + ext),
# visualize_ratio=1.0, legend=legend, scaler=scaler_vanilla, color_fn=color)
# visualizer.visualize(
# saliency_smooth[id], smiles, save_filepath=os.path.join(parent_dir, "result_smooth", str(id) + ext),
# visualize_ratio=1.0, legend=legend, scaler=scaler_smooth, color_fn=color)
visualizer.visualize(
saliency_bayes[id], smiles, save_filepath=os.path.join(parent_dir, "result_bayes", str(id) + ext),
visualize_ratio=1.0, legend=legend, scaler=scaler_bayes, color_fn=color)
def plot_result(prediction, answer, save_filepath='result.png'):
plt.scatter(prediction, answer, marker='.')
plt.plot([-100, 100], [-100, 100], c='r')
max_v = max(np.max(prediction), np.max(answer))
min_v = min(np.min(prediction), np.min(answer))
plt.xlim([min_v-0.1, max_v+0.1])
plt.xlabel("prediction")
plt.ylim([min_v-0.1, max_v+0.1])
plt.ylabel("ground truth")
plt.savefig(save_filepath)
plt.close()
def main():
parser = argparse.ArgumentParser(
description='Regression with own dataset.')
parser.add_argument('--dirpath', '-d', type=str, default='./results/M_30_3_32_32')
args = parser.parse_args()
path = args.dirpath
n_split = 5
output = []
answer = []
for i in range(n_split):
suffix = str(i) + "-" + str(n_split)
output.append(np.load(os.path.join(path, suffix, "output.npy")))
answer.append(np.load(os.path.join(path, suffix, "answer.npy")))
output = np.concatenate(output)
answer = np.concatenate(answer)
plot_result(output, answer, save_filepath=os.path.join(path, "result.png"))
for i in range(n_split):
suffix = str(i) + "-" + str(n_split)
print(suffix)
visualize(os.path.join(path, suffix))
if __name__ == '__main__':
main()
|
6308
|
import numpy as np
from PySide2.QtCore import QSignalBlocker, Signal
from PySide2.QtWidgets import QGridLayout, QWidget
from hexrd.ui.scientificspinbox import ScientificDoubleSpinBox
DEFAULT_ENABLED_STYLE_SHEET = 'background-color: white'
DEFAULT_DISABLED_STYLE_SHEET = 'background-color: #F0F0F0'
INVALID_MATRIX_STYLE_SHEET = 'background-color: red'
class MatrixEditor(QWidget):
data_modified = Signal()
def __init__(self, data, parent=None):
super().__init__(parent)
self._data = data
# If this is not None, then only the elements present in the
# list (as (i, j) items) will be enabled.
self._enabled_elements = None
# If this is set, it will be called every time the data updates
# to apply equality constraints.
self._apply_constraints_func = None
# Whether or not the matrix is currently invalid
self.matrix_invalid = False
# Reason the matrix is currently invalid
self.matrix_invalid_reason = ''
self.setLayout(QGridLayout())
self.add_spin_boxes()
self.update_gui()
def add_spin_boxes(self):
layout = self.layout()
for i in range(self.rows):
for j in range(self.cols):
sb = self.create_spin_box()
layout.addWidget(sb, i, j)
def create_spin_box(self):
sb = ScientificDoubleSpinBox()
sb.setKeyboardTracking(False)
sb.valueChanged.connect(self.element_modified)
return sb
def element_modified(self):
self.update_data()
@property
def data(self):
return self._data
@data.setter
def data(self, v):
if not np.array_equal(self._data, v):
if self._data.shape != v.shape:
msg = (f'Shape {v.shape} does not match original shape '
f'{self._data.shape}')
raise AttributeError(msg)
self._data = v
self.reset_disabled_values()
self.update_gui()
@property
def rows(self):
return self.data.shape[0]
@property
def cols(self):
return self.data.shape[1]
def update_data(self):
self.data[:] = self.gui_data
self.apply_constraints()
self.data_modified.emit()
def update_gui(self):
self.gui_data = self.data
@property
def gui_data(self):
row_range = range(self.rows)
col_range = range(self.cols)
return [[self.gui_value(i, j) for j in col_range] for i in row_range]
@gui_data.setter
def gui_data(self, v):
blockers = [QSignalBlocker(w) for w in self.all_widgets] # noqa: F841
for i in range(self.rows):
for j in range(self.cols):
self.set_gui_value(i, j, v[i][j])
@property
def all_widgets(self):
row_range = range(self.rows)
col_range = range(self.cols)
return [self.widget(i, j) for j in col_range for i in row_range]
@property
def enabled_widgets(self):
widgets = []
for i in range(self.rows):
for j in range(self.cols):
if (i, j) in self.enabled_elements:
widgets.append(self.widget(i, j))
return widgets
def widget(self, row, col):
return self.layout().itemAtPosition(row, col).widget()
def gui_value(self, row, col):
return self.widget(row, col).value()
def set_gui_value(self, row, col, val):
self.widget(row, col).setValue(val)
def set_matrix_invalid(self, s):
self.matrix_invalid = True
self.matrix_invalid_reason = s
self.update_tooltips()
self.update_enable_states()
def set_matrix_valid(self):
self.matrix_invalid = False
self.matrix_invalid_reason = ''
self.update_tooltips()
self.update_enable_states()
def update_tooltips(self):
if self.matrix_invalid:
tooltip = self.matrix_invalid_reason
else:
tooltip = ''
for w in self.enabled_widgets:
w.setToolTip(tooltip)
def update_enable_states(self):
enable_all = self.enabled_elements is None
for i in range(self.rows):
for j in range(self.cols):
w = self.widget(i, j)
enable = enable_all or (i, j) in self.enabled_elements
w.setEnabled(enable)
enabled_str = 'enabled' if enable else 'disabled'
style_sheet = getattr(self, f'{enabled_str}_style_sheet')
w.setStyleSheet(style_sheet)
def reset_disabled_values(self):
# Resets all disabled values to zero, then applies constraints
for i in range(self.rows):
for j in range(self.cols):
if not self.widget(i, j).isEnabled():
self.data[i, j] = 0.0
self.apply_constraints()
self.update_gui()
@property
def enabled_style_sheet(self):
if self.matrix_invalid:
return INVALID_MATRIX_STYLE_SHEET
return DEFAULT_ENABLED_STYLE_SHEET
@property
def disabled_style_sheet(self):
return DEFAULT_DISABLED_STYLE_SHEET
@property
def enabled_elements(self):
return self._enabled_elements
@enabled_elements.setter
def enabled_elements(self, v):
if self._enabled_elements != v:
self._enabled_elements = v
self.update_enable_states()
self.reset_disabled_values()
@property
def apply_constraints_func(self):
return self._apply_constraints_func
@apply_constraints_func.setter
def apply_constraints_func(self, v):
if self._apply_constraints_func != v:
self._apply_constraints_func = v
self.apply_constraints()
def apply_constraints(self):
if (func := self.apply_constraints_func) is None:
return
func(self.data)
self.update_gui()
if __name__ == '__main__':
import sys
from PySide2.QtWidgets import QApplication, QDialog, QVBoxLayout
if len(sys.argv) < 2:
sys.exit('Usage: <script> <matrix_size>')
rows, cols = [int(x) for x in sys.argv[1].split('x')]
data = np.ones((rows, cols))
app = QApplication(sys.argv)
dialog = QDialog()
layout = QVBoxLayout()
dialog.setLayout(layout)
editor = MatrixEditor(data)
layout.addWidget(editor)
# def constraints(x):
# x[2][2] = x[1][1]
# editor.enabled_elements = [(1, 1), (3, 4)]
# editor.apply_constraints_func = constraints
def on_data_modified():
print(f'Data modified: {editor.data}')
editor.data_modified.connect(on_data_modified)
dialog.finished.connect(app.quit)
dialog.show()
app.exec_()
|
6309
|
from django.conf.urls.defaults import *

urlpatterns = patterns('pytorque.views',
    (r'^$', 'central_dispatch_view'),
    (r'^browse$', 'central_dispatch_view'),
    (r'^monitor$', 'central_dispatch_view'),
    (r'^submit$', 'central_dispatch_view'),
    (r'^stat$', 'central_dispatch_view'),
    (r'^login/$', 'login'),
    (r'^logout/$', 'logout'),
    # (r'^$', 'central_dispatch_view'),
    (r'^user/(?P<username>\w{0,50})/$', 'index'),
    (r'^user/(?P<username>\w{0,50})/browse$', 'browse'),
    # (r'^user/(?P<username>\w{0,50})/monitor', 'monitor'),
    # (r'^user/(?P<username>\w{0,50})/submit', 'submit'),
    # (r'^user/(?P<username>\w{0,50})/stat', 'stat'),
)
|
6315
|
def main():
    n = 111
    gen = (n * 7 for x in range(10))  # lazily yields 777 (= 111 * 7) ten times
    if 777 in gen:  # the membership test consumes the generator until it finds a match
        print("Yes!")
if __name__ == '__main__':
    main()
|
6356
|
import abc
from typing import Dict, Callable
import tensorflow as tf
from flink_ml_framework.context import Context
from flink_ml_framework.java_file import *
from ..runner import tf_helper, io_helper
from ..runner.output_writer import DirectOutputWriter
try:
from flink_ml_tensorflow.tensorflow_context import TFContext
except ImportError:  # fall back to the TensorFlow 2 variant of the context
from flink_ml_tensorflow2.tensorflow_context import TFContext
# noinspection PyUnresolvedReferences
from tensorflow_io.core.python.ops import core_ops
__all__ = ['TF1_TYPE', 'TF2_TYPE']
TF1_TYPE = 'tf1'
TF2_TYPE = 'tf2'
class BaseEntry(abc.ABC):
def __init__(self, func_name, engine_type):
self.func_name = func_name
self.engine_type = engine_type
@staticmethod
def get_func_by_name(func_name):
"""
Get function by the func name
:param func_name: func name
:return: function
"""
if '.' not in func_name:
if func_name in globals():
return globals()[func_name]
else:
raise RuntimeError('cannot find function[{}]'.format(func_name))
else:
module_name, func_name = func_name.rsplit('.', 1)
import importlib
# load the module, will raise ImportError if module cannot be loaded
m = importlib.import_module(module_name)
# get the class, will raise AttributeError if class cannot be found
c = getattr(m, func_name)
return c
@abc.abstractmethod
def construct_args(self, **kwargs):
pass
def is_batch(self):
return True
def post_process(self, **kwargs):
pass
def entry_func(self, context: Context):
tf_context = TFContext(context)
properties = tf_context.properties
print('properties', properties, flush=True)
# intra_op_parallelism is set by akdl, because there is a bug in TensorFlow 1.x
# See: https://stackoverflow.com/questions/34426268/restricting-number-of-cores-used
intra_op_parallelism = int(properties['ALINK:intra_op_parallelism'])
if self.engine_type == TF1_TYPE:
tf_helper.set_intra_op_parallelism(intra_op_parallelism_threads=intra_op_parallelism)
elif self.engine_type == TF2_TYPE:
tf.config.threading.set_intra_op_parallelism_threads(intra_op_parallelism)
num_workers = int(properties['ALINK:num_workers'])
work_dir = properties['ALINK:work_dir']
cluster, task_type, task_index = tf_context.export_estimator_cluster()
if self.is_batch():
java_queue_file = JavaFile(context.from_java(), context.to_java())
dataset_file = os.path.join(work_dir, 'dataset.tfrecords')
dataset, dataset_length = io_helper.convert_java_queue_file_to_repeatable_dataset(java_queue_file,
dataset_file)
print("number of records: " + str(dataset_length), flush=True)
dataset_fn: Callable[[], tf.data.TFRecordDataset] = lambda: tf.data.TFRecordDataset(dataset_file)
else:
dataset_fn: Callable[[], tf.data.TFRecordDataset] = lambda: tf_context.flink_stream_dataset()
dataset = None
dataset_file = None
dataset_length = None
saved_model_dir = os.path.join(work_dir, 'savedmodel')
user_params: Dict = json.loads(properties['ALINK:user_defined_params'])
for i in range(1, 1024):
key = "ALINK:bc_" + str(i)
if key in properties:
user_params[key] = context.properties[key]
key = "ALINK:model_dir"
if key in properties:
user_params[key] = properties[key]
output_writer = DirectOutputWriter(tf_context.from_java(), tf_context.to_java())
locals_copy = locals().copy()
locals_copy.pop("self")
print("locals_copy = ", locals_copy, flush=True)
args = self.construct_args(**locals_copy)
func = self.get_func_by_name(self.func_name)
func(args)
print("task_type = {}, task_index = {}: done tf_user_main".format(task_type, task_index), flush=True)
local_vars = locals().copy()
local_vars.pop('self')
self.post_process(**local_vars)
print("task_type = {}, task_index = {}: exit".format(task_type, task_index), flush=True)
output_writer.close()
|
6388
|
import os
import glob
import cv2
import numpy as np
import torch
from torchvision.transforms import transforms
from natsort import natsorted
from models import resmasking_dropout1
from utils.datasets.fer2013dataset import EMOTION_DICT
from barez import show
transform = transforms.Compose(
[
transforms.ToPILImage(),
transforms.ToTensor(),
]
)
def activations_mask(tensor):
tensor = torch.squeeze(tensor, 0)
tensor = torch.mean(tensor, 0)
tensor = tensor.detach().cpu().numpy()
tensor = np.maximum(tensor, 0)
tensor = cv2.resize(tensor, (224, 224))
tensor = tensor - np.min(tensor)
tensor = tensor / np.max(tensor)
heatmap = cv2.applyColorMap(np.uint8(255 * tensor), cv2.COLORMAP_JET)
return heatmap
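# activations_mask returns a 224x224 uint8 BGR heatmap (JET colormap) built from
# the channel-averaged, ReLU-clipped and min-max normalized activations, so it
# can be concatenated or blended directly with the resized input image below.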
model = resmasking_dropout1(3, 7)
# state = torch.load('./saved/checkpoints/resmasking_dropout1_rot30_2019Nov17_14.33')
state = torch.load("./saved/checkpoints/Z_resmasking_dropout1_rot30_2019Nov30_13.32")
model.load_state_dict(state["net"])
model.cuda()
model.eval()
for image_path in natsorted(
glob.glob("/home/z/research/bkemo/images/**/*.png", recursive=True)
):
image_name = os.path.basename(image_path)
print(image_name)
# image_path = '/home/z/research/bkemo/images/disgust/0.0_dc10a3_1976_0.png'
image = cv2.imread(image_path)
image = cv2.resize(image, (224, 224))
tensor = transform(image)
tensor = torch.unsqueeze(tensor, 0)
tensor = tensor.cuda()
# output = model(tensor)
x = model.conv1(tensor) # 112
x = model.bn1(x)
x = model.relu(x)
x = model.maxpool(x) # 56
x = model.layer1(x) # 56
m = model.mask1(x)
x = x * (1 + m)
x = model.layer2(x) # 28
m = model.mask2(x)
x = x * (1 + m)
x = model.layer3(x) # 14
heat_1 = activations_mask(x)
m = model.mask3(x)
x = x * (1 + m)
# heat_2 = activations_mask(m)
x = model.layer4(x) # 7
m = model.mask4(x)
x = x * (1 + m)
x = model.avgpool(x)
x = torch.flatten(x, 1)
output = model.fc(x)
# print(np.sum(heat_1 - heat_2))
# show(np.concatenate((image, heat_1, heat_2), axis=1))
cv2.imwrite(
"./masking_provements/{}".format(image_name),
np.concatenate((image, heat_1), axis=1),
)
# np.concatenate((image, heat_1, heat_2), axis=1))
# output = output.cpu().numpy()
# print(EMOTION_DICT[torch.argmax(output, 1).item()])
|
6401
|
from PyQt5.QtWidgets import *
from matplotlib.backends.backend_qt5agg import FigureCanvas
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
class PstaticWidget(QWidget):
    def __init__(self, parent=None):
        QWidget.__init__(self, parent)
        self.fig_pstatic = Figure()
        self.fig_pstatic.set_facecolor('#ffffff')
        self.canvas_pstatic = FigureCanvas(self.fig_pstatic)
        vertical_layout = QVBoxLayout()
        vertical_layout.addWidget(self.canvas_pstatic)
        self.canvas_pstatic.axes_pstatic = self.canvas_pstatic.figure.add_subplot(111)
        self.setLayout(vertical_layout)
        self.canvas_pstatic.axes_pstatic.set_xticks([])
        self.canvas_pstatic.axes_pstatic.set_yticks([])
        self.canvas_pstatic.axes_pstatic.axis('off')
        self.fig_pstatic.subplots_adjust(left=0.12, bottom=0.15, right=0.985, top=0.95)
        self.toolbar = NavigationToolbar(self.canvas_pstatic, self)
        self.toolbar.setFixedHeight(25)
        vertical_layout.addWidget(self.toolbar)
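# Minimal standalone usage sketch (not part of the original module; assumes it
# is acceptable to create a QApplication here just to preview the widget):
if __name__ == '__main__':
    import sys
    app = QApplication(sys.argv)
    widget = PstaticWidget()
    # Draw something on the embedded axes and refresh the canvas.
    widget.canvas_pstatic.axes_pstatic.plot([0, 1, 2], [0, 1, 4])
    widget.canvas_pstatic.draw()
    widget.show()
    sys.exit(app.exec_())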
|
6469
|
import abc
class DistortionABC(metaclass=abc.ABCMeta):
    maptype = None

    @abc.abstractmethod
    def apply(self, xy_in):
        """Apply distortion mapping"""
        pass

    @abc.abstractmethod
    def apply_inverse(self, xy_in):
        """Apply inverse distortion mapping"""
        pass
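# Illustrative sketch of a concrete subclass (not from the original code base):
# a simple radial model, assuming xy_in is an (N, 2) array of normalized
# coordinates; the coefficient k1 and the single-step inverse are placeholders.
import numpy as np

class RadialDistortion(DistortionABC):
    maptype = "radial"

    def __init__(self, k1=0.1):
        self.k1 = k1

    def apply(self, xy_in):
        xy = np.asarray(xy_in, dtype=float)
        r2 = np.sum(xy ** 2, axis=-1, keepdims=True)
        return xy * (1.0 + self.k1 * r2)

    def apply_inverse(self, xy_in):
        # Crude one-step approximation of the inverse; a real implementation
        # would iterate to convergence or use an analytic inverse.
        xy = np.asarray(xy_in, dtype=float)
        r2 = np.sum(xy ** 2, axis=-1, keepdims=True)
        return xy / (1.0 + self.k1 * r2)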
|
6476
|
import os
from setuptools import setup

# Read the version
g = {}
with open(os.path.join("editorconfig", "version.py"), "rt") as fp:
    exec(fp.read(), g)
v = g['VERSION']
version = ".".join(str(x) for x in v[:3])
if v[3] != "final":
    version += "-" + v[3]

setup(
    name='EditorConfig',
    version=version,
    author='EditorConfig Team',
    packages=['editorconfig'],
    url='http://editorconfig.org/',
    license='python',
    description='EditorConfig File Locator and Interpreter for Python',
    long_description=open('README.rst').read(),
    entry_points={
        'console_scripts': [
            'editorconfig = editorconfig.__main__:main',
        ]
    },
    classifiers=[
        'License :: OSI Approved :: Python Software Foundation License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: Implementation :: PyPy',
    ],
)
|
6519
|
import time
import pykeyboard
# TODO: Replace following two lines with the code that activate the application.
print('Activate the application 3 seconds.')
time.sleep(3)
k = pykeyboard.PyKeyboard()
k.press_key(k.left_key)
time.sleep(1) # Hold down left key for 1 second.
k.release_key(k.left_key)
|
6526
|
import os
from nltk.translate.bleu_score import corpus_bleu
from nltk.translate.bleu_score import SmoothingFunction
import json
from tqdm import tqdm, trange
from random import sample
import numpy as np
import pickle
import argparse
import bert_eval_acc
import svm_eval_acc
smooth = SmoothingFunction()
def eval_bleu(ref, pred):
"""
:param ref: list(list(list(any))), a list of reference sentences, each element of the list is a list of references
:param pred: list(list(any)), a list of predictions
:return: corpus bleu score
"""
return corpus_bleu(ref, pred, smoothing_function=smooth.method1)
def eval_bleu_detail(ref, pred):
"""
:param ref: list(list(list(any))), a list of reference sentences, each element of the list is a list of references
:param pred: list(list(any)), a list of predictions
:return: corpus bleu score
"""
return corpus_bleu(ref, pred, weights=[1, 0, 0, 0], smoothing_function=smooth.method1),\
corpus_bleu(ref, pred, weights=[0, 1, 0, 0], smoothing_function=smooth.method1), \
corpus_bleu(ref, pred, weights=[0, 0, 1, 0], smoothing_function=smooth.method1), \
corpus_bleu(ref, pred, weights=[0, 0, 0, 1], smoothing_function=smooth.method1)
def count_ngram(hyps_resp, n):
"""
Count the number of unique n-grams
:param hyps_resp: list, a list of responses
:param n: int, n-gram
:return: the number of unique n-grams in hyps_resp
"""
if len(hyps_resp) == 0:
print("ERROR, eval_distinct get empty input")
return
if type(hyps_resp[0]) != list:
print("ERROR, eval_distinct takes in a list of <class 'list'>, get a list of {} instead".format(
type(hyps_resp[0])))
return
ngram = set()
for resp in hyps_resp:
if len(resp) < n:
continue
for i in range(len(resp) - n + 1):
ngram.add(' '.join(resp[i: i + n]))
return len(ngram)
def eval_distinct_detail(hyps_resp):
"""
compute distinct score for the hyps_resp
:param hyps_resp: list, a list of hyps responses
:return: average distinct score for 1, 2-gram
"""
if len(hyps_resp) == 0:
print("ERROR, eval_distinct get empty input")
return
if type(hyps_resp[0]) != list:
print("ERROR, eval_distinct takes in a list of <class 'list'>, get a list of {} instead".format(
type(hyps_resp[0])))
return
hyps_resp = [[str(x) for x in l] for l in hyps_resp]
hyps_resp = [(' '.join(i)).split() for i in hyps_resp]
num_tokens = sum([len(i) for i in hyps_resp])
dist1 = count_ngram(hyps_resp, 1) / float(num_tokens)
dist2 = count_ngram(hyps_resp, 2) / float(num_tokens)
return dist1, dist2
def eval_f1(ref, pred):
"""
:param ref: list(list(list(any))), a list of reference sentences, each element of the list is a list of references
:param pred: list(list(any)), a list of predictions
:return: f1 score
"""
assert len(ref) == len(pred) > 0
precisions = []
recalls = []
for i, s in enumerate(pred):
ref_set = set()
for rs in ref[i]:
for w in rs:
ref_set.add(w)
pred_set = set()
for w in s:
pred_set.add(w)
p = 0
for w in s:
if w in ref_set:
p += 1
if len(s) > 0:
p /= len(s)
r = 0
for rs in ref[i]:
for w in rs:
if w in pred_set:
r += 1
tot_l = sum([len(rs) for rs in ref[i]])
if tot_l > 0:
r /= tot_l
precisions.append(p)
recalls.append(r)
precision = sum(precisions) / len(precisions)
recall = sum(recalls) / len(recalls)
return 0.0 if precision == recall == 0 else 2 * precision * recall / (precision + recall)
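# Worked example (illustrative, not executed): with
#   ref  = [[['a', 'b', 'c']]]        # one sample with a single reference
#   pred = [['a', 'b', 'd']]
# eval_f1(ref, pred) gives precision = recall = 2/3, hence F1 = 2/3, and
# eval_distinct_detail([['a', 'b', 'a', 'b']]) gives dist-1 = 2/4 and dist-2 = 2/4
# (unique 1-/2-grams divided by the total number of tokens).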
def calc_metrics_value(task, fn, n_sample=None):
with open(fn) as f:
res = [json.loads(i) for i in f.readlines()]
s0_pred, s0_ref = [], []
s1_pred, s1_ref = [], []
for d in res:
if d['style'] == 0:
s0_ref.append([list(d['resp'])])
s0_pred.append(list(d['pred_style0'][0]))
else:
s1_ref.append([list(d['resp'])])
s1_pred.append(list(d['pred_style1'][0]))
if n_sample:
assert len(s0_ref) >= n_sample
assert len(s1_ref) >= n_sample
sampled_idxs = sample(range(len(s0_ref)), n_sample)
s0_ref = [x for i, x in enumerate(s0_ref) if i in sampled_idxs]
s0_pred = [x for i, x in enumerate(s0_pred) if i in sampled_idxs]
sampled_idxs = sample(range(len(s1_ref)), n_sample)
s1_ref = [x for i, x in enumerate(s1_ref) if i in sampled_idxs]
s1_pred = [x for i, x in enumerate(s1_pred) if i in sampled_idxs]
bleu_s0 = eval_bleu_detail(s0_ref, s0_pred)
bleu_s1 = eval_bleu_detail(s1_ref, s1_pred)
dist_s0 = eval_distinct_detail(s0_pred)
dist_s1 = eval_distinct_detail(s1_pred)
f1_s0 = eval_f1(s0_ref, s0_pred)
f1_s1 = eval_f1(s1_ref, s1_pred)
for k in range(1, 4):
print('%d-gram BLEU:' % k,
's0', bleu_s0[k - 1] * 100,
's1', bleu_s1[k - 1] * 100,
'mean', (bleu_s0[k - 1] + bleu_s1[k - 1]) / 2 * 100)
print('F1:',
's0', f1_s0 * 100, 's1', f1_s1 * 100,
'mean', (f1_s0 + f1_s1) / 2 * 100)
print('Dist:',
's0', dist_s0[1] * 100, 's1', dist_s1[1] * 100,
'mean', (dist_s0[1] + dist_s1[1]) / 2 * 100)
parser = argparse.ArgumentParser()
parser.add_argument('--eval_file_path', help='path of the eval file', required=True)
args = parser.parse_args()
file_path = args.eval_file_path
calc_metrics_value(None, file_path)
print("Evaluating acc results:")
bert_eval_acc.main(file_path)
svm_eval_acc.main(file_path)
|
6542
|
from db import db
class RisklayerPrognosis(db.Model):
    __tablename__ = 'risklayer_prognosis'
    datenbestand = db.Column(db.TIMESTAMP, primary_key=True, nullable=False)
    prognosis = db.Column(db.Float, nullable=False)
# class RisklayerPrognosisSchema(SQLAlchemyAutoSchema):
# class Meta:
# strict = True
# model = RisklayerPrognosis
#
# timestamp = fields.Timestamp(data_key="datenbestand")
# prognosis = fields.Number(data_key="prognosis")
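# Minimal usage sketch (assumes an active Flask-SQLAlchemy app context and an
# existing 'risklayer_prognosis' table; not part of the original module):
#
#   from datetime import datetime
#   db.session.add(RisklayerPrognosis(datenbestand=datetime.utcnow(), prognosis=123.4))
#   db.session.commit()
#   latest = (RisklayerPrognosis.query
#             .order_by(RisklayerPrognosis.datenbestand.desc())
#             .first())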
|
6579
|
import json

d1 = {}
with open("/home/qinyuan/zs/out/bart-large-with-description-grouped-1e-5-outerbsz4-innerbsz32-adapterdim4-unfreeze-dec29/test_predictions.jsonl") as fin:
    for line in fin:
        d = json.loads(line)
        d1[d["id"]] = d["output"][0]["answer"]

d2 = {}
dq = {}
with open("/home/qinyuan/zs/out/bart-large-zsre-with-description-LR2e-5-FREQ32-dec27/test_predictions_submitted.jsonl") as fin:
    for line in fin:
        d = json.loads(line)
        d2[d["id"]] = d["output"][0]["answer"]
        dq[d["id"]] = d["input"]

d3 = {}
with open("/home/qinyuan/zs/data/structured_zeroshot-test.jsonl") as fin:
    for line in fin:
        d = json.loads(line)
        d3[d["id"]] = [item["answer"] for item in d["output"]]

count = 0
win1 = 0
win2 = 0
for key in d1.keys():
    if d1[key] != d2[key]:
        print("{}. {}. {}. {}. {}".format(key, dq[key], d1[key], d2[key], d3[key]))
        count += 1
        if d1[key] in d3[key] and d2[key] not in d3[key]:
            win1 += 1
            print(d1[key])
            print(d2[key])
        if d2[key] in d3[key] and d1[key] not in d3[key]:
            win2 += 1
            print(d1[key])
            print(d2[key])
print(count)
print(win1)
print(win2)
|
6592
|
import os
import imp
from setuptools import setup, find_packages
__version__ = imp.load_source(
"hsfs.version", os.path.join("hsfs", "version.py")
).__version__
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="hsfs",
version=__version__,
install_requires=[
"pyhumps==1.6.1",
"requests",
"furl",
"boto3",
"pandas",
"numpy",
"pyjks",
"mock",
"avro==1.10.2",
"sqlalchemy",
"PyMySQL",
],
extras_require={
"dev": [
"pytest",
"flake8",
"black"],
"docs": [
"mkdocs==1.1.2",
"mkdocs-material==6.2.2",
"mike==0.5.5",
"sphinx==3.5.4",
"keras_autodoc @ git+https://[email protected]/moritzmeister/keras-autodoc@split-tags-properties",
"markdown-include"],
"hive": ["pyhopshive[thrift]"]
},
author="Logical Clocks AB",
author_email="<EMAIL>",
description="HSFS: An environment independent client to interact with the Hopsworks Featurestore",
license="Apache License 2.0",
keywords="Hopsworks, Feature Store, Spark, Machine Learning, MLOps, DataOps",
url="https://github.com/logicalclocks/feature-store-api",
download_url="https://github.com/logicalclocks/feature-store-api/releases/tag/"
+ __version__,
packages=find_packages(),
long_description=read("../README.md"),
long_description_content_type="text/markdown",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Topic :: Utilities",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
"Intended Audience :: Developers",
],
)
|
6610
|
import torch
from torch import nn
from torch.nn import functional as F
from torchdrug import layers
class ConditionalFlow(nn.Module):
"""
Conditional flow transformation from `Masked Autoregressive Flow for Density Estimation`_.
.. _Masked Autoregressive Flow for Density Estimation:
https://arxiv.org/pdf/1705.07057.pdf
Parameters:
input_dim (int): input & output dimension
condition_dim (int): condition dimension
hidden_dims (list of int, optional): hidden dimensions
activation (str or function, optional): activation function
"""
def __init__(self, input_dim, condition_dim, hidden_dims=None, activation="relu"):
super(ConditionalFlow, self).__init__()
self.input_dim = input_dim
self.output_dim = input_dim
if hidden_dims is None:
hidden_dims = []
self.mlp = layers.MLP(condition_dim, list(hidden_dims) + [input_dim * 2], activation)
self.rescale = nn.Parameter(torch.zeros(1))
def forward(self, input, condition):
"""
Transform data into latent representations.
Parameters:
input (Tensor): input representations
condition (Tensor): conditional representations
Returns:
(Tensor, Tensor): latent representations, log-likelihood of the transformation
"""
scale, bias = self.mlp(condition).chunk(2, dim=-1)
scale = (F.tanh(scale) * self.rescale)
output = (input + bias) * scale.exp()
log_det = scale
return output, log_det
def reverse(self, latent, condition):
"""
Transform latent representations into data.
Parameters:
latent (Tensor): latent representations
condition (Tensor): conditional representations
Returns:
(Tensor, Tensor): input representations, log-likelihood of the transformation
"""
scale, bias = self.mlp(condition).chunk(2, dim=-1)
scale = (F.tanh(scale) * self.rescale)
output = latent / scale.exp() - bias
log_det = scale
return output, log_det
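# Minimal round-trip sketch (illustrative; dimensions are arbitrary): reverse()
# should undo forward() for the same condition, up to numerical error.
if __name__ == "__main__":
    flow = ConditionalFlow(input_dim=8, condition_dim=16, hidden_dims=[32])
    x = torch.randn(4, 8)
    condition = torch.randn(4, 16)
    z, log_det = flow(x, condition)
    x_rec, _ = flow.reverse(z, condition)
    print(torch.allclose(x, x_rec, atol=1e-5))  # expected: True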
|
6612
|
import numpy as np
from sklearn.utils.multiclass import type_of_target
from mindware.base_estimator import BaseEstimator
from mindware.components.utils.constants import type_dict, MULTILABEL_CLS, IMG_CLS, TEXT_CLS, OBJECT_DET
from mindware.components.feature_engineering.transformation_graph import DataNode
class Classifier(BaseEstimator):
"""This class implements the classification task. """
def initialize(self, data: DataNode, **kwargs):
if self.metric is None:
self.metric = 'acc'
# Check the task type: {binary, multiclass}
task_type = type_of_target(data.data[1])
if task_type in type_dict:
task_type = type_dict[task_type]
else:
raise ValueError("Invalid Task Type: %s!" % task_type)
self.task_type = task_type
super().initialize(data=data, **kwargs)
def fit(self, data: DataNode, **kwargs):
"""
Fit the classifier to given training data.
:param data: instance of DataNode
:return: self
"""
if self._ml_engine is None:
self.initialize(data=data, **kwargs)
super().fit(data, **kwargs)
return self
def predict(self, X, batch_size=None, n_jobs=1):
"""
Predict classes for X.
:param X: Datanode
:param batch_size: int
:param n_jobs: int
:return: y : array of shape = [n_samples]
The predicted classes.
"""
if not isinstance(X, DataNode):
raise ValueError("X is supposed to be a Data Node, but get %s" % type(X))
return super().predict(X, batch_size=batch_size, n_jobs=n_jobs)
def refit(self):
return super().refit()
def predict_proba(self, X, batch_size=None, n_jobs=1):
"""
Predict probabilities of classes for all samples X.
:param X: Datanode
:param batch_size: int
:param n_jobs: int
:return: y : array of shape = [n_samples, n_classes]
The predicted class probabilities.
"""
if not isinstance(X, DataNode):
raise ValueError("X is supposed to be a Data Node, but get %s" % type(X))
pred_proba = super().predict_proba(X, batch_size=batch_size, n_jobs=n_jobs)
if self.task_type != MULTILABEL_CLS:
assert (
np.allclose(
np.sum(pred_proba, axis=1),
np.ones_like(pred_proba[:, 0]))
), "Prediction probability does not sum up to 1!"
# Check that all probability values lie between 0 and 1.
assert (
(pred_proba >= 0).all() and (pred_proba <= 1).all()
), "Found prediction probability value outside of [0, 1]!"
return pred_proba
def get_tree_importance(self, data: DataNode):
from lightgbm import LGBMClassifier
import pandas as pd
X, y = self.data_transformer(data).data
lgb = LGBMClassifier(random_state=1)
lgb.fit(X, y)
_importance = lgb.feature_importances_
h = {}
h['feature_id'] = np.array(range(len(_importance)))
h['feature_importance'] = _importance
return pd.DataFrame(h)
def get_linear_importance(self, data: DataNode):
from sklearn.linear_model import LogisticRegression
import pandas as pd
X, y = self.data_transformer(data).data
clf = LogisticRegression(random_state=1)
clf.fit(X, y)
_ef = clf.coef_
std_array = np.std(_ef, ddof=1, axis=0)
abs_array = abs(_ef)
mean_array = np.mean(abs_array, axis=0)
_importance = std_array / mean_array
h = {}
h['feature_id'] = np.array(range(len(_importance)))
h['feature_importance'] = _importance
return pd.DataFrame(h)
def get_linear_impact(self, data: DataNode):
from sklearn.linear_model import LogisticRegression
import pandas as pd
if (len(set(data.data[1]))) > 2:
print('ERROR! Only binary classification is supported!')
return 0
X, y = self.data_transformer(data).data
clf = LogisticRegression(random_state=1)
clf.fit(X, y)
_ef = clf.coef_
_impact = _ef[0]
h = {}
h['feature_id'] = np.array(range(len(_impact)))
h['feature_impact'] = _impact
return pd.DataFrame(h)
class Regressor(BaseEstimator):
"""This class implements the regression task. """
def initialize(self, data: DataNode, **kwargs):
self.metric = 'mse' if self.metric is None else self.metric
# Check the task type: {continuous}
task_type = type_dict['continuous']
self.task_type = task_type
super().initialize(data=data, **kwargs)
def fit(self, data, **kwargs):
"""
Fit the regressor to given training data.
:param data: DataNode
:return: self
"""
if self._ml_engine is None:
self.initialize(data=data, **kwargs)
super().fit(data, **kwargs)
return self
def predict(self, X, batch_size=None, n_jobs=1):
"""
Make predictions for X.
:param X: DataNode
:param batch_size: int
:param n_jobs: int
:return: y : array of shape = [n_samples] or [n_samples, n_labels]
The predicted classes.
"""
if not isinstance(X, DataNode):
raise ValueError("X is supposed to be a Data Node, but get %s" % type(X))
return super().predict(X, batch_size=batch_size, n_jobs=n_jobs)
def get_tree_importance(self, data: DataNode):
from lightgbm import LGBMRegressor
import pandas as pd
X, y = self.data_transformer(data).data
lgb = LGBMRegressor(random_state=1)
lgb.fit(X, y)
_importance = lgb.feature_importances_
h = {}
h['feature_id'] = np.array(range(len(_importance)))
h['feature_importance'] = _importance
return pd.DataFrame(h)
def get_linear_impact(self, data: DataNode):
from sklearn.linear_model import LinearRegression
import pandas as pd
X, y = self.data_transformer(data).data
reg = LinearRegression()
reg.fit(X, y)
_impact = reg.coef_
h = {}
h['feature_id'] = np.array(range(len(_impact)))
h['feature_impact'] = _impact
return pd.DataFrame(h)
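# Minimal usage sketch (argument names are illustrative; assumes `train_node`
# and `test_node` are mindware DataNode instances built elsewhere):
#
#   clf = Classifier(metric='acc')
#   clf.fit(train_node)
#   y_pred = clf.predict(test_node)
#   importance_df = clf.get_tree_importance(train_node)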
|
6624
|
import logging as log
class Log:
    def __init__(self, level):
        self.level = level
        log.basicConfig(format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s',
                        level=level)
        self.log = log

    def info(self, msg):
        self.log.info(msg)

    def debug(self, msg):
        self.log.debug(msg)

    def warn(self, msg):
        self.log.warning(msg)  # logging's warn() is deprecated in favor of warning()

    def error(self, msg):
        self.log.error(msg)
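# Minimal usage sketch: a single Log instance configures the root logger, so the
# chosen level applies to every message emitted through this wrapper.
if __name__ == '__main__':
    logger = Log(log.INFO)
    logger.info('service started')
    logger.error('something went wrong')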
|
6650
|
from pathlib import Path
import pytest
from oval_graph.arf_xml_parser.arf_xml_parser import ARFXMLParser
def get_arf_report_path(src="global_test_data/ssg-fedora-ds-arf.xml"):
return str(Path(__file__).parent.parent / src)
@pytest.mark.parametrize("rule_id, result", [
(
"xccdf_org.ssgproject.content_rule_accounts_passwords_pam_faillock_deny",
"false",
),
(
"xccdf_org.ssgproject.content_rule_sshd_disable_gssapi_auth",
"false",
),
(
"xccdf_org.ssgproject.content_rule_service_debug-shell_disabled",
"true",
),
(
"xccdf_org.ssgproject.content_rule_mount_option_dev_shm_noexec",
"false",
),
(
"xccdf_org.ssgproject.content_rule_audit_rules_unsuccessful_file_modification_creat",
"false",
),
(
"xccdf_org.ssgproject.content_rule_audit_rules_file_deletion_events_rmdir",
"false",
),
(
"xccdf_org.ssgproject.content_rule_require_singleuser_auth",
"true",
),
])
def test_parsing_and_evaluate_scan_rule(rule_id, result):
path = get_arf_report_path()
parser = ARFXMLParser(path)
oval_tree = parser.get_oval_tree(rule_id)
assert oval_tree.evaluate_tree() == result
def test_parsing_arf_report_without_system_data():
path = get_arf_report_path("global_test_data/arf_no_system_data.xml")
rule_id = "xccdf_com.example.www_rule_test-fail"
parser = ARFXMLParser(path)
oval_tree = parser.get_oval_tree(rule_id)
assert oval_tree.evaluate_tree() == "false"
@pytest.mark.parametrize("rule_id, pattern", [
("hello", "404 rule \"hello\" not found!"),
("xccdf_org.ssgproject.content_rule_ntpd_specify_remote_server", "notselected"),
("xccdf_org.ssgproject.content_rule_configure_bind_crypto_policy", "notchecked"),
("xccdf_org.ssgproject.content_rule_ensure_gpgcheck_local_packages", "notapplicable"),
])
def test_parsing_bad_rule(rule_id, pattern):
path = get_arf_report_path()
parser = ARFXMLParser(path)
with pytest.raises(Exception, match=pattern):
assert parser.get_oval_tree(rule_id)
def test_use_bad_report_file():
src = 'global_test_data/xccdf_org.ssgproject.content_profile_ospp-results-initial.xml'
path = get_arf_report_path(src)
with pytest.raises(Exception, match=r"arf\b|ARF\b"):
assert ARFXMLParser(path)
|
6653
|
from setuptools import setup, Extension, find_packages
import subprocess
import errno
import re
import os
import shutil
import sys
import zipfile
from urllib.request import urlretrieve
import numpy
from Cython.Build import cythonize
isWindows = os.name == 'nt'
isMac = sys.platform == 'darwin'
is64Bit = sys.maxsize > 2**32
# adapted from cffi's setup.py
# the following may be overridden if pkg-config exists
libraries = ['lensfun']
include_dirs = []
library_dirs = []
extra_compile_args = []
extra_link_args = []
def _ask_pkg_config(resultlist, option, result_prefix='', sysroot=False):
pkg_config = os.environ.get('PKG_CONFIG','pkg-config')
try:
p = subprocess.Popen([pkg_config, option, 'lensfun'],
stdout=subprocess.PIPE)
except OSError as e:
if e.errno != errno.ENOENT:
raise
else:
t = p.stdout.read().decode().strip()
if p.wait() == 0:
res = t.split()
# '-I/usr/...' -> '/usr/...'
for x in res:
assert x.startswith(result_prefix)
res = [x[len(result_prefix):] for x in res]
sysroot = sysroot and os.environ.get('PKG_CONFIG_SYSROOT_DIR', '')
if sysroot:
# old versions of pkg-config don't support this env var,
# so here we emulate its effect if needed
res = [path if path.startswith(sysroot)
else sysroot + path
for path in res]
resultlist[:] = res
def use_pkg_config():
_ask_pkg_config(include_dirs, '--cflags-only-I', '-I', sysroot=True)
_ask_pkg_config(extra_compile_args, '--cflags-only-other')
_ask_pkg_config(library_dirs, '--libs-only-L', '-L', sysroot=True)
_ask_pkg_config(extra_link_args, '--libs-only-other')
_ask_pkg_config(libraries, '--libs-only-l', '-l')
if isWindows or isMac:
cmake_build = os.path.abspath('external/lensfun/build')
install_dir = os.path.join(cmake_build, 'install')
include_dirs += [os.path.join(install_dir, 'include', 'lensfun')]
library_dirs += [os.path.join(install_dir, 'lib')]
else:
use_pkg_config()
# this must be after use_pkg_config()!
include_dirs += [numpy.get_include()]
# for version_helper.h
include_dirs += [os.path.abspath('lensfunpy')]
def clone_submodules():
if not os.path.exists('external/lensfun/README.md'):
print('lensfun git submodule not cloned yet, will invoke "git submodule update --init" now')
if os.system('git submodule update --init') != 0:
raise Exception('git failed')
def windows_lensfun_compile():
clone_submodules()
cwd = os.getcwd()
# Download cmake to build lensfun
cmake_version = '3.13.4'
cmake_url = 'https://github.com/Kitware/CMake/releases/download/v{v}/cmake-{v}-win32-x86.zip'.format(v=cmake_version)
cmake = os.path.abspath('external/cmake-{}-win32-x86/bin/cmake.exe'.format(cmake_version))
# Download vcpkg to build dependencies of lensfun
vcpkg_commit = '2021.05.12'
vcpkg_url = 'https://github.com/Microsoft/vcpkg/archive/{}.zip'.format(vcpkg_commit)
vcpkg_dir = os.path.abspath('external/vcpkg-{}'.format(vcpkg_commit))
vcpkg_bootstrap = os.path.join(vcpkg_dir, 'bootstrap-vcpkg.bat')
vcpkg = os.path.join(vcpkg_dir, 'vcpkg.exe')
files = [(cmake_url, 'external', cmake),
(vcpkg_url, 'external', vcpkg_bootstrap)]
for url, extractdir, extractcheck in files:
if not os.path.exists(extractcheck):
path = 'external/' + os.path.basename(url)
if not os.path.exists(path):
print('Downloading', url)
try:
urlretrieve(url, path)
except Exception:
# repeat once in case of network issues
urlretrieve(url, path)
with zipfile.ZipFile(path) as z:
print('Extracting', path, 'into', extractdir)
z.extractall(extractdir)
if not os.path.exists(path):
raise RuntimeError(path + ' not found!')
# Bootstrap vcpkg
os.chdir(vcpkg_dir)
if not os.path.exists(vcpkg):
code = os.system(vcpkg_bootstrap)
if code != 0:
sys.exit(code)
# lensfun depends on glib2, so let's build it with vcpkg
vcpkg_arch = 'x64' if is64Bit else 'x86'
vcpkg_triplet = '{}-windows'.format(vcpkg_arch)
code = os.system(vcpkg + ' install glib:' + vcpkg_triplet)
if code != 0:
sys.exit(code)
vcpkg_install_dir = os.path.join(vcpkg_dir, 'installed', vcpkg_triplet)
# bundle runtime dlls
vcpkg_bin_dir = os.path.join(vcpkg_install_dir, 'bin')
glib2_dll = os.path.join(vcpkg_bin_dir, 'glib-2.0-0.dll')
# configure and compile lensfun
if not os.path.exists(cmake_build):
os.mkdir(cmake_build)
os.chdir(cmake_build)
# temporary hack to avoid https://stackoverflow.com/a/53547931
# (python module not needed here anyway)
patch_path = '../apps/CMakeLists.txt'
with open(patch_path) as f:
content = f.read()
content = content.replace('IF(PYTHON)', 'IF(FALSE)')
with open(patch_path, 'w') as f:
f.write(content)
cmds = [cmake + ' .. -G "NMake Makefiles" -DCMAKE_BUILD_TYPE=Release ' +\
'-DBUILD_TESTS=off -DINSTALL_HELPER_SCRIPTS=off ' +\
'-DCMAKE_TOOLCHAIN_FILE={}/scripts/buildsystems/vcpkg.cmake '.format(vcpkg_dir) +\
'-DGLIB2_BASE_DIR={} -DGLIB2_DLL={} -DCMAKE_INSTALL_PREFIX=install'.format(vcpkg_install_dir, glib2_dll),
cmake + ' --build .',
cmake + ' --build . --target install',
]
for cmd in cmds:
print(cmd)
code = os.system(cmd)
if code != 0:
sys.exit(code)
os.chdir(cwd)
dll_runtime_libs = [('lensfun.dll', os.path.join(install_dir, 'bin')),
('glib-2.0-0.dll', vcpkg_bin_dir),
# dependencies of glib
('pcre.dll', vcpkg_bin_dir),
('iconv-2.dll', vcpkg_bin_dir),
('charset-1.dll', vcpkg_bin_dir),
('intl-8.dll', vcpkg_bin_dir),
]
for filename, folder in dll_runtime_libs:
src = os.path.join(folder, filename)
dest = 'lensfunpy/' + filename
print('copying', src, '->', dest)
shutil.copyfile(src, dest)
def mac_lensfun_compile():
clone_submodules()
# configure and compile lensfun
cwd = os.getcwd()
if not os.path.exists(cmake_build):
os.mkdir(cmake_build)
os.chdir(cmake_build)
install_name_dir = os.path.join(install_dir, 'lib')
cmds = ['cmake .. -DCMAKE_BUILD_TYPE=Release ' +\
'-DBUILD_TESTS=off -DINSTALL_HELPER_SCRIPTS=off ' +\
'-DCMAKE_INSTALL_PREFIX=install ' +\
'-DCMAKE_INSTALL_NAME_DIR=' + install_name_dir,
'cmake --build .',
'cmake --build . --target install',
]
for cmd in cmds:
print(cmd)
code = os.system(cmd)
if code != 0:
sys.exit(code)
os.chdir(cwd)
def bundle_db_files():
import glob
db_files = 'lensfunpy/db_files'
if not os.path.exists(db_files):
os.makedirs(db_files)
for path in glob.glob('external/lensfun/data/db/*.xml'):
dest = os.path.join(db_files, os.path.basename(path))
print('copying', path, '->', dest)
shutil.copyfile(path, dest)
package_data = {'lensfunpy': []}
# evil hack, check cmd line for relevant commands
# custom cmdclasses didn't work out in this case
cmdline = ''.join(sys.argv[1:])
needsCompile = any(s in cmdline for s in ['install', 'bdist', 'build_ext', 'wheel', 'nosetests'])
if isWindows and needsCompile:
windows_lensfun_compile()
package_data['lensfunpy'].append('*.dll')
elif isMac and needsCompile:
mac_lensfun_compile()
if any(s in cmdline for s in ['clean', 'sdist']):
# When running sdist after a previous run of bdist or build_ext
# then even with the 'clean' command the .egg-info folder stays.
# This folder contains SOURCES.txt which in turn is used by sdist
# to include package data files, but we don't want .dll's and .xml
# files in our source distribution. Therefore, to prevent accidents,
# we help a little...
egg_info = 'lensfunpy.egg-info'
print('removing', egg_info)
shutil.rmtree(egg_info, ignore_errors=True)
if 'sdist' not in cmdline:
# This assumes that the lensfun version from external/lensfun was used.
# If that's not the case, the bundled files may fail to load, for example,
# if lensfunpy was linked against an older lensfun version already on
# the system (Linux mostly) and the database format changed in an incompatible way.
# In that case, loading of bundled files can still be disabled
# with Database(load_bundled=False).
package_data['lensfunpy'].append('db_files/*.xml')
bundle_db_files()
# Support for optional Cython line tracing
# run the following to generate a test coverage report:
# $ export LINETRACE=1
# $ python setup.py build_ext --inplace
# $ nosetests --with-coverage --cover-html --cover-package=lensfunpy
compdirectives = {}
macros = []
if (os.environ.get('LINETRACE', False)):
compdirectives['linetrace'] = True
macros.append(('CYTHON_TRACE', '1'))
extensions = cythonize([Extension("lensfunpy._lensfun",
include_dirs=include_dirs,
sources=[os.path.join('lensfunpy', '_lensfun.pyx')],
libraries=libraries,
library_dirs=library_dirs,
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
define_macros=macros
)],
compiler_directives=compdirectives)
# make __version__ available (https://stackoverflow.com/a/16084844)
exec(open('lensfunpy/_version.py').read())
setup(
name = 'lensfunpy',
version = __version__,
description = 'Lens distortion correction for Python, a wrapper for lensfun',
long_description = open('README.rst').read(),
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/letmaik/lensfunpy',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Cython',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Topic :: Multimedia :: Graphics',
'Topic :: Software Development :: Libraries',
],
packages = find_packages(),
ext_modules = extensions,
package_data = package_data,
install_requires=['numpy']
)
|
6695
|
import unittest
from QuerySciGraph import QuerySciGraph


class QuerySciGraphTestCase(unittest.TestCase):
    def test_get_disont_ids_for_mesh_id(self):
        disont_ids = QuerySciGraph.get_disont_ids_for_mesh_id('MESH:D005199')
        known_ids = {'DOID:13636'}
        self.assertSetEqual(disont_ids, known_ids)

    def test_query_sub_phenotypes_for_phenotype(self):
        sub_phenotypes = QuerySciGraph.query_sub_phenotypes_for_phenotype("HP:0000107")  # Renal cyst
        known_phenotypes = {'HP:0100877': 'Renal diverticulum',
                            'HP:0000108': 'Renal corticomedullary cysts',
                            'HP:0000803': 'Renal cortical cysts',
                            'HP:0000003': 'Multicystic kidney dysplasia',
                            'HP:0008659': 'Multiple small medullary renal cysts',
                            'HP:0005562': 'Multiple renal cysts',
                            'HP:0000800': 'Cystic renal dysplasia',
                            'HP:0012581': 'Solitary renal cyst'}
        self.assertDictEqual(sub_phenotypes, known_phenotypes)


if __name__ == '__main__':
    unittest.main()
|
6698
|
import re
from precise_bbcode.bbcode.tag import BBCodeTag
from precise_bbcode.tag_pool import tag_pool
color_re = re.compile(r'^([a-z]+|#[0-9abcdefABCDEF]{3,6})$')
class SubTag(BBCodeTag):
name = 'sub'
def render(self, value, option=None, parent=None):
return '<sub>%s</sub>' % value
class PreTag(BBCodeTag):
name = 'pre'
render_embedded = False
def render(self, value, option=None, parent=None):
return '<pre>%s</pre>' % value
class SizeTag(BBCodeTag):
name = 'size'
definition_string = '[size={RANGE=4,7}]{TEXT}[/size]'
format_string = '<span style="font-size:{RANGE=4,7}px;">{TEXT}</span>'
class FruitTag(BBCodeTag):
name = 'fruit'
definition_string = '[fruit]{CHOICE=tomato,orange,apple}[/fruit]'
format_string = '<h5>{CHOICE=tomato,orange,apple}</h5>'
class PhoneLinkTag(BBCodeTag):
name = 'phone'
definition_string = '[phone]{PHONENUMBER}[/phone]'
format_string = '<a href="tel:{PHONENUMBER}">{PHONENUMBER}</a>'
def render(self, value, option=None, parent=None):
href = 'tel:{}'.format(value)
return '<a href="{0}">{0}</a>'.format(href, value)
class StartsWithATag(BBCodeTag):
name = 'startswitha'
definition_string = '[startswitha]{STARTSWITH=a}[/startswitha]'
format_string = '<span>{STARTSWITH=a}</span>'
class RoundedBBCodeTag(BBCodeTag):
name = 'rounded'
class Options:
strip = False
def render(self, value, option=None, parent=None):
if option and re.search(color_re, option) is not None:
return '<div class="rounded" style="border-color:{};">{}</div>'.format(option, value)
return '<div class="rounded">{}</div>'.format(value)
tag_pool.register_tag(SubTag)
tag_pool.register_tag(PreTag)
tag_pool.register_tag(SizeTag)
tag_pool.register_tag(FruitTag)
tag_pool.register_tag(PhoneLinkTag)
tag_pool.register_tag(StartsWithATag)
tag_pool.register_tag(RoundedBBCodeTag)
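# Usage sketch (assumes a configured Django project with precise_bbcode
# installed, since tags are rendered through the shared parser at runtime):
#
#   from precise_bbcode.bbcode import get_parser
#   parser = get_parser()
#   parser.render('[rounded=#ff0000]hello[/rounded]')
#   # -> '<div class="rounded" style="border-color:#ff0000;">hello</div>'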
|
6714
|
import torch
import sys
import os
sys.path.append(os.getcwd())
from utils.helper_modules import Sequential2
from unimodals.common_models import Linear, MLP, MaxOut_MLP
from datasets.imdb.get_data import get_dataloader
from fusions.common_fusions import Concat
from objective_functions.objectives_for_supervised_learning import MFM_objective
from objective_functions.recon import sigmloss1d
from training_structures.Supervised_Learning import train, test
filename = "best_mfm.pt"
traindata, validdata, testdata = get_dataloader(
"../video/multimodal_imdb.hdf5", "../video/mmimdb", vgg=True, batch_size=128)
classes = 23
n_latent = 512
fuse = Sequential2(Concat(), MLP(2*n_latent, n_latent, n_latent//2)).cuda()
encoders = [MaxOut_MLP(512, 512, 300, n_latent, False).cuda(
), MaxOut_MLP(512, 1024, 4096, n_latent, False).cuda()]
head = Linear(n_latent//2, classes).cuda()
decoders = [MLP(n_latent, 600, 300).cuda(), MLP(n_latent, 2048, 4096).cuda()]
intermediates = [MLP(n_latent, n_latent//2, n_latent//2).cuda(),
MLP(n_latent, n_latent//2, n_latent//2).cuda()]
recon_loss = MFM_objective(2.0, [sigmloss1d, sigmloss1d], [
1.0, 1.0], criterion=torch.nn.BCEWithLogitsLoss())
train(encoders, fuse, head, traindata, validdata, 1000, decoders+intermediates, early_stop=True, task="multilabel",
objective_args_dict={"decoders": decoders, "intermediates": intermediates}, save=filename, optimtype=torch.optim.AdamW, lr=5e-3, weight_decay=0.01, objective=recon_loss)
print("Testing:")
model = torch.load(filename).cuda()
test(model, testdata, method_name="MFM", dataset="imdb",
criterion=torch.nn.BCEWithLogitsLoss(), task="multilabel")
|
6738
|
from __future__ import absolute_import
from relaax.server.parameter_server import parameter_server_base
from relaax.server.common import session
from . import ddpg_model
class ParameterServer(parameter_server_base.ParameterServerBase):
    def init_session(self):
        self.session = session.Session(ddpg_model.SharedParameters())
        self.session.op_initialize()
        self.session.op_init_target_weights()

    def n_step(self):
        return self.session.op_n_step()

    def score(self):
        return self.session.op_score()

    def get_session(self):
        return self.session
|
6744
|
import argparse
import copy
import logging
import sys
from dataclasses import dataclass
from datetime import datetime, timedelta
from slack_sdk import WebClient
from typing import Dict, Optional, List
import pytz
from hunter import config
from hunter.attributes import get_back_links
from hunter.config import ConfigError, Config
from hunter.data_selector import DataSelector
from hunter.grafana import GrafanaError, Grafana, Annotation
from hunter.graphite import GraphiteError
from hunter.importer import DataImportError, Importers
from hunter.report import Report
from hunter.series import (
AnalysisOptions,
ChangePointGroup,
SeriesComparison,
compare,
AnalyzedSeries,
)
from hunter.slack import SlackNotifier, NotificationError
from hunter.test_config import TestConfigError, TestConfig, GraphiteTestConfig
from hunter.util import parse_datetime, DateFormatError, interpolate
@dataclass
class HunterError(Exception):
message: str
class Hunter:
__conf: Config
__importers: Importers
__grafana: Optional[Grafana]
__slack: Optional[SlackNotifier]
def __init__(self, conf: Config):
self.__conf = conf
self.__importers = Importers(conf)
self.__grafana = None
self.__slack = self.__maybe_create_slack_notifier()
def list_tests(self, group_names: Optional[List[str]]):
if group_names is not None:
test_names = []
for group_name in group_names:
group = self.__conf.test_groups.get(group_name)
if group is None:
raise HunterError(f"Test group not found: {group_name}")
test_names += (t.name for t in group)
else:
test_names = self.__conf.tests
for test_name in sorted(test_names):
print(test_name)
def list_test_groups(self):
for group_name in sorted(self.__conf.test_groups):
print(group_name)
def get_test(self, test_name: str) -> TestConfig:
test = self.__conf.tests.get(test_name)
if test is None:
raise HunterError(f"Test not found {test_name}")
return test
def get_tests(self, *names: str) -> List[TestConfig]:
tests = []
for name in names:
group = self.__conf.test_groups.get(name)
if group is not None:
tests += group
else:
test = self.__conf.tests.get(name)
if test is not None:
tests.append(test)
else:
raise HunterError(f"Test or group not found: {name}")
return tests
def list_metrics(self, test: TestConfig):
importer = self.__importers.get(test)
for metric_name in importer.fetch_all_metric_names(test):
print(metric_name)
def analyze(
self, test: TestConfig, selector: DataSelector, options: AnalysisOptions
) -> AnalyzedSeries:
importer = self.__importers.get(test)
series = importer.fetch_data(test, selector)
analyzed_series = series.analyze(options)
change_points = analyzed_series.change_points_by_time
report = Report(series, change_points)
print(test.name + ":")
print(report.format_log_annotated())
return analyzed_series
def __get_grafana(self) -> Grafana:
if self.__grafana is None:
self.__grafana = Grafana(self.__conf.grafana)
return self.__grafana
def update_grafana_annotations(self, test: GraphiteTestConfig, series: AnalyzedSeries):
grafana = self.__get_grafana()
begin = datetime.fromtimestamp(series.time()[0], tz=pytz.UTC)
end = datetime.fromtimestamp(series.time()[len(series.time()) - 1], tz=pytz.UTC)
logging.info(f"Fetching Grafana annotations for test {test.name}...")
tags_to_query = ["hunter", "change-point", "test:" + test.name]
old_annotations_for_test = grafana.fetch_annotations(begin, end, list(tags_to_query))
logging.info(f"Found {len(old_annotations_for_test)} annotations")
created_count = 0
for metric_name, change_points in series.change_points.items():
path = test.get_path(series.branch_name(), metric_name)
metric_tag = f"metric:{metric_name}"
tags_to_create = (
tags_to_query
+ [metric_tag]
+ test.tags
+ test.annotate
+ test.metrics[metric_name].annotate
)
substitutions = {
"TEST_NAME": test.name,
"METRIC_NAME": metric_name,
"GRAPHITE_PATH": [path],
"GRAPHITE_PATH_COMPONENTS": path.split("."),
"GRAPHITE_PREFIX": [test.prefix],
"GRAPHITE_PREFIX_COMPONENTS": test.prefix.split("."),
}
tmp_tags_to_create = []
for t in tags_to_create:
tmp_tags_to_create += interpolate(t, substitutions)
tags_to_create = tmp_tags_to_create
old_annotations = [a for a in old_annotations_for_test if metric_tag in a.tags]
old_annotation_times = set((a.time for a in old_annotations if a.tags))
target_annotations = []
for cp in change_points:
attributes = series.attributes_at(cp.index)
annotation_text = get_back_links(attributes)
target_annotations.append(
Annotation(
id=None,
time=datetime.fromtimestamp(cp.time, tz=pytz.UTC),
text=annotation_text,
tags=tags_to_create,
)
)
target_annotation_times = set((a.time for a in target_annotations))
to_delete = [a for a in old_annotations if a.time not in target_annotation_times]
if to_delete:
logging.info(
f"Removing {len(to_delete)} annotations "
f"for test {test.name} and metric {metric_name}..."
)
grafana.delete_annotations(*(a.id for a in to_delete))
to_create = [a for a in target_annotations if a.time not in old_annotation_times]
if to_create:
logging.info(
f"Creating {len(to_create)} annotations "
f"for test {test.name} and metric {metric_name}..."
)
grafana.create_annotations(*to_create)
created_count += len(to_create)
if created_count == 0:
logging.info("All annotations up-to-date. No new annotations needed.")
else:
logging.info(f"Created {created_count} annotations.")
def remove_grafana_annotations(self, test: Optional[TestConfig], force: bool):
"""Removes all Hunter annotations (optionally for a given test) in Grafana"""
grafana = self.__get_grafana()
if test:
logging.info(f"Fetching Grafana annotations for test {test.name}...")
else:
logging.info(f"Fetching Grafana annotations...")
tags_to_query = {"hunter", "change-point"}
if test:
tags_to_query.add("test:" + test.name)
annotations = grafana.fetch_annotations(None, None, list(tags_to_query))
if not annotations:
logging.info("No annotations found.")
return
if not force:
print(
f"Are you sure to remove {len(annotations)} annotations from {grafana.url}? [y/N]"
)
decision = input().strip()
if decision.lower() != "y" and decision.lower() != "yes":
return
logging.info(f"Removing {len(annotations)} annotations...")
grafana.delete_annotations(*(a.id for a in annotations))
def regressions(
self, test: TestConfig, selector: DataSelector, options: AnalysisOptions
) -> bool:
importer = self.__importers.get(test)
# Even if user is interested only in performance difference since some point X,
# we really need to fetch some earlier points than X.
# Otherwise, if performance went down very early after X, e.g. at X + 1, we'd have
# an insufficient number of data points to compute the baseline performance.
# Instead of using `since-` selector, we're fetching everything from the
# beginning and then we find the baseline performance around the time pointed by
# the original selector.
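# For example (illustrative): with `--since 2021-06-01`, the baseline selector built
# below fetches data from roughly 2021-05-02 onward (since_time - 30 days), so about a
# month of earlier points is available for estimating the baseline mean.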
since_version = selector.since_version
since_commit = selector.since_commit
since_time = selector.since_time
baseline_selector = copy.deepcopy(selector)
baseline_selector.last_n_points = sys.maxsize
baseline_selector.branch = None
baseline_selector.since_version = None
baseline_selector.since_commit = None
baseline_selector.since_time = since_time - timedelta(days=30)
baseline_series = importer.fetch_data(test, baseline_selector)
if since_version:
baseline_index = baseline_series.find_by_attribute("version", since_version)
if not baseline_index:
raise HunterError(f"No runs of test {test.name} with version {since_version}")
baseline_index = max(baseline_index)
elif since_commit:
baseline_index = baseline_series.find_by_attribute("commit", since_commit)
if not baseline_index:
raise HunterError(f"No runs of test {test.name} with commit {since_commit}")
baseline_index = max(baseline_index)
else:
baseline_index = baseline_series.find_first_not_earlier_than(since_time)
baseline_series = baseline_series.analyze()
if selector.branch:
target_series = importer.fetch_data(test, selector).analyze()
else:
target_series = baseline_series
cmp = compare(baseline_series, baseline_index, target_series, target_series.len())
regressions = []
for metric_name, stats in cmp.stats.items():
direction = baseline_series.metric(metric_name).direction
m1 = stats.mean_1
m2 = stats.mean_2
change_percent = stats.forward_rel_change() * 100.0
if m2 * direction < m1 * direction and stats.pvalue < options.max_pvalue:
regressions.append(
" {:16}: {:#8.3g} --> {:#8.3g} ({:+6.1f}%)".format(
metric_name, m1, m2, change_percent
)
)
if regressions:
print(f"{test.name}:")
for r in regressions:
print(r)
else:
print(f"{test.name}: OK")
return len(regressions) > 0
def __maybe_create_slack_notifier(self):
if not self.__conf.slack:
return None
return SlackNotifier(WebClient(token=self.__conf.slack.bot_token))
def notify_slack(
self,
test_change_points: Dict[str, AnalyzedSeries],
selector: DataSelector,
channels: List[str],
since: datetime,
):
if not self.__slack:
logging.error(
"Slack definition is missing from the configuration, cannot send notification"
)
return
self.__slack.notify(test_change_points, selector=selector, channels=channels, since=since)
def validate(self):
valid = True
unique_metrics = set()
for name, test in self.__conf.tests.items():
logging.info("Checking {}".format(name))
test_metrics = test.fully_qualified_metric_names()
for test_metric in test_metrics:
if test_metric not in unique_metrics:
unique_metrics.add(test_metric)
else:
valid = False
logging.error(f"Found duplicated metric: {test_metric}")
try:
importer = self.__importers.get(test)
series = importer.fetch_data(test)
for metric, metric_data in series.data.items():
if not metric_data:
logging.warning(f"Test's metric does not have data: {name} {metric}")
except Exception as err:
logging.error(f"Invalid test definition: {name}\n{repr(err)}\n")
valid = False
logging.info(f"Validation finished: {'VALID' if valid else 'INVALID'}")
if not valid:
exit(1)
def setup_data_selector_parser(parser: argparse.ArgumentParser):
parser.add_argument(
"--branch", metavar="STRING", dest="branch", help="name of the branch", nargs="?"
)
parser.add_argument(
"--metrics",
metavar="LIST",
dest="metrics",
help="a comma-separated list of metrics to analyze",
)
parser.add_argument(
"--attrs",
metavar="LIST",
dest="attributes",
help="a comma-separated list of attribute names associated with the runs "
"(e.g. commit, branch, version); "
"if not specified, it will be automatically filled based on available information",
)
since_group = parser.add_mutually_exclusive_group()
since_group.add_argument(
"--since-commit",
metavar="STRING",
dest="since_commit",
help="the commit at the start of the time span to analyze",
)
since_group.add_argument(
"--since-version",
metavar="STRING",
dest="since_version",
help="the version at the start of the time span to analyze",
)
since_group.add_argument(
"--since",
metavar="DATE",
dest="since_time",
help="the start of the time span to analyze; "
"accepts ISO, and human-readable dates like '10 weeks ago'",
)
until_group = parser.add_mutually_exclusive_group()
until_group.add_argument(
"--until-commit",
metavar="STRING",
dest="until_commit",
help="the commit at the end of the time span to analyze",
)
until_group.add_argument(
"--until-version",
metavar="STRING",
dest="until_version",
help="the version at the end of the time span to analyze",
)
until_group.add_argument(
"--until",
metavar="DATE",
dest="until_time",
help="the end of the time span to analyze; same syntax as --since",
)
parser.add_argument(
"--last",
type=int,
metavar="COUNT",
dest="last_n_points",
help="the number of data points to take from the end of the series"
)
def data_selector_from_args(args: argparse.Namespace) -> DataSelector:
data_selector = DataSelector()
if args.branch:
data_selector.branch = args.branch
if args.metrics is not None:
data_selector.metrics = list(args.metrics.split(","))
if args.attributes is not None:
data_selector.attributes = list(args.attributes.split(","))
if args.since_commit is not None:
data_selector.since_commit = args.since_commit
if args.since_version is not None:
data_selector.since_version = args.since_version
if args.since_time is not None:
data_selector.since_time = parse_datetime(args.since_time)
if args.until_commit is not None:
data_selector.until_commit = args.until_commit
if args.until_version is not None:
data_selector.until_version = args.until_version
if args.until_time is not None:
data_selector.until_time = parse_datetime(args.until_time)
if args.last_n_points is not None:
data_selector.last_n_points = args.last_n_points
return data_selector
def setup_analysis_options_parser(parser: argparse.ArgumentParser):
parser.add_argument(
"-P, --p-value",
dest="pvalue",
type=float,
default=0.001,
help="maximum accepted P-value of a change-point; "
"P denotes the probability that the change-point has "
"been found by a random coincidence, rather than a real "
"difference between the data distributions",
)
parser.add_argument(
"-M",
"--magnitude",
dest="magnitude",
type=float,
default=0.0,
help="minimum accepted magnitude of a change-point "
"computed as abs(new_mean / old_mean - 1.0); use it "
"to filter out stupidly small changes like < 0.01",
)
parser.add_argument(
"--window",
default=50,
type=int,
dest="window",
help="the number of data points analyzed at once; "
"the window size affects the discriminative "
"power of the change point detection algorithm; "
"large windows are less susceptible to noise; "
"however, a very large window may cause dismissing short regressions "
"as noise so it is best to keep it short enough to include not more "
"than a few change points (optimally at most 1)",
)
def analysis_options_from_args(args: argparse.Namespace) -> AnalysisOptions:
conf = AnalysisOptions()
if args.pvalue is not None:
conf.max_pvalue = args.pvalue
if args.magnitude is not None:
conf.min_magnitude = args.magnitude
if args.window is not None:
conf.window_len = args.window
return conf
def main():
logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.INFO)
parser = argparse.ArgumentParser(description="Hunts performance regressions in Fallout results")
subparsers = parser.add_subparsers(dest="command")
list_tests_parser = subparsers.add_parser("list-tests", help="list available tests")
list_tests_parser.add_argument("group", help="name of the group of the tests", nargs="*")
list_metrics_parser = subparsers.add_parser(
"list-metrics", help="list available metrics for a test"
)
list_metrics_parser.add_argument("test", help="name of the test")
subparsers.add_parser("list-groups", help="list available groups of tests")
analyze_parser = subparsers.add_parser(
"analyze",
help="analyze performance test results",
formatter_class=argparse.RawTextHelpFormatter,
)
analyze_parser.add_argument("tests", help="name of the test or group of the tests", nargs="+")
analyze_parser.add_argument(
"--update-grafana",
help="Update Grafana dashboards with appropriate annotations of change points",
action="store_true",
)
analyze_parser.add_argument(
"--notify-slack",
help="Send notification containing a summary of change points to given Slack channels",
nargs="+",
)
analyze_parser.add_argument(
"--cph-report-since",
help="Sets a limit on the date range of the Change Point History reported to Slack. Same syntax as --since.",
metavar="DATE",
dest="cph_report_since",
)
setup_data_selector_parser(analyze_parser)
setup_analysis_options_parser(analyze_parser)
regressions_parser = subparsers.add_parser("regressions", help="find performance regressions")
regressions_parser.add_argument(
"tests", help="name of the test or group of the tests", nargs="+"
)
setup_data_selector_parser(regressions_parser)
setup_analysis_options_parser(regressions_parser)
remove_annotations_parser = subparsers.add_parser("remove-annotations")
remove_annotations_parser.add_argument(
"tests", help="name of the test or test group", nargs="*"
)
remove_annotations_parser.add_argument(
"--force", help="don't ask questions, just do it", dest="force", action="store_true"
)
subparsers.add_parser(
    "validate", help="validates the tests and metrics defined in the configuration"
)
try:
args = parser.parse_args()
conf = config.load_config()
hunter = Hunter(conf)
if args.command == "list-groups":
hunter.list_test_groups()
if args.command == "list-tests":
group_names = args.group if args.group else None
hunter.list_tests(group_names)
if args.command == "list-metrics":
test = hunter.get_test(args.test)
hunter.list_metrics(test)
if args.command == "analyze":
update_grafana_flag = args.update_grafana
slack_notification_channels = args.notify_slack
slack_cph_since = parse_datetime(args.cph_report_since)
data_selector = data_selector_from_args(args)
options = analysis_options_from_args(args)
tests = hunter.get_tests(*args.tests)
tests_analyzed_series = {test.name: None for test in tests}
for test in tests:
try:
analyzed_series = hunter.analyze(test, selector=data_selector, options=options)
if update_grafana_flag:
if not isinstance(test, GraphiteTestConfig):
raise GrafanaError(f"Not a Graphite test")
hunter.update_grafana_annotations(test, analyzed_series)
if slack_notification_channels:
tests_analyzed_series[test.name] = analyzed_series
except DataImportError as err:
logging.error(err.message)
except GrafanaError as err:
logging.error(
f"Failed to update grafana dashboards for {test.name}: {err.message}"
)
if slack_notification_channels:
hunter.notify_slack(
tests_analyzed_series,
selector=data_selector,
channels=slack_notification_channels,
since=slack_cph_since,
)
if args.command == "regressions":
data_selector = data_selector_from_args(args)
options = analysis_options_from_args(args)
tests = hunter.get_tests(*args.tests)
regressing_test_count = 0
errors = 0
for test in tests:
try:
regressions = hunter.regressions(
test, selector=data_selector, options=options
)
if regressions:
regressing_test_count += 1
except HunterError as err:
logging.error(err.message)
errors += 1
except DataImportError as err:
logging.error(err.message)
errors += 1
if regressing_test_count == 0:
print("No regressions found!")
elif regressing_test_count == 1:
print("Regressions in 1 test found")
else:
print(f"Regressions in {regressing_test_count} tests found")
if errors > 0:
print(f"Some tests were skipped due to import / analyze errors. Consult error log.")
if args.command == "remove-annotations":
if args.tests:
tests = hunter.get_tests(*args.tests)
for test in tests:
hunter.remove_grafana_annotations(test, args.force)
else:
hunter.remove_grafana_annotations(None, args.force)
if args.command == "validate":
hunter.validate()
if args.command is None:
parser.print_usage()
except ConfigError as err:
logging.error(err.message)
exit(1)
except TestConfigError as err:
logging.error(err.message)
exit(1)
except GraphiteError as err:
logging.error(err.message)
exit(1)
except GrafanaError as err:
logging.error(err.message)
exit(1)
except DataImportError as err:
logging.error(err.message)
exit(1)
except HunterError as err:
logging.error(err.message)
exit(1)
except DateFormatError as err:
logging.error(err.message)
exit(1)
except NotificationError as err:
logging.error(err.message)
exit(1)
if __name__ == "__main__":
main()
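# Example invocations (illustrative; assumes the tool is installed as a `hunter` command
# and that the referenced tests/groups exist in the configuration):
#
#   hunter list-tests
#   hunter list-metrics my-test
#   hunter analyze my-test --since '2 weeks ago' --update-grafana
#   hunter regressions my-test-group --since-version 4.0.1
#   hunter validate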
|
6749
|
from pyatool import PYAToolkit
# Custom functions must take a `toolkit` parameter, even if it is not used
def test_b(toolkit):
return 'i am test_b, running on {}'.format(toolkit.device_id)
# Wrap an adb command as a method
PYAToolkit.bind_cmd(func_name='test_a', command='shell pm list package | grep google')
# Or bind a custom function
PYAToolkit.bind_func(real_func=test_b)
# Enable logging if needed
PYAToolkit.switch_logger(True)
# Initialization
d = PYAToolkit('123456F')
assert d.is_connected()
# Remote control is also supported (not yet stable, not recommended for now)
# d = PYAToolkit('123456F', mode='remote')
# Bound methods can be called directly
result = d.test_a()
# Possible output:
# package:com.google.android.webview
# The same applies to custom functions
result = d.test_b()
# i am test_b, running on 123456F
# They can also be called via `std` or `standard_func` (which offers code auto-completion and is more convenient)
# Standard library functions only; your own extensions can only be called directly
d.std.get_current_activity(toolkit=d)
# Get all registered functions
all_functions = d.current_function()
print(all_functions)
# Usage of all standard functions is listed below; feedback and modifications are welcome
# Print the device id, for testing only
d.hello_world()
# Show all installed packages
installed_package = d.show_package()
# Name of the activity at the top of the stack
current_activity_name = d.get_current_activity()
# Install the specified apk (url and path are both supported); the example may take a while since it downloads from github, change it as you like
d.install_from(url=r'https://github.com/williamfzc/simhand2/releases/download/v0.1.2/app-debug.apk')
# d.install_from(path=r'/Users/admin/some_path/some_apk.apk')
# Check whether the package is installed
target_package_name = 'com.github.williamfzc.simhand2'
is_installed = d.is_installed(package_name=target_package_name)
# Clean the cache
d.clean_cache(target_package_name)
if is_installed:
d.uninstall(target_package_name)
# Get the phone's ip address
local_address = d.get_ip_address()
print(local_address)
# Toggle wifi state
d.switch_wifi(False)
# Toggle airplane mode
d.switch_airplane(True)
d.switch_airplane(False)
d.switch_wifi(True)
# Switch the input method
d.set_ime('com.sohu.inputmethod.sogouoem/.SogouIME')
# push and pull
d.push('./README.md', '/sdcard/')
d.pull('/sdcard/README.md', './haha.md')
# send keyevent
d.input_key_event(26)
d.input_key_event(26)
# swipe
d.swipe(500, 1200, 500, 200)
# click
d.click(200, 200)
|
6754
|
import os
import numpy as np
import torch
import argparse
from hparams import create_hparams
from model import lcm
from train import load_model
from torch.utils.data import DataLoader
from reader import TextMelIDLoader, TextMelIDCollate, id2sp
from inference_utils import plot_data
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--checkpoint_path', type=str,
help='path of the checkpoint to load')
parser.add_argument('--hparams', type=str,
required=False, help='comma separated name=value pairs')
args = parser.parse_args()
checkpoint_path=args.checkpoint_path
hparams = create_hparams(args.hparams)
model = load_model(hparams)
model.load_state_dict(torch.load(checkpoint_path)['state_dict'], strict=False)
_ = model.eval()
def gen_embedding(speaker):
training_list = hparams.training_list
train_set_A = TextMelIDLoader(training_list, hparams.mel_mean_std, hparams.speaker_A,
hparams.speaker_B,
shuffle=False,pids=[speaker])
collate_fn = TextMelIDCollate(lcm(hparams.n_frames_per_step_encoder,
hparams.n_frames_per_step_decoder))
train_loader_A = DataLoader(train_set_A, num_workers=1, shuffle=False,
sampler=None,
batch_size=1, pin_memory=False,
drop_last=True, collate_fn=collate_fn)
with torch.no_grad():
speaker_embeddings = []
for i,batch in enumerate(train_loader_A):
#print i
x, y = model.parse_batch(batch)
text_input_padded, mel_padded, text_lengths, mel_lengths, speaker_id = x
speaker_id, speaker_embedding = model.speaker_encoder.inference(mel_padded)
speaker_embedding = speaker_embedding.data.cpu().numpy()
speaker_embeddings.append(speaker_embedding)
speaker_embeddings = np.vstack(speaker_embeddings)
print(speaker_embeddings.shape)
if not os.path.exists('outdir/embeddings'):
os.makedirs('outdir/embeddings')
np.save('outdir/embeddings/%s.npy'%speaker, speaker_embeddings)
plot_data([speaker_embeddings],
'outdir/embeddings/%s.pdf'%speaker)
print('Generating embedding of %s ...'%hparams.speaker_A)
gen_embedding(hparams.speaker_A)
print('Generating embedding of %s ...'%hparams.speaker_B)
gen_embedding(hparams.speaker_B)
|
6761
|
import email.utils as em
import re
class Main():
def __init__(self):
self.n = int(input())
for i in range(self.n):
self.s = em.parseaddr(input())
if re.match(r'^[a-zA-Z](\w|-|\.|_)+@[a-zA-Z]+\.[a-zA-Z]{1,3}$', self.s[1]):
print(em.formataddr(self.s))
if __name__ == '__main__':
obj = Main()
|
6782
|
from selenium import webdriver
from time import sleep
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
def Dm(driver,user,message):
''' This function is used to direct message a single user/group '''
driver.get('https://www.instagram.com/direct/inbox/')
send_message_button = WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, '//*[@id="react-root"]/section/div/div[2]/div/div/div[2]/div/div[3]/div/button'))).click()
search_user = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, '/html/body/div[5]/div/div/div[2]/div[1]/div/div[2]/input')))
search_user.send_keys(user)
selector = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, '/html/body/div[5]/div/div/div[2]/div[2]/div/div/div[3]/button/span'))).click()
next_button = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, '/html/body/div[5]/div/div/div[1]/div/div[2]/div/button/div'))).click()
try:
text = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, '//*[@id="react-root"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[2]/textarea')))
text.send_keys(message)
send = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, '//*[@id="react-root"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[3]/button'))).click()
driver.get('https://www.instagram.com/direct/inbox/')
except Exception:
print('No message sent to '+user)
driver.get('https://www.instagram.com/direct/inbox/')
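# Minimal usage sketch (illustrative; assumes a Chrome driver session that is already
# logged in to Instagram, and that the hard-coded XPaths above still match the current UI):
#
#   driver = webdriver.Chrome()
#   # ... perform login here ...
#   Dm(driver, 'some_username', 'hello from selenium')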
|
6806
|
import pybullet as p
#p.connect(p.UDP,"192.168.86.100")
p.connect(p.SHARED_MEMORY)
p.resetSimulation()
objects = [p.loadURDF("plane.urdf", 0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,1.000000)]
objects = [p.loadURDF("samurai.urdf", 0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,1.000000)]
objects = [p.loadURDF("pr2_gripper.urdf", 0.500000,0.300006,0.700000,-0.000000,-0.000000,-0.000031,1.000000)]
pr2_gripper = objects[0]
print ("pr2_gripper=")
print (pr2_gripper)
jointPositions=[ 0.550569, 0.000000, 0.549657, 0.000000 ]
for jointIndex in range (p.getNumJoints(pr2_gripper)):
p.resetJointState(pr2_gripper,jointIndex,jointPositions[jointIndex])
pr2_cid = p.createConstraint(pr2_gripper,-1,-1,-1,p.JOINT_FIXED,[0,0,0],[0.2,0,0],[0.500000,0.300006,0.700000])
print ("pr2_cid")
print (pr2_cid)
objects = [p.loadURDF("kuka_iiwa/model_vr_limits.urdf", 1.400000,-0.200000,0.600000,0.000000,0.000000,0.000000,1.000000)]
kuka = objects[0]
jointPositions=[ -0.000000, -0.000000, 0.000000, 1.570793, 0.000000, -1.036725, 0.000001 ]
for jointIndex in range (p.getNumJoints(kuka)):
p.resetJointState(kuka,jointIndex,jointPositions[jointIndex])
p.setJointMotorControl2(kuka,jointIndex,p.POSITION_CONTROL,jointPositions[jointIndex],0)
objects = [p.loadURDF("lego/lego.urdf", 1.000000,-0.200000,0.700000,0.000000,0.000000,0.000000,1.000000)]
objects = [p.loadURDF("lego/lego.urdf", 1.000000,-0.200000,0.800000,0.000000,0.000000,0.000000,1.000000)]
objects = [p.loadURDF("lego/lego.urdf", 1.000000,-0.200000,0.900000,0.000000,0.000000,0.000000,1.000000)]
objects = p.loadSDF("gripper/wsg50_one_motor_gripper_new_free_base.sdf")
kuka_gripper = objects[0]
print ("kuka gripper=")
print(kuka_gripper)
p.resetBasePositionAndOrientation(kuka_gripper,[0.923103,-0.200000,1.250036],[-0.000000,0.964531,-0.000002,-0.263970])
jointPositions=[ 0.000000, -0.011130, -0.206421, 0.205143, -0.009999, 0.000000, -0.010055, 0.000000 ]
for jointIndex in range (p.getNumJoints(kuka_gripper)):
p.resetJointState(kuka_gripper,jointIndex,jointPositions[jointIndex])
p.setJointMotorControl2(kuka_gripper,jointIndex,p.POSITION_CONTROL,jointPositions[jointIndex],0)
kuka_cid = p.createConstraint(kuka, 6, kuka_gripper,0,p.JOINT_FIXED, [0,0,0], [0,0,0.05],[0,0,0])
objects = [p.loadURDF("jenga/jenga.urdf", 1.300000,-0.700000,0.750000,0.000000,0.707107,0.000000,0.707107)]
objects = [p.loadURDF("jenga/jenga.urdf", 1.200000,-0.700000,0.750000,0.000000,0.707107,0.000000,0.707107)]
objects = [p.loadURDF("jenga/jenga.urdf", 1.100000,-0.700000,0.750000,0.000000,0.707107,0.000000,0.707107)]
objects = [p.loadURDF("jenga/jenga.urdf", 1.000000,-0.700000,0.750000,0.000000,0.707107,0.000000,0.707107)]
objects = [p.loadURDF("jenga/jenga.urdf", 0.900000,-0.700000,0.750000,0.000000,0.707107,0.000000,0.707107)]
objects = [p.loadURDF("jenga/jenga.urdf", 0.800000,-0.700000,0.750000,0.000000,0.707107,0.000000,0.707107)]
objects = [p.loadURDF("table/table.urdf", 1.000000,-0.200000,0.000000,0.000000,0.000000,0.707107,0.707107)]
objects = [p.loadURDF("teddy_vhacd.urdf", 1.050000,-0.500000,0.700000,0.000000,0.000000,0.707107,0.707107)]
objects = [p.loadURDF("cube_small.urdf", 0.950000,-0.100000,0.700000,0.000000,0.000000,0.707107,0.707107)]
objects = [p.loadURDF("sphere_small.urdf", 0.850000,-0.400000,0.700000,0.000000,0.000000,0.707107,0.707107)]
objects = [p.loadURDF("duck_vhacd.urdf", 0.850000,-0.400000,0.900000,0.000000,0.000000,0.707107,0.707107)]
objects = p.loadSDF("kiva_shelf/model.sdf")
ob = objects[0]
p.resetBasePositionAndOrientation(ob,[0.000000,1.000000,1.204500],[0.000000,0.000000,0.000000,1.000000])
objects = [p.loadURDF("teddy_vhacd.urdf", -0.100000,0.600000,0.850000,0.000000,0.000000,0.000000,1.000000)]
objects = [p.loadURDF("sphere_small.urdf", -0.100000,0.955006,1.169706,0.633232,-0.000000,-0.000000,0.773962)]
objects = [p.loadURDF("cube_small.urdf", 0.300000,0.600000,0.850000,0.000000,0.000000,0.000000,1.000000)]
objects = [p.loadURDF("table_square/table_square.urdf", -1.000000,0.000000,0.000000,0.000000,0.000000,0.000000,1.000000)]
ob = objects[0]
jointPositions=[ 0.000000 ]
for jointIndex in range (p.getNumJoints(ob)):
p.resetJointState(ob,jointIndex,jointPositions[jointIndex])
objects = [p.loadURDF("husky/husky.urdf", 2.000000,-5.000000,1.000000,0.000000,0.000000,0.000000,1.000000)]
ob = objects[0]
jointPositions=[ 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 ]
for jointIndex in range (p.getNumJoints(ob)):
p.resetJointState(ob,jointIndex,jointPositions[jointIndex])
p.setGravity(0.000000,0.000000,0.000000)
p.setGravity(0,0,-10)
p.stepSimulation()
p.disconnect()
|
6808
|
from __future__ import annotations
from typing import Optional, Dict, List, Union, Type, TYPE_CHECKING
from datetime import date, datetime
import pandas as pd
import numpy as np
import re
import locale
try:
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
except locale.Error:
# Readthedocs has a problem, but difficult to replicate
locale.setlocale(locale.LC_ALL, "")
from . import CoreScript
from ..models import ColumnModel
from ..types import MimeType
if TYPE_CHECKING:
from ..schema import Schema
from ..models import DataSourceModel
class WranglingScript:
"""Get, review and restructure tabular data."""
def __init__(self):
self.check_source = CoreScript().check_source
self.core = CoreScript()
self.DATE_FORMATS = {
"date": {"fmt": ["%Y-%m-%d"], "txt": ["YYYY-MM-DD"]},
"datetime": {
"fmt": ["%Y-%m-%d %H:%M:%S", "%Y-%m-%d %H:%M:%S %Z%z"],
"txt": ["YYYY-MM-DD hh:mm:ss", "YYYY-MM-DD hh:mm:ss UTC+0000"],
},
"year": {"fmt": ["%Y"], "txt": ["YYYY"]},
}
def get_dataframe(
self,
source: str,
preserve: Union[str, List[str]] = None,
filetype: MimeType = MimeType.CSV,
names: Optional[List[str]] = None,
nrows: Optional[int] = None,
) -> Union[Dict[str, pd.DataFrame], pd.DataFrame]:
"""Return a Pandas dataframe from a given source.
Accepts default pandas parameters for Excel and CSV, but the objective is to preserve the source data with
little data conversion outside of the data wrangling process. With this in mind, a `preserve` list can be
provided so that the named columns are kept as text rather than being type-converted.
Parameters
----------
source: str
Source filename.
preserve: str or list of str, default None
Column names where variable type guessing must be prevented and the original data preserved.
Critical for foreign key references with weird formats, like integers with leading `0`.
filetype: MimeType, default MimeType.CSV
Pandas can read a diversity of filetypes, but whyqd has only been tested on `xls`, `xlsx` and `csv`.
names: list of str, default None
If the source data has no header row, explicitly pass a list of names - in the correct order - to address
the data.
nrows: int, default None
A specified number of rows to return. For review, it is faster to load only a small number.
Returns
-------
DataFrame or dict of DataFrame
"""
self.check_source(source)
# If the dtypes have not been set, then ensure that any provided preserved columns remain untouched
# i.e. no forcing of text to numbers
# defaulting to `dtype = object` ...
kwargs = {}
if preserve:
if not isinstance(preserve, list):
preserve = [preserve]
# kwargs["dtype"] = {k: object for k in preserve}
kwargs["dtype"] = {k: pd.StringDtype() for k in preserve}
if names:
kwargs["header"] = None
kwargs["names"] = names
if nrows:
kwargs["nrows"] = nrows
# Check filetype
if filetype in [MimeType.XLS, MimeType.XLSX]:
# This will default to returning a dictionary of dataframes for each sheet
kwargs["sheet_name"] = None
df = pd.read_excel(source, **kwargs)
keys = list(df.keys())
for k in keys:
if df[k].empty:
del df[k]
if len(df.keys()) == 1:
df = df[keys[0]]
if filetype == MimeType.CSV:
# New in pandas 1.3: will ignore encoding errors - perfect for this initial wrangling process
kwargs["encoding_errors"] = "ignore"
# Supposed to help with fruity separator guessing
kwargs["engine"] = "python"
if not nrows:
df = pd.read_csv(source, **kwargs)
else:
kwargs["iterator"] = True
kwargs["chunksize"] = 10000
df_iterator = pd.read_csv(source, **kwargs)
df = pd.concat(df_iterator, ignore_index=True)
return df
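# Usage sketch (illustrative; `data.csv` is a hypothetical file with an `id` column
# whose leading zeros must be preserved):
#
#   wrangler = WranglingScript()
#   df = wrangler.get_dataframe("data.csv", preserve="id", filetype=MimeType.CSV, nrows=50)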
def get_dataframe_from_datasource(self, data: DataSourceModel) -> pd.DataFrame:
"""Return the dataframe for a data source.
Parameters
----------
data: DataSourceModel
Returns
-------
pd.DataFrame
"""
path = data.path
try:
self.core.check_source(path)
except FileNotFoundError:
path = str(self.directory / data.source)
self.core.check_source(path)
df_columns = [d.name for d in data.columns]
names = [d.name for d in data.names] if data.names else None
df = self.get_dataframe(
source=path,
filetype=data.mime,
names=names,
preserve=[d.name for d in data.preserve if d.name in df_columns],
)
if isinstance(df, dict):
if df:
df = df[data.sheet_name]
else:
# It's an empty df for some reason. Maybe excessive filtering.
df = pd.DataFrame()
if df.empty:
raise ValueError(
f"Data source contains no data ({data.path}). Review actions to see if any were more destructive than expected."
)
return df
def get_dataframe_columns(self, df: pd.DataFrame) -> List[ColumnModel]:
"""Returns a list of ColumnModels from a source DataFrame.
Parameters
----------
df: pd.DataFrame
Should be derived from `get_dataframe` with a sensible default for `nrows` being 50.
Returns
-------
List of ColumnModel
"""
# Prepare summary
columns = [
{"name": k, "type": "number"}
if v in ["float64", "int64"]
else {"name": k, "type": "date"}
if v in ["datetime64[ns]"]
else {"name": k, "type": "string"}
for k, v in df.dtypes.apply(lambda x: x.name).to_dict().items()
]
return [ColumnModel(**c) for c in columns]
def deduplicate_columns(self, df: pd.DataFrame, schema: Type[Schema]) -> pd.Index:
"""
Source: https://stackoverflow.com/a/65254771/295606
Source: https://stackoverflow.com/a/55405151
Returns a new column list permitting deduplication of dataframes which may result from merge.
Parameters
----------
df: pd.DataFrame
schema: Schema
    Destination schema; source columns clashing with its field names are renamed.
Returns
-------
pd.Index
Updated column names
"""
column_index = pd.Series(df.columns.tolist())
if df.columns.has_duplicates:
duplicates = column_index[column_index.duplicated()].unique()
for name in duplicates:
dups = column_index == name
replacements = [f"{name}{i}" if i != 0 else name for i in range(dups.sum())]
column_index.loc[dups] = replacements
# Fix any fields with the same name as any of the target fields
# Do this to 'force' schema assignment
for name in [f.name for f in schema.get.fields]:
dups = column_index == name
replacements = [f"{name}{i}__dd" if i != 0 else f"{name}__dd" for i in range(dups.sum())]
column_index.loc[dups] = replacements
return pd.Index(column_index)
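# Worked example (illustrative): for df columns ["a", "b", "a"] and a schema with a
# field named "b", the duplicated "a" becomes "a1" and the clashing "b" becomes
# "b__dd", yielding pd.Index(["a", "b__dd", "a1"]).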
# def check_column_unique(self, source: str, key: str) -> bool:
# """
# Test a column in a dataframe to ensure all values are unique.
# Parameters
# ----------
# source: Source filename
# key: Column name of field where data are to be tested for uniqueness
# Raises
# ------
# ValueError if not unique
# Returns
# -------
# bool, True if unique
# """
# df = self.get_dataframe(source, key)
# if len(df[key]) != len(df[key].unique()):
# import warnings
# filename = source.split("/")[-1] # Obfuscate the path
# e = "'{}' contains non-unique rows in column `{}`".format(filename, key)
# # raise ValueError(e)
# warnings.warn(e)
# return True
# def check_date_format(self, date_type: str, date_value: str) -> bool:
# # https://stackoverflow.com/a/37045601
# # https://www.saltycrane.com/blog/2009/05/converting-time-zones-datetime-objects-python/
# for fmt in self.DATE_FORMATS[date_type]["fmt"]:
# try:
# if date_value == datetime.strptime(date_value, fmt).strftime(fmt):
# return True
# except ValueError:
# continue
# raise ValueError(f"Incorrect date format, should be: `{self.DATE_FORMATS[date_type]['txt']}`")
###################################################################################################
### Pandas type parsers
###################################################################################################
def parse_dates(self, x: Union[None, str]) -> Union[pd.NaT, date.isoformat]:
"""
This is the hard-won 'trust nobody', certainly not Americans, date parser.
TODO: Replace with https://github.com/scrapinghub/dateparser
The only concern is that dateparser.parse(x).date().isoformat() will coerce *any* string to a date,
no matter *what* it is.
"""
if pd.isnull(x):
return pd.NaT
# Check if to_datetime can handle things
if not pd.isnull(pd.to_datetime(x, errors="coerce", dayfirst=True)):
return date.isoformat(pd.to_datetime(x, errors="coerce", dayfirst=True))
# Manually check whether coercion will work
x = str(x).strip()[:10]
x = re.sub(r"[\\/,\.]", "-", x)
try:
y, m, d = x.split("-")
except ValueError:
return pd.NaT
if len(y) < 4:
# Swap the day and year positions
# Ignore US dates
d, m, y = x.split("-")
# Fat finger on 1999 ... not going to check for other date errors as no way to figure out
if y[0] == "9":
y = "1" + y[1:]
x = "{}-{}-{}".format(y, m, d)
try:
x = datetime.strptime(x, "%Y-%m-%d")
except ValueError:
return pd.NaT
x = date.isoformat(x)
try:
pd.Timestamp(x)
return x
except pd.errors.OutOfBoundsDatetime:
return pd.NaT
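# Illustrative behaviour (assuming pandas default parsing rules):
#   parse_dates(None)          -> pd.NaT
#   parse_dates("25/12/2020")  -> "2020-12-25"   (dayfirst coercion via pd.to_datetime)
#   parse_dates("not a date")  -> pd.NaT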
def parse_float(self, x: Union[str, int, float]) -> Union[np.nan, float]:
"""
Regex to extract wrecked floats: https://stackoverflow.com/a/385597
Checked against: https://regex101.com/
"""
try:
return float(x)
except ValueError:
re_float = re.compile(
r"""(?x)
^
\D* # first, match an optional sign *and space*
( # then match integers or f.p. mantissas:
\d+ # start out with a ...
(
\.\d* # mantissa of the form a.b or a.
)? # ? takes care of integers of the form a
|\.\d+ # mantissa of the form .b
)
([eE][+-]?\d+)? # finally, optionally match an exponent
$"""
)
try:
x = re_float.match(x).group(1)
x = re.sub(r"[^e0-9,-\.]", "", str(x))
return locale.atof(x)
except (ValueError, AttributeError):
return np.nan
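# Illustrative behaviour (assuming an en_US locale for locale.atof):
#   parse_float("3.5")       -> 3.5
#   parse_float("USD 99.9")  -> 99.9   (non-numeric prefix stripped by the regex)
#   parse_float("n/a")       -> nan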
|
6817
|
import os
# Restrict the script to run on CPU
os.environ ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ""
# Import Keras Tensoflow Backend
# from keras import backend as K
import tensorflow as tf
# Configure it to use only specific CPU Cores
config = tf.ConfigProto(intra_op_parallelism_threads=4,
inter_op_parallelism_threads=4,
device_count={"CPU": 1, "GPU": 0},
allow_soft_placement=True)
# import tensorflow as tf
import numpy as np
from IEOMAP_dataset_AC import dataset, IeomapSentenceIterator
from sklearn.metrics import confusion_matrix
from models_AC import SentenceModel
import json
def emotion_recognition(n_run, epochs, batch_size, embedding_size, first_rnn_size, dropout, embedding, num_speakers):
########################################################################################################################
# Hyper-parameters
########################################################################################################################
split_size = 0.8 # Split proportion of train and test data
#log_dir = './logs_AC/RNN_without_ID/1'
log_dir = './logs_AC/RNN_' \
+ str(num_speakers) + '/' + str(n_run) + '/'
#log_dir = './logs_AC/RNN_' + embedding + 'Emb' + str(embedding_size) + '_1layer' + str(2*first_rnn_size) + '/' + str(n_run)
train_log_dir = log_dir + 'train'
val_log_dir = log_dir + 'val'
########################################################################################################################
# Initialize the Data set
########################################################################################################################
sentences, targets, data_info, speakers = dataset(mode='sentences', embedding=embedding, embedding_size=embedding_size)
train_data = IeomapSentenceIterator(sentences[0], targets[0], data_info['sentences_length'][0], speakers[0])
val_data = IeomapSentenceIterator(sentences[1], targets[1], data_info['sentences_length'][1], speakers[1])
test_data = IeomapSentenceIterator(sentences[2], targets[2], data_info['sentences_length'][2], speakers[2])
########################################################################################################################
# Initialize the model
########################################################################################################################
g = SentenceModel(vocab_size=(data_info['vocabulary_size'] + 1),
embedding_size=embedding_size,
first_rnn_size=first_rnn_size,
num_classes=data_info['num_classes'],
dropout=dropout,
embedding=embedding,
num_speakers=num_speakers)
# Store model setup
model_setup = {'vocab_size': (data_info['vocabulary_size'] + 1),
'embedding_size': embedding_size,
'first_rnn_size': first_rnn_size,
'num_classes': data_info['num_classes'],
'dropout': dropout,
'embedding': embedding,
'num_speakers': num_speakers}
dirname = os.path.dirname(log_dir)
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(log_dir + 'model_setup.p', 'w') as file:
json.dump(model_setup, file, indent=4)
########################################################################################################################
# Initialize the parameters
########################################################################################################################
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
saver = tf.train.Saver()
epoch = 0
best_epoch = 0
train_conf_matrix = 0
val_conf_matrix = 0
test_conf_matrix = 0
best_acc = 0
########################################################################################################################
# Performance Indicators
########################################################################################################################
writer_train = tf.summary.FileWriter(train_log_dir, sess.graph)
writer_val = tf.summary.FileWriter(val_log_dir)
accuracy_tf = tf.placeholder(tf.float32, [])
precision_tf = tf.placeholder(tf.float32, [])
recall_tf = tf.placeholder(tf.float32, [])
summary_op = tf.summary.scalar('accuracy', accuracy_tf)
summary_op = tf.summary.scalar('precision', precision_tf)
summary_op = tf.summary.scalar('recall', recall_tf)
########################################################################################################################
# Model training procedure
########################################################################################################################
while train_data.epoch < epochs: # and train_data.epoch < best_epoch + 20:
sentences_batch, sentences_length_batch, targets_batch, speakers_batch = train_data.next_batch(batch_size)
preds, _ = sess.run([g['preds'],
g['ts']],
feed_dict={g['x']: np.array(sentences_batch),
g['y']: np.array(targets_batch).reshape(len(targets_batch)),
g['speaker']: np.array(speakers_batch),
g['seqlen']: np.array(sentences_length_batch).reshape(len(targets_batch))})
####################################################################################################################
# Calculate the Train data Confusion Matrix
####################################################################################################################
train_conf_matrix += confusion_matrix(targets_batch, preds, labels=range(data_info['num_classes']))
####################################################################################################################
# At the end of each training epoch, compute the validation results and store the relevant information
####################################################################################################################
if train_data.epoch != epoch:
while val_data.epoch == epoch:
sentences_batch, sentences_length_batch, targets_batch, speakers_batch = val_data.next_batch(batch_size)
preds = sess.run([g['preds']],
feed_dict={g['x']: np.array(sentences_batch),
g['y']: np.array(targets_batch).reshape(len(targets_batch)),
g['speaker']: np.array(speakers_batch),
g['seqlen']: np.array(sentences_length_batch).reshape(
len(targets_batch))})
############################################################################################################
# Calculate the Validation data Confusion Matrix
############################################################################################################
val_conf_matrix += confusion_matrix(targets_batch, preds[0], labels=range(data_info['num_classes']))
################################################################################################################
# Compute Accuracy, Precision and Recall
################################################################################################################
train_CM_size = len(train_conf_matrix)
total_train = sum(sum(train_conf_matrix))
train_TP = np.diagonal(train_conf_matrix)
train_FP = [sum(train_conf_matrix[:, i]) - train_TP[i] for i in range(train_CM_size)]
train_FN = [sum(train_conf_matrix[i, :]) - train_TP[i] for i in range(train_CM_size)]
train_TN = train_CM_size - train_TP - train_FP - train_FN
train_precision = train_TP / (train_TP + train_FP)  # aka positive predictive value (not the true positive rate)
train_recall = train_TP / (train_TP + train_FN)
total_train_correct = sum(train_TP)
total_train_accuracy = total_train_correct / total_train
total_train_precision = sum(train_precision) / train_CM_size
total_train_recall = sum(train_recall) / train_CM_size
val_CM_size = len(val_conf_matrix)
total_val = sum(sum(val_conf_matrix))
val_TP = np.diagonal(val_conf_matrix)
val_FP = [sum(val_conf_matrix[:, i]) - val_TP[i] for i in range(val_CM_size)]
val_FN = [sum(val_conf_matrix[i, :]) - val_TP[i] for i in range(val_CM_size)]
val_TN = val_CM_size - val_TP - val_FP - val_FN
val_precision = val_TP / (val_TP + val_FP)
val_recall = val_TP / (val_TP + val_FN)
total_val_correct = sum(val_TP)
total_val_accuracy = total_val_correct / total_val
total_val_precision = sum(val_precision) / val_CM_size
total_val_recall = sum(val_recall) / val_CM_size
################################################################################################################
# Store Accuracy Precision Recall
################################################################################################################
train_acc_summary = tf.Summary(
value=[tf.Summary.Value(tag="accuracy", simple_value=total_train_accuracy), ])
train_prec_summary = tf.Summary(
value=[tf.Summary.Value(tag="precision", simple_value=total_train_precision), ])
train_rec_summary = tf.Summary(value=[tf.Summary.Value(tag="recall", simple_value=total_train_recall), ])
val_acc_summary = tf.Summary(value=[tf.Summary.Value(tag="accuracy", simple_value=total_val_accuracy), ])
val_prec_summary = tf.Summary(
value=[tf.Summary.Value(tag="precision", simple_value=total_val_precision), ])
val_rec_summary = tf.Summary(value=[tf.Summary.Value(tag="recall", simple_value=total_val_recall), ])
writer_train.add_summary(train_acc_summary, epoch)
writer_train.add_summary(train_prec_summary, epoch)
writer_train.add_summary(train_rec_summary, epoch)
writer_val.add_summary(val_acc_summary, epoch)
writer_val.add_summary(val_prec_summary, epoch)
writer_val.add_summary(val_rec_summary, epoch)
writer_train.flush()
writer_val.flush()
################################################################################################################
# Print the confusion matrix and store important information
################################################################################################################
print(train_conf_matrix)
print(val_conf_matrix)
if best_acc < total_val_accuracy:
saver.save(sess, log_dir + "acc_best_validation_model.ckpt")
best_acc = total_val_accuracy
best_epoch = epoch
store_info = {'epoch': best_epoch,
'train_conf_matrix': list([list(x) for x in train_conf_matrix]),
'train_accuracy': total_train_accuracy,
'train_precision': list(train_precision),
'total_train_precision': total_train_precision,
'train_recall': list(train_recall),
'total_train_recall': total_train_recall,
'val_conf_matrix': list([list(x) for x in val_conf_matrix]),
'val_accuracy': total_val_accuracy,
'val_precision': list(val_precision),
'total_val_precision': total_val_precision,
'val_recall': list(val_recall),
'total_val_recall': total_val_recall}
store_convergence_info = {'epoch': train_data.epoch,
'train_conf_matrix': list([list(x) for x in train_conf_matrix]),
'train_accuracy': total_train_accuracy,
'train_precision': list(train_precision),
'total_train_precision': total_train_precision,
'train_recall': list(train_recall),
'total_train_recall': total_train_recall,
'val_conf_matrix': list([list(x) for x in val_conf_matrix]),
'val_accuracy': total_val_accuracy,
'val_precision': list(val_precision),
'total_val_precision': total_val_precision,
'val_recall': list(val_recall),
'total_val_recall': total_val_recall}
################################################################################################################
# Get ready for the next epoch
################################################################################################################
epoch += 1
train_conf_matrix = 0
val_conf_matrix = 0
################################################################################################################
####################################################################################################################
# At the end of training, compute the test results and store the relevant information
####################################################################################################################
while test_data.epoch == 0:
sentences_batch, sentences_length_batch, targets_batch, speakers_batch = test_data.next_batch(batch_size)
preds = sess.run([g['preds']],
feed_dict={g['x']: np.array(sentences_batch),
g['y']: np.array(targets_batch).reshape(len(targets_batch)),
g['speaker']: np.array(speakers_batch),
g['seqlen']: np.array(sentences_length_batch).reshape(
len(targets_batch))})
############################################################################################################
# Calculate the Test data Confusion Matrix
############################################################################################################
test_conf_matrix += confusion_matrix(targets_batch, preds[0], labels=range(data_info['num_classes']))
################################################################################################################
# Compute Accuracy, Precision and Recall
################################################################################################################
test_CM_size = len(test_conf_matrix)
total_test = sum(sum(test_conf_matrix))
test_TP = np.diagonal(test_conf_matrix)
test_FP = [sum(test_conf_matrix[:, i]) - test_TP[i] for i in range(test_CM_size)]
test_FN = [sum(test_conf_matrix[i, :]) - test_TP[i] for i in range(test_CM_size)]
test_TN = test_CM_size - test_TP - test_FP - test_FN
test_precision = test_TP / (test_TP + test_FP)
test_recall = test_TP / (test_TP + test_FN)
total_test_correct = sum(test_TP)
total_test_accuracy = total_test_correct / total_test
total_test_precision = sum(test_precision) / test_CM_size
total_test_recall = sum(test_recall) / test_CM_size
################################################################################################################
# Print the confusion matrix and store important information
################################################################################################################
print(test_conf_matrix)
store_convergence_info['test_conf_matrix'] = list([list(x) for x in test_conf_matrix])
store_convergence_info['test_accuracy'] = total_test_accuracy
store_convergence_info['test_precision'] = list(test_precision)
store_convergence_info['total_test_precision'] = total_test_precision
store_convergence_info['test_recall'] = list(test_recall)
store_convergence_info['total_test_recall'] = total_test_recall
# trick to be able to save numpy.int64 into json
def default(o):
if isinstance(o, np.int64): return int(o)
raise TypeError
with open(log_dir + 'convergence_results.p', 'w') as file:
json.dump(store_convergence_info, file, default=default, indent=4)
saver.save(sess, log_dir + "convergence_model.ckpt")
####################################################################################################################
# At the end of training, compute the test results of the best validation model and store the relevant information
####################################################################################################################
saver.restore(sess, log_dir + "acc_best_validation_model.ckpt")
test_conf_matrix = 0
while test_data.epoch == 1:
sentences_batch, sentences_length_batch, targets_batch, speakers_batch = test_data.next_batch(batch_size)
preds = sess.run([g['preds']],
feed_dict={g['x']: np.array(sentences_batch),
g['y']: np.array(targets_batch).reshape(len(targets_batch)),
g['speaker']: np.array(speakers_batch),
g['seqlen']: np.array(sentences_length_batch).reshape(
len(targets_batch))})
############################################################################################################
# Calculate the Test data Confusion Matrix
############################################################################################################
test_conf_matrix += confusion_matrix(targets_batch, preds[0], labels=range(data_info['num_classes']))
################################################################################################################
# Compute Accuracy, Precision and Recall
################################################################################################################
test_CM_size = len(test_conf_matrix)
total_test = sum(sum(test_conf_matrix))
test_TP = np.diagonal(test_conf_matrix)
test_FP = [sum(test_conf_matrix[:, i]) - test_TP[i] for i in range(test_CM_size)]
test_FN = [sum(test_conf_matrix[i, :]) - test_TP[i] for i in range(test_CM_size)]
test_TN = test_CM_size - test_TP - test_FP - test_FN
test_precision = test_TP / (test_TP + test_FP)
test_recall = test_TP / (test_TP + test_FN)
total_test_correct = sum(test_TP)
total_test_accuracy = total_test_correct / total_test
total_test_precision = sum(test_precision) / test_CM_size
total_test_recall = sum(test_recall) / test_CM_size
################################################################################################################
# Print the confusion matrix and store important information
################################################################################################################
print(test_conf_matrix)
store_info['test_conf_matrix'] = list([list(x) for x in test_conf_matrix])
store_info['test_accuracy'] = total_test_accuracy
store_info['test_precision'] = list(test_precision)
store_info['total_test_precision'] = total_test_precision
store_info['test_recall'] = list(test_recall)
store_info['total_test_recall'] = total_test_recall
with open(log_dir + 'acc_best_validation_results.p', 'w') as file:
json.dump(store_info, file, default=default, indent=4)
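# Example call (illustrative; the argument values below are hypothetical and depend on
# the preprocessing performed by dataset()):
#
#   emotion_recognition(n_run=0, epochs=20, batch_size=32, embedding_size=300,
#                       first_rnn_size=128, dropout=0.8, embedding='glove',
#                       num_speakers=2)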
|
6821
|
def task_pos_args():
def show_params(param1, pos):
print('param1 is: {0}'.format(param1))
for index, pos_arg in enumerate(pos):
print('positional-{0}: {1}'.format(index, pos_arg))
return {'actions':[(show_params,)],
'params':[{'name':'param1',
'short':'p',
'default':'default value'},
],
'pos_arg': 'pos',
'verbosity': 2,
}
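# Illustrative run (assuming this task lives in a dodo.py and doit is installed):
#   doit pos_args -p 4 foo bar
# which should print param1 followed by each positional argument.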
|
6852
|
from dataclasses import dataclass, field
from datetime import date, datetime, time, timezone
from pathlib import Path
from typing import Any, Dict, Optional, Union
import ciso8601
import pytest
from mashumaro import DataClassDictMixin
from mashumaro.exceptions import UnserializableField
from mashumaro.types import SerializationStrategy
from .entities import (
MutableString,
MyList,
ThirdPartyType,
TypedDictRequiredKeys,
)
def test_ciso8601_datetime_parser():
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(metadata={"deserialize": "ciso8601"})
should_be = DataClass(x=datetime(2021, 1, 2, 3, 4, 5, tzinfo=timezone.utc))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05Z"})
assert instance == should_be
def test_ciso8601_date_parser():
@dataclass
class DataClass(DataClassDictMixin):
x: date = field(metadata={"deserialize": "ciso8601"})
should_be = DataClass(x=date(2021, 1, 2))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05Z"})
assert instance == should_be
def test_ciso8601_time_parser():
@dataclass
class DataClass(DataClassDictMixin):
x: time = field(metadata={"deserialize": "ciso8601"})
should_be = DataClass(x=time(3, 4, 5))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05Z"})
assert instance == should_be
def test_pendulum_datetime_parser():
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(metadata={"deserialize": "pendulum"})
should_be = DataClass(x=datetime(2008, 12, 29, 7, tzinfo=timezone.utc))
instance = DataClass.from_dict({"x": "2009-W01 0700"})
assert instance == should_be
def test_pendulum_date_parser():
@dataclass
class DataClass(DataClassDictMixin):
x: date = field(metadata={"deserialize": "pendulum"})
should_be = DataClass(x=date(2008, 12, 29))
instance = DataClass.from_dict({"x": "2009-W01"})
assert instance == should_be
def test_pendulum_time_parser():
@dataclass
class DataClass(DataClassDictMixin):
x: time = field(metadata={"deserialize": "pendulum"})
should_be = DataClass(x=time(3, 4, 5))
instance = DataClass.from_dict({"x": "2009-W01 030405"})
assert instance == should_be
def test_unsupported_datetime_parser_engine():
with pytest.raises(UnserializableField):
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(metadata={"deserialize": "unsupported"})
def test_global_function_datetime_parser():
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(
metadata={"deserialize": ciso8601.parse_datetime_as_naive}
)
should_be = DataClass(x=datetime(2021, 1, 2, 3, 4, 5))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05+03:00"})
assert instance == should_be
def test_local_function_datetime_parser():
def parse_dt(s):
return ciso8601.parse_datetime_as_naive(s)
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(metadata={"deserialize": parse_dt})
should_be = DataClass(x=datetime(2021, 1, 2, 3, 4, 5))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05+03:00"})
assert instance == should_be
def test_class_method_datetime_parser():
class DateTimeParser:
@classmethod
def parse_dt(cls, s: str) -> datetime:
return datetime.fromisoformat(s)
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(metadata={"deserialize": DateTimeParser.parse_dt})
should_be = DataClass(x=datetime(2021, 1, 2, 3, 4, 5))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05"})
assert instance == should_be
def test_class_instance_method_datetime_parser():
class DateTimeParser:
def __call__(self, s: str) -> datetime:
return datetime.fromisoformat(s)
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(metadata={"deserialize": DateTimeParser()})
should_be = DataClass(x=datetime(2021, 1, 2, 3, 4, 5))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05"})
assert instance == should_be
def test_callable_class_instance_datetime_parser():
class CallableDateTimeParser:
def __call__(self, s):
return ciso8601.parse_datetime(s)
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(metadata={"deserialize": CallableDateTimeParser()})
should_be = DataClass(x=datetime(2021, 1, 2, 3, 4, 5, tzinfo=timezone.utc))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05Z"})
assert instance == should_be
def test_lambda_datetime_parser():
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(
metadata={"deserialize": lambda s: ciso8601.parse_datetime(s)}
)
should_be = DataClass(x=datetime(2021, 1, 2, 3, 4, 5, tzinfo=timezone.utc))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05Z"})
assert instance == should_be
def test_derived_dataclass_metadata_deserialize_option():
@dataclass
class A:
x: datetime = field(metadata={"deserialize": ciso8601.parse_datetime})
@dataclass
class B(A, DataClassDictMixin):
y: datetime = field(metadata={"deserialize": ciso8601.parse_datetime})
should_be = B(
x=datetime(2021, 1, 2, 3, 4, 5, tzinfo=timezone.utc),
y=datetime(2021, 1, 2, 3, 4, 5, tzinfo=timezone.utc),
)
instance = B.from_dict(
{"x": "2021-01-02T03:04:05Z", "y": "2021-01-02T03:04:05Z"}
)
assert instance == should_be
def test_bytearray_overridden():
@dataclass
class DataClass(DataClassDictMixin):
x: bytearray = field(
metadata={"deserialize": lambda s: s.upper().encode()}
)
should_be = DataClass(x=bytearray(b"ABC"))
instance = DataClass.from_dict({"x": "abc"})
assert instance == should_be
def test_path_like_overridden():
@dataclass
class DataClass(DataClassDictMixin):
x: Path = field(
metadata={"deserialize": lambda s: Path(str(s).upper())}
)
should_be = DataClass(x=Path("/ABC"))
instance = DataClass.from_dict({"x": "/abc"})
assert instance == should_be
def test_datetime_serialize_option():
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(
metadata={"serialize": lambda v: v.strftime("%Y-%m-%d %H:%M:%S")}
)
should_be = {"x": "2021-01-02 03:04:05"}
instance = DataClass(x=datetime(2021, 1, 2, 3, 4, 5, tzinfo=timezone.utc))
assert instance.to_dict() == should_be
def test_third_party_type_overridden():
@dataclass
class DataClass(DataClassDictMixin):
x: ThirdPartyType = field(
metadata={
"deserialize": lambda v: ThirdPartyType(v),
"serialize": lambda v: v.value,
}
)
should_be = DataClass(x=ThirdPartyType(123))
instance = DataClass.from_dict({"x": 123})
assert instance == should_be
assert instance.to_dict() == {"x": 123}
def test_serializable_type_overridden():
@dataclass
class DataClass(DataClassDictMixin):
x: MutableString = field(
metadata={
"deserialize": lambda s: MutableString(s.upper()),
"serialize": lambda v: str(v).lower(),
}
)
should_be = DataClass(x=MutableString("ABC"))
instance = DataClass.from_dict({"x": "abc"})
assert instance == should_be
assert instance.to_dict() == {"x": "abc"}
def test_optional_overridden():
@dataclass
class DataClass(DataClassDictMixin):
x: Optional[ThirdPartyType] = field(
metadata={
"deserialize": lambda v: ThirdPartyType(v),
"serialize": lambda v: v.value,
}
)
instance = DataClass.from_dict({"x": 123})
assert instance
assert instance.x.value == 123
dct = instance.to_dict()
assert dct["x"] == 123
def test_union_overridden():
@dataclass
class DataClass(DataClassDictMixin):
x: Union[int, str, float, ThirdPartyType] = field(
metadata={
"deserialize": lambda v: ThirdPartyType(v),
"serialize": lambda v: v.value,
}
)
instance = DataClass.from_dict({"x": 1})
assert instance == DataClass(x=ThirdPartyType(value=1))
assert instance.to_dict() == {"x": 1}
def test_serialization_strategy():
class TestSerializationStrategy(SerializationStrategy):
def serialize(self, value):
return [value]
def deserialize(self, value):
return value[0]
@dataclass
class DataClass(DataClassDictMixin):
x: int = field(
metadata={"serialization_strategy": TestSerializationStrategy()}
)
instance = DataClass(x=123)
assert DataClass.from_dict({"x": [123]}) == instance
assert instance.to_dict() == {"x": [123]}
def test_collection_derived_custom_class():
@dataclass
class DataClass(DataClassDictMixin):
x: MyList = field(
metadata={"serialize": lambda v: v, "deserialize": lambda v: v}
)
instance = DataClass(x=[1, 2, 3])
assert DataClass.from_dict({"x": [1, 2, 3]}) == instance
assert instance.to_dict() == {"x": [1, 2, 3]}
def test_dataclass_with_typed_dict_overridden():
def serialize_x(x: TypedDictRequiredKeys) -> Dict[str, Any]:
return {"int": int(x["int"]), "float": float(x["float"])}
def deserialize_x(x: Dict[str, Any]) -> TypedDictRequiredKeys:
return TypedDictRequiredKeys(int=x["int"], float=x["float"])
@dataclass
class DataClass(DataClassDictMixin):
x: TypedDictRequiredKeys = field(
metadata={"serialize": serialize_x, "deserialize": deserialize_x}
)
obj = DataClass(x=TypedDictRequiredKeys(int=1, float=2.0))
data = {"x": {"int": 1, "float": 2.0}}
assert DataClass.from_dict(data) == obj
assert obj.to_dict() == data
|
6854
|
import ast
import re
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
import astunparse
from tests.common import AstunparseCommonTestCase
class DumpTestCase(AstunparseCommonTestCase, unittest.TestCase):
def assertASTEqual(self, dump1, dump2):
# undo the pretty-printing
dump1 = re.sub(r"(?<=[\(\[])\n\s+", "", dump1)
dump1 = re.sub(r"\n\s+", " ", dump1)
self.assertEqual(dump1, dump2)
def check_roundtrip(self, code1, filename="internal", mode="exec"):
ast_ = compile(str(code1), filename, mode, ast.PyCF_ONLY_AST)
dump1 = astunparse.dump(ast_)
dump2 = ast.dump(ast_)
self.assertASTEqual(dump1, dump2)
|
6895
|
import copy
from django.conf import settings
from django.test.utils import override_settings
from rest_framework import status, test
class PermissionsTest(test.APITransactionTestCase):
"""
Abstract class for permissions tests.
Methods `get_urls_configs`, `get_users_with_permission`,
`get_users_without_permissions` have to be overridden.
Logical example:
class ExamplePermissionsTest(PermissionsTest):
def get_users_with_permission(self, url, method):
if is_unreachable(url):
                # no one has access to an unreachable url
return []
return [user_with_permission]
def get_users_without_permissions(self, url, method):
if is_unreachable(url):
                # nobody has access to an unreachable url
return [user_with_permission, user_without_permission]
return [user_without_permission]
def get_urls_configs(self):
            yield {'url': 'http://testserver/some/url', 'method': 'GET'}
yield {'url': 'http://testserver/some/unreachable/url', 'method': 'POST'}
...
"""
def get_urls_configs(self):
"""
        Return a list or generator of url configs.
        Each url config is a dictionary with the following keys:
- url: url itself
- method: request method
- data: data which will be sent in request
url config example:
{
'url': 'http://testserver/api/backup/',
'method': 'POST',
'data': {'backup_source': 'backup/source/url'}
}
"""
raise NotImplementedError()
def get_users_with_permission(self, url, method):
"""
Return list of users which can access given url with given method
"""
raise NotImplementedError()
def get_users_without_permissions(self, url, method):
"""
Return list of users which can not access given url with given method
"""
raise NotImplementedError()
def test_permissions(self):
"""
        Go through all url configs and check that users with permissions
        can request them and users without permissions cannot
"""
for conf in self.get_urls_configs():
url, method = conf['url'], conf['method']
data = conf['data'] if 'data' in conf else {}
for user in self.get_users_with_permission(url, method):
self.client.force_authenticate(user=user)
response = getattr(self.client, method.lower())(url, data=data)
self.assertFalse(
response.status_code
in (status.HTTP_403_FORBIDDEN, status.HTTP_404_NOT_FOUND),
'Error. User %s can not reach url: %s (method:%s). (Response status code %s, data %s)'
% (user, url, method, response.status_code, response.data),
)
for user in self.get_users_without_permissions(url, method):
self.client.force_authenticate(user=user)
response = getattr(self.client, method.lower())(url, data=data)
unreachable_statuses = (
status.HTTP_403_FORBIDDEN,
status.HTTP_404_NOT_FOUND,
status.HTTP_409_CONFLICT,
)
self.assertTrue(
response.status_code in unreachable_statuses,
'Error. User %s can reach url: %s (method:%s). (Response status code %s, data %s)'
% (user, url, method, response.status_code, response.data),
)
class ListPermissionsTest(test.APITransactionTestCase):
"""
Abstract class that tests what objects user receive in list.
Method `get_users_and_expected_results` has to be overridden.
    Method `get_url` has to be defined.
"""
def get_url(self):
return None
def get_users_and_expected_results(self):
"""
        Return a list or generator of dictionaries with the following keys:
         - user - the user to test
         - expected_results - list of dictionaries with the fields the user
           has to receive in the response from the server
"""
pass
def test_list_permissions(self):
for user_and_expected_result in self.get_users_and_expected_results():
user = user_and_expected_result['user']
expected_results = user_and_expected_result['expected_results']
self.client.force_authenticate(user=user)
response = self.client.get(self.get_url())
self.assertEqual(
len(expected_results),
len(response.data),
                'User %s received a wrong number of objects. Expected: %s, received %s'
% (user, len(expected_results), len(response.data)),
)
for actual, expected in zip(response.data, expected_results):
for key, value in expected.items():
self.assertEqual(actual[key], value)
def override_waldur_core_settings(**kwargs):
waldur_settings = copy.deepcopy(settings.WALDUR_CORE)
waldur_settings.update(kwargs)
return override_settings(WALDUR_CORE=waldur_settings)
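# A hedged usage sketch (the setting name below is illustrative): the helper deep-copies
# settings.WALDUR_CORE, applies the keyword overrides and returns a plain `override_settings`
# object, so it can be used as a decorator or a context manager in tests, e.g.
#
#     @override_waldur_core_settings(TOKEN_LIFETIME=3600)
#     def test_token_lifetime(self):
#         self.assertEqual(settings.WALDUR_CORE['TOKEN_LIFETIME'], 3600)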
|
6904
|
import pytest
from nesta.packages.misc_utils.guess_sql_type import guess_sql_type
@pytest.fixture
def int_data():
return [1,2,4,False]
@pytest.fixture
def text_data():
return ['a', True, 2,
('A very long sentence A very long sentence A '
'very long sentence A very long sentence'), 'd']
@pytest.fixture
def float_data():
return [1,2.3,True,None]
@pytest.fixture
def bool_data():
return [True,False,None]
def test_guess_sql_type_int(int_data):
assert guess_sql_type(int_data) == 'INTEGER'
def test_guess_sql_type_float(float_data):
assert guess_sql_type(float_data) == 'FLOAT'
def test_guess_sql_type_bool(bool_data):
assert guess_sql_type(bool_data) == 'BOOLEAN'
def test_guess_sql_type_str(text_data):
assert guess_sql_type(text_data, text_len=10) == 'TEXT'
assert guess_sql_type(text_data, text_len=100).startswith('VARCHAR(')
|
6989
|
import asyncio
from collections import defaultdict
from datetime import timedelta
import pytest
from yui.api import SlackAPI
from yui.bot import Bot
from yui.box import Box
from yui.types.slack.response import APIResponse
from yui.utils import json
from .util import FakeImportLib
def test_bot_init(event_loop, monkeypatch, bot_config):
importlib = FakeImportLib()
monkeypatch.setattr('importlib.import_module', importlib.import_module)
bot_config.APPS = ['yui.app1', 'yui.app2']
box = Box()
bot = Bot(bot_config, event_loop, using_box=box)
assert bot.config == bot_config
assert bot.channels == []
assert bot.ims == []
assert bot.groups == []
assert bot.restart is False
assert isinstance(bot.api, SlackAPI)
assert bot.box is box
assert isinstance(bot.queue, asyncio.Queue)
assert importlib.import_queue == [
'yui.app1',
'yui.app2',
]
@pytest.mark.asyncio
async def test_call(event_loop, bot_config, response_mock):
token = 'asdf<PASSWORD>'
response_mock.post(
'https://slack.com/api/test11',
body=json.dumps({'res': 'hello world!'}),
headers={'content-type': 'application/json'},
status=200,
)
response_mock.post(
'https://slack.com/api/test12',
body=json.dumps({'res': 'hello world!', 'data': {'extra': 'wow'}}),
headers={'content-type': 'application/json'},
status=200,
)
response_mock.post(
'https://slack.com/api/test21',
body=json.dumps({'error': 'aaa'}),
headers={'content-type': 'application/json'},
status=404,
)
response_mock.post(
'https://slack.com/api/test22',
body=json.dumps({'error': 'aaa'}),
headers={'content-type': 'application/json'},
status=404,
)
response_mock.post(
'https://slack.com/api/test3',
body=json.dumps({'res': 'hello world!'}),
headers={'content-type': 'application/json'},
status=200,
)
box = Box()
bot = Bot(bot_config, event_loop, using_box=box)
bot.api.throttle_interval = defaultdict(lambda: timedelta(0))
res = await bot.call('test11')
assert res == APIResponse(
body={'res': 'hello world!'},
status=200,
headers={'content-type': 'application/json'},
)
res = await bot.call('test12', data={'extra': 'wow'})
assert res == APIResponse(
body={'res': 'hello world!', 'data': {'extra': 'wow'}},
status=200,
headers={'content-type': 'application/json'},
)
res = await bot.call('test21')
assert res == APIResponse(
body={'error': 'aaa'},
status=404,
headers={'content-type': 'application/json'},
)
res = await bot.call('test22', data={'extra': 'wow'})
assert res == APIResponse(
body={'error': 'aaa'},
status=404,
headers={'content-type': 'application/json'},
)
res = await bot.call('test3', token=token)
assert res == APIResponse(
body={'res': 'hello world!'},
status=200,
headers={'content-type': 'application/json'},
)
|
6995
|
from setuptools import setup
setup(
name="ambient_api",
version="1.5.6",
packages=["ambient_api"],
url="https://github.com/avryhof/ambient_api",
license="MIT",
author="<NAME>",
author_email="<EMAIL>",
description="A Python class for accessing the Ambient Weather API.",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
],
install_requires=["requests", "urllib3"],
)
|
6997
|
class RegipyException(Exception):
"""
This is the parent exception for all regipy exceptions
"""
pass
class RegipyGeneralException(RegipyException):
"""
General exception
"""
pass
class RegistryValueNotFoundException(RegipyException):
pass
class NoRegistrySubkeysException(RegipyException):
pass
class NoRegistryValuesException(RegipyException):
pass
class RegistryKeyNotFoundException(RegipyException):
pass
class UnidentifiedHiveException(RegipyException):
pass
class RegistryRecoveryException(RegipyException):
pass
class RegistryParsingException(RegipyException):
"""
Raised when there is a parsing error, most probably a corrupted hive
"""
pass
class NtSidDecodingException(RegipyException):
"""
Raised when the binary Windows NT SID representation can not be decoded
"""
|
6998
|
from collections import deque
class Solution:
"""
@param n: a positive integer
@return: the minimum number of replacements
"""
def integerReplacement(self, n):
# Write your code here
steps = 0
if n == 1:
return steps
queue = deque([n])
while queue:
size = len(queue)
print(queue, steps)
for _ in range(size):
num = queue.popleft()
if num == 1:
return steps
if num % 2 == 0:
queue.append(num // 2)
else:
queue.append(num + 1)
queue.append(num - 1)
steps += 1
return 0
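# A minimal sanity-check sketch: the BFS expands values level by level, halving even
# numbers and branching to n + 1 / n - 1 for odd numbers, so the first level at which
# 1 appears equals the minimum number of replacements.
if __name__ == "__main__":
    assert Solution().integerReplacement(8) == 3   # 8 -> 4 -> 2 -> 1
    assert Solution().integerReplacement(7) == 4   # 7 -> 6 -> 3 -> 2 -> 1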
|
7043
|
import sys
import math
import numpy as np
from datetime import datetime
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class DecAtt(nn.Module):
def __init__(self, num_units, num_classes, embedding_size, dropout, device=0,
training=True, project_input=True,
use_intra_attention=False, distance_biases=10, max_sentence_length=30):
"""
Create the model based on MLP networks.
:param num_units: size of the networks
:param num_classes: number of classes in the problem
:param embedding_size: size of each word embedding
:param use_intra_attention: whether to use intra-attention model
:param training: whether to create training tensors (optimizer)
        :param project_input: whether to project input embeddings to a
different dimensionality
:param distance_biases: number of different distances with biases used
in the intra-attention model
"""
super().__init__()
self.arch = "DecAtt"
self.num_units = num_units
self.num_classes = num_classes
self.project_input = project_input
self.embedding_size = embedding_size
self.distance_biases = distance_biases
self.intra_attention = False
self.max_sentence_length = max_sentence_length
self.device = device
self.bias_embedding = nn.Embedding(max_sentence_length,1)
self.linear_layer_project = nn.Linear(embedding_size, num_units, bias=False)
#self.linear_layer_intra = nn.Sequential(nn.Linear(num_units, num_units), nn.ReLU(), nn.Linear(num_units, num_units), nn.ReLU())
self.linear_layer_attend = nn.Sequential(nn.Dropout(p=dropout), nn.Linear(num_units, num_units), nn.ReLU(),
nn.Dropout(p=dropout), nn.Linear(num_units, num_units), nn.ReLU())
self.linear_layer_compare = nn.Sequential(nn.Dropout(p=dropout), nn.Linear(num_units*2, num_units), nn.ReLU(),
nn.Dropout(p=dropout), nn.Linear(num_units, num_units), nn.ReLU())
self.linear_layer_aggregate = nn.Sequential(nn.Dropout(p=dropout), nn.Linear(num_units*2, num_units), nn.ReLU(),
nn.Dropout(p=dropout), nn.Linear(num_units, num_units), nn.ReLU(),
                                                    nn.Linear(num_units, num_classes), nn.LogSoftmax(dim=1))
self.init_weight()
def init_weight(self):
self.linear_layer_project.weight.data.normal_(0, 0.01)
self.linear_layer_attend[1].weight.data.normal_(0, 0.01)
self.linear_layer_attend[1].bias.data.fill_(0)
self.linear_layer_attend[4].weight.data.normal_(0, 0.01)
self.linear_layer_attend[4].bias.data.fill_(0)
self.linear_layer_compare[1].weight.data.normal_(0, 0.01)
self.linear_layer_compare[1].bias.data.fill_(0)
self.linear_layer_compare[4].weight.data.normal_(0, 0.01)
self.linear_layer_compare[4].bias.data.fill_(0)
self.linear_layer_aggregate[1].weight.data.normal_(0, 0.01)
self.linear_layer_aggregate[1].bias.data.fill_(0)
self.linear_layer_aggregate[4].weight.data.normal_(0, 0.01)
self.linear_layer_aggregate[4].bias.data.fill_(0)
#self.word_embedding.weight.data.copy_(torch.from_numpy(self.pretrained_emb))
def attention_softmax3d(self, raw_attentions):
reshaped_attentions = raw_attentions.view(-1, raw_attentions.size(2))
out = nn.functional.softmax(reshaped_attentions, dim=1)
return out.view(raw_attentions.size(0),raw_attentions.size(1),raw_attentions.size(2))
def _transformation_input(self, embed_sent):
embed_sent = self.linear_layer_project(embed_sent)
result = embed_sent
if self.intra_attention:
f_intra = self.linear_layer_intra(embed_sent)
f_intra_t = torch.transpose(f_intra, 1, 2)
raw_attentions = torch.matmul(f_intra, f_intra_t)
time_steps = embed_sent.size(1)
r = torch.arange(0, time_steps)
r_matrix = r.view(1,-1).expand(time_steps,time_steps)
raw_index = r_matrix-r.view(-1,1)
clipped_index = torch.clamp(raw_index,0,self.distance_biases-1)
clipped_index = Variable(clipped_index.long())
if torch.cuda.is_available():
clipped_index = clipped_index.to(self.device)
bias = self.bias_embedding(clipped_index)
bias = torch.squeeze(bias)
raw_attentions += bias
attentions = self.attention_softmax3d(raw_attentions)
attended = torch.matmul(attentions, embed_sent)
result = torch.cat([embed_sent,attended],2)
return result
def attend(self, sent1, sent2, lsize_list, rsize_list):
"""
Compute inter-sentence attention. This is step 1 (attend) in the paper
:param sent1: tensor in shape (batch, time_steps, num_units),
the projected sentence 1
:param sent2: tensor in shape (batch, time_steps, num_units)
        :return: a tuple of 3-d tensors, alpha and beta.
"""
repr1 = self.linear_layer_attend(sent1)
repr2 = self.linear_layer_attend(sent2)
repr2 = torch.transpose(repr2,1,2)
raw_attentions = torch.matmul(repr1, repr2)
#self.mask = generate_mask(lsize_list, rsize_list)
# masked = mask(self.raw_attentions, rsize_list)
#masked = raw_attentions * self.mask
att_sent1 = self.attention_softmax3d(raw_attentions)
beta = torch.matmul(att_sent1, sent2) #input2_soft
raw_attentions_t = torch.transpose(raw_attentions,1,2).contiguous()
#self.mask_t = torch.transpose(self.mask, 1, 2).contiguous()
# masked = mask(raw_attentions_t, lsize_list)
#masked = raw_attentions_t * self.mask_t
att_sent2 = self.attention_softmax3d(raw_attentions_t)
alpha = torch.matmul(att_sent2,sent1) #input1_soft
return alpha, beta
def compare(self, sentence, soft_alignment):
"""
        Apply a feed forward network to compare one sentence to its
soft alignment with the other.
:param sentence: embedded and projected sentence,
shape (batch, time_steps, num_units)
:param soft_alignment: tensor with shape (batch, time_steps, num_units)
:return: a tensor (batch, time_steps, num_units)
"""
sent_alignment = torch.cat([sentence, soft_alignment],2)
out = self.linear_layer_compare(sent_alignment)
#out, (state, _) = self.lstm_compare(out)
return out
def aggregate(self, v1, v2):
"""
        Aggregate the comparison vectors produced for each sentence and its
        soft alignment with the other
:param v1: tensor with shape (batch, time_steps, num_units)
:param v2: tensor with shape (batch, time_steps, num_units)
:return: logits over classes, shape (batch, num_classes)
"""
v1_sum = torch.sum(v1,1)
v2_sum = torch.sum(v2,1)
out = self.linear_layer_aggregate(torch.cat([v1_sum,v2_sum],1))
return out
def forward(self, sent1, sent2, ext_feats=None, word_to_doc_count=None, raw_sent1=None, raw_sent2=None):
lsize_list = [len(s.split(" ")) for s in raw_sent1]
rsize_list = [len(s.split(" ")) for s in raw_sent2]
sent1 = sent1.permute(0, 2, 1)
sent2 = sent2.permute(0, 2, 1)
sent1 = self._transformation_input(sent1)
sent2 = self._transformation_input(sent2)
alpha, beta = self.attend(sent1, sent2, lsize_list, rsize_list)
v1 = self.compare(sent1, beta)
v2 = self.compare(sent2, alpha)
logits = self.aggregate(v1, v2)
return logits
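# A minimal smoke-test sketch, assuming inputs shaped (batch, embedding_size, time_steps),
# which forward() permutes internally; all sizes and the dummy sentences are illustrative.
if __name__ == "__main__":
    model = DecAtt(num_units=200, num_classes=3, embedding_size=300, dropout=0.2)
    sent1 = torch.randn(4, 300, 10)
    sent2 = torch.randn(4, 300, 12)
    raw1 = ["w1 w2 w3"] * 4            # raw sentences are only used for their token counts
    raw2 = ["w1 w2 w3 w4"] * 4
    log_probs = model(sent1, sent2, raw_sent1=raw1, raw_sent2=raw2)
    print(log_probs.shape)             # torch.Size([4, 3])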
|
7044
|
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
import torch
import numpy as np
import cv2
import pdb
def sigmoid(x):
return 1 / (1 + math.exp(-x))
def norm_angle(angle):
norm_angle = sigmoid(10 * (abs(angle) / 0.7853975 - 1))
return norm_angle
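# For example, norm_angle(0.0) = sigmoid(-10) ~ 4.5e-05 and norm_angle(0.7853975) = sigmoid(0) = 0.5,
# so the normalised value rises smoothly towards 1 as |angle| exceeds ~pi/4.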
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU()
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU()
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out = out + residual
out = self.relu(out)
return out
###''' self-attention; relation-attention '''
class ResNet_AT(nn.Module):
def __init__(self, block, layers, num_classes=1000, end2end=True, at_type=''):
self.inplanes = 64
self.end2end = end2end
super(ResNet_AT, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU()
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.dropout = nn.Dropout(0.5)
self.dropout2 = nn.Dropout(0.6)
self.alpha = nn.Sequential(nn.Linear(512, 1),
nn.Sigmoid())
self.beta = nn.Sequential(nn.Linear(1024, 1),
nn.Sigmoid())
self.pred_fc1 = nn.Linear(512, 7)
self.pred_fc2 = nn.Linear(1024, 7)
self.at_type = at_type
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x='', phrase='train', AT_level='first_level',vectors='',vm='',alphas_from1='',index_matrix=''):
vs = []
alphas = []
assert phrase == 'train' or phrase == 'eval'
assert AT_level == 'first_level' or AT_level == 'second_level' or AT_level == 'pred'
if phrase == 'train':
num_pair = 3
for i in range(num_pair):
f = x[:, :, :, :, i] # x[128,3,224,224]
f = self.conv1(f)
f = self.bn1(f)
f = self.relu(f)
f = self.maxpool(f)
f = self.layer1(f)
f = self.layer2(f)
f = self.layer3(f)
f = self.layer4(f)
f = self.avgpool(f)
f = f.squeeze(3).squeeze(2) # f[1, 512, 1, 1] ---> f[1, 512]
# MN_MODEL(first Level)
vs.append(f)
alphas.append(self.alpha(self.dropout(f)))
vs_stack = torch.stack(vs, dim=2)
alphas_stack = torch.stack(alphas, dim=2)
if self.at_type == 'self-attention':
vm1 = vs_stack.mul(alphas_stack).sum(2).div(alphas_stack.sum(2))
if self.at_type == 'self_relation-attention':
vm1 = vs_stack.mul(alphas_stack).sum(2).div(alphas_stack.sum(2))
betas = []
for i in range(len(vs)):
vs[i] = torch.cat([vs[i], vm1], dim=1)
betas.append(self.beta(self.dropout(vs[i])))
cascadeVs_stack = torch.stack(vs, dim=2)
betas_stack = torch.stack(betas, dim=2)
output = cascadeVs_stack.mul(betas_stack * alphas_stack).sum(2).div((betas_stack * alphas_stack).sum(2))
if self.at_type == 'self-attention':
vm1 = self.dropout(vm1)
pred_score = self.pred_fc1(vm1)
if self.at_type == 'self_relation-attention':
output = self.dropout2(output)
pred_score = self.pred_fc2(output)
return pred_score
if phrase == 'eval':
if AT_level == 'first_level':
f = self.conv1(x)
f = self.bn1(f)
f = self.relu(f)
f = self.maxpool(f)
f = self.layer1(f)
f = self.layer2(f)
f = self.layer3(f)
f = self.layer4(f)
f = self.avgpool(f)
f = f.squeeze(3).squeeze(2) # f[1, 512, 1, 1] ---> f[1, 512]
# MN_MODEL(first Level)
alphas = self.alpha(self.dropout(f))
return f, alphas
if AT_level == 'second_level':
assert self.at_type == 'self_relation-attention'
vms = index_matrix.permute(1, 0).mm(vm) # [381, 21783] -> [21783,381] * [381,512] --> [21783, 512]
vs_cate = torch.cat([vectors, vms], dim=1)
betas = self.beta(self.dropout(vs_cate))
''' keywords: mean_fc ; weight_sourcefc; sum_alpha; weightmean_sourcefc '''
''' alpha * beta '''
weight_catefc = vs_cate.mul(alphas_from1) # [21570,512] * [21570,1] --->[21570,512]
alpha_beta = alphas_from1.mul(betas)
sum_alphabetas = index_matrix.mm(alpha_beta) # [380,21570] * [21570,1] -> [380,1]
weightmean_catefc = index_matrix.mm(weight_catefc).div(sum_alphabetas)
weightmean_catefc = self.dropout2(weightmean_catefc)
pred_score = self.pred_fc2(weightmean_catefc)
return pred_score
if AT_level == 'pred':
if self.at_type == 'self-attention':
pred_score = self.pred_fc1(self.dropout(vm))
return pred_score
''' self-attention; relation-attention '''
def resnet18_at(pretrained=False, **kwargs):
    # Constructs a base ResNet-18 model.
model = ResNet_AT(BasicBlock, [2, 2, 2, 2], **kwargs)
return model
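# A minimal smoke-test sketch, assuming three frames per sample stacked on the last axis as
# the 'train' phrase expects; the batch size and at_type below are illustrative. Note that
# pred_fc1/pred_fc2 are hard-coded to 7 output classes.
if __name__ == "__main__":
    model = resnet18_at(at_type='self-attention')
    frames = torch.randn(2, 3, 224, 224, 3)   # (batch, channels, H, W, num_pair)
    scores = model(frames, phrase='train')
    print(scores.shape)                        # torch.Size([2, 7])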
|
7048
|
import spacy
from spacy.scorer import PRFScore
import typer
from pathlib import Path
from wasabi import Printer, table
import operator
import benepar
import clausecat_component
import clausecat_model
import clausecat_reader
import clause_segmentation
import clause_aggregation
msg = Printer()
def main(model_path: Path, eval_path: Path):
"""This script is used to evaluate the clausecat component"""
nlp = spacy.load(model_path)
reader = clausecat_reader.ClausecatCorpus(eval_path)
examples = reader(nlp)
clausecat = nlp.get_pipe("clausecat")
scorer = {
"POSITIVE": PRFScore(),
"NEGATIVE": PRFScore(),
"NEUTRAL": PRFScore(),
"ANAMNESIS": PRFScore(),
}
for i, example in enumerate(examples):
prediction = example.predicted
reference = example.reference
# Prediction
prediction = clausecat(prediction)
# Iterate through prediction and references
for pred_clause, ref_clause in zip(prediction._.clauses, reference._.clauses):
prediction_cats = pred_clause["cats"]
reference_cats = ref_clause["cats"]
prediction_class = max(prediction_cats.items(), key=operator.itemgetter(1))[
0
]
            # Add to the per-label confusion counts
            for label in prediction_cats:
                # use a separate name so the predicted Doc bound to `prediction`
                # is not shadowed by the per-label 0/1 value
                if label != prediction_class:
                    predicted_value = 0
                else:
                    predicted_value = 1
                if predicted_value == 0 and reference_cats[label] != 0:
                    scorer[label].fn += 1
                elif predicted_value == 1 and reference_cats[label] != 1:
                    scorer[label].fp += 1
                elif predicted_value == 1 and reference_cats[label] == 1:
                    scorer[label].tp += 1
# Printing
textcat_data = []
avg_fscore = 0
avg_recall = 0
avg_precision = 0
for label in scorer:
textcat_data.append(
(
label,
round(scorer[label].fscore, 2),
round(scorer[label].recall, 2),
round(scorer[label].precision, 2),
)
)
avg_fscore += scorer[label].fscore
avg_recall += scorer[label].recall
avg_precision += scorer[label].precision
textcat_data.append(
(
"AVERAGE",
round(avg_fscore / len(scorer), 2),
round(avg_recall / len(scorer), 2),
round(avg_precision / len(scorer), 2),
)
)
header = ("Label", "F-Score", "Recall", "Precision")
print(table(textcat_data, header=header, divider=True))
if __name__ == "__main__":
typer.run(main)
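# Typer turns `main` into a CLI, so the evaluation can be run as
# (script name and paths below are illustrative):
#     python evaluate.py ./training/model-best ./assets/eval_data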
|
7069
|
import sys
import pprint
import json
import datetime
import uuid
import urllib
import types
import traceback
from django.core.urlresolvers import reverse, resolve
from django.http import HttpResponseRedirect, Http404, HttpResponseServerError, HttpResponseNotFound
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.views.decorators.cache import never_cache
from django.views.debug import ExceptionReporter, get_safe_settings
from django.template import TemplateDoesNotExist, Context
from django.template.loader import render_to_string
from django.utils.encoding import force_bytes
from django.shortcuts import render
from splunkdj.decorators.render import render_to
from splunkdj.utility import make_splunkweb_url
from urlparse import urlparse
import logging
logger = logging.getLogger('spl.django.service')
error_logger = logging.getLogger('spl.django.request_error')
def format(value):
"""
Format values appropriately for json.dumps:
- Basic types will remain the same
- Unicode will be converted to str
- Everything else will be formatted using pprint
"""
if value is None:
return value
if isinstance(value, (int, long, str, float, list, dict, tuple, bool, unicode)):
return value
return str(pprint.pformat(value))
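# For example, basic values pass through unchanged (format(42) == 42, format([1, 2]) == [1, 2]),
# while other objects such as format(datetime.datetime.now()) come back as a pprint'ed string.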
def get_exception_info(request):
# We use Django's debug reporter, even though we are doing our own template.
# This is because it has a great way of collecting all the useful info we
# need, so no reason not to leverage it
exc_info = sys.exc_info()
reporter = ExceptionReporter(request, *exc_info)
ctx = reporter.get_traceback_data()
# This is a refactor of what the technical_500_template contains, just
# doing the logic in Python rather than in a template. We collect all this
# information so that we can log it.
exception_type = ctx['exception_type'] if 'exception_type' in ctx else "No exception supplied"
exception_value = ctx['exception_value'] if 'exception_value' in ctx else "No exception supplied"
django_version = ctx["django_version_info"]
python_executable = ctx['sys_executable']
python_version = ctx['sys_version_info']
python_path = ctx['sys_path']
server_time = str(ctx['server_time'])
unicode_hint = None
if 'unicode_hint' in ctx:
        unicode_hint = ctx['unicode_hint']
last_frame = None
if 'lastframe' in ctx:
frame_info = ctx['lastframe']
last_frame = "%s in %s, line %s" % (frame_info['filename'], frame_info['function'], frame_info['lineno'])
loaders = []
if 'template_does_not_exist' in ctx and 'loader_debug_info' in ctx and ctx['loader_debug_info']:
for loader in ctx['loader_debug_info']:
loader_info = {"name": loader['loader'], "templates": []}
for tmpl in loader['templates']:
loader_info['templates'].append({"file": tmpl['name'], "exists": tmpl['exists']})
loaders.append(loader_info)
template_errors = None
if 'template_info' in ctx and ctx['template_info']:
template_info = ctx['template_info']
template_errors = {
"name": template_info['name'],
"line": template_info['line'],
"message": template_info['message']
}
exception_info = []
if 'frames' in ctx:
frames = ctx['frames']
for frame in frames:
frame_info = {
"filename": frame['filename'],
"function": frame['function'],
"line": frame['lineno'],
"context_line": frame['context_line'],
"vars": []
}
if 'vars' in frame:
for var in frame['vars']:
frame_info['vars'].append({
"variable": str(var[0]),
"value": format(var[1])
})
exception_info.append(frame_info)
request_info = {
"path_info": request.path_info,
"method": request.META['REQUEST_METHOD'],
"url": request.build_absolute_uri(),
"GET": {},
"POST": {},
"FILES": {},
"COOKIES": {},
"META": {}
}
if hasattr(request, "GET"):
for key, value in request.GET.iteritems():
request_info['GET'][key] = format(value)
if "filtered_POST" in ctx:
for key, value in ctx['filtered_POST'].iteritems():
request_info['POST'][key] = format(value)
if hasattr(request, "FILES"):
for key, value in request.FILES.iteritems():
request_info['FILES'][key] = format(value)
if hasattr(request, "COOKIES"):
for key, value in request.COOKIES.iteritems():
request_info['COOKIES'][key] = format(value)
if hasattr(request, "META"):
for key, value in request.META.iteritems():
request_info['META'][key] = format(value)
settings_info = {}
for key, value in ctx['settings'].iteritems():
settings_info[key] = format(value)
ctx['errorid'] = errorid = uuid.uuid4().hex
full_info = dict(
__time=datetime.datetime.now().isoformat(),
__uuid=errorid,
settings=settings_info,
request=request_info,
traceback=exception_info,
stack=traceback.format_exc(exc_info[2]),
last_frame=last_frame,
template_loaders=loaders,
template_errors=template_errors,
        unicode_hint=unicode_hint,
exception_type=exception_type,
exception_value=exception_value,
django_version=django_version,
python_version=python_version,
python_executable=python_executable,
python_path=python_path,
server_time=server_time
)
return (errorid, ctx, full_info)
def redirector(request, app, view):
params = {}
for (key, val) in request.GET.iteritems():
params[key] = val
full_name = "%s:%s" % (app, view)
if not view or not app:
logger.error("Redirector requires both 'app' and 'view' to be set, received: app='%s' view='%s'" % (app, view))
        raise Exception("Redirector requires both 'app' and 'view' to be set, received: app='%s' view='%s'" % (app, view))
return HttpResponseRedirect(reverse(full_name, kwargs=params))
def default_search(request):
app = request.app_name
lang_code = request.LANGUAGE_CODE
return HttpResponseRedirect(make_splunkweb_url("/%s/app/%s/search" % (lang_code, app)))
def default_flashtimeline(request):
app = request.app_name
lang_code = request.LANGUAGE_CODE
return HttpResponseRedirect(make_splunkweb_url("/%s/app/%s/flashtimeline" % (lang_code, app)))
@render_to()
@login_required
def default_template_render(request, template_name):
app = request.app_name
template_path = "%s:%s.html" % (app, template_name)
return {
"TEMPLATE": template_path
}
@never_cache
def handle404(request):
# This code is modified from views/debug.py in Django, as we want to display
# a debug style view, just modified slightly.
exc_info = sys.exc_info()
exception = exc_info[1]
try:
tried = exception.args[0]['tried']
except (IndexError, TypeError, KeyError):
tried = []
urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
if isinstance(urlconf, types.ModuleType):
urlconf = urlconf.__name__
c = Context({
'urlconf': urlconf,
'root_urlconf': settings.ROOT_URLCONF,
'request_path': request.path_info[1:], # Trim leading slash
'urlpatterns': tried,
'reason': force_bytes(exception, errors='replace'),
'request': request,
'settings': get_safe_settings(),
})
return HttpResponseNotFound(render_to_string('splunkdj:404.html', context_instance=c))
@never_cache
def handle500(request):
# Let's attempt to render a more useful error message
errorid, ctx, exception = get_exception_info(request)
# We log the raw error to the log file, so that splunk can pick it up as
# JSON.
error_logger.error(json.dumps(exception, sort_keys=True))
# Build up the URL for making the query
lang_code = request.LANGUAGE_CODE
query_args = {
"q": 'search index=_internal sourcetype=django_error "%s" | head 1 | spath' % errorid,
"display.events.maxlines": 0,
"display.general.type": "events",
"earliest": 0,
"latest": ""
}
query_string = urllib.urlencode(query_args)
ctx['search_url'] = make_splunkweb_url("/%s/app/search/search?%s" % (lang_code, query_string))
return HttpResponseServerError(render_to_string('splunkdj:500.html', context_instance=Context(ctx)))
@never_cache
@render_to('splunkdj:page_config.html', mimetype="application/javascript")
@login_required
def get_page_config(request):
referer = request.META.get("HTTP_REFERER", "")
app = ""
app_label = ""
if referer:
try:
parsed = urlparse(referer)
parsed_path = parsed.path.replace("/%s/" % settings.MOUNT, "/")
resolved = resolve(parsed_path)
app = resolved.app_name
if app:
app_label = request.service.apps[app]["label"]
except Exception, e:
# If there was an error here, don't kill the entire page
# just return some default info
app = app or ""
app_label = app_label or app
zone_info = request.service.get('/services/search/timeparser/tz').body.read()
return {
"autoload": "1" == request.GET.get("autoload", "0"),
"config": json.dumps({
"SPLUNKD_FREE_LICENSE": request.user.is_free,
"MRSPARKLE_ROOT_PATH": "/%s" % str(settings.SPLUNK_WEB_MOUNT).strip("/"),
"DJANGO_ROOT_PATH": "/%s" % str(settings.RAW_MOUNT),
"MRSPARKLE_PORT_NUMBER": str(settings.SPLUNK_WEB_PORT),
"DJANGO_PORT_NUMBER": str(settings.DJANGO_PORT),
"LOCALE": str(request.LANGUAGE_CODE),
"JS_LOGGER_MODE": "None",
"USERNAME": str(request.user.username),
"USER_DISPLAYNAME": str(request.user.realname),
"APP": str(app),
"APP_DISPLAYNAME": str(app_label),
"SERVER_ZONEINFO": str(zone_info),
})
}
|
7082
|
from functools import partial
import tensorflow as tf
_EPSILON = tf.keras.backend.epsilon()
def register_keras_custom_object(cls):
tf.keras.utils.get_custom_objects()[cls.__name__] = cls
return cls
def binary_focal_loss(y_true, y_pred, gamma, *, pos_weight=None, from_logits=False, label_smoothing=None):
y_pred = tf.convert_to_tensor(y_pred)
if not y_pred.dtype.is_floating:
y_pred = tf.dtypes.cast(y_pred, dtype=tf.float32)
if from_logits:
return _binary_focal_loss_from_logits(labels=y_true,
logits=y_pred,
gamma=gamma,
pos_weight=pos_weight,
label_smoothing=label_smoothing)
else:
return _binary_focal_loss_from_probs(labels=y_true,
p=y_pred,
gamma=gamma,
pos_weight=pos_weight,
label_smoothing=label_smoothing)
@register_keras_custom_object
class BinaryFocalLoss(tf.keras.losses.Loss):
def __init__(self, gamma, *, pos_weight=None, from_logits=False, label_smoothing=None, **kwargs):
super().__init__(**kwargs)
self.gamma = gamma
self.pos_weight = pos_weight
self.from_logits = from_logits
self.label_smoothing = label_smoothing
def get_config(self):
config = super().get_config()
config.update(gamma=self.gamma,
pos_weight=self.pos_weight,
from_logits=self.from_logits,
label_smoothing=self.label_smoothing)
return config
def call(self, y_true, y_pred):
return binary_focal_loss(y_true=y_true,
y_pred=y_pred,
gamma=self.gamma,
pos_weight=self.pos_weight,
from_logits=self.from_logits,
label_smoothing=self.label_smoothing)
# Helper functions below
def _process_labels(labels, label_smoothing, dtype):
labels = tf.dtypes.cast(labels, dtype=dtype)
if label_smoothing is not None:
labels = (1 - label_smoothing) * labels + label_smoothing * 0.5
return labels
def _binary_focal_loss_from_logits(labels, logits, gamma, pos_weight, label_smoothing):
labels = _process_labels(labels=labels, label_smoothing=label_smoothing, dtype=logits.dtype)
# Compute probabilities for the positive class
p = tf.math.sigmoid(logits)
if label_smoothing is None:
labels_shape = labels.shape
logits_shape = logits.shape
if not labels_shape.is_fully_defined() or labels_shape != logits_shape:
labels_shape = tf.shape(labels)
logits_shape = tf.shape(logits)
shape = tf.broadcast_dynamic_shape(labels_shape, logits_shape)
labels = tf.broadcast_to(labels, shape)
logits = tf.broadcast_to(logits, shape)
if pos_weight is None:
loss_func = tf.nn.sigmoid_cross_entropy_with_logits
else:
loss_func = partial(tf.nn.weighted_cross_entropy_with_logits, pos_weight=pos_weight)
loss = loss_func(labels=labels, logits=logits)
modulation_pos = (1 - p)**gamma
modulation_neg = p**gamma
mask = tf.dtypes.cast(labels, dtype=tf.bool)
modulation = tf.where(mask, modulation_pos, modulation_neg)
return modulation * loss
# Terms for the positive and negative class components of the loss
pos_term = labels * ((1 - p)**gamma)
neg_term = (1 - labels) * (p**gamma)
# Term involving the log and ReLU
log_weight = pos_term
if pos_weight is not None:
log_weight *= pos_weight
log_weight += neg_term
log_term = tf.math.log1p(tf.math.exp(-tf.math.abs(logits)))
log_term += tf.nn.relu(-logits)
log_term *= log_weight
# Combine all the terms into the loss
loss = neg_term * logits + log_term
return loss
def _binary_focal_loss_from_probs(labels, p, gamma, pos_weight, label_smoothing):
q = 1 - p
# For numerical stability (so we don't inadvertently take the log of 0)
p = tf.math.maximum(p, _EPSILON)
q = tf.math.maximum(q, _EPSILON)
# Loss for the positive examples
pos_loss = -(q**gamma) * tf.math.log(p)
if pos_weight is not None:
pos_loss *= pos_weight
# Loss for the negative examples
neg_loss = -(p**gamma) * tf.math.log(q)
# Combine loss terms
if label_smoothing is None:
labels = tf.dtypes.cast(labels, dtype=tf.bool)
loss = tf.where(labels, pos_loss, neg_loss)
else:
labels = _process_labels(labels=labels, label_smoothing=label_smoothing, dtype=p.dtype)
loss = labels * pos_loss + (1 - labels) * neg_loss
return loss
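# A minimal usage sketch (the model and data below are illustrative): the functional form can be
# called directly on tensors, and the Keras Loss subclass can be passed to `model.compile`.
if __name__ == "__main__":
    y_true = tf.constant([0.0, 1.0, 1.0])
    y_pred = tf.constant([0.1, 0.8, 0.4])
    print(binary_focal_loss(y_true, y_pred, gamma=2.0))
    model = tf.keras.Sequential([tf.keras.Input(shape=(4,)),
                                 tf.keras.layers.Dense(1, activation="sigmoid")])
    model.compile(optimizer="adam", loss=BinaryFocalLoss(gamma=2.0))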
|
7104
|
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.remote.webelement import WebElement
class Select(WebElement):
"""
Implements logic to work with Web List UI elements
"""
@property
def is_multiple(self):
value = self.get_attribute('multiple')
return value is not None and not value == 'false'
def select_option(self, option):
"""
Performs selection of provided item from Web List
@params option - string item name
"""
items_list = self.get_options()
for item in items_list:
if item.get_attribute("value") == option:
item.click()
break
def get_options(self):
"""
        Returns all option items of the Web List
"""
return self.find_elements_by_tag_name('option')
def get_attribute_selected(self, attribute):
"""
Performs search of selected item from Web List
Return attribute of selected item
@params attribute - string attribute name
"""
items_list = self.get_options()
return next(iter([item.get_attribute(attribute) for item in items_list if item.is_selected()]), None)
def get_value_selected(self):
"""
Performs search of selected item from Web List
Return value of selected item
"""
return self.get_attribute_selected('value')
def get_text_selected(self):
"""
Performs search of selected item from Web List
Return text of selected item
"""
return self.get_attribute_selected('text')
def select_by_visible_text(self, text):
"""
        Selects the option whose visible text matches the given string
@params text - string visible text
"""
xpath = './/option[normalize-space(.) = {0}]'.format(self._escape_string(text))
opts = self.find_elements_by_xpath(xpath)
matched = False
for opt in opts:
self._set_selected(opt)
if not self.is_multiple:
return
matched = True
# in case the target option isn't found by xpath
# attempt to find it by direct comparison among options which contain at least the longest token from the text
if len(opts) == 0 and ' ' in text:
sub_string_without_space = self._get_longest_token(text)
if sub_string_without_space == "":
candidates = self.get_options()
else:
xpath = ".//option[contains(.,{0})]".format(self._escape_string(sub_string_without_space))
candidates = self.find_elements_by_xpath(xpath)
for candidate in candidates:
if text == candidate.text:
self._set_selected(candidate)
if not self.is_multiple:
return
matched = True
if not matched:
raise NoSuchElementException("Could not locate element with visible text: " + str(text))
@staticmethod
def _escape_string(value):
if '"' in value and "'" in value:
substrings = value.split('"')
result = ['concat(']
for substring in substrings:
result.append('"{0}"'.format(substring))
result.append(', \'"\', ')
result.pop()
if value.endswith('"'):
result.append(', \'"\'')
return ''.join(result) + ')'
if '"' in value:
return "'{0}'".format(value)
return '"{0}"'.format(value)
@staticmethod
def _get_longest_token(value):
items = value.split(' ')
longest = ''
for item in items:
if len(item) > len(longest):
longest = item
return longest
@staticmethod
def _set_selected(option):
if not option.is_selected():
option.click()
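# A hedged usage sketch: instances of this class are normally produced by an element
# factory/page object that wraps located <select> elements as `Select`; the element
# name below is illustrative.
#
#     country_select = page.country_dropdown        # assumed to resolve to a Select
#     country_select.select_option('US')
#     assert country_select.get_value_selected() == 'US'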
|
7134
|
from datetime import datetime
from typing import Optional, Dict, List, Union
from schema import Schema, Or
from src.monitorables.nodes.node import Node
from src.utils.exceptions import InvalidDictSchemaException
class ChainlinkNode(Node):
def __init__(self, node_name: str, node_id: str, parent_id: str) -> None:
super().__init__(node_name, node_id, parent_id)
# Metrics
self._went_down_at_prometheus = None
self._current_height = None
self._total_block_headers_received = None
self._max_pending_tx_delay = None
self._process_start_time_seconds = None
self._total_gas_bumps = None
self._total_gas_bumps_exceeds_limit = None
self._no_of_unconfirmed_txs = None
self._total_errored_job_runs = None
self._current_gas_price_info = {
'percentile': None,
'price': None,
}
self._eth_balance_info = {}
        # This variable stores the url of the source used to get prometheus node
        # data. This is needed because multiple prometheus sources can be
        # associated with the same node, only one of which is available at a
        # time, and the active source switches from time to time.
self._last_prometheus_source_used = None
# This stores the timestamp of the last successful monitoring round.
self._last_monitored_prometheus = None
@property
def is_down_prometheus(self) -> bool:
return self._went_down_at_prometheus is not None
@property
def went_down_at_prometheus(self) -> Optional[float]:
return self._went_down_at_prometheus
@property
def current_height(self) -> Optional[int]:
return self._current_height
@property
def total_block_headers_received(self) -> Optional[int]:
return self._total_block_headers_received
@property
def max_pending_tx_delay(self) -> Optional[int]:
return self._max_pending_tx_delay
@property
def process_start_time_seconds(self) -> Optional[float]:
return self._process_start_time_seconds
@property
def total_gas_bumps(self) -> Optional[int]:
return self._total_gas_bumps
@property
def total_gas_bumps_exceeds_limit(self) -> Optional[int]:
return self._total_gas_bumps_exceeds_limit
@property
def no_of_unconfirmed_txs(self) -> Optional[int]:
return self._no_of_unconfirmed_txs
@property
def total_errored_job_runs(self) -> Optional[int]:
return self._total_errored_job_runs
@property
def current_gas_price_info(self) -> Dict[str, Optional[float]]:
return self._current_gas_price_info
@property
def eth_balance_info(self) -> Dict[str, Union[str, float]]:
return self._eth_balance_info
@property
def last_prometheus_source_used(self) -> Optional[str]:
return self._last_prometheus_source_used
@property
def last_monitored_prometheus(self) -> Optional[float]:
return self._last_monitored_prometheus
@staticmethod
def get_int_prometheus_metric_attributes() -> List[str]:
"""
:return: A list of all variable names representing integer prometheus
: metrics.
"""
return [
'current_height',
'total_block_headers_received',
'max_pending_tx_delay', 'total_gas_bumps',
'total_gas_bumps_exceeds_limit', 'no_of_unconfirmed_txs',
'total_errored_job_runs'
]
@staticmethod
def get_float_prometheus_metric_attributes() -> List[str]:
"""
:return: A list of all variable names representing float prometheus
: metrics.
"""
return [
'went_down_at_prometheus', 'process_start_time_seconds',
'last_monitored_prometheus'
]
@staticmethod
def get_dict_prometheus_metric_attributes() -> List[str]:
"""
:return: A list of all variable names representing dict prometheus
: metrics.
"""
return ['current_gas_price_info', 'eth_balance_info']
@staticmethod
def get_str_prometheus_metric_attributes() -> List[str]:
"""
:return: A list of all variable names representing string prometheus
: metrics.
"""
return ['last_prometheus_source_used']
def get_all_prometheus_metric_attributes(self) -> List[str]:
"""
:return: A list of all variable names representing prometheus metrics
"""
str_prometheus_metric_attributes = \
self.get_str_prometheus_metric_attributes()
int_prometheus_metric_attributes = \
self.get_int_prometheus_metric_attributes()
float_prometheus_metric_attributes = \
self.get_float_prometheus_metric_attributes()
dict_prometheus_metric_attributes = \
self.get_dict_prometheus_metric_attributes()
return [
*str_prometheus_metric_attributes,
*int_prometheus_metric_attributes,
*float_prometheus_metric_attributes,
*dict_prometheus_metric_attributes
]
def get_int_metric_attributes(self) -> List[str]:
"""
:return: A list of all variable names representing int metrics.
"""
int_prometheus_metric_attributes = \
self.get_int_prometheus_metric_attributes()
return [*int_prometheus_metric_attributes]
def get_float_metric_attributes(self) -> List[str]:
"""
:return: A list of all variable names representing float metrics.
"""
float_prometheus_metric_attributes = \
self.get_float_prometheus_metric_attributes()
return [*float_prometheus_metric_attributes]
def get_dict_metric_attributes(self) -> List[str]:
"""
:return: A list of all variable names representing dict metrics.
"""
dict_prometheus_metric_attributes = \
self.get_dict_prometheus_metric_attributes()
return [*dict_prometheus_metric_attributes]
def get_str_metric_attributes(self) -> List[str]:
"""
:return: A list of all variable names representing str metrics.
"""
str_prometheus_metric_attributes = \
self.get_str_prometheus_metric_attributes()
return [*str_prometheus_metric_attributes]
def get_all_metric_attributes(self) -> List[str]:
"""
:return: A list of all variable names representing metrics
"""
prometheus_metric_attributes = \
self.get_all_prometheus_metric_attributes()
return [*prometheus_metric_attributes]
def set_went_down_at_prometheus(
self, went_down_at_prometheus: Optional[float]) -> None:
self._went_down_at_prometheus = went_down_at_prometheus
def set_prometheus_as_down(self, downtime: Optional[float]) -> None:
"""
This function sets the node's prometheus interface as down. It sets the
time that the interface was initially down to the parameter 'downtime'
if it is not None, otherwise it sets it to the current timestamp.
:param downtime:
:return:
"""
if downtime is None:
self.set_went_down_at_prometheus(datetime.now().timestamp())
else:
self.set_went_down_at_prometheus(downtime)
def set_prometheus_as_up(self) -> None:
"""
This function sets a node's prometheus interface as up. A node's
interface is said to be up if went_down_at_prometheus is None.
:return: None
"""
self.set_went_down_at_prometheus(None)
def set_current_height(self, new_height: Optional[int]) -> None:
self._current_height = new_height
def set_total_block_headers_received(
self, new_total_block_headers_received: Optional[int]) -> None:
self._total_block_headers_received = new_total_block_headers_received
def set_max_pending_tx_delay(
self, new_max_pending_tx_delay: Optional[int]) -> None:
self._max_pending_tx_delay = new_max_pending_tx_delay
def set_process_start_time_seconds(
self, new_process_start_time_seconds: Optional[float]) -> None:
self._process_start_time_seconds = new_process_start_time_seconds
def set_total_gas_bumps(self, new_total_gas_bumps: Optional[int]) -> None:
self._total_gas_bumps = new_total_gas_bumps
def set_total_gas_bumps_exceeds_limit(
self, new_total_gas_bumps_exceeds_limit: Optional[int]) -> None:
self._total_gas_bumps_exceeds_limit = new_total_gas_bumps_exceeds_limit
def set_no_of_unconfirmed_txs(
self, new_no_of_unconfirmed_txs: Optional[int]) -> None:
self._no_of_unconfirmed_txs = new_no_of_unconfirmed_txs
def set_total_errored_job_runs(
self, new_total_errored_job_runs: Optional[int]) -> None:
self._total_errored_job_runs = new_total_errored_job_runs
def set_current_gas_price_info(self, new_percentile: Optional[float],
new_price: Optional[float]) -> None:
"""
This method sets the current_gas_price_info dict based on the new
percentile and price. This is done in this way to protect the Dict
schema.
:param new_percentile: The new percentile to be stored
        :param new_price: The new gas price to be stored
:return: None
"""
self._current_gas_price_info['percentile'] = new_percentile
self._current_gas_price_info['price'] = new_price
@staticmethod
def _new_eth_balance_info_valid(new_eth_balance_info: Dict) -> bool:
"""
This method checks that the new eth_balance_info dict obeys the required
schema.
:param new_eth_balance_info: The dict to check
:return: True if the dict obeys the required schema
: False otherwise
"""
schema = Schema(Or({
'address': str,
'balance': float,
'latest_usage': float,
}, {}))
return schema.is_valid(new_eth_balance_info)
def set_eth_balance_info(
self, new_eth_balance_info: Dict[str, Union[str, float]]) -> None:
"""
This method sets the new_eth_balance_info. It first checks that the new
dict obeys the required schema. If not, an InvalidDictSchemaException is
raised.
:param new_eth_balance_info: The new eth_balance_info to store.
:return: None
"""""
if self._new_eth_balance_info_valid(new_eth_balance_info):
self._eth_balance_info = new_eth_balance_info
else:
raise InvalidDictSchemaException('new_eth_balance_info')
def set_last_prometheus_source_used(
self, new_last_prometheus_source_used: Optional[str]) -> None:
self._last_prometheus_source_used = new_last_prometheus_source_used
def set_last_monitored_prometheus(
self, new_last_monitored_prometheus: Optional[float]) -> None:
self._last_monitored_prometheus = new_last_monitored_prometheus
def reset(self) -> None:
"""
This method resets all metrics to their initial state
:return: None
"""
self.set_went_down_at_prometheus(None)
self.set_current_height(None)
self.set_total_block_headers_received(None)
self.set_max_pending_tx_delay(None)
self.set_process_start_time_seconds(None)
self.set_total_gas_bumps(None)
self.set_total_gas_bumps_exceeds_limit(None)
self.set_no_of_unconfirmed_txs(None)
self.set_total_errored_job_runs(None)
self.set_current_gas_price_info(None, None)
self.set_eth_balance_info({})
self.set_last_prometheus_source_used(None)
self.set_last_monitored_prometheus(None)
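# A hedged usage sketch (identifiers and values are illustrative): dict metrics are validated
# against the schemas above before being stored, and reset() returns every metric to its
# initial state.
#
#     node = ChainlinkNode('chainlink_node_1', 'node_id_1', 'parent_id_1')
#     node.set_eth_balance_info({'address': '0xabc', 'balance': 26.5, 'latest_usage': 0.5})
#     node.set_current_gas_price_info(50.0, 22.1)
#     node.reset()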
|
7139
|
from __future__ import absolute_import, print_function, unicode_literals
if __name__ == "__main__":
from .cli import cli
cli.wormhole()
else:
# raise ImportError('this module should not be imported')
pass
|
7148
|
import torch
import torch.nn.functional as F
class SelfAttnFunc(torch.autograd.Function):
@staticmethod
def forward(
ctx,
use_time_mask,
is_training,
heads,
scale,
inputs,
input_weights,
output_weights,
input_biases,
output_biases,
mask,
is_additive_mask,
dropout_prob,
):
use_biases_t = torch.tensor([input_biases is not None])
heads_t = torch.tensor([heads])
scale_t = torch.tensor([scale])
dropout_prob_t = torch.tensor([dropout_prob])
null_tensor = torch.tensor([])
head_dim = inputs.size(2) // heads
# Input Linear GEMM
# input1: (activations) [seql_q, seqs, embed_dim(1024)]
# input2: (weights) [embed_dim*3 (3072), embed_dim (1024)] (transpose [0,1])
# output: [seql_q, seqs, embed_dim*3]
# GEMM: ( (seql_q*seqs) x embed_dim ) x ( embed_dim x embed_dim*3 ) = (seql_q*seqs x embed_dim*3)
if use_biases_t[0]:
input_lin_results = torch.addmm(
input_biases,
inputs.view(inputs.size(0) * inputs.size(1), inputs.size(2)),
input_weights.transpose(0, 1),
beta=1.0,
alpha=1.0,
)
else:
input_lin_results = torch.mm(
inputs.view(inputs.size(0) * inputs.size(1), inputs.size(2)), input_weights.transpose(0, 1)
)
input_lin_results = input_lin_results.view(inputs.size(0), inputs.size(1), input_weights.size(0))
        # Slice out q,k,v from one big Input Linear output (should only impact meta data, no copies!)
# Sequences and heads are combined to make the batch of the Batched GEMM
# input_lin_results: [seql_q, seqs, heads(16), 3, head_dim(64)]
# input_lin_results: [seql_q, batches=seqs*heads, 3, head_dim]
input_lin_results = input_lin_results.view(inputs.size(0), inputs.size(1) * heads, 3, head_dim)
queries = input_lin_results[:, :, 0, :]
keys = input_lin_results[:, :, 1, :]
values = input_lin_results[:, :, 2, :]
# Matmul1 Batched GEMMs
# The output tensor is specified prior to the Batch GEMM because baddbmm requires its specification
# baddbmm is used to apply the scale parameter via the Batched GEMM's alpha parameter instead of
# a separate elementwise operation.
        # Input1: (Queries) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (Keys) [seql_k, seqs*heads, head_dim] transpose(0,1)
# output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
matmul1_results = torch.empty(
(queries.size(1), queries.size(0), keys.size(0)), dtype=queries.dtype, device=torch.device("cuda")
)
matmul1_results = torch.baddbmm(
matmul1_results,
queries.transpose(0, 1),
keys.transpose(0, 1).transpose(1, 2),
out=matmul1_results,
beta=0.0,
alpha=scale_t[0],
)
if mask is not None:
# Self Attention Time Mask
if use_time_mask:
assert len(mask.size()) == 2, "Timing mask is not 2D!"
assert mask.size(0) == mask.size(1), "Sequence length should match!"
mask = mask.to(torch.bool)
matmul1_results = matmul1_results.masked_fill_(mask, float("-inf"))
# Key Padding Mask
else:
batches, seql_q, seql_k = matmul1_results.size()
seqs = int(batches / heads)
matmul1_results = matmul1_results.view(seqs, heads, seql_q, seql_k)
if is_additive_mask:
matmul1_results = matmul1_results + mask.unsqueeze(1).unsqueeze(2)
else:
mask = mask.to(torch.bool)
matmul1_results = matmul1_results.masked_fill_(mask.unsqueeze(1).unsqueeze(2), float("-inf"))
matmul1_results = matmul1_results.view(seqs * heads, seql_q, seql_k)
softmax_results = F.softmax(matmul1_results, dim=-1)
# Dropout - is not executed for inference
if is_training:
dropout_results, dropout_mask = torch._fused_dropout(softmax_results, p=(1.0 - dropout_prob_t[0]))
else:
dropout_results = softmax_results
dropout_mask = null_tensor
# Matmul2 Batched GEMMs
        # The output tensor is specified here to request the non-standard output layout.
        # Because pytorch cannot currently perform autograd through an op that writes to a
        # specified output tensor, a custom backward pass must be provided.
# Input1: from_softmax [seqs*heads, seql_q, seql_k]
# Input2: (values) [seql_v, seqs*heads, head_dim] transpose(0,1)
# Output: [seql_q, seqs*heads, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_q x seql_k ) x ( seql_k x head_dim ) = (seql_q x head_dim)
matmul2_results = torch.empty(
(dropout_results.size(1), dropout_results.size(0), values.size(2)),
dtype=dropout_results.dtype,
device=torch.device("cuda"),
).transpose(1, 0)
matmul2_results = torch.bmm(dropout_results, values.transpose(0, 1), out=matmul2_results)
matmul2_results = (
matmul2_results.transpose(0, 1).contiguous().view(inputs.size(0), inputs.size(1), inputs.size(2))
)
# Output Linear GEMM
# Input1: (activations) [seql_q, seqs, embed_dim=heads*head_dim]
# Input2: (weights) [ embed_dim, embed_dim ] transpose(0,1)
# Output: [ seql_q, seqs, embed_dim ]
# GEMM: ( seql_q*seqs x embed_dim ) x ( embed_dim x embed_dim ) = ( seql_q*seqs x embed_dim )
if use_biases_t[0]:
outputs = torch.addmm(
output_biases,
matmul2_results.view(inputs.size(0) * inputs.size(1), inputs.size(2)),
output_weights.transpose(0, 1),
beta=1.0,
alpha=1.0,
)
else:
outputs = torch.mm(
matmul2_results.view(inputs.size(0) * inputs.size(1), inputs.size(2)), output_weights.transpose(0, 1)
)
outputs = outputs.view(inputs.size(0), inputs.size(1), output_weights.size(0))
ctx.save_for_backward(
use_biases_t,
heads_t,
scale_t,
matmul2_results,
dropout_results,
softmax_results,
input_lin_results,
inputs,
input_weights,
output_weights,
dropout_mask,
dropout_prob_t,
)
return outputs.detach()
@staticmethod
def backward(ctx, output_grads):
(
use_biases_t,
heads_t,
scale_t,
matmul2_results,
dropout_results,
softmax_results,
input_lin_results,
inputs,
input_weights,
output_weights,
dropout_mask,
dropout_prob_t,
) = ctx.saved_tensors
head_dim = inputs.size(2) // heads_t[0]
        # Slice out q,k,v from one big Input Linear output (should only impact meta data, no copies!)
# Sequences and heads are combined to make the batch of the Batched GEMM
# input_lin_results: [seql_q, seqs, heads(16), 3, head_dim(64)]
# input_lin_results: [seql_q, batches=seqs*heads, 3, head_dim]
input_lin_results = input_lin_results.view(inputs.size(0), inputs.size(1) * heads_t[0], 3, head_dim)
queries = input_lin_results[:, :, 0, :]
keys = input_lin_results[:, :, 1, :]
values = input_lin_results[:, :, 2, :]
# Slice out q,k,v from one big set of gradients entering the input linear's bprop (should only impact meta data, no copies!)
# The gradients are identical in size to the Input Linear outputs.
# The tensor is declared before hand to properly slice out query, key, and value grads.
input_lin_results_grads = torch.empty_like(input_lin_results)
queries_grads = input_lin_results_grads[:, :, 0, :]
keys_grads = input_lin_results_grads[:, :, 1, :]
values_grads = input_lin_results_grads[:, :, 2, :]
# Output Linear GEMM - DGRAD
# Input1: (data grads) [seql_q, seqs, embed_dim=heads*head_dim]
# Input2: (weights) [ embed_dim, embed_dim ]
# Output: [ seql_q, seqs, embed_dim ]
# GEMM: ( seql_q*seqs x embed_dim ) x ( embed_dim x embed_dim ) = ( seql_q*seqs x embed_dim )
output_lin_grads = torch.mm(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)), output_weights
)
output_lin_grads = output_lin_grads.view(output_grads.size(0), output_grads.size(1), output_weights.size(1))
# Output Linear GEMM - WGRAD
# Input1: (data grads) [seql_q*seqs, embed_dim=heads*head_dim] transpose(0,1)
# Input2: (activations) [seql_q*seqs, embed_dim ]
        # Output: [ embed_dim, embed_dim ]
# GEMM: ( embed_dim x seql_q*seqs ) x ( seql_q*seqs x embed_dim ) = ( embed_dim x embed_dim )
output_weight_grads = torch.mm(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)).transpose(0, 1),
matmul2_results.view(matmul2_results.size(0) * matmul2_results.size(1), matmul2_results.size(2)),
)
output_lin_grads = output_lin_grads.view(inputs.size(0), inputs.size(1) * heads_t[0], head_dim).transpose(0, 1)
if use_biases_t[0]:
output_bias_grads = torch.sum(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)), 0
)
else:
output_bias_grads = None
# Matmul2 - DGRAD1
# Input1: (data grads) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1).transpose(1,2)
# Output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
matmul2_dgrad1 = torch.bmm(output_lin_grads, values.transpose(0, 1).transpose(1, 2))
# Matmul2 - DGRAD2
        # Input1: (activations) [seqs*heads, seql_q, seql_k] transpose(1,2)
        # Input2: (data grads)  [seql_q, seqs*heads, head_dim] transpose(0,1)
        # Output:               [seql_k, seqs*heads, head_dim] transpose(0,1)
        # GEMM: Per batch: ( seql_k x seql_q ) x ( seql_q x head_dim ) = ( seql_k x head_dim )
values_grads = torch.bmm(dropout_results.transpose(1, 2), output_lin_grads, out=values_grads.transpose(0, 1))
        # Mask and Scaling for Dropout (not a publicly documented op)
dropout_grads = torch._masked_scale(matmul2_dgrad1, dropout_mask, 1.0 / (1.0 - dropout_prob_t[0]))
        # Softmax Grad (not a publicly documented op)
softmax_grads = torch._softmax_backward_data(dropout_grads, softmax_results, -1, softmax_results)
# Matmul1 - DGRAD1
# Input1: (data grads) [seqs*heads, seql_q, seql_k]
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1)
# Output: [seqs*heads, seql_q, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_q x seql_k ) x ( seql_k x head_dim ) = ( seql_q x head_dim )
queries_grads = torch.baddbmm(
queries_grads.transpose(0, 1),
softmax_grads,
keys.transpose(0, 1),
out=queries_grads.transpose(0, 1),
beta=0.0,
alpha=scale_t[0],
)
# Matmul1 - DGRAD2
# Input1: (data grads) [seqs*heads, seql_q, seql_k] transpose(1,2)
# Input2: (activations) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Output: [seqs*heads, seql_k, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_k x seql_q ) x ( seql_q x head_dim ) = ( seql_k x head_dim )
keys_grads = torch.baddbmm(
keys_grads.transpose(0, 1),
softmax_grads.transpose(1, 2),
queries.transpose(0, 1),
out=keys_grads.transpose(0, 1),
beta=0.0,
alpha=scale_t[0],
)
# Input Linear GEMM - DGRAD
# input1: (data grads) [seql_q, seqs, 3*embed_dim(3072)]
# input2: (weights) [embed_dim*3 (3072), embed_dim (1024)]
# output: [seql_q, seqs, embed_dim]
# GEMM: ( (seql_q*seqs) x 3*embed_dim ) x ( 3*embed_dim x embed_dim ) = (seql_q*seqs x embed_dim)
input_lin_results_grads = input_lin_results_grads.view(
inputs.size(0) * inputs.size(1), heads_t[0] * 3 * head_dim
)
input_grads = torch.mm(input_lin_results_grads, input_weights)
input_grads = input_grads.view(inputs.size(0), inputs.size(1), inputs.size(2))
# Input Linear GEMM - WGRAD
# input1: (data grads) [seql_q*seqs, 3*embed_dim(3072)]
# input2: (activations) [seql_q*seqs, embed_dim(1024)]
# output: [3*embed_dim, embed_dim]
# GEMM: ( 3*embed_dim x seql_q*seqs ) x ( seql_q*seqs x embed_dim ) = (3*embed_dim x embed_dim)
input_weight_grads = torch.mm(
input_lin_results_grads.transpose(0, 1), inputs.view(inputs.size(0) * inputs.size(1), inputs.size(2))
)
if use_biases_t[0]:
input_bias_grads = torch.sum(input_lin_results_grads, 0)
else:
input_bias_grads = None
return (
None,
None,
None,
None,
input_grads,
input_weight_grads,
output_weight_grads,
input_bias_grads,
output_bias_grads,
None,
None,
)
self_attn_func = SelfAttnFunc.apply
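# A minimal usage sketch of self_attn_func. The sizes below are arbitrary and a CUDA
# device is assumed, since forward() allocates its intermediate buffers on "cuda";
# the inference path (is_training=False) avoids the internal fused-dropout op.
if __name__ == "__main__" and torch.cuda.is_available():
    seql, seqs, heads, embed_dim = 4, 2, 8, 64
    head_dim = embed_dim // heads
    inputs = torch.randn(seql, seqs, embed_dim, device="cuda")
    input_weights = torch.randn(3 * embed_dim, embed_dim, device="cuda")
    output_weights = torch.randn(embed_dim, embed_dim, device="cuda")
    input_biases = torch.zeros(3 * embed_dim, device="cuda")
    output_biases = torch.zeros(embed_dim, device="cuda")
    out = self_attn_func(
        False,                  # use_time_mask
        False,                  # is_training
        heads,
        1.0 / head_dim ** 0.5,  # scale
        inputs,
        input_weights,
        output_weights,
        input_biases,
        output_biases,
        None,                   # mask
        False,                  # is_additive_mask
        0.0,                    # dropout_prob
    )
    print(out.shape)            # torch.Size([4, 2, 64])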
|
7179
|
from __future__ import annotations
from copy import deepcopy
from dataclasses import dataclass, field
from typing import List, Iterator, TypeVar, Union, Any, Generic
import pandas as pd
from pandas.core.indexing import _LocIndexer
from reamber.base.Map import Map
from reamber.base.Property import stack_props
NoteListT = TypeVar('NoteListT')
HitListT = TypeVar('HitListT')
HoldListT = TypeVar('HoldListT')
BpmListT = TypeVar('BpmListT')
MapT = TypeVar('MapT')
@dataclass
class MapSet(Generic[NoteListT, HitListT, HoldListT, BpmListT, MapT]):
maps: List[MapT[NoteListT, HitListT, HoldListT, BpmListT]] = field(default_factory=lambda: [])
def __init__(self, maps: List[MapT[NoteListT, HitListT, HoldListT, BpmListT]]):
self.maps = maps
def __iter__(self) -> Iterator[MapT]:
for m in self.maps:
yield m
def items(self):
for m in self.maps:
yield m.__class__, m
def __getitem__(self, item: Union[Any, type]):
if isinstance(item, type):
# We want to index by type.
return [m[item][0] for m in self.maps]
else:
# We want to index by slice/int/etc.
return self.maps[item]
def __setitem__(self, key: Union[Any, type], value):
this = self[key]
assert len(this) == len(value), "The lengths of the set and get must be the same."
for i in range(len(this)): this[i] = value[i]
def deepcopy(self):
""" Returns a deep copy of itself """
return deepcopy(self)
def describe(self, rounding: int = 2, unicode: bool = False) -> List[str]:
""" Describes the map's attributes as a short summary
:param rounding: The decimal rounding
        :param unicode: Whether to get the unicode or non-unicode metadata. \
Doesn't attempt to translate.
"""
return [m.describe(rounding=rounding, unicode=unicode, s=self) for m in self]
def rate(self, by: float) -> MapSet:
""" Changes the rate of the map. Note that you need to do rate on the mapset to affect BPM.
:param by: The value to rate it by. 1.1x speeds up the song by 10%. Hence 10/11 of the length.
"""
copy = self.deepcopy()
copy.maps = [m.rate(by=by) for m in copy.maps]
return copy
# noinspection DuplicatedCode,PyUnresolvedReferences
@stack_props()
class Stacker:
""" This purpose of this class is to provide unnamed access to the lists.
This can make code much shorter as we don't have to deal with keyed dicts.
For example,
>>> m = Map.stack()
>>> m.offset *= 2
Or if you do it inline,
>>> m.stack().lengths *= 2
This will change the offsets of all lists that have the offset property.
        This will change the map itself, as stack is a reference.
        This is also a "naive" system, so if a property such as column doesn't exist
        for Bpms, it will not break anything. However, every property must exist in
        at least one of the lists.
If the property isn't listed here, you can do string indexing
For example,
>>> m = Map.stack()
>>> m.other_property *= 2
"""
""" How does this work?
Firstly, if you concat a list of dfs, pd will always make a copy, so you have to
preserve the original dfs and also the stacked.
LISTS ---STACK---> COPY ---> STACKED
+---------- REFERENCE ---> UNSTACKED
The reason for stacking is so that we don't have to loop through all dfs to mutate.
        If we did loop through the dfs, we would have to stack them anyway, so it's just as efficient.
        However, in my view, it's simply easier to stack and then mutate.
        So, we keep 2 things in check: the unstacked and the stacked.
        However, we can only mutate the stacked one and then convert back to the unstacked,
        because the unstacked is the one being referenced.
Hence, we keep track of what partitions of the unstacked are each of the stacked.
IXS | | | | |
UNSTACKED [........] [........] [..] [....]
STACKED [...............................]
That's where ixs come in to help in converting the stacked values to unstacked.
So the workflow is that when we retrieve a value, it's always from the stacked.
Then, when it's mutated, it can be set and it will always call the _update
to update the referenced unstacked.
"""
stackers: List[Map.Stacker]
# noinspection PyProtectedMember
def __init__(self, stackers: List[Map.Stacker]):
self.stackers = stackers
def __getitem__(self, item):
return pd.DataFrame([i[item] for i in self.stackers])
def __setitem__(self, key, value):
for s, i in zip(self.stackers, value.iloc):
s[key] = i
_props = ['offset', 'column', 'length', 'bpm', 'metronome']
def stack(self, include: List[str] = None):
""" This creates a mutator for this instance, see Mutator for details. """
return self.Stacker([_.stack(include) for _ in self])
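# Usage sketch (m1 and m2 stand for any two concrete Map instances of the same game mode;
# they are illustrative, not part of this module):
#
#   ms = MapSet([m1, m2])
#   s = ms.stack()
#   s.offset += 100   # shifts the offsets of every stacked list in every map
#
# ms.rate(1.1) returns a deep copy of the whole set sped up by 10%.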
|
7190
|
import unittest
from logics.classes.propositional import Inference, Formula
from logics.classes.propositional.proof_theories import NaturalDeductionStep, NaturalDeductionRule
from logics.utils.parsers import classical_parser
from logics.instances.propositional.natural_deduction import classical_natural_deduction_system as nd_system
class TestClassicalNaturalDeductionSystem(unittest.TestCase):
def test_natural_deduction_rule(self):
"""Test overriding of index and len methods in NaturalDeductionRule"""
rule = NaturalDeductionRule([
'(...)',
NaturalDeductionStep(Formula(['→', ['A'], ['B']])),
'(...)',
NaturalDeductionStep(Formula(['B']), 'E→', [0, 1])
])
self.assertEqual(rule.index(NaturalDeductionStep(Formula(['B']), 'E→', [0, 1])), 1)
self.assertEqual(len(rule), 2)
def test_nd_system(self):
"""Test the method that tells if a step is a correct application of a rule"""
# A correct derivation
deriv = classical_parser.parse_derivation(
"""p; premise
(p → q); premise
q; E→; [1, 0]; []
p ∧ q; I∧; [0, 2]; []""",
natural_deduction=True)
# Check is application of the correct rule, and a different rule
self.assertTrue(nd_system.is_correct_application(deriv, 2, nd_system.rules['E→']))
self.assertFalse(nd_system.is_correct_application(deriv, 2, nd_system.rules['E∧2']))
self.assertTrue(nd_system.is_correct_application(deriv, 3, nd_system.rules['I∧']))
self.assertFalse(nd_system.is_correct_application(deriv, 3, nd_system.rules['E→']))
# Check is correct derivation of the correct and an incorrect inference
i = Inference([Formula(['p']), Formula(['→', ['p'], ['q']])],
[Formula(['∧', ['p'], ['q']])])
self.assertTrue(nd_system.is_correct_derivation(deriv, i))
i2 = Inference([Formula(['p']), Formula(['→', ['p'], ['q']])],
[Formula(['∧', ['q'], ['p']])])
self.assertFalse(nd_system.is_correct_derivation(deriv, i2))
# Repeating steps should not alter the outcome (should print a warning)
# deriv2_0 = classical_parser.parse_derivation(
# """p; supposition; []; [0]
# p; repetition; [0, 0]; [0]""",
# natural_deduction=True)
# self.assertTrue(nd_system.is_correct_application(deriv2_0, 1, nd_system.rules['repetition']))
# Test step in the future
deriv2_1 = classical_parser.parse_derivation(
"""p; supposition; []; [0]
p; repetition; [1]; [0]""",
natural_deduction=True)
deriv2_2 = classical_parser.parse_derivation(
"""p; supposition; []; [0]
p; repetition; [2]; [0]""",
natural_deduction=True)
self.assertFalse(nd_system.is_correct_application(deriv2_1, 1, nd_system.rules['repetition']))
self.assertFalse(nd_system.is_correct_application(deriv2_2, 1, nd_system.rules['repetition']))
# -------------------------------------------------
# Test incorrect use of suppositions
# Using a step in a closed supposition
deriv3_1 = classical_parser.parse_derivation(
"""p; supposition; []; [0]
p; repetition; [0]; [0]
(p → p); I→; [0, 1]; []
p; E→; [2, 0]; []""",
natural_deduction=True)
# Check correct application of rep and I→
self.assertTrue(nd_system.is_correct_application(deriv3_1, 1, nd_system.rules['repetition']))
self.assertTrue(nd_system.is_correct_application(deriv3_1, 2, nd_system.rules['I→']))
self.assertFalse(nd_system.is_correct_application(deriv3_1, 3, nd_system.rules['E→']))
# Closing a supposition with a rule that does not close
deriv3_2 = classical_parser.parse_derivation('''
p; premise
p; supposition; []; [1]
p; repetition; [0]; [1]
(p ∨ q); I∨1; [0]; []''',
natural_deduction=True)
self.assertFalse(nd_system.is_correct_application(deriv3_2, 3, nd_system.rules['I∨1']))
# Closing two suppositions at once
deriv3_3 = classical_parser.parse_derivation(
"""p; supposition; []; [0]
p; supposition; [0]; [0, 1]
(p → p); I→; [0, 1]; []""",
natural_deduction=True)
self.assertFalse(nd_system.is_correct_application(deriv3_3, 2, nd_system.rules['I→']))
# Not closing a supposition with a rule that does close
deriv3_4 = classical_parser.parse_derivation(
"""p; supposition; []; [0]
p; repetition; [0]; [0]
(p → p); I→; [0, 1]; [0]""",
natural_deduction=True)
self.assertFalse(nd_system.is_correct_application(deriv3_4, 2, nd_system.rules['I→']))
# Incorrect opening of suppositions
deriv3_5 = classical_parser.parse_derivation(
"""p; supposition; []; []""",
natural_deduction=True)
self.assertFalse(nd_system.is_correct_derivation(deriv3_5, None))
deriv3_6 = classical_parser.parse_derivation(
"""p; premise; []; []
q; supposition; []; [0]""",
natural_deduction=True)
self.assertFalse(nd_system.is_correct_derivation(deriv3_6, None))
# -------------------------------------------------
# A correct derivation using all the rules
deriv4 = classical_parser.parse_derivation(
"""q; premise; []; []
~q; supposition; []; [1]
~q; repetition; [1]; [1]
(q ∧ ~q); I∧; [0, 2]; [1]
q; E∧1; [3]; [1]
⊥; E~; [1, 4]; [1]
p; EFSQ; [5]; [1]
⊥; repetition; [5]; [1]
~~q; I~; [1, 7]; []
q; ~~; [8]; []
q; supposition; []; [10]
q; repetition; [10]; [10]
(q → q); I→; [10, 11]; []
q; E→; [12, 9]; []
(q ∨ p); I∨1; [13]; []
(p → q); premise; []; []
q; E∨; [14, 12, 15]; []
""", natural_deduction=True)
i3 = Inference([Formula(['q']), Formula(['→', ['p'], ['q']])],
[Formula(['q'])])
self.assertTrue(nd_system.is_correct_derivation(deriv4, i3))
def test_rule_order(self):
# i1 is conjunction introduction
i1 = Inference([Formula(['p']), Formula(['q'])],
[Formula(['∧', ['p'], ['q']])])
# First derivation: standard one
deriv1_1 = classical_parser.parse_derivation(
"""p; premise; []; []
q; premise; []; []
(p ∧ q); I∧; [0, 1]; []""",
natural_deduction=True)
self.assertTrue(nd_system.is_correct_derivation(deriv1_1, i1))
# Second derivation: reverse on_steps order
deriv1_2 = classical_parser.parse_derivation(
"""p; premise; []; []
q; premise; []; []
(p ∧ q); I∧; [1, 0]; []""",
natural_deduction=True)
self.assertFalse(nd_system.is_correct_derivation(deriv1_2, i1))
i2 = Inference([Formula(['p']), Formula(['q'])],
[Formula(['∧', ['q'], ['p']])])
# Third derivation: reverse the conjuncts
deriv2_1 = classical_parser.parse_derivation(
"""p; premise; []; []
q; premise; []; []
(q ∧ p); I∧; [1, 0]; []""",
natural_deduction=True)
self.assertTrue(nd_system.is_correct_derivation(deriv2_1, i2))
# Fourth derivation: reverse the conjuncts and the on_steps
deriv2_2 = classical_parser.parse_derivation(
"""p; premise; []; []
q; premise; []; []
(q ∧ p); I∧; [0, 1]; []""",
natural_deduction=True)
self.assertFalse(nd_system.is_correct_derivation(deriv2_2, i2))
if __name__ == '__main__':
unittest.main()
|
7226
|
import subprocess, os
ue4_win = r"C:\Program Files\Epic Games\UE_4.16"
ue4_linux = "/home/qiuwch/workspace/UE416"
ue4_mac = '/Users/Shared/Epic Games/UE_4.16'
win_uprojects = [
r'C:\qiuwch\workspace\uprojects\UE4RealisticRendering\RealisticRendering.uproject',
r'C:\qiuwch\workspace\uprojects\UE4ArchinteriorsVol2Scene1\ArchinteriorsVol2Scene1.uproject',
r'C:\qiuwch\workspace\uprojects\UE4ArchinteriorsVol2Scene2\ArchinteriorsVol2Scene2.uproject',
r'C:\qiuwch\workspace\uprojects\UE4ArchinteriorsVol2Scene3\ArchinteriorsVol2Scene3.uproject',
r'C:\qiuwch\workspace\uprojects\UE4UrbanCity\UrbanCity.uproject',
r'D:\workspace\uprojects\Matinee\Matinee.uproject',
r'D:\workspace\uprojects\PhotorealisticCharacter\PhotorealisticCharacter2.uproject',
]
linux_uprojects = [
os.path.expanduser('~/workspace/uprojects/UE4RealisticRendering/RealisticRendering.uproject'),
os.path.expanduser('~/workspace/uprojects/UE4ArchinteriorsVol2Scene1/ArchinteriorsVol2Scene1.uproject'),
os.path.expanduser('~/workspace/uprojects/UE4ArchinteriorsVol2Scene2/ArchinteriorsVol2Scene2.uproject'),
os.path.expanduser('~/workspace/uprojects/UE4ArchinteriorsVol2Scene3/ArchinteriorsVol2Scene3.uproject'),
os.path.expanduser("~/workspace/uprojects/UE4UrbanCity/UrbanCity.uproject"),
]
mac_uprojects = [
os.path.expanduser('~/workspace/UnrealEngine/Templates/FP_FirstPerson/FP_FirstPerson.uproject'),
os.path.expanduser('~/uprojects/RealisticRendering/RealisticRendering.uproject'),
os.path.expanduser('~/uprojects/UE4ArchinteriorsVol2Scene1/ArchinteriorsVol2Scene1.uproject'),
os.path.expanduser('~/uprojects/UE4ArchinteriorsVol2Scene2/ArchinteriorsVol2Scene2.uproject'),
os.path.expanduser('~/uprojects/UE4ArchinteriorsVol2Scene3/ArchinteriorsVol2Scene3.uproject'),
os.path.expanduser('~/uprojects/UE4UrbanCity/UrbanCity.uproject'),
]
uprojects = []
for uproject_path in win_uprojects:
uproject_name = os.path.basename(uproject_path).split('.')[0]
uprojects.append(
dict(
uproject_path = uproject_path,
ue4_path = ue4_win,
log_file = 'log/win_%s.log' % uproject_name
),
)
for uproject_path in linux_uprojects:
uproject_name = os.path.basename(uproject_path).split('.')[0]
uprojects.append(
dict(
uproject_path = uproject_path,
ue4_path = ue4_linux,
log_file = 'log/linux_%s.log' % uproject_name
),
)
for uproject_path in mac_uprojects:
uproject_name = os.path.basename(uproject_path).split('.')[0]
uprojects.append(
dict(
uproject_path = uproject_path,
ue4_path = ue4_mac,
log_file = 'log/mac_%s.log' % uproject_name
),
)
if __name__ == '__main__':
for uproject in uprojects:
uproject_path = uproject['uproject_path']
if not os.path.isfile(uproject_path):
print("Can not find uproject file %s, skip this project" % uproject_path)
continue
cmd = [
'python', 'build.py',
'--UE4', uproject['ue4_path'],
# '--output', uproject['output_folder'],
uproject['uproject_path']
]
print(cmd)
subprocess.call(cmd,
stdout = open(uproject['log_file'], 'w'))
with open(uproject['log_file']) as f:
lines = f.readlines()
print(''.join(lines[-10:])) # Print the last few lines
|
7262
|
from .system import *
from .colours import *
class InputSystem(System):
def init(self):
self.key = 'input'
def setRequirements(self):
self.requiredComponents = ['input']
def updateEntity(self, entity, scene):
# don't allow input during a cutscene
if scene.cutscene is not None:
return
# run the stored input context
if entity.getComponent('input').inputContext is not None:
entity.getComponent('input').inputContext(entity)
|
7272
|
def sysrc(value):
"""Call sysrc.
CLI Example:
.. code-block:: bash
salt '*' freebsd_common.sysrc sshd_enable=YES
salt '*' freebsd_common.sysrc static_routes
"""
return __salt__['cmd.run_all']("sysrc %s" % value)
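# Note: cmd.run_all returns a dict with the keys 'pid', 'retcode', 'stdout' and
# 'stderr', so callers can check e.g.
#   __salt__['freebsd_common.sysrc']('sshd_enable=YES')['retcode'] == 0
# to confirm that the setting was applied.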
|
7277
|
import pytest
from fuzz_lightyear.datastore import _ALL_POST_FUZZ_HOOKS_BY_OPERATION
from fuzz_lightyear.datastore import _ALL_POST_FUZZ_HOOKS_BY_TAG
from fuzz_lightyear.datastore import _RERUN_POST_FUZZ_HOOKS_BY_OPERATION
from fuzz_lightyear.datastore import _RERUN_POST_FUZZ_HOOKS_BY_TAG
from fuzz_lightyear.datastore import get_excluded_operations
from fuzz_lightyear.datastore import get_included_tags
from fuzz_lightyear.datastore import get_non_vulnerable_operations
from fuzz_lightyear.datastore import get_user_defined_mapping
from fuzz_lightyear.plugins import get_enabled_plugins
from fuzz_lightyear.request import get_victim_session_factory
from fuzz_lightyear.supplements.abstraction import get_abstraction
@pytest.fixture(autouse=True)
def clear_caches():
get_abstraction.cache_clear()
get_user_defined_mapping.cache_clear()
get_enabled_plugins.cache_clear()
get_victim_session_factory.cache_clear()
get_excluded_operations.cache_clear()
get_non_vulnerable_operations.cache_clear()
get_included_tags.cache_clear()
_ALL_POST_FUZZ_HOOKS_BY_OPERATION.clear()
_ALL_POST_FUZZ_HOOKS_BY_TAG.clear()
_RERUN_POST_FUZZ_HOOKS_BY_OPERATION.clear()
_RERUN_POST_FUZZ_HOOKS_BY_TAG.clear()
@pytest.fixture(autouse=True)
def ignore_hypothesis_non_interactive_example_warning():
"""In theory we're not supposed to use hypothesis'
strategy.example(), but fuzz-lightyear isn't using
hypothesis in a normal way.
"""
import warnings
from hypothesis.errors import NonInteractiveExampleWarning
warnings.filterwarnings(
'ignore',
category=NonInteractiveExampleWarning,
)
|
7298
|
import sys
import logging
import unittest
from testfixtures import LogCapture
from twisted.python.failure import Failure
from scrapy.utils.log import (failure_to_exc_info, TopLevelFormatter,
LogCounterHandler, StreamLogger)
from scrapy.utils.test import get_crawler
from scrapy.extensions import telnet
class FailureToExcInfoTest(unittest.TestCase):
def test_failure(self):
try:
0 / 0
except ZeroDivisionError:
exc_info = sys.exc_info()
failure = Failure()
self.assertTupleEqual(exc_info, failure_to_exc_info(failure))
def test_non_failure(self):
self.assertIsNone(failure_to_exc_info('test'))
class TopLevelFormatterTest(unittest.TestCase):
def setUp(self):
self.handler = LogCapture()
self.handler.addFilter(TopLevelFormatter(['test']))
def test_top_level_logger(self):
logger = logging.getLogger('test')
with self.handler as log:
logger.warning('test log msg')
log.check(('test', 'WARNING', 'test log msg'))
def test_children_logger(self):
logger = logging.getLogger('test.test1')
with self.handler as log:
logger.warning('test log msg')
log.check(('test', 'WARNING', 'test log msg'))
def test_overlapping_name_logger(self):
logger = logging.getLogger('test2')
with self.handler as log:
logger.warning('test log msg')
log.check(('test2', 'WARNING', 'test log msg'))
def test_different_name_logger(self):
logger = logging.getLogger('different')
with self.handler as log:
logger.warning('test log msg')
log.check(('different', 'WARNING', 'test log msg'))
class LogCounterHandlerTest(unittest.TestCase):
def setUp(self):
settings = {'LOG_LEVEL': 'WARNING'}
if not telnet.TWISTED_CONCH_AVAILABLE:
# disable it to avoid the extra warning
settings['TELNETCONSOLE_ENABLED'] = False
self.logger = logging.getLogger('test')
self.logger.setLevel(logging.NOTSET)
self.logger.propagate = False
self.crawler = get_crawler(settings_dict=settings)
self.handler = LogCounterHandler(self.crawler)
self.logger.addHandler(self.handler)
def tearDown(self):
self.logger.propagate = True
self.logger.removeHandler(self.handler)
def test_init(self):
self.assertIsNone(self.crawler.stats.get_value('log_count/DEBUG'))
self.assertIsNone(self.crawler.stats.get_value('log_count/INFO'))
self.assertIsNone(self.crawler.stats.get_value('log_count/WARNING'))
self.assertIsNone(self.crawler.stats.get_value('log_count/ERROR'))
self.assertIsNone(self.crawler.stats.get_value('log_count/CRITICAL'))
def test_accepted_level(self):
self.logger.error('test log msg')
self.assertEqual(self.crawler.stats.get_value('log_count/ERROR'), 1)
def test_filtered_out_level(self):
self.logger.debug('test log msg')
self.assertIsNone(self.crawler.stats.get_value('log_count/INFO'))
class StreamLoggerTest(unittest.TestCase):
def setUp(self):
self.stdout = sys.stdout
logger = logging.getLogger('test')
logger.setLevel(logging.WARNING)
sys.stdout = StreamLogger(logger, logging.ERROR)
def tearDown(self):
sys.stdout = self.stdout
def test_redirect(self):
with LogCapture() as log:
print('test log msg')
log.check(('test', 'ERROR', 'test log msg'))
|
7305
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.home, name='home'),
url(r'^piechart/', views.demo_piechart, name='demo_piechart'),
url(r'^linechart/', views.demo_linechart, name='demo_linechart'),
url(r'^linechart_without_date/', views.demo_linechart_without_date, name='demo_linechart_without_date'),
url(r'^linewithfocuschart/', views.demo_linewithfocuschart, name='demo_linewithfocuschart'),
url(r'^multibarchart/', views.demo_multibarchart, name='demo_multibarchart'),
url(r'^stackedareachart/', views.demo_stackedareachart, name='demo_stackedareachart'),
url(r'^multibarhorizontalchart/', views.demo_multibarhorizontalchart, name='demo_multibarhorizontalchart'),
url(r'^lineplusbarchart/', views.demo_lineplusbarchart, name='demo_lineplusbarchart'),
url(r'^cumulativelinechart/', views.demo_cumulativelinechart, name='demo_cumulativelinechart'),
url(r'^discretebarchart/', views.demo_discretebarchart, name='demo_discretebarchart'),
url(r'^discretebarchart_with_date/', views.demo_discretebarchart_with_date, name='demo_discretebarchart_date'),
url(r'^scatterchart/', views.demo_scatterchart, name='demo_scatterchart'),
url(r'^linechart_with_ampm/', views.demo_linechart_with_ampm, name='demo_linechart_with_ampm'),
# url(r'^demoproject/', include('demoproject.foo.urls')),
]
|
7317
|
from pydantic import BaseSettings
class Settings(BaseSettings):
deta_project_key: str
settings = Settings()
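# Usage sketch: pydantic's BaseSettings reads the field from the environment
# (matching is case-insensitive), so DETA_PROJECT_KEY must be set before this
# module is imported, e.g.
#
#   DETA_PROJECT_KEY=example-key python -c "import config; print(config.settings.deta_project_key)"
#
# The module name "config" above is only an assumed example.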
|
7338
|
def us_choropleth(t):
import matplotlib.cm
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
from matplotlib.colors import Normalize
import shapefile
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import numpy as np
import random
import pandas as pd
from collections import Counter
plt.title("NER", fontsize=12)
us_locations_map = Basemap(
resolution="l",
llcrnrlon=-128.94,
llcrnrlat=23.52,
urcrnrlon=-60.12,
urcrnrlat=50.93,
lat_0=37.26,
lon_0=-94.53)
us_locations_map.drawmapboundary(
fill_color="#46bcec") # Fills in the oceans
us_locations_map.fillcontinents(
color="#eabc77",
lake_color="#46bcec") # Defines the continents
us_locations_map.drawcoastlines()
fig = matplotlib.pyplot.gcf()
fig.set_size_inches(15.5, 12.5) # Sets the size of the map
# Converts the coordinates to map points
lons, lats = us_locations_map(t["longitude"], t["latitude"])
us_locations_map.scatter(
lons,
lats,
color="black",
zorder=10) # Draws the points on the map
# Labels each point with the location name
for i in range(t.num_rows):
lat_lon = (
t.row(i).item("longitude") + .2,
t.row(i).item("latitude") - .1)
plt.annotate(np.array(t.row(i).item("name")), lat_lon, fontsize=10)
# Here we are reading in a shape file, which places state boundary
# information for our Basemap
us_locations_map.readshapefile(
"data/us_shapefiles/cb_2016_us_state_20m", "us_states")
state_names = []
for shape_dict in us_locations_map.us_states_info:
state_names.append(shape_dict['NAME'])
ax = plt.gca() # get current axes instance
cmap = plt.get_cmap('Reds')
names = []
shapes = []
counts = []
state_counts = Counter(t["state"])
for index, state in enumerate(state_names):
seg = us_locations_map.us_states[index]
poly = Polygon(seg)
names.append(state)
shapes.append(poly)
if state in t['state']:
counts.append(state_counts[state])
else:
counts.append(0)
# Loading our lists into the DataFrame
shape_table = pd.DataFrame()
shape_table["State Name"] = np.array(names)
shape_table["Shapes"] = np.array(shapes)
shape_table["Count"] = np.array(counts)
pc = PatchCollection(shape_table["Shapes"], zorder=2)
norm = Normalize()
pc.set_facecolor(cmap(norm(shape_table['Count'].fillna(0).values)))
pc.set_edgecolor("black")
ax.add_collection(pc)
# Adds colorbar showing the scale
mapper = matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap)
mapper.set_array(shape_table['Count'])
plt.colorbar(mapper, shrink=0.4)
|
7342
|
import os
import argparse
from tqdm import tqdm
import torch
from torch.autograd import Variable
from torch.utils import model_zoo
# http://scikit-learn.org
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
import sys
sys.path.append('.')
import pretrainedmodels
import pretrainedmodels.utils
import pretrainedmodels.datasets
model_names = sorted(name for name in pretrainedmodels.__dict__
if not name.startswith("__")
and name.islower()
and callable(pretrainedmodels.__dict__[name]))
def extract_features_targets(model, features_size, loader, path_data, cuda=False):
if os.path.isfile(path_data):
print('Load features from {}'.format(path_data))
return torch.load(path_data)
print('\nExtract features on {}set'.format(loader.dataset.set))
features = torch.Tensor(len(loader.dataset), features_size)
targets = torch.Tensor(len(loader.dataset), len(loader.dataset.classes))
for batch_id, batch in enumerate(tqdm(loader)):
img = batch[0]
target = batch[2]
current_bsize = img.size(0)
from_ = int(batch_id * loader.batch_size)
to_ = int(from_ + current_bsize)
if cuda:
            img = img.cuda(non_blocking=True)  # 'async' is a reserved keyword in Python 3.7+
input = Variable(img, requires_grad=False)
output = model(input)
features[from_:to_] = output.data.cpu()
targets[from_:to_] = target
os.system('mkdir -p {}'.format(os.path.dirname(path_data)))
print('save ' + path_data)
torch.save((features, targets), path_data)
print('')
return features, targets
def train_multilabel(features, targets, classes, train_split, test_split, C=1.0, ignore_hard_examples=True, after_ReLU=False, normalize_L2=False):
print('\nHyperparameters:\n - C: {}\n - after_ReLU: {}\n - normL2: {}'.format(C, after_ReLU, normalize_L2))
train_APs = []
test_APs = []
for class_id in range(len(classes)):
classifier = SVC(C=C, kernel='linear') # http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html
if ignore_hard_examples:
train_masks = (targets[train_split][:,class_id] != 0).view(-1, 1)
train_features = torch.masked_select(features[train_split], train_masks.expand_as(features[train_split])).view(-1,features[train_split].size(1))
train_targets = torch.masked_select(targets[train_split], train_masks.expand_as(targets[train_split])).view(-1,targets[train_split].size(1))
test_masks = (targets[test_split][:,class_id] != 0).view(-1, 1)
test_features = torch.masked_select(features[test_split], test_masks.expand_as(features[test_split])).view(-1,features[test_split].size(1))
test_targets = torch.masked_select(targets[test_split], test_masks.expand_as(targets[test_split])).view(-1,targets[test_split].size(1))
else:
train_features = features[train_split]
train_targets = targets[train_split]
test_features = features[test_split]
            test_targets = targets[test_split]
if after_ReLU:
train_features[train_features < 0] = 0
test_features[test_features < 0] = 0
if normalize_L2:
train_norm = torch.norm(train_features, p=2, dim=1).unsqueeze(1)
train_features = train_features.div(train_norm.expand_as(train_features))
test_norm = torch.norm(test_features, p=2, dim=1).unsqueeze(1)
test_features = test_features.div(test_norm.expand_as(test_features))
train_X = train_features.numpy()
train_y = (train_targets[:,class_id] != -1).numpy() # uses hard examples if not ignored
test_X = test_features.numpy()
test_y = (test_targets[:,class_id] != -1).numpy()
classifier.fit(train_X, train_y) # train parameters of the classifier
train_preds = classifier.predict(train_X)
train_acc = accuracy_score(train_y, train_preds) * 100
train_AP = average_precision_score(train_y, train_preds) * 100
train_APs.append(train_AP)
test_preds = classifier.predict(test_X)
test_acc = accuracy_score(test_y, test_preds) * 100
test_AP = average_precision_score(test_y, test_preds) * 100
test_APs.append(test_AP)
print('class "{}" ({}/{}):'.format(classes[class_id], test_y.sum(), test_y.shape[0]))
print(' - {:8}: acc {:.2f}, AP {:.2f}'.format(train_split, train_acc, train_AP))
print(' - {:8}: acc {:.2f}, AP {:.2f}'.format(test_split, test_acc, test_AP))
print('all classes:')
print(' - {:8}: mAP {:.4f}'.format(train_split, sum(train_APs)/len(classes)))
print(' - {:8}: mAP {:.4f}'.format(test_split, sum(test_APs)/len(classes)))
##########################################################################
# main
##########################################################################
parser = argparse.ArgumentParser(
description='Train/Evaluate models',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--dir_outputs', default='/tmp/outputs', type=str, help='')
parser.add_argument('--dir_datasets', default='/tmp/datasets', type=str, help='')
parser.add_argument('--C', default=1, type=float, help='')
parser.add_argument('-b', '--batch_size', default=50, type=int, help='')
parser.add_argument('-a', '--arch', default='alexnet', choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: alexnet)')
parser.add_argument('--train_split', default='train', type=str, help='')
parser.add_argument('--test_split', default='val', type=str, help='')
parser.add_argument('--cuda', const=True, nargs='?', type=bool, help='')
def main ():
global args
args = parser.parse_args()
print('\nCUDA status: {}'.format(args.cuda))
print('\nLoad pretrained model on Imagenet')
model = pretrainedmodels.__dict__[args.arch](num_classes=1000, pretrained='imagenet')
model.eval()
if args.cuda:
model.cuda()
features_size = model.last_linear.in_features
model.last_linear = pretrainedmodels.utils.Identity() # Trick to get inputs (features) from last_linear
print('\nLoad datasets')
tf_img = pretrainedmodels.utils.TransformImage(model)
train_set = pretrainedmodels.datasets.Voc2007Classification(args.dir_datasets, 'train', transform=tf_img)
val_set = pretrainedmodels.datasets.Voc2007Classification(args.dir_datasets, 'val', transform=tf_img)
test_set = pretrainedmodels.datasets.Voc2007Classification(args.dir_datasets, 'test', transform=tf_img)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, shuffle=False, num_workers=2)
val_loader = torch.utils.data.DataLoader(val_set, batch_size=args.batch_size, shuffle=False, num_workers=2)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.batch_size, shuffle=False, num_workers=2)
print('\nLoad features')
dir_features = os.path.join(args.dir_outputs, 'data/{}'.format(args.arch))
path_train_data = '{}/{}set.pth'.format(dir_features, 'train')
path_val_data = '{}/{}set.pth'.format(dir_features, 'val')
path_test_data = '{}/{}set.pth'.format(dir_features, 'test')
features = {}
targets = {}
features['train'], targets['train'] = extract_features_targets(model, features_size, train_loader, path_train_data, args.cuda)
features['val'], targets['val'] = extract_features_targets(model, features_size, val_loader, path_val_data, args.cuda)
features['test'], targets['test'] = extract_features_targets(model, features_size, test_loader, path_test_data, args.cuda)
features['trainval'] = torch.cat([features['train'], features['val']], 0)
targets['trainval'] = torch.cat([targets['train'], targets['val']], 0)
print('\nTrain Support Vector Machines')
if args.train_split == 'train' and args.test_split == 'val':
        print('\nHyperparameter search: train multilabel classifiers (one-versus-all) on train/val')
elif args.train_split == 'trainval' and args.test_split == 'test':
print('\nEvaluation: train a multilabel classifier on trainval/test')
else:
raise ValueError('Trying to train on {} and eval on {}'.format(args.train_split, args.test_split))
train_multilabel(features, targets, train_set.classes, args.train_split, args.test_split, C=args.C)
if __name__ == '__main__':
main()
|
7418
|
import threading
from queue import Queue, Empty
from time import sleep
from libAnt.drivers.driver import Driver
from libAnt.message import *
class Network:
def __init__(self, key: bytes = b'\x00' * 8, name: str = None):
self.key = key
self.name = name
self.number = 0
def __str__(self):
return self.name
class Pump(threading.Thread):
    def __init__(self, driver: Driver, initMessages, out: Queue, onSuccess, onFailure):
super().__init__()
self._stopper = threading.Event()
self._driver = driver
self._out = out
self._initMessages = initMessages
self._waiters = []
        self._onSuccess = onSuccess
self._onFailure = onFailure
def stop(self):
self._driver.abort()
self._stopper.set()
def stopped(self):
        return self._stopper.is_set()
def run(self):
while not self.stopped():
try:
with self._driver as d:
# Startup
rst = SystemResetMessage()
self._waiters.append(rst)
d.write(rst)
for m in self._initMessages:
self._waiters.append(m)
d.write(m)
while not self.stopped():
# Write
try:
outMsg = self._out.get(block=False)
self._waiters.append(outMsg)
d.write(outMsg)
except Empty:
pass
# Read
try:
msg = d.read(timeout=1)
if msg.type == MESSAGE_CHANNEL_EVENT:
# This is a response to our outgoing message
for w in self._waiters:
if w.type == msg.content[1]: # ACK
self._waiters.remove(w)
# TODO: Call waiter callback from tuple (waiter, callback)
break
elif msg.type == MESSAGE_CHANNEL_BROADCAST_DATA:
bmsg = BroadcastMessage(msg.type, msg.content).build(msg.content)
self._onSuccess(bmsg)
except Empty:
pass
except Exception as e:
self._onFailure(e)
except:
pass
self._waiters.clear()
sleep(1)
class Node:
def __init__(self, driver: Driver, name: str = None):
self._driver = driver
self._name = name
self._out = Queue()
self._init = []
self._pump = None
self._configMessages = Queue()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
def start(self, onSuccess, onFailure):
if not self.isRunning():
self._pump = Pump(self._driver, self._init, self._out, onSuccess, onFailure)
self._pump.start()
def enableRxScanMode(self, networkKey=ANTPLUS_NETWORK_KEY, channelType=CHANNEL_TYPE_ONEWAY_RECEIVE,
frequency: int = 2457, rxTimestamp: bool = True, rssi: bool = True, channelId: bool = True):
self._init.append(SystemResetMessage())
self._init.append(SetNetworkKeyMessage(0, networkKey))
self._init.append(AssignChannelMessage(0, channelType))
self._init.append(SetChannelIdMessage(0))
self._init.append(SetChannelRfFrequencyMessage(0, frequency))
self._init.append(EnableExtendedMessagesMessage())
self._init.append(LibConfigMessage(rxTimestamp, rssi, channelId))
self._init.append(OpenRxScanModeMessage())
def stop(self):
if self.isRunning():
self._pump.stop()
self._pump.join()
def isRunning(self):
if self._pump is None:
return False
return self._pump.is_alive()
def getCapabilities(self):
pass
|
7427
|
import os
DEBUG = True
DATABASES = {
'default':
{
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '/tmp/piston.db'
}
}
DATABASE_ENGINE = 'sqlite3'
DATABASE_NAME = '/tmp/piston.db'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'piston',
'test_project.apps.testapp',
)
TEMPLATE_DIRS = (
os.path.join(os.path.dirname(__file__), 'templates'),
)
SITE_ID = 1
ROOT_URLCONF = 'test_project.urls'
MIDDLEWARE_CLASSES = (
'piston.middleware.ConditionalMiddlewareCompatProxy',
'django.contrib.sessions.middleware.SessionMiddleware',
'piston.middleware.CommonMiddlewareCompatProxy',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
|
7432
|
from gym_reinmav.envs.mujoco.mujoco_quad import MujocoQuadEnv
from gym_reinmav.envs.mujoco.mujoco_quad_hovering import MujocoQuadHoveringEnv
from gym_reinmav.envs.mujoco.mujoco_quad_quat import MujocoQuadQuaternionEnv
|
7434
|
import unittest
import shutil
from rdyn.alg.RDyn_v2 import RDynV2
class RDynTestCase(unittest.TestCase):
def test_rdyn_simplified(self):
print("1")
rdb = RDynV2(size=500, iterations=100)
rdb.execute(simplified=True)
print("2")
rdb = RDynV2(size=500, iterations=100, max_evts=2)
rdb.execute(simplified=True)
print("3")
rdb = RDynV2(size=500, iterations=100, new_node=0.1, del_node=0.1, max_evts=2, paction=0.8)
rdb.execute(simplified=False)
print("Done")
shutil.rmtree("results")
if __name__ == '__main__':
unittest.main()
|
7446
|
from setuptools import setup
import os
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
#NEWS = open(os.path.join(here, 'NEWS.txt')).read()
rootdir = os.path.dirname(os.path.abspath(__file__))
exec(open(rootdir + '/cerridwen/version.py').read())
version = __VERSION__
setup(name='cerridwen',
version=version,
description='Accurate solar system data for everyone',
long_description=README,
author='<NAME>',
author_email='<EMAIL>',
url='http://cerridwen.bluemagician.vc/',
license='MIT',
classifiers=[
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
"Development Status :: 4 - Beta"
, "Environment :: Console"
, "Intended Audience :: Science/Research"
, "Intended Audience :: Developers"
, "License :: OSI Approved :: MIT License"
, "Operating System :: OS Independent"
, "Programming Language :: Python :: 3"
, "Topic :: Scientific/Engineering :: Astronomy"
, "Topic :: Other/Nonlisted Topic"
, "Topic :: Software Development :: Libraries :: Python Modules"
, "Topic :: Utilities"
],
maintainer='<NAME>',
maintainer_email='<EMAIL>',
packages=['cerridwen'],
requires=['pyswisseph', 'numpy', 'astropy(>=0.4)'],
extras_require={'Flask':['flask']},
entry_points={
'console_scripts':
['cerridwen = cerridwen.cli:main',
'cerridwen-server = cerridwen.api_server:main [Flask]']
})
|
7465
|
import libhustpass.sbDes as sbDes
import libhustpass.captcha as captcha
import requests
import re
import random
def toWideChar(data):
data_bytes = bytes(data, encoding="utf-8")
ret = []
for i in data_bytes:
ret.extend([0, i])
while len(ret) % 8 != 0:
ret.append(0)
return ret
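# Worked example: toWideChar("a") interleaves a zero byte before each UTF-8 byte
# and pads to a multiple of 8, giving [0, 97, 0, 0, 0, 0, 0, 0].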
def Enc(data, first_key, second_key, third_key):
data_bytes = toWideChar(data)
key1_bytes = toWideChar(first_key)
key2_bytes = toWideChar(second_key)
key3_bytes = toWideChar(third_key)
ret_ = []
i = 0
while i < len(data_bytes):
tmp = data_bytes[i : i + 8]
x = 0
y = 0
z = 0
while x < len(key1_bytes):
enc1_ = sbDes.des(key1_bytes[x : x + 8], sbDes.ECB)
tmp = list(enc1_.encrypt(tmp))
x += 8
while y < len(key2_bytes):
enc2_ = sbDes.des(key2_bytes[y : y + 8], sbDes.ECB)
tmp = list(enc2_.encrypt(tmp))
y += 8
while z < len(key3_bytes):
enc3_ = sbDes.des(key3_bytes[z : z + 8], sbDes.ECB)
tmp = list(enc3_.encrypt(tmp))
z += 8
ret_.extend(tmp)
i += 8
ret = ""
for i in ret_:
ret += "%02X" % i
return ret
def login(username, password, url):
r = requests.session()
login_html = r.get(url)
captcha_content = r.get("https://pass.hust.edu.cn/cas/code?"+str(random.random()), stream=True)
captcha_content.raw.decode_content = True
nonce = re.search(
'<input type="hidden" id="lt" name="lt" value="(.*)" />', login_html.text
).group(1)
action = re.search(
'<form id="loginForm" action="(.*)" method="post">', login_html.text
).group(1)
post_params = {
"code": captcha.deCaptcha(captcha_content.raw),
"rsa": Enc(username + password + nonce, "1", "2", "3"),
"ul": len(username),
"pl": len(password),
"lt": nonce,
"execution": "e1s1",
"_eventId": "submit",
}
redirect_html = r.post(
"https://pass.hust.edu.cn" + action, data=post_params, allow_redirects=False
)
try:
return redirect_html.headers["Location"]
except:
raise Exception("login failed")
|
7468
|
from mung.torch_ext.eval import Loss
from ltprg.model.seq import DataParameter, SequenceModelNoInput, SequenceModelInputToHidden, SequenceModelAttendedInput
from ltprg.model.seq import VariableLengthNLLLoss
# Expects config of the form:
# {
# data_parameter : {
# seq : [SEQUENCE PARAMETER NAME]
# input : [INPUT PARAMETER NAME]
# }
# name : [ID FOR MODEL]
# arch_type : [SequenceModelNoInput|SequenceModelInputToHidden]
# dropout : [DROPOUT]
# rnn_layers : [RNN_LAYERS]
# rnn_size : [SIZE OF RNN HIDDEN LAYER]
# embedding_size : [EMBEDDING_SIZE]
# rnn_type : [RNN TYPE]
# (SequenceModelAttendedInput) attn_type : [EMBEDDING|OUTPUT]
# (SequenceModelInputToHidden) conv_input : [INDICATOR OF WHETHER OR NOT TO CONVOLVE THE INPUT]
# (SequenceModelInputToHidden|SequenceModelAttendedInput) conv_kernel : [KERNEL SIZE FOR CONVOLUTION]
# (SequenceModelInputToHidden|SequenceModelAttendedInput) conv_stride : [STRIDE LENGTH FOR CONVOLUTION]
# }
def load_seq_model(config, D, gpu=False):
data_parameter = DataParameter.make(**config["data_parameter"])
seq_field = data_parameter["seq"]
utterance_size = D[seq_field].get_matrix(0).get_feature_set().get_token_count()
dropout = float(config["dropout"])
rnn_layers = int(config["rnn_layers"])
rnn_size = int(config["rnn_size"])
embedding_size = int(config["embedding_size"])
rnn_type = config["rnn_type"]
if config["arch_type"] == "SequenceModelNoInput":
model = SequenceModelNoInput(config["name"], utterance_size, \
embedding_size, rnn_size, rnn_layers, dropout=dropout, rnn_type=rnn_type)
elif config["arch_type"] == "SequenceModelAttendedInput":
input_field = data_parameter["input"]
input_size = D[input_field].get_feature_set().get_token_count()
conv_kernel = int(config["conv_kernel"])
conv_stride = int(config["conv_stride"])
attn_type = "EMBEDDING"
if "attn_type" in config:
attn_type = config["attn_type"]
model = SequenceModelAttendedInput(config["name"], utterance_size, input_size, \
embedding_size, rnn_size, rnn_layers, dropout=dropout, rnn_type=rnn_type, \
conv_kernel=conv_kernel, conv_stride=conv_stride, attn_type=attn_type)
else:
input_field = data_parameter["input"]
input_size = D[input_field].get_feature_set().get_token_count()
conv_input = False
conv_kernel = 1
conv_stride = 1
if "conv_input" in config:
conv_input = bool(int(config["conv_input"]))
conv_kernel = int(config["conv_kernel"])
conv_stride = int(config["conv_stride"])
model = SequenceModelInputToHidden(config["name"], utterance_size, input_size, \
embedding_size, rnn_size, rnn_layers, dropout=dropout, rnn_type=rnn_type, \
conv_input=conv_input, conv_kernel=conv_kernel, conv_stride=conv_stride)
return data_parameter, model
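# Example config (all values are illustrative) matching the schema described in
# the comment block above:
#
#   {
#     "data_parameter": {"seq": "utterance", "input": "world"},
#     "name": "seq0",
#     "arch_type": "SequenceModelInputToHidden",
#     "dropout": 0.5,
#     "rnn_layers": 1,
#     "rnn_size": 100,
#     "embedding_size": 100,
#     "rnn_type": "LSTM",
#     "conv_input": 0,
#     "conv_kernel": 1,
#     "conv_stride": 1
#   }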
# Expects config of the form:
# {
# data_parameter : {
# seq : [SEQUENCE PARAMETER NAME]
# input : [INPUT PARAMETER NAME]
# },
# evaluations : [
# name : [NAME FOR EVALUATION]
# type : (VariableLengthNLLLoss)
# data : [NAME OF DATA SUBSET]
# (Optional) data_size : [SIZE OF RANDOM SUBET OF DATA TO TAKE]
# ]
# }
def load_evaluations(config, D, gpu=False):
data_parameter = DataParameter.make(**config["data_parameter"])
evaluations = []
loss_criterion = VariableLengthNLLLoss(norm_dim=True)
if gpu:
loss_criterion = loss_criterion.cuda()
for eval_config in config["evaluations"]:
data = D[eval_config["data"]]
if "data_size" in eval_config:
data = data.get_random_subset(int(eval_config["data_size"]))
if eval_config["type"] == "VariableLengthNLLLoss":
loss = Loss(eval_config["name"], data, data_parameter, loss_criterion, norm_dim=True)
evaluations.append(loss)
else:
raise ValueError("Invalid seq evaluation type in config (" + str(eval_config["type"]))
return evaluations
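# Example config (names are illustrative) matching the schema described in the
# comment block above:
#
#   {
#     "data_parameter": {"seq": "utterance", "input": "world"},
#     "evaluations": [
#       {"name": "dev_loss", "type": "VariableLengthNLLLoss", "data": "dev", "data_size": 1000}
#     ]
#   }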
|
7478
|
import pbge
from game.content.plotutility import LMSkillsSelfIntro
from game.content import backstory
from pbge.plots import Plot
from pbge.dialogue import Offer, ContextTag
from game.ghdialogue import context
import gears
import game.content.gharchitecture
import game.content.ghterrain
import random
from game import memobrowser
Memo = memobrowser.Memo
# *******************
# *** UTILITIES ***
# *******************
def get_hire_cost(camp, npc):
return (npc.renown * npc.renown * (200 - npc.get_reaction_score(camp.pc, camp)))//10
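# Worked example: with renown 30 and a reaction score of 0 towards the PC,
# the hire cost is (30 * 30 * (200 - 0)) // 10 = 18000.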
# **************************
# *** RANDOM_LANCEMATE ***
# **************************
class UtterlyRandomLancemate(Plot):
LABEL = "RANDOM_LANCEMATE"
def custom_init(self, nart):
npc = gears.selector.random_character(rank=min(random.randint(10, 50),random.randint(10, 50)),
mecha_colors=gears.color.random_mecha_colors(),
local_tags=tuple(self.elements["METROSCENE"].attributes),
combatant=True)
scene = self.seek_element(nart, "LOCALE", self._is_best_scene, scope=self.elements["METROSCENE"])
specialties = [sk for sk in gears.stats.NONCOMBAT_SKILLS if sk in npc.statline]
if random.randint(-12,3) > len(specialties):
npc.statline[random.choice(gears.stats.NONCOMBAT_SKILLS)] += random.randint(1,4)
self.register_element("NPC", npc, dident="LOCALE")
self.add_sub_plot(nart, "RLM_Relationship")
return True
def _is_best_scene(self,nart,candidate):
return isinstance(candidate,pbge.scenes.Scene) and gears.tags.SCENE_PUBLIC in candidate.attributes
class UtterlyGenericLancemate(Plot):
LABEL = "RANDOM_LANCEMATE"
JOBS = ("Mecha Pilot","Arena Pilot","Recon Pilot","Mercenary","Bounty Hunter")
def custom_init(self, nart):
npc = gears.selector.random_character(rank=min(random.randint(10, 50),random.randint(10, 50)),
job=gears.jobs.ALL_JOBS[random.choice(self.JOBS)],
mecha_colors=gears.color.random_mecha_colors(),
local_tags=tuple(self.elements["METROSCENE"].attributes),
combatant=True)
if random.randint(1,20) == 1:
npc.statline[random.choice(gears.stats.NONCOMBAT_SKILLS)] += random.randint(1,4)
scene = self.seek_element(nart, "LOCALE", self._is_best_scene, scope=self.elements["METROSCENE"])
self.register_element("NPC", npc, dident="LOCALE")
self.add_sub_plot(nart, "RLM_Relationship")
return True
def _is_best_scene(self,nart,candidate):
return isinstance(candidate,pbge.scenes.Scene) and gears.tags.SCENE_PUBLIC in candidate.attributes
class GiftedNewbieLancemate(Plot):
# Amazing stats, amazingly crap skills.
LABEL = "RANDOM_LANCEMATE"
JOBS = ("Mecha Pilot","Arena Pilot","Citizen","Explorer","Factory Worker")
UNIQUE = True
def custom_init(self, nart):
npc = gears.selector.random_character(statline=gears.base.Being.random_stats(random.randint(100, 110)),
rank=random.randint(5, 15),
job=gears.jobs.ALL_JOBS[random.choice(self.JOBS)],
mecha_colors=gears.color.random_mecha_colors(),
local_tags=tuple(self.elements["METROSCENE"].attributes),
combatant=True, birth_year=nart.camp.year - random.randint(18,23))
if random.randint(1,10) == 1:
npc.statline[random.choice(gears.stats.NONCOMBAT_SKILLS)] += random.randint(1,4)
scene = self.seek_element(nart, "LOCALE", self._is_best_scene, scope=self.elements["METROSCENE"])
self.register_element("NPC", npc, dident="LOCALE")
self.add_sub_plot(nart, "RLM_Relationship")
return True
def _is_best_scene(self,nart,candidate):
return isinstance(candidate,pbge.scenes.Scene) and gears.tags.SCENE_PUBLIC in candidate.attributes
class OlderMentorLancemate(Plot):
LABEL = "RANDOM_LANCEMATE"
UNIQUE = True
def custom_init(self, nart):
npc = gears.selector.random_character(rank=random.randint(41, 85),
mecha_colors=gears.color.random_mecha_colors(),
local_tags=tuple(self.elements["METROSCENE"].attributes),
combatant=True, birth_year=nart.camp.year - random.randint(32,50))
npc.statline[random.choice(gears.stats.NONCOMBAT_SKILLS)] += random.randint(1, 4)
scene = self.seek_element(nart, "LOCALE", self._is_best_scene, scope=self.elements["METROSCENE"])
self.register_element("NPC", npc, dident="LOCALE")
self.add_sub_plot(nart, "RLM_Relationship")
return True
def _is_best_scene(self,nart,candidate):
return isinstance(candidate,pbge.scenes.Scene) and gears.tags.SCENE_PUBLIC in candidate.attributes
class DeadzonerInGreenZoneLancemate(Plot):
LABEL = "RANDOM_LANCEMATE"
JOBS = ("Mercenary","Bandit","Scavenger","Aristo","Tekno","Sheriff")
UNIQUE = True
@classmethod
def matches( self, pstate ):
"""Returns True if this plot matches the current plot state."""
return gears.personality.GreenZone in pstate.elements["METROSCENE"].attributes
def custom_init(self, nart):
npc = gears.selector.random_character(rank=min(random.randint(20, 55),random.randint(20, 55)),
job=gears.jobs.ALL_JOBS[random.choice(self.JOBS)],
mecha_colors=gears.color.random_mecha_colors(),
local_tags=(gears.personality.DeadZone,),
combatant=True)
scene = self.seek_element(nart, "LOCALE", self._is_best_scene, scope=self.elements["METROSCENE"])
self.register_element("NPC", npc, dident="LOCALE")
self.add_sub_plot(nart, "RLM_Relationship")
return True
def _is_best_scene(self,nart,candidate):
return isinstance(candidate,pbge.scenes.Scene) and gears.tags.SCENE_PUBLIC in candidate.attributes
class GladiatorLancemate(Plot):
LABEL = "RANDOM_LANCEMATE"
UNIQUE = True
@classmethod
def matches( self, pstate ):
"""Returns True if this plot matches the current plot state."""
return gears.personality.DeadZone in pstate.elements["METROSCENE"].attributes
def custom_init(self, nart):
npc = gears.selector.random_character(rank=min(random.randint(25, 65),random.randint(25, 65)),
can_cyberize=True,
job=gears.jobs.ALL_JOBS["Gladiator"],
mecha_colors=gears.color.random_mecha_colors(),
local_tags=(gears.personality.DeadZone,),
combatant=True)
scene = self.seek_element(nart, "LOCALE", self._is_best_scene, scope=self.elements["METROSCENE"])
self.register_element("NPC", npc, dident="LOCALE")
self.add_sub_plot(nart, "RLM_Relationship")
return True
def _is_best_scene(self,nart,candidate: gears.GearHeadScene):
return isinstance(candidate,pbge.scenes.Scene) and gears.tags.SCENE_PUBLIC in candidate.attributes
class MutantLancemate(Plot):
LABEL = "RANDOM_LANCEMATE"
UNIQUE = True
@classmethod
def matches( self, pstate ):
"""Returns True if this plot matches the current plot state."""
return {gears.personality.GreenZone,gears.personality.DeadZone}.intersection(pstate.elements["METROSCENE"].attributes)
def custom_init(self, nart):
npc = gears.selector.random_character(rank=random.randint(20, 45),
mecha_colors=gears.color.random_mecha_colors(),
local_tags=tuple(self.elements["METROSCENE"].attributes),
combatant=True)
scene = self.seek_element(nart, "LOCALE", self._is_best_scene, scope=self.elements["METROSCENE"])
mutation = random.choice(gears.personality.MUTATIONS)
mutation.apply(npc)
npc.personality.add(mutation)
specialties = [sk for sk in gears.stats.NONCOMBAT_SKILLS if sk in npc.statline]
if random.randint(-12,3) > len(specialties):
npc.statline[random.choice(gears.stats.NONCOMBAT_SKILLS)] += random.randint(1,4)
self.register_element("NPC", npc, dident="LOCALE")
self.add_sub_plot(nart, "RLM_Relationship")
return True
def _is_best_scene(self,nart,candidate):
return isinstance(candidate, pbge.scenes.Scene) and gears.tags.SCENE_PUBLIC in candidate.attributes
class FormerLancemateReturns(Plot):
LABEL = "RANDOM_LANCEMATE"
active = True
scope = "METRO"
def custom_init(self, nart):
npc: gears.base.Character = nart.camp.egg.seek_dramatis_person(nart.camp, self._is_good_npc, self)
if npc:
scene = self.seek_element(nart, "LOCALE", self._is_best_scene, scope=self.elements["METROSCENE"])
self.register_element("NPC", npc, dident="LOCALE")
#print(npc,scene)
self.bs = backstory.Backstory(("LONGTIMENOSEE",),keywords=[t.name.upper() for t in npc.get_tags()])
return npc
def _is_good_npc(self,nart,candidate):
return isinstance(candidate, gears.base.Character) and candidate.relationship and gears.relationships.RT_LANCEMATE in candidate.relationship.tags
def _is_best_scene(self,nart,candidate):
return isinstance(candidate,gears.GearHeadScene) and gears.tags.SCENE_PUBLIC in candidate.attributes
def _get_dialogue_grammar(self, npc, camp):
mygram = dict()
if npc is self.elements["NPC"]:
for k in self.bs.results.keys():
mygram[k] = [self.bs.get_one(k),]
else:
mygram["[News]"] = ["{NPC} has been hanging out at {LOCALE}".format(**self.elements), ]
return mygram
def NPC_offers(self, camp):
mylist = list()
mylist.append(Offer("[INFO_PERSONAL]",
context=ContextTag([context.PERSONAL]),
no_repeats=True, effect=self.end_plot))
return mylist
def t_START(self, camp):
if self.elements["NPC"] in camp.party:
self.end_plot(camp)
# **************************
# *** RLM_Relationship ***
# **************************
# Elements:
# NPC: The NPC who needs a personality
# METROSCENE: The city or whatever that the NPC calls home
#
# These subplots provide a personality for a random (potential) lancemate,
# and also include a means for the lancemate to gain the "RT_LANCEMATE" tag.
class RLM_Beginner(Plot):
LABEL = "RLM_Relationship"
active = True
scope = True
UNIQUE = True
@classmethod
def matches( self, pstate ):
"""Returns True if this plot matches the current plot state."""
return pstate.elements["NPC"].renown < 25
def custom_init(self, nart):
npc = self.elements["NPC"]
npc.relationship = gears.relationships.Relationship(attitude=gears.relationships.A_JUNIOR)
# This character gets fewer mecha points.
npc.relationship.data["mecha_level_bonus"] = -10
self._got_rumor = False
return True
def NPC_offers(self, camp):
mylist = list()
npc = self.elements["NPC"]
if gears.relationships.RT_LANCEMATE not in npc.relationship.tags:
if camp.can_add_lancemate():
mylist.append(Offer("I can't believe you asked me... [LETSGO]",
context=ContextTag((context.JOIN,)),
effect=self._join_lance
))
mylist.append(Offer(
"[HELLO] Some day I want to become a cavalier like you.", context=ContextTag((context.HELLO,))
))
mylist.append(LMSkillsSelfIntro(npc))
return mylist
def _get_dialogue_grammar(self, npc, camp):
mygram = dict()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"]:
# This is an NPC in Wujung. Give them some news.
mygram["[News]"] = ["{} has dreams of someday becoming a cavalier".format(self.elements["NPC"]), ]
return mygram
def _join_lance(self, camp):
npc = self.elements["NPC"]
npc.relationship.tags.add(gears.relationships.RT_LANCEMATE)
effect = game.content.plotutility.AutoJoiner(npc)
effect(camp)
self.end_plot(camp)
def _get_generic_offers(self, npc, camp):
"""Get any offers that could apply to non-element NPCs."""
goffs = list()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"] and not self._got_rumor:
mynpc = self.elements["NPC"]
goffs.append(Offer(
msg="As far as I know {} usually hangs out at {}.".format(mynpc,mynpc.get_scene()),
context=ContextTag((context.INFO,)), effect=self._get_rumor,
subject=str(mynpc), data={"subject": str(mynpc)}, no_repeats=True
))
return goffs
def _get_rumor(self,camp):
mynpc = self.elements["NPC"]
self._got_rumor = True
self.memo = Memo( "{} dreams of becoming a cavalier.".format(mynpc)
, mynpc.get_scene()
)
class RLM_Friendly(Plot):
LABEL = "RLM_Relationship"
active = True
scope = True
UNIQUE = True
def custom_init(self, nart):
npc = self.elements["NPC"]
npc.relationship = gears.relationships.Relationship(attitude=gears.relationships.A_FRIENDLY)
self._got_rumor = False
return True
def NPC_offers(self, camp):
mylist = list()
npc = self.elements["NPC"]
if gears.relationships.RT_LANCEMATE not in npc.relationship.tags:
if camp.can_add_lancemate() and npc.get_reaction_score(camp.pc, camp) > 0:
mylist.append(Offer("[THANKS_FOR_CHOOSING_ME] [LETSGO]",
context=ContextTag((context.JOIN,)),
effect=self._join_lance
))
mylist.append(Offer(
"[HELLO] [WAITINGFORMISSION]", context=ContextTag((context.HELLO,))
))
mylist.append(LMSkillsSelfIntro(npc))
return mylist
def _join_lance(self, camp):
npc = self.elements["NPC"]
npc.relationship.tags.add(gears.relationships.RT_LANCEMATE)
effect = game.content.plotutility.AutoJoiner(npc)
effect(camp)
self.end_plot(camp)
def _get_dialogue_grammar(self, npc, camp):
mygram = dict()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"] and not self._got_rumor:
# This is an NPC in Wujung. Give them some news.
mygram["[News]"] = ["{} is looking for a lance to join".format(self.elements["NPC"]), ]
return mygram
def _get_generic_offers(self, npc, camp):
"""Get any offers that could apply to non-element NPCs."""
goffs = list()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"] and not self._got_rumor:
mynpc = self.elements["NPC"]
goffs.append(Offer(
msg="You can usually find {} at {}, if you're planning to invite {} to join your lance.".format(mynpc,mynpc.get_scene(),mynpc.gender.object_pronoun),
context=ContextTag((context.INFO,)), effect=self._get_rumor,
subject=str(mynpc), data={"subject": str(mynpc)}, no_repeats=True
))
return goffs
def _get_rumor(self,camp):
mynpc = self.elements["NPC"]
self._got_rumor = True
self.memo = Memo( "{} is looking for a lance to join.".format(mynpc)
, mynpc.get_scene()
)
class RLM_Medic(Plot):
LABEL = "RLM_Relationship"
active = True
scope = True
UNIQUE = True
VIRTUES = (gears.personality.Peace,gears.personality.Fellowship)
@classmethod
def matches( self, pstate ):
"""Returns True if this plot matches the current plot state."""
return pstate.elements["NPC"].job and gears.tags.Medic in pstate.elements["NPC"].job.tags
def custom_init(self, nart):
npc = self.elements["NPC"]
npc.relationship = gears.relationships.Relationship(expectation=gears.relationships.E_GREATERGOOD)
new_virtue = random.choice(self.VIRTUES)
if new_virtue not in npc.personality:
npc.personality.add(new_virtue)
return True
def NPC_offers(self, camp):
mylist = list()
npc = self.elements["NPC"]
if gears.relationships.RT_LANCEMATE not in npc.relationship.tags:
if camp.can_add_lancemate():
mylist.append(Offer("[THANKS_FOR_CHOOSING_ME] [LETSGO]",
context=ContextTag((context.JOIN,)),
effect=self._join_lance
))
else:
mylist.append(Offer("You've got a full crew right now, but if you ever find yourself in need of a qualified medic come back and find me.",
context=ContextTag((context.JOIN,)),
effect=self._defer_join
))
mylist.append(Offer(
"[HELLO] Lately I've been spending too much time here, when I'd rather be out in the danger zone saving lives.", context=ContextTag((context.HELLO,))
))
mylist.append(LMSkillsSelfIntro(npc))
return mylist
def _get_dialogue_grammar(self, npc, camp):
mygram = dict()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"]:
# This is an NPC in Wujung. Give them some news.
mygram["[News]"] = ["{} wants to leave {} so {} can make a positive difference in the world".format(self.elements["NPC"],self.elements["NPC"].get_scene(),self.elements["NPC"].gender.subject_pronoun), ]
return mygram
def _join_lance(self, camp):
npc = self.elements["NPC"]
npc.relationship.tags.add(gears.relationships.RT_LANCEMATE)
effect = game.content.plotutility.AutoJoiner(npc)
effect(camp)
self.end_plot(camp)
def _defer_join(self, camp):
npc = self.elements["NPC"]
npc.relationship.tags.add(gears.relationships.RT_LANCEMATE)
self.end_plot(camp)
class RLM_Mercenary(Plot):
LABEL = "RLM_Relationship"
active = True
scope = True
UNIQUE = True
@classmethod
def matches( self, pstate ):
"""Returns True if this plot matches the current plot state."""
return pstate.elements["NPC"].job and {gears.tags.Adventurer,gears.tags.Military}.intersection(pstate.elements["NPC"].job.tags)
def custom_init(self, nart):
npc = self.elements["NPC"]
npc.relationship = gears.relationships.Relationship(expectation=gears.relationships.E_MERCENARY)
# This character gets extra mecha points, showing their good investment sense.
npc.relationship.data["mecha_level_bonus"] = 10
self._got_rumor = False
return True
def NPC_offers(self, camp):
mylist = list()
npc = self.elements["NPC"]
self.hire_cost = get_hire_cost(camp,npc)
if gears.relationships.RT_LANCEMATE not in npc.relationship.tags:
if camp.can_add_lancemate():
mylist.append(Offer("I'll join your lance for a mere ${}. [DOYOUACCEPTMYOFFER]".format(self.hire_cost),
context=ContextTag((context.PROPOSAL, context.JOIN)),
data={"subject": "joining my lance"},
subject=self, subject_start=True,
))
mylist.append(Offer("[DENY_JOIN] [GOODBYE]",
context=ContextTag((context.DENY, context.JOIN)), subject=self
))
if camp.credits >= self.hire_cost:
mylist.append(Offer("[THANKS_FOR_CHOOSING_ME] [LETSGO]",
context=ContextTag((context.ACCEPT, context.JOIN)), subject=self,
effect=self._join_lance
))
mylist.append(Offer(
"[HELLO] I am a mercenary pilot, looking for my next contract.", context=ContextTag((context.HELLO,))
))
mylist.append(LMSkillsSelfIntro(npc))
return mylist
def _get_dialogue_grammar(self, npc, camp):
mygram = dict()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"]:
# This is an NPC in Wujung. Give them some news.
mygram["[News]"] = ["{} is hoping to make some quick cash".format(self.elements["NPC"]), ]
return mygram
def _join_lance(self, camp):
npc = self.elements["NPC"]
npc.relationship.tags.add(gears.relationships.RT_LANCEMATE)
camp.credits -= self.hire_cost
effect = game.content.plotutility.AutoJoiner(npc)
effect(camp)
self.end_plot(camp)
def _get_generic_offers(self, npc, camp):
"""Get any offers that could apply to non-element NPCs."""
goffs = list()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"] and not self._got_rumor:
mynpc = self.elements["NPC"]
goffs.append(Offer(
msg="As far as I know {} can usually be found at {}.".format(mynpc,mynpc.get_scene()),
context=ContextTag((context.INFO,)), effect=self._get_rumor,
subject=str(mynpc), data={"subject": str(mynpc)}, no_repeats=True
))
return goffs
def _get_rumor(self,camp):
mynpc = self.elements["NPC"]
self._got_rumor = True
self.memo = Memo("{} is a mercenary pilot looking for a job.".format(mynpc)
, mynpc.get_scene()
)
class RLM_Professional(Plot):
LABEL = "RLM_Relationship"
active = True
scope = True
UNIQUE = True
@classmethod
def matches( self, pstate ):
"""Returns True if this plot matches the current plot state."""
return pstate.elements["NPC"].renown > 20
def custom_init(self, nart):
npc = self.elements["NPC"]
npc.relationship = gears.relationships.Relationship(expectation=gears.relationships.E_PROFESSIONAL)
# This character gets 10 extra stat points, showing their elite nature.
npc.roll_stats(10, clear_first=False)
self._got_rumor = False
return True
def NPC_offers(self, camp):
mylist = list()
npc = self.elements["NPC"]
self.hire_cost = get_hire_cost(camp,npc)
if gears.relationships.RT_LANCEMATE not in npc.relationship.tags:
if camp.can_add_lancemate():
mylist.append(Offer(
"[NOEXPOSURE] I think ${} is a fair signing price. [DOYOUACCEPTMYOFFER]".format(self.hire_cost),
context=ContextTag((context.PROPOSAL, context.JOIN)), data={"subject": "joining my lance"},
subject=self, subject_start=True,
))
mylist.append(Offer("[DENY_JOIN] [GOODBYE]",
context=ContextTag((context.DENY, context.JOIN)), subject=self
))
if camp.credits >= self.hire_cost:
mylist.append(Offer("[THANKS_FOR_CHOOSING_ME] [LETSGO]",
context=ContextTag((context.ACCEPT, context.JOIN)), subject=self,
effect=self._join_lance
))
mylist.append(Offer(
"[HELLO] I see you are also a cavalier.", context=ContextTag((context.HELLO,))
))
mylist.append(LMSkillsSelfIntro(npc))
return mylist
def _get_dialogue_grammar(self, npc, camp):
mygram = dict()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"]:
# This is an NPC in Wujung. Give them some news.
mygram["[News]"] = ["{} is an experienced pilot looking for work".format(self.elements["NPC"]), ]
return mygram
def _join_lance(self, camp):
npc = self.elements["NPC"]
npc.relationship.tags.add(gears.relationships.RT_LANCEMATE)
camp.credits -= self.hire_cost
effect = game.content.plotutility.AutoJoiner(npc)
effect(camp)
self.end_plot(camp)
def _get_generic_offers(self, npc, camp):
"""Get any offers that could apply to non-element NPCs."""
goffs = list()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"] and not self._got_rumor:
mynpc = self.elements["NPC"]
goffs.append(Offer(
msg="You can usually find {} at {}. Bring cash if you're planning to hire {}.".format(mynpc,mynpc.get_scene(),mynpc.gender.object_pronoun),
context=ContextTag((context.INFO,)), effect=self._get_rumor,
subject=str(mynpc), data={"subject": str(mynpc)}, no_repeats=True
))
return goffs
def _get_rumor(self,camp):
mynpc = self.elements["NPC"]
self._got_rumor = True
self.memo = Memo( "{} is an experienced pilot looking for work.".format(mynpc)
, mynpc.get_scene()
)
class RLM_RatherGeneric(Plot):
LABEL = "RLM_Relationship"
active = True
scope = True
def custom_init(self, nart):
npc = self.elements["NPC"]
npc.relationship = gears.relationships.Relationship()
self._got_rumor = False
return True
def NPC_offers(self, camp):
mylist = list()
npc = self.elements["NPC"]
self.hire_cost = get_hire_cost(camp,npc)
if gears.relationships.RT_LANCEMATE not in npc.relationship.tags:
if camp.can_add_lancemate():
if npc.get_reaction_score(camp.pc, camp) > 60:
mylist.append(Offer("[IWOULDLOVETO] [THANKS_FOR_CHOOSING_ME]",
context=ContextTag((context.PROPOSAL, context.JOIN)),
data={"subject": "joining my lance"},
effect=self._join_lance
))
else:
mylist.append(Offer("My regular signing rate is ${}. [DOYOUACCEPTMYOFFER]".format(self.hire_cost),
context=ContextTag((context.PROPOSAL, context.JOIN)),
data={"subject": "joining my lance"},
subject=self, subject_start=True,
))
mylist.append(Offer("[DENY_JOIN] [GOODBYE]",
context=ContextTag((context.DENY, context.JOIN)), subject=self
))
if camp.credits >= self.hire_cost:
mylist.append(Offer("[THANKS_FOR_CHOOSING_ME] [LETSGO]",
context=ContextTag((context.ACCEPT, context.JOIN)), subject=self,
effect=self._pay_to_join
))
mylist.append(Offer(
"[HELLO] [WAITINGFORMISSION]", context=ContextTag((context.HELLO,))
))
else:
mylist.append(Offer(
"[HELLO] Must be nice going off, having adventures with your lancemates. I'd like to do that again someday.", context=ContextTag((context.HELLO,))
))
mylist.append(LMSkillsSelfIntro(npc))
return mylist
def _get_dialogue_grammar(self, npc, camp):
mygram = dict()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"]:
mygram["[News]"] = ["{} is looking for a new lance to join".format(self.elements["NPC"]), ]
return mygram
def _pay_to_join(self,camp):
camp.credits -= self.hire_cost
self._join_lance(camp)
def _join_lance(self, camp):
npc = self.elements["NPC"]
npc.relationship.tags.add(gears.relationships.RT_LANCEMATE)
effect = game.content.plotutility.AutoJoiner(npc)
effect(camp)
self.end_plot(camp)
def _get_generic_offers(self, npc, camp):
"""Get any offers that could apply to non-element NPCs."""
goffs = list()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"] and not self._got_rumor:
mynpc = self.elements["NPC"]
goffs.append(Offer(
msg="You can find {} at {}.".format(mynpc,mynpc.get_scene()),
context=ContextTag((context.INFO,)), effect=self._get_rumor,
subject=str(mynpc), data={"subject": str(mynpc)}, no_repeats=True
))
return goffs
def _get_rumor(self,camp):
mynpc = self.elements["NPC"]
self._got_rumor = True
self.memo = Memo("{} is looking for a new lance.".format(mynpc)
, mynpc.get_scene()
)
class RLM_DamagedGoodsSale(Plot):
LABEL = "RLM_Relationship"
active = True
scope = True
UNIQUE = True
def custom_init(self, nart):
npc = self.elements["NPC"]
npc.relationship = gears.relationships.Relationship(expectation=gears.relationships.E_IMPROVER)
# This NPC gets a stat bonus but a crappy mech to show their history.
npc.relationship.data["mecha_level_bonus"] = -15
npc.roll_stats(5, clear_first=False)
self._got_rumor = False
return True
def NPC_offers(self, camp):
mylist = list()
npc = self.elements["NPC"]
self.hire_cost = get_hire_cost(camp,npc)//2
if gears.relationships.RT_LANCEMATE not in npc.relationship.tags:
if camp.can_add_lancemate():
if npc.get_reaction_score(camp.pc, camp) > 20:
mylist.append(Offer("[IWOULDLOVETO] I'll do my best to not let you down.",
context=ContextTag((context.PROPOSAL, context.JOIN)),
data={"subject": "joining my lance"},
effect=self._join_lance
))
else:
mylist.append(Offer("I'll sign up with you for just ${}. [DOYOUACCEPTMYOFFER]".format(self.hire_cost),
context=ContextTag((context.PROPOSAL, context.JOIN)),
data={"subject": "joining my lance"},
subject=self, subject_start=True,
))
mylist.append(Offer("[DENY_JOIN] [GOODBYE]",
context=ContextTag((context.DENY, context.JOIN)), subject=self
))
if camp.credits >= self.hire_cost:
mylist.append(Offer("[THANKS_FOR_CHOOSING_ME] I'll do my best to not let you down.",
context=ContextTag((context.ACCEPT, context.JOIN)), subject=self,
effect=self._pay_to_join
))
mylist.append(Offer(
"[HELLO] The life of a cavalier is full of ups and downs... right now I'm in one of those downs.", context=ContextTag((context.HELLO,))
))
else:
mylist.append(Offer(
"[HELLO] Be careful out there... all it takes is one little mistake to cost you everything.", context=ContextTag((context.HELLO,))
))
mylist.append(LMSkillsSelfIntro(npc))
return mylist
def _get_dialogue_grammar(self, npc, camp):
mygram = dict()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"]:
mygram["[News]"] = ["{NPC} is a down on {NPC.gender.possessive_determiner} luck cavalier looking for another chance".format(**self.elements), ]
return mygram
def _pay_to_join(self,camp):
camp.credits -= self.hire_cost
self._join_lance(camp)
def _join_lance(self, camp):
npc = self.elements["NPC"]
npc.relationship.tags.add(gears.relationships.RT_LANCEMATE)
effect = game.content.plotutility.AutoJoiner(npc)
effect(camp)
self.end_plot(camp)
def _get_generic_offers(self, npc, camp):
"""Get any offers that could apply to non-element NPCs."""
goffs = list()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"] and not self._got_rumor:
mynpc = self.elements["NPC"]
goffs.append(Offer(
msg="You can find {} at {}. Don't say that you weren't warned.".format(mynpc,mynpc.get_scene()),
context=ContextTag((context.INFO,)), effect=self._get_rumor,
subject=str(mynpc), data={"subject": str(mynpc)}, no_repeats=True
))
return goffs
def _get_rumor(self,camp):
mynpc = self.elements["NPC"]
self._got_rumor = True
self.memo = Memo( "{} is looking for a new lance.".format(mynpc)
, mynpc.get_scene()
)
|
7497
|
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.models import OrganizationMember, OrganizationMemberTeam, Team
from sentry.testutils import TestCase, PermissionTestCase
class CreateTeamPermissionTest(PermissionTestCase):
def setUp(self):
super(CreateTeamPermissionTest, self).setUp()
self.path = reverse('sentry-create-team', args=[self.organization.slug])
def test_teamless_admin_can_load(self):
self.assert_teamless_admin_can_access(self.path)
def test_team_admin_can_load(self):
self.assert_team_admin_can_access(self.path)
def test_member_cannot_load(self):
self.assert_member_cannot_access(self.path)
def test_owner_can_load(self):
self.assert_owner_can_access(self.path)
class CreateTeamTest(TestCase):
def test_renders_with_context(self):
organization = self.create_organization()
path = reverse('sentry-create-team', args=[organization.slug])
self.login_as(self.user)
resp = self.client.get(path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/create-team.html')
assert resp.context['organization'] == organization
assert resp.context['form']
def test_submission(self):
organization = self.create_organization()
path = reverse('sentry-create-team', args=[organization.slug])
self.login_as(self.user)
resp = self.client.post(path, {
'name': 'bar',
})
assert resp.status_code == 302, resp.context['form'].errors
team = Team.objects.get(organization=organization, name='bar')
member = OrganizationMember.objects.get(
user=self.user,
organization=organization,
)
assert OrganizationMemberTeam.objects.filter(
organizationmember=member,
team=team,
is_active=True,
).exists()
redirect_uri = reverse('sentry-create-project', args=[organization.slug])
assert resp['Location'] == 'http://testserver%s?team=%s' % (
redirect_uri, team.slug)
def test_admin_can_create_team(self):
organization = self.create_organization()
path = reverse('sentry-create-team', args=[organization.slug])
admin = self.create_user('<EMAIL>')
self.create_member(
organization=organization,
user=admin,
role='admin',
teams=[],
)
self.login_as(admin)
resp = self.client.post(path, {
'name': 'bar',
})
assert resp.status_code == 302, resp.context['form'].errors
assert Team.objects.filter(
organization=organization,
name='bar',
).exists()
|
7504
|
import os
os.makedirs(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'build')), exist_ok=True)
from .chamfer_distance import ChamferDistance
|
7638
|
import pprint
from typing import Optional, List, Tuple, Set, Dict
import numpy as np
from overrides import overrides
from python.handwritten_baseline.pipeline.data.base import Dataset
from python.handwritten_baseline.pipeline.model.feature_extr import DEBUG_EXTR
from python.handwritten_baseline.pipeline.model.feature_extr.base_mixin import FeatureExtractorMixin
class DebugFeatureExtractor(FeatureExtractorMixin):
"""
Returns constant or random feature value for testing purposes.
"""
def __init__(self,
strategy: str,
num_features: int,
use_cache: bool,
features_to_select: Optional[List[str]]):
super(DebugFeatureExtractor, self).__init__(DEBUG_EXTR, use_cache, features_to_select)
self.strategy = strategy
self.num_features = num_features
@overrides
def _transform(self, dataset: Dataset, pairs: List[Tuple[Tuple, Tuple]], unique_mentions: Set[Tuple]):
if self.strategy == "random":
return np.random.normal(0, 1, (len(pairs), self.num_features))
elif self.strategy == "zero":
return np.zeros((len(pairs), self.num_features))
elif self.strategy == "mix":
num_zero_features = self.num_features // 2
print(f"Generating {num_zero_features} zero features and {self.num_features - num_zero_features} random features.")
zero_features = np.zeros((len(pairs), num_zero_features))
random_features = np.random.normal(0, 1, (len(pairs), self.num_features - num_zero_features))
feature_matrix = np.hstack([zero_features, random_features])
np.random.shuffle(np.transpose(feature_matrix))
return feature_matrix
@overrides
def _get_plain_names_of_all_features(self) -> List[str]:
return [str(i) for i in range(self.num_features)]
@classmethod
@overrides
def from_params(cls, config: Dict):
strategy = config.pop("strategy")
num_features = config.pop("num_features")
use_cache = config.pop("use_cache", False)
features_to_select = config.pop("features_to_select", None)
obj = DebugFeatureExtractor(strategy, num_features, use_cache, features_to_select)
if config:
raise ValueError("Leftover configuration: " + pprint.pformat(config))
return obj
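# --- Illustrative usage (added sketch, not part of the original module) ---
# A minimal example of building the extractor from a config dict, assuming only the
# keys consumed by `from_params` above; any leftover key would raise a ValueError.
if __name__ == "__main__":
    extractor = DebugFeatureExtractor.from_params(
        {"strategy": "mix", "num_features": 8, "use_cache": False}
    )
    print(extractor.strategy, extractor.num_features)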
|
7652
|
from CyberSource import *
import os
import json
from importlib.machinery import SourceFileLoader
config_file = os.path.join(os.getcwd(), "data", "Configuration.py")
configuration = SourceFileLoader("module.name", config_file).load_module()
# To delete None values in Input Request Json body
def del_none(d):
for key, value in list(d.items()):
if value is None:
del d[key]
elif isinstance(value, dict):
del_none(value)
return d
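# Example (illustrative): del_none({"a": 1, "b": None, "c": {"d": None}}) returns {"a": 1, "c": {}}.
# Nested dicts are cleaned in place; only keys whose value is exactly None are dropped.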
def customer_match_denied_parties_list():
clientReferenceInformationCode = "verification example"
clientReferenceInformationComments = "Export-basic"
clientReferenceInformationPartnerDeveloperId = "7891234"
clientReferenceInformationPartnerSolutionId = "89012345"
clientReferenceInformationPartner = Riskv1decisionsClientReferenceInformationPartner(
developer_id = clientReferenceInformationPartnerDeveloperId,
solution_id = clientReferenceInformationPartnerSolutionId
)
clientReferenceInformation = Riskv1decisionsClientReferenceInformation(
code = clientReferenceInformationCode,
comments = clientReferenceInformationComments,
partner = clientReferenceInformationPartner.__dict__
)
orderInformationBillToAddress1 = "901 Metro Centre Blvd"
orderInformationBillToAdministrativeArea = "CA"
orderInformationBillToCountry = "US"
orderInformationBillToLocality = "Foster City"
orderInformationBillToPostalCode = "94404"
orderInformationBillToCompanyName = "A & C International Trade, Inc"
orderInformationBillToCompany = Riskv1exportcomplianceinquiriesOrderInformationBillToCompany(
name = orderInformationBillToCompanyName
)
orderInformationBillToFirstName = "ANDREE"
orderInformationBillToLastName = "AGNESE"
orderInformationBillToEmail = "<EMAIL>"
orderInformationBillTo = Riskv1exportcomplianceinquiriesOrderInformationBillTo(
address1 = orderInformationBillToAddress1,
administrative_area = orderInformationBillToAdministrativeArea,
country = orderInformationBillToCountry,
locality = orderInformationBillToLocality,
postal_code = orderInformationBillToPostalCode,
company = orderInformationBillToCompany.__dict__,
first_name = orderInformationBillToFirstName,
last_name = orderInformationBillToLastName,
email = orderInformationBillToEmail
)
orderInformationShipToCountry = "IN"
orderInformationShipToFirstName = "DumbelDore"
orderInformationShipToLastName = "Albus"
orderInformationShipTo = Riskv1exportcomplianceinquiriesOrderInformationShipTo(
country = orderInformationShipToCountry,
first_name = orderInformationShipToFirstName,
last_name = orderInformationShipToLastName
)
orderInformationLineItems = []
orderInformationLineItems1 = Riskv1exportcomplianceinquiriesOrderInformationLineItems(
unit_price = "120.50",
quantity = 3,
product_sku = "123456",
product_name = "Qwe",
product_code = "physical_software"
)
orderInformationLineItems.append(orderInformationLineItems1.__dict__)
orderInformation = Riskv1exportcomplianceinquiriesOrderInformation(
bill_to = orderInformationBillTo.__dict__,
ship_to = orderInformationShipTo.__dict__,
line_items = orderInformationLineItems
)
requestObj = ValidateExportComplianceRequest(
client_reference_information = clientReferenceInformation.__dict__,
order_information = orderInformation.__dict__
)
requestObj = del_none(requestObj.__dict__)
requestObj = json.dumps(requestObj)
try:
config_obj = configuration.Configuration()
client_config = config_obj.get_configuration()
api_instance = VerificationApi(client_config)
return_data, status, body = api_instance.validate_export_compliance(requestObj)
print("\nAPI RESPONSE CODE : ", status)
print("\nAPI RESPONSE BODY : ", body)
return return_data
except Exception as e:
print("\nException when calling VerificationApi->validate_export_compliance: %s\n" % e)
if __name__ == "__main__":
customer_match_denied_parties_list()
|
7669
|
from estimagic.inference.ml_covs import cov_cluster_robust
from estimagic.inference.ml_covs import cov_hessian
from estimagic.inference.ml_covs import cov_jacobian
from estimagic.inference.ml_covs import cov_robust
from estimagic.inference.ml_covs import cov_strata_robust
from estimagic.inference.shared import calculate_inference_quantities
from estimagic.inference.shared import check_is_optimized_and_derivative_case
from estimagic.inference.shared import get_derivative_case
from estimagic.inference.shared import get_internal_first_derivative
from estimagic.inference.shared import transform_covariance
from estimagic.optimization.optimize import maximize
from estimagic.parameters.parameter_conversion import get_derivative_conversion_function
from estimagic.parameters.process_constraints import process_constraints
from estimagic.shared.check_option_dicts import check_numdiff_options
from estimagic.shared.check_option_dicts import check_optimization_options
def estimate_ml(
loglike,
params,
optimize_options,
*,
constraints=None,
logging=False,
log_options=None,
loglike_kwargs=None,
derivative=None,
derivative_kwargs=None,
loglike_and_derivative=None,
loglike_and_derivative_kwargs=None,
numdiff_options=None,
jacobian=None,
jacobian_kwargs=None,
hessian=False,
hessian_kwargs=None,
ci_level=0.95,
n_samples=10_000,
bounds_handling="raise",
design_info=None,
):
"""Do a maximum likelihood (ml) estimation.
This is a high level interface of our lower level functions for maximization,
numerical differentiation and inference. It does the full workflow for maximum
likelihood estimation with just one function call.
While we have good defaults, you can still configure each aspect of each step
via the optional arguments of this function. If you find it easier to do the
"difficult" steps (mainly maximization and calculating numerical derivatives
of a potentially noisy function) separately, you can do so and just provide those
results as ``params``, ``jacobian`` and ``hessian``.
The docstring is aspirational and not all options are supported yet.
Args:
loglike (callable): Likelihood function that takes a params DataFrame (and
potentially other keyword arguments) and returns a dictionary that has at
least the entries "value" (a scalar float) and "contributions" (a 1d numpy
array or pandas Series) with the log likelihood contribution per individual.
params (pd.DataFrame): DataFrame where the "value" column contains the
estimated or start parameters of a likelihood model. See :ref:`params` for
details. If the supplied parameters are estimated parameters, set
optimize_options to False.
optimize_options (dict or False): Keyword arguments that govern the numerical
optimization. Valid entries are all arguments of
:func:`~estimagic.optimization.optimize.minimize` except for criterion,
derivative, criterion_and_derivative and params. If you pass False as
optimize_options you signal that ``params`` are already the optimal
parameters and no numerical optimization is needed.
constraints (list): List with constraint dictionaries.
See .. _link: ../../docs/source/how_to_guides/how_to_use_constraints.ipynb
logging (pathlib.Path, str or False): Path to sqlite3 file (which typically has
the file extension ``.db``. If the file does not exist, it will be created.
The dashboard can only be used when logging is used.
log_options (dict): Additional keyword arguments to configure the logging.
- "fast_logging": A boolean that determines if "unsafe" settings are used
to speed up write processes to the database. This should only be used for
very short running criterion functions where the main purpose of the log
is a real-time dashboard and it would not be catastrophic to get a
corrupted database in case of a sudden system shutdown. If one evaluation
of the criterion function (and gradient if applicable) takes more than
100 ms, the logging overhead is negligible.
- "if_table_exists": (str) One of "extend", "replace", "raise". What to
do if the tables we want to write to already exist. Default "extend".
- "if_database_exists": (str): One of "extend", "replace", "raise". What to
do if the database we want to write to already exists. Default "extend".
loglike_kwargs (dict): Additional keyword arguments for loglike.
derivative (callable): Function takes params and potentially other keyword
arguments and calculates the first derivative of loglike. It can either
return a numpy array or pandas Series/DataFrame with the derivative or
a dictionary with derivatives of each output of loglike. If loglike
returns a dict but derivative does not, it is your responsibility to
make sure that the correct derivative for the numerical optimizers you are
using is returned.
derivative_kwargs (dict): Additional keyword arguments for loglike.
loglike_and_derivative (callable): Return a tuple consisting of the result
of loglike and the result of derivative. Only use this if you can exploit
synergies in the calculation of loglike and derivative.
loglike_and_derivative_kwargs (dict): Additional keyword arguments for
loglike_and_derivative.
numdiff_options (dict): Keyword arguments for the calculation of numerical
derivatives for the calculation of standard errors. See
:ref:`first_derivative` for details.
jacobian (callable or pandas.DataFrame or False): A function that takes
``params`` and potentially other keyword arguments and returns the jacobian
of loglike["contributions"] with respect to the params. Alternatively, you
can pass a pandas.DataFrame with the Jacobian at the optimal parameters.
This is only possible if you pass ``optimize_options=False``. Note that you
only need to pass a Jacobian function if you have a closed form Jacobian but
decided not to return it as part of ``derivative`` (e.g. because you use
a scalar optimizer and can calculate a gradient in a way that is faster
than calculating and summing the Jacobian). If you pass None, a numerical
Jacobian will be calculated. If you pass ``False``, you signal that no
Jacobian should be calculated. Thus, no result that requires the Jacobian
will be calculated.
jacobian_kwargs (dict): Additional keyword arguments for the Jacobian function.
hessian (callable or pd.DataFrame): A function that takes
``params`` and potentially other keyword arguments and returns the Hessian
of loglike["value"] with respect to the params. Alternatively, you
can pass a pandas.DataFrame with the Hessian at the optimal parameters.
This is only possible if you pass ``optimize_options=False``. If you pass
None, a numerical Hessian will be calculated. If you pass ``False``, you
signal that no Hessian should be calculated. Thus, no result that requires
the Hessian will be calculated.
hessian_kwargs (dict): Additional keyword arguments for the Hessian function.
ci_level (float): Confidence level for the calculation of confidence intervals.
The default is 0.95.
n_samples (int): Number of samples used to transform the covariance matrix of
the internal parameter vector into the covariance matrix of the external
parameters. For background information about internal and external params
see :ref:`implementation_of_constraints`. This is only used if you have
specified constraints.
bounds_handling (str): One of "clip", "raise", "ignore". Determines how bounds
are handled. If "clip", confidence intervals are clipped at the bounds.
Standard errors are only adjusted if a sampling step is necessary due to
additional constraints. If "raise" and any lower or upper bound is binding,
we raise an Error. If "ignore", boundary problems are simply ignored.
design_info (pandas.DataFrame): DataFrame with one row per observation that
contains some or all of the variables "psu" (primary sampling unit),
"stratum" and "fpc" (finite population corrector). See
:ref:`robust_likelihood_inference` for details.
Returns:
dict: The estimated parameters, standard errors and covariance matrix of the
parameters.
"""
# ==================================================================================
# Check and process inputs
# ==================================================================================
is_optimized = optimize_options is False
check_optimization_options(
optimize_options,
usage="estimate_ml",
algorithm_mandatory=True,
)
jac_case = get_derivative_case(jacobian)
hess_case = get_derivative_case(hessian)
check_is_optimized_and_derivative_case(is_optimized, jac_case)
check_is_optimized_and_derivative_case(is_optimized, hess_case)
cov_cases = _get_cov_cases(jac_case, hess_case, design_info)
check_numdiff_options(numdiff_options, "estimate_ml")
numdiff_options = {} if numdiff_options in (None, False) else numdiff_options
constraints = [] if constraints is None else constraints
processed_constraints, _ = process_constraints(constraints, params)
# ==================================================================================
# Calculate estimates via maximization (if necessary)
# ==================================================================================
if is_optimized:
estimates = params
else:
opt_res = maximize(
criterion=loglike,
criterion_kwargs=loglike_kwargs,
params=params,
constraints=constraints,
derivative=derivative,
derivative_kwargs=derivative_kwargs,
criterion_and_derivative=loglike_and_derivative,
criterion_and_derivative_kwargs=loglike_and_derivative_kwargs,
logging=logging,
log_options=log_options,
**optimize_options,
)
estimates = opt_res["solution_params"]
# ==================================================================================
# Calculate internal jacobian
# ==================================================================================
deriv_to_internal = get_derivative_conversion_function(
params=params, constraints=constraints
)
if jac_case == "pre-calculated":
int_jac = deriv_to_internal(jacobian)
elif jac_case == "closed-form":
jacobian_kwargs = {} if jacobian_kwargs is None else jacobian_kwargs
_jac = jacobian(estimates, **jacobian_kwargs)
int_jac = deriv_to_internal(_jac)
# switch to "numerical" even if jac_case == "skip" because jac is required for ml.
elif jac_case == "numerical":
options = numdiff_options.copy()
options["key"] = "contributions"
deriv_res = get_internal_first_derivative(
func=loglike,
params=estimates,
constraints=constraints,
func_kwargs=loglike_kwargs,
numdiff_options=options,
)
int_jac = deriv_res["derivative"]
jac_numdiff_info = {k: v for k, v in deriv_res.items() if k != "derivative"}
else:
int_jac = None
# ==================================================================================
# Calculate internal Hessian (most of this is not yet implemented)
# ==================================================================================
if hess_case == "skip":
int_hess = None
elif hess_case == "numerical":
raise NotImplementedError("Numerical Hessian calculation is not yet supported.")
hess_numdiff_info = {}
elif hess_case in ("closed-form", "pre-calculated") and constraints:
raise NotImplementedError(
"Closed-form or pre-calculated Hessians are not yet compatible with "
"constraints."
)
else:
int_hess = hessian(estimates, **hessian_kwargs)
# ==================================================================================
# Calculate all available internal cov types
# ==================================================================================
int_covs = {}
if "jacobian" in cov_cases:
int_covs["cov_jacobian"] = cov_jacobian(int_jac)
if "hessian" in cov_cases:
int_covs["cov_hessian"] = cov_hessian(int_hess)
if "robust" in cov_cases:
int_covs["cov_robust"] = cov_robust(jac=int_jac, hess=int_hess)
if "cluster_robust" in cov_cases:
int_covs["cov_cluster_robust"] = cov_cluster_robust(
jac=int_jac, hess=int_hess, design_info=design_info
)
if "strata_robust" in cov_cases:
int_covs["cov_strata_robust"] = cov_strata_robust(
jac=int_jac, hess=int_hess, design_info=design_info
)
# ==================================================================================
# Calculate all available external covs and summaries
# ==================================================================================
covs = {}
summaries = {}
for case in cov_cases:
cov = transform_covariance(
params=estimates,
internal_cov=int_covs[f"cov_{case}"],
constraints=constraints,
n_samples=n_samples,
bounds_handling=bounds_handling,
)
summary = calculate_inference_quantities(
params=estimates,
free_cov=cov,
ci_level=ci_level,
)
covs[f"cov_{case}"] = cov
summaries[f"summary_{case}"] = summary
# ==================================================================================
# Calculate external jac and hess (if no transforming constraints)
# ==================================================================================
if not processed_constraints:
ext_jac = int_jac
ext_hess = int_hess
else:
ext_jac = "No external Jacobian defined due to constraints."
ext_hess = "No external Hessian defined due to constraints."
# ==================================================================================
# Construct output
# ==================================================================================
out = {
**summaries,
**covs,
"jacobian": ext_jac,
"hessian": ext_hess,
}
if not is_optimized:
out["optimize_res"] = opt_res
if jac_case == "numerical":
out["jacobian_numdiff_info"] = jac_numdiff_info
if hess_case == "numerical":
out["hessian_numdiff_info"] = hess_numdiff_info
return out
def _get_cov_cases(jac_case, hess_case, design_info):
if jac_case == "skip" and hess_case == "skip":
raise ValueError("Jacobian and Hessian cannot both be False.")
elif jac_case == "skip" and hess_case != "skip":
cases = ["hessian"]
elif hess_case == "skip" and jac_case != "skip":
cases = ["jacobian"]
else:
cases = ["jacobian", "hessian", "robust"]
if design_info is not None:
if "psu" in design_info:
cases.append("cluster_robust")
if {"strata", "psu", "fpc"}.issubset(design_info):
cases.append("strata_robust")
return cases
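# ----------------------------------------------------------------------------------
# Illustrative usage sketch (added example, not part of estimagic itself). It assumes
# a simple normal log-likelihood and the "scipy_lbfgsb" algorithm; the parameter names
# ("mean", "sd") and the simulated data are made up for demonstration. In practice you
# would also add bounds so that "sd" stays positive (column names depend on the
# estimagic version).
# ----------------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np
    import pandas as pd

    def normal_loglike(params, y):
        # Log likelihood of a normal model; returns the dict format expected by loglike.
        mean = params.loc["mean", "value"]
        sd = params.loc["sd", "value"]
        contribs = -0.5 * np.log(2 * np.pi * sd ** 2) - (y - mean) ** 2 / (2 * sd ** 2)
        return {"value": contribs.sum(), "contributions": contribs}

    rng = np.random.default_rng(0)
    y = rng.normal(1.0, 2.0, size=500)
    start_params = pd.DataFrame({"value": [0.0, 1.0]}, index=["mean", "sd"])

    res = estimate_ml(
        loglike=normal_loglike,
        params=start_params,
        optimize_options={"algorithm": "scipy_lbfgsb"},
        loglike_kwargs={"y": y},
    )
    # With the default hessian=False, only the Jacobian-based covariance is available.
    print(res["summary_jacobian"])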
|
7670
|
import six
import chainer
import numpy as np
import chainer.links as L
import chainer.functions as F
import nutszebra_chainer
import functools
from collections import defaultdict
class Conv(nutszebra_chainer.Model):
def __init__(self, in_channel, out_channel, filter_size=(3, 3), stride=(1, 1), pad=(1, 1)):
super(Conv, self).__init__(
conv=L.Convolution2D(in_channel, out_channel, filter_size, stride, pad),
)
def weight_initialization(self):
self.conv.W.data = self.weight_relu_initialization(self.conv)
self.conv.b.data = self.bias_initialization(self.conv, constant=0)
def __call__(self, x, train=False):
return self.conv(x)
def count_parameters(self):
return functools.reduce(lambda a, b: a * b, self.conv.W.data.shape)
class Conv_ReLU_BN(nutszebra_chainer.Model):
def __init__(self, in_channel, out_channel, filter_size=(3, 3), stride=(1, 1), pad=(1, 1)):
super(Conv_ReLU_BN, self).__init__(
conv=L.Convolution2D(in_channel, out_channel, filter_size, stride, pad),
bn=L.BatchNormalization(out_channel),
)
def weight_initialization(self):
self.conv.W.data = self.weight_relu_initialization(self.conv)
self.conv.b.data = self.bias_initialization(self.conv, constant=0)
def __call__(self, x, train=False):
return self.bn(F.relu(self.conv(x)), test=not train)
def count_parameters(self):
return functools.reduce(lambda a, b: a * b, self.conv.W.data.shape)
class AppendixA(nutszebra_chainer.Model):
def __init__(self, category_num):
super(AppendixA, self).__init__()
out_channels = [36, 48, 36, 36, 48, 48, 48, 36, 36, 36, 36, 48, 48, 48, 48]
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14
skip_connections = [[0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
filters = [(3, 3), (3, 3), (3, 3), (5, 5), (3, 7), (7, 7), (7, 7), (7, 3), (7, 1), (7, 7), (5, 7), (7, 7), (7, 5), (7, 5), (7, 5)]
modules = []
in_channel = 3
for i in six.moves.range(len(out_channels)):
modules += [('conv{}'.format(i), Conv_ReLU_BN(in_channel, out_channels[i], filters[i], 1, 0))]
in_channel = int(np.sum([out_channels[ii] for ii, s in enumerate(skip_connections) if s[i] == 1])) + out_channels[i]
modules += [('linear', Conv(out_channels[-1], category_num, 1, 1, 0))]
# register layers
[self.add_link(*link) for link in modules]
self.modules = modules
self.category_num = category_num
self.out_channels = out_channels
self.skip_connections = skip_connections
self.filters = filters
self.name = 'appndix_a_{}'.format(category_num)
def weight_initialization(self):
[link.weight_initialization() for _, link in self.modules]
def count_parameters(self):
return int(np.sum([link.count_parameters() for _, link in self.modules]))
@staticmethod
def _zero_pads(x, pad, axis):
if type(x.data) is not np.ndarray:
pad.to_gpu()
return F.concat((x, pad), axis=axis)
@staticmethod
def zero_pads(x, sizes):
batch, channel, height, width = x.data.shape
diff_height = sizes[2] - height
diff_width = sizes[3] - width
# pad along with height
if diff_height >= 1:
pad = chainer.Variable(np.zeros((batch, channel, diff_height, width), dtype=x.dtype), volatile=x.volatile)
x = AppendixA._zero_pads(x, pad, axis=2)
_, _, height, _ = x.data.shape
# pad along with width
if diff_width >= 1:
pad = chainer.Variable(np.zeros((batch, channel, height, diff_width), dtype=x.dtype), volatile=x.volatile)
x = AppendixA._zero_pads(x, pad, axis=3)
return x
    @staticmethod
    def _max(a, b):
        return (max(a[0], b[0]), max(a[1], b[1]), max(a[2], b[2]), max(a[3], b[3]))
@staticmethod
def concatenate(X):
sizes = (0, 0, 0, 0)
for x in X:
sizes = AppendixA._max(sizes, x.data.shape)
X = [AppendixA.zero_pads(x, sizes) for x in X]
return F.concat(X, axis=1)
def __call__(self, x, train=False):
x = [x]
outputs = []
for i in six.moves.range(len(self.out_channels)):
x = self['conv{}'.format(i)](self.concatenate(x), train=train)
outputs.append(x)
x = [outputs[ii] for ii, s in enumerate(self.skip_connections) if s[i] == 1] + [outputs[i]]
x = outputs[-1]
batch, channels, height, width = x.data.shape
x = F.reshape(F.average_pooling_2d(x, (height, width)), (batch, channels, 1, 1))
return F.reshape(self.linear(x, train), (batch, self.category_num))
def calc_loss(self, y, t):
loss = F.softmax_cross_entropy(y, t)
return loss
def accuracy(self, y, t, xp=np):
y.to_cpu()
t.to_cpu()
indices = np.where((t.data == np.argmax(y.data, axis=1)) == True)[0]
accuracy = defaultdict(int)
for i in indices:
accuracy[t.data[i]] += 1
indices = np.where((t.data == np.argmax(y.data, axis=1)) == False)[0]
false_accuracy = defaultdict(int)
false_y = np.argmax(y.data, axis=1)
for i in indices:
false_accuracy[(t.data[i], false_y[i])] += 1
return accuracy, false_accuracy
|
7685
|
from seedwork.application.modules import BusinessModule
from modules.iam.application.services import AuthenticationService
class IdentityAndAccessModule(BusinessModule):
def __init__(self, authentication_service: AuthenticationService):
self.authentication_service = authentication_service
# @staticmethod
# def create(container):
# assert False
# """Factory method for creating a module by using dependencies from a DI container"""
# return IdentityAndAccessModule(
# logger=container.logger(),
# authentication_service=container.authentication_service(),
# )
|
7869
|
import numpy as np
import hexy as hx
def test_get_hex_line():
expected = [
[-3, 3, 0],
[-2, 2, 0],
[-1, 2, -1],
[0, 2, -2],
[1, 1, -2],
]
start = np.array([-3, 3, 0])
end = np.array([1, 1, -2])
print(hx.get_hex_line(start, end))
    print(expected)
    assert np.array_equal(hx.get_hex_line(start, end), expected)
if __name__ == "__main__":
test_get_hex_line()
|
7885
|
import pybullet as p
import pybullet_data
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
from math import sqrt
import random
import time
import math
import cv2
import torch
import os
def random_crop(imgs, out):
"""
args:
imgs: shape (B,C,H,W)
out: output size (e.g. 84)
"""
n, c, h, w = imgs.shape
crop_max = h - out + 1
w1 = np.random.randint(0, crop_max, n)
h1 = np.random.randint(0, crop_max, n)
cropped = np.empty((n, c, out, out), dtype=imgs.dtype)
for i, (img, w11, h11) in enumerate(zip(imgs, w1, h1)):
cropped[i] = img[:, h11:h11 + out, w11:w11 + out]
return cropped
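# Example (illustrative): crop a batch of 4 single-channel 96x96 frames down to 84x84.
#   batch = np.random.rand(4, 1, 96, 96).astype(np.float32)
#   random_crop(batch, 84).shape  # -> (4, 1, 84, 84)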
class KukaReachVisualEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 50
}
kMaxEpisodeSteps = 700
kImageSize = {'width': 96, 'height': 96}
kFinalImageSize = {'width': 84, 'height': 84}
def __init__(self, is_render=False, is_good_view=False):
self.is_render = is_render
self.is_good_view = is_good_view
if self.is_render:
p.connect(p.GUI)
else:
p.connect(p.DIRECT)
self.x_low_obs = 0.2
self.x_high_obs = 0.7
self.y_low_obs = -0.3
self.y_high_obs = 0.3
self.z_low_obs = 0
self.z_high_obs = 0.55
self.x_low_action = -0.4
self.x_high_action = 0.4
self.y_low_action = -0.4
self.y_high_action = 0.4
self.z_low_action = -0.6
self.z_high_action = 0.3
self.step_counter = 0
self.urdf_root_path = pybullet_data.getDataPath()
# lower limits for null space
self.lower_limits = [-.967, -2, -2.96, 0.19, -2.96, -2.09, -3.05]
# upper limits for null space
self.upper_limits = [.967, 2, 2.96, 2.29, 2.96, 2.09, 3.05]
# joint ranges for null space
self.joint_ranges = [5.8, 4, 5.8, 4, 5.8, 4, 6]
# restposes for null space
self.rest_poses = [0, 0, 0, 0.5 * math.pi, 0, -math.pi * 0.5 * 0.66, 0]
# joint damping coefficents
self.joint_damping = [
0.00001, 0.00001, 0.00001, 0.00001, 0.00001, 0.00001, 0.00001
]
self.init_joint_positions = [
0.006418, 0.413184, -0.011401, -1.589317, 0.005379, 1.137684,
-0.006539
]
self.orientation = p.getQuaternionFromEuler(
[0., -math.pi, math.pi / 2.])
self.camera_parameters = {
'width': 960.,
'height': 720,
'fov': 60,
'near': 0.1,
'far': 100.,
'eye_position': [0.59, 0, 0.8],
'target_position': [0.55, 0, 0.05],
'camera_up_vector':
[1, 0, 0], # I really do not know the parameter's effect.
'light_direction': [
0.5, 0, 1
], # the direction is from the light source position to the origin of the world frame.
}
self.view_matrix = p.computeViewMatrixFromYawPitchRoll(
cameraTargetPosition=[0.55, 0, 0.05],
distance=.7,
yaw=90,
pitch=-70,
roll=0,
upAxisIndex=2)
self.projection_matrix = p.computeProjectionMatrixFOV(
fov=self.camera_parameters['fov'],
aspect=self.camera_parameters['width'] /
self.camera_parameters['height'],
nearVal=self.camera_parameters['near'],
farVal=self.camera_parameters['far'])
p.configureDebugVisualizer(lightPosition=[5, 0, 5])
p.resetDebugVisualizerCamera(cameraDistance=1.5,
cameraYaw=0,
cameraPitch=-40,
cameraTargetPosition=[0.55, -0.35, 0.2])
self.action_space = spaces.Box(low=np.array(
[self.x_low_action, self.y_low_action, self.z_low_action]),
high=np.array([
self.x_high_action,
self.y_high_action,
self.z_high_action
]),
dtype=np.float32)
self.observation_space = spaces.Box(low=0, high=1,
shape=(1, self.kFinalImageSize['width'], self.kFinalImageSize['height']))
self.seed()
self.reset()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def reset(self):
self.step_counter = 0
p.resetSimulation()
# p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 0)
self.terminated = False
p.setGravity(0, 0, -10)
        # These white debug lines mark the observation-space boundary, making it easy to see when the arm leaves it.
p.addUserDebugLine(
lineFromXYZ=[self.x_low_obs, self.y_low_obs, 0],
lineToXYZ=[self.x_low_obs, self.y_low_obs, self.z_high_obs])
p.addUserDebugLine(
lineFromXYZ=[self.x_low_obs, self.y_high_obs, 0],
lineToXYZ=[self.x_low_obs, self.y_high_obs, self.z_high_obs])
p.addUserDebugLine(
lineFromXYZ=[self.x_high_obs, self.y_low_obs, 0],
lineToXYZ=[self.x_high_obs, self.y_low_obs, self.z_high_obs])
p.addUserDebugLine(
lineFromXYZ=[self.x_high_obs, self.y_high_obs, 0],
lineToXYZ=[self.x_high_obs, self.y_high_obs, self.z_high_obs])
p.addUserDebugLine(
lineFromXYZ=[self.x_low_obs, self.y_low_obs, self.z_high_obs],
lineToXYZ=[self.x_high_obs, self.y_low_obs, self.z_high_obs])
p.addUserDebugLine(
lineFromXYZ=[self.x_low_obs, self.y_high_obs, self.z_high_obs],
lineToXYZ=[self.x_high_obs, self.y_high_obs, self.z_high_obs])
p.addUserDebugLine(
lineFromXYZ=[self.x_low_obs, self.y_low_obs, self.z_high_obs],
lineToXYZ=[self.x_low_obs, self.y_high_obs, self.z_high_obs])
p.addUserDebugLine(
lineFromXYZ=[self.x_high_obs, self.y_low_obs, self.z_high_obs],
lineToXYZ=[self.x_high_obs, self.y_high_obs, self.z_high_obs])
p.loadURDF(os.path.join(self.urdf_root_path, "plane.urdf"),
basePosition=[0, 0, -0.65])
self.kuka_id = p.loadURDF(os.path.join(self.urdf_root_path,
"kuka_iiwa/model.urdf"),
useFixedBase=True)
table_uid = p.loadURDF(os.path.join(self.urdf_root_path,
"table/table.urdf"),
basePosition=[0.5, 0, -0.65])
p.changeVisualShape(table_uid, -1, rgbaColor=[1, 1, 1, 1])
self.object_id = p.loadURDF(os.path.join(self.urdf_root_path,
"random_urdfs/000/000.urdf"),
basePosition=[
random.uniform(self.x_low_obs,
self.x_high_obs),
random.uniform(self.y_low_obs,
self.y_high_obs), 0.01
])
self.num_joints = p.getNumJoints(self.kuka_id)
for i in range(self.num_joints):
p.resetJointState(
bodyUniqueId=self.kuka_id,
jointIndex=i,
targetValue=self.init_joint_positions[i],
)
self.robot_pos_obs = p.getLinkState(self.kuka_id,
self.num_joints - 1)[4]
p.stepSimulation()
(_, _, px, _,
_) = p.getCameraImage(width=960,
height=960,
viewMatrix=self.view_matrix,
projectionMatrix=self.projection_matrix,
renderer=p.ER_BULLET_HARDWARE_OPENGL)
self.images = px
p.enableJointForceTorqueSensor(bodyUniqueId=self.kuka_id,
jointIndex=self.num_joints - 1,
enableSensor=True)
self.object_pos = p.getBasePositionAndOrientation(self.object_id)[0]
        self.images = self.images[:, :, :3]  # the 4th channel is the alpha channel; we do not need it
return self._process_image(self.images)
def _process_image(self, image):
"""Convert the RGB pic to gray pic and add a channel 1
Args:
image ([type]): [description]
"""
if image is not None:
image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
image = cv2.resize(image, (self.kImageSize['width'], self.kImageSize['height']))[None, :, :] / 255.
return image
else:
return np.zeros((1, self.kImageSize['width'], self.kImageSize['height']))
def step(self, action):
dv = 0.005
dx = action[0] * dv
dy = action[1] * dv
dz = action[2] * dv
self.current_pos = p.getLinkState(self.kuka_id, self.num_joints - 1)[4]
self.new_robot_pos = [
self.current_pos[0] + dx, self.current_pos[1] + dy,
self.current_pos[2] + dz
]
self.robot_joint_positions = p.calculateInverseKinematics(
bodyUniqueId=self.kuka_id,
endEffectorLinkIndex=self.num_joints - 1,
targetPosition=[
self.new_robot_pos[0], self.new_robot_pos[1],
self.new_robot_pos[2]
],
targetOrientation=self.orientation,
jointDamping=self.joint_damping,
)
for i in range(self.num_joints):
p.resetJointState(
bodyUniqueId=self.kuka_id,
jointIndex=i,
targetValue=self.robot_joint_positions[i],
)
p.stepSimulation()
        # If is_good_view was set in the constructor, slow the arm down so its motion is easier to observe.
if self.is_good_view:
time.sleep(0.05)
self.step_counter += 1
return self._reward()
def _reward(self):
        # Note: take element index 4 (the world link frame position); see the pybullet manual for getLinkState's return values.
self.robot_state = p.getLinkState(self.kuka_id, self.num_joints - 1)[4]
self.object_state = np.array(
p.getBasePositionAndOrientation(self.object_id)[0]).astype(
np.float32)
square_dx = (self.robot_state[0] - self.object_state[0]) ** 2
square_dy = (self.robot_state[1] - self.object_state[1]) ** 2
square_dz = (self.robot_state[2] - self.object_state[2]) ** 2
        # Use the distance between the end effector and the object as the basis for the reward.
self.distance = sqrt(square_dx + square_dy + square_dz)
# print(self.distance)
x = self.robot_state[0]
y = self.robot_state[1]
z = self.robot_state[2]
        # If the end effector leaves the observation space, the episode is done and a small penalty is given.
terminated = bool(x < self.x_low_obs or x > self.x_high_obs
or y < self.y_low_obs or y > self.y_high_obs
or z < self.z_low_obs or z > self.z_high_obs)
if terminated:
reward = -0.1
self.terminated = True
        # If the arm idles and fails to reach the object within the maximum number of steps, also give a penalty.
elif self.step_counter > self.kMaxEpisodeSteps:
reward = -0.1
self.terminated = True
elif self.distance < 0.1:
reward = 1
self.terminated = True
else:
reward = 0
self.terminated = False
        info = {'distance': self.distance}
(_, _, px, _,
_) = p.getCameraImage(width=960,
height=960,
viewMatrix=self.view_matrix,
projectionMatrix=self.projection_matrix,
renderer=p.ER_BULLET_HARDWARE_OPENGL)
self.images = px
self.processed_image = self._process_image(self.images)
# self.observation=self.robot_state
self.observation = self.object_state
return self.processed_image, reward, self.terminated, info
def close(self):
p.disconnect()
def _get_force_sensor_value(self):
force_sensor_value = p.getJointState(bodyUniqueId=self.kuka_id,
jointIndex=self.num_joints -
1)[2][2]
        # The first [2] selects jointReactionForces, the second [2] selects Fz.
        # pybullet returns tuples, so we cannot index by name as with a dict;
        # this would be nicer if the API returned a dict instead.
return force_sensor_value
class CustomSkipFrame(gym.Wrapper):
""" Make a 4 frame skip, so the observation space will change to (4,84,84) from (1,84,84)
Args:
gym ([type]): [description]
"""
def __init__(self, env, skip=4):
super(CustomSkipFrame, self).__init__(env)
self.observation_space = spaces.Box(low=0,
high=1,
shape=(skip, self.kFinalImageSize['width'], self.kFinalImageSize['height']))
self.skip = skip
def step(self, action):
total_reward = 0
states = []
        state, reward, done, info = self.env.step(action)
        total_reward += reward
for i in range(self.skip):
if not done:
state, reward, done, info = self.env.step(action)
total_reward += reward
states.append(state)
else:
states.append(state)
states = np.concatenate(states, 0)[None, :, :, :]
        return random_crop(states.astype(np.float32), self.kFinalImageSize['width']), total_reward, done, info
def reset(self):
state = self.env.reset()
states = np.concatenate([state for _ in range(self.skip)],
0)[None, :, :, :]
return random_crop(states.astype(np.float32), self.kFinalImageSize['width'])
if __name__ == '__main__':
    # This block is a baseline: let the arm pick random actions and see what score it can reach
import matplotlib.pyplot as plt
env = KukaReachVisualEnv(is_render=False)
env = CustomSkipFrame(env)
print(env.observation_space.shape)
print(env.action_space.shape)
print(env.action_space.n)
# for _ in range(20):
# action=env.action_space.sample()
# print(action)
# env.step(action)
#
# state = env.reset()
# print(state.shape)
# img = state[0][0]
# plt.imshow(img, cmap='gray')
# plt.show()
|
7949
|
class Solution:
def allPossibleFBT(self, N):
def constr(N):
if N == 1: yield TreeNode(0)
for i in range(1, N, 2):
for l in constr(i):
for r in constr(N - i - 1):
m = TreeNode(0)
m.left = l
m.right = r
yield m
return list(constr(N))
|
8014
|
import hashlib
import unittest
from colicoords.cell import Cell, CellList
from colicoords.preprocess import data_to_cells
from test import testcase
from test.test_functions import load_testdata
class DataTest(testcase.ArrayTestCase):
def setUp(self):
self.data = load_testdata('ds1')
def test_data_slicing(self):
sl1 = self.data[2:5, :, :]
self.assertEqual(sl1.shape, (3, 512, 512))
sl2 = self.data[:, 20:40, 100:200]
self.assertEqual(sl2.shape, (10, 20, 100))
def test_data_copy(self):
m0 = self.data.binary_img.mean()
data_copy = self.data.copy()
self.assertEqual(m0, self.data.binary_img.mean())
data_copy.data_dict['binary'] += 20
self.assertEqual(m0, self.data.binary_img.mean())
self.assertEqual(data_copy.binary_img.mean(), m0 + 20)
def _test_cell_list(self):
#todo check order
print(hashlib.md5(self.data).hexdigest())
cell_list = data_to_cells(self.data, initial_crop=2, cell_frac=0.5, rotate='binary')
print(hashlib.md5(self.data).hexdigest())
cell_list = data_to_cells(self.data, initial_crop=2, cell_frac=0.5, rotate='binary')
print(hashlib.md5(self.data).hexdigest())
d = self.data.copy()
print(d == self.data)
cl = CellList(cell_list)
self.assertEqual(len(cl), 48)
c5 = cl[5]
self.assertIsInstance(c5, Cell)
del cl[5]
self.assertEqual(len(cl), 47)
self.assertTrue(cl[3] in cl)
cl.append(c5)
self.assertTrue(c5 in cl)
vol = cl.volume
self.assertEqual(len(vol), 48)
class CellListTest(testcase.ArrayTestCase):
def setUp(self):
data = load_testdata('ds1')
self.cell_list = data_to_cells(data)
def test_slicing(self):
sliced = self.cell_list[:5]
self.assertIsInstance(sliced, CellList)
if __name__ == '__main__':
unittest.main()
|
8028
|
from unittest import mock
import pytest
from django.http import HttpRequest
from rest_framework.response import Response
from rest_framework.test import APIClient
from drf_viewset_profiler.middleware import LineProfilerViewSetMiddleware
@pytest.fixture
def api_client():
return APIClient()
@pytest.fixture
def mock_http_request():
http_request = HttpRequest()
http_request.method = "GET"
return http_request
@pytest.fixture
def mock_http_response(mock_http_request):
response = Response()
mock_http_request.line_profiler = mock.Mock()
mock_http_request.parser_context = {"view": mock.Mock()}
response.renderer_context = {"request": mock_http_request}
return response
@pytest.fixture
def mock_output_writer(monkeypatch):
mock_output_writer_ = mock.Mock()
monkeypatch.setattr("drf_viewset_profiler.middleware.output_writer.stream", mock_output_writer_)
return mock_output_writer_
@pytest.fixture
def mock_line_profiler_viewset_middleware():
return LineProfilerViewSetMiddleware()
|
8041
|
from django.contrib.messages.constants import DEFAULT_LEVELS
from user_messages.api import get_messages
def messages(request):
"""
Return a lazy 'messages' context variable as well as
'DEFAULT_MESSAGE_LEVELS'.
"""
return {
"messages": get_messages(request=request),
"DEFAULT_MESSAGE_LEVELS": DEFAULT_LEVELS,
}
|
8085
|
import collections
import nltk
import os
from sklearn import (
datasets, model_selection, feature_extraction, linear_model, naive_bayes,
ensemble
)
def extract_features(corpus):
'''Extract TF-IDF features from corpus'''
sa_stop_words = nltk.corpus.stopwords.words("english")
# words that might invert a sentence's meaning
white_list = [
'what', 'but', 'if', 'because', 'as', 'until', 'against',
'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again',
'further', 'then', 'once', 'here', 'there', 'why', 'how', 'all', 'any',
'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own',
'same', 'so', 'than', 'too', 'can', 'will', 'just', 'don', 'should']
# take these out of the standard NLTK stop word list
sa_stop_words = [sw for sw in sa_stop_words if sw not in white_list]
# vectorize means we turn non-numerical data into an array of numbers
count_vectorizer = feature_extraction.text.CountVectorizer(
lowercase=True, # for demonstration, True by default
tokenizer=nltk.word_tokenize, # use the NLTK tokenizer
        min_df=2,  # minimum document frequency: a term must appear in at least 2 documents
ngram_range=(1, 2),
stop_words=sa_stop_words
)
processed_corpus = count_vectorizer.fit_transform(corpus)
processed_corpus = feature_extraction.text.TfidfTransformer().fit_transform(
processed_corpus)
return processed_corpus
data_directory = 'movie_reviews'
movie_sentiment_data = datasets.load_files(data_directory, shuffle=True)
print('{} files loaded.'.format(len(movie_sentiment_data.data)))
print('They contain the following classes: {}.'.format(
movie_sentiment_data.target_names))
movie_tfidf = extract_features(movie_sentiment_data.data)
X_train, X_test, y_train, y_test = model_selection.train_test_split(
movie_tfidf, movie_sentiment_data.target, test_size=0.30, random_state=42)
# similar to nltk.NaiveBayesClassifier.train()
clf1 = linear_model.LogisticRegression()
clf1.fit(X_train, y_train)
print('Logistic Regression performance: {}'.format(clf1.score(X_test, y_test)))
clf2 = linear_model.SGDClassifier()
clf2.fit(X_train, y_train)
print('SGDClassifier performance: {}'.format(clf2.score(X_test, y_test)))
clf3 = naive_bayes.MultinomialNB()
clf3.fit(X_train, y_train)
print('MultinomialNB performance: {}'.format(clf3.score(X_test, y_test)))
clf4 = naive_bayes.BernoulliNB()
clf4.fit(X_train, y_train)
print('BernoulliNB performance: {}'.format(clf4.score(X_test, y_test)))
voting_model = ensemble.VotingClassifier(
estimators=[('lr', clf1), ('sgd', clf2), ('mnb', clf3), ('bnb', clf4)],
voting='hard')
voting_model.fit(X_train, y_train)
print('Voting classifier performance: {}'.format(
voting_model.score(X_test, y_test)))
|
8099
|
import glob
import numpy as np
X = np.empty((0, 193))
y = np.empty((0, 10))
groups = np.empty((0, 1))
npz_files = glob.glob('./urban_sound_?.npz')
for fn in npz_files:
print(fn)
data = np.load(fn)
X = np.append(X, data['X'], axis=0)
y = np.append(y, data['y'], axis=0)
groups = np.append(groups, data['groups'], axis=0)
print(groups[groups>0])
print(X.shape, y.shape)
for r in y:
if np.sum(r) > 1.5:
print(r)
np.savez('urban_sound', X=X, y=y, groups=groups)
|
8132
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import cv2
import numpy as np
import os
import math
from PIL import Image, ImageDraw, ImageFont
from caffe2.python import workspace
from detectron.core.config import cfg
from detectron.core.config import get_output_dir
def vis_training(cur_iter):
prefix = ''
if cfg.WEBLY.MINING:
prefix = 'mining_'
if not (cfg.WSL.DEBUG or
(cfg.WSL.SAMPLE and cur_iter % cfg.WSL.SAMPLE_ITER == 0)):
return
output_dir = get_output_dir(cfg.TRAIN.DATASETS, training=True)
sample_dir = os.path.join(output_dir, 'webly_sample')
if not os.path.exists(sample_dir):
os.makedirs(sample_dir)
for gpu_id in range(cfg.NUM_GPUS):
data_ids = workspace.FetchBlob('gpu_{}/{}'.format(gpu_id, 'data_ids'))
ims = workspace.FetchBlob('gpu_{}/{}'.format(gpu_id, 'data'))
labels_oh = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, 'labels_oh'))
im_score = workspace.FetchBlob('gpu_{}/{}'.format(gpu_id, 'cls_prob'))
roi_score = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, prefix + 'rois_pred'))
# roi_score_softmax = workspace.FetchBlob('gpu_{}/{}'.format(
# gpu_id, prefix + 'rois_pred_softmax'))
rois = workspace.FetchBlob('gpu_{}/{}'.format(gpu_id, prefix + 'rois'))
# anchor_argmax = workspace.FetchBlob('gpu_{}/{}'.format(
# gpu_id, 'anchor_argmax'))
preffix = 'iter_' + str(cur_iter) + '_gpu_' + str(gpu_id)
save_im(labels_oh, im_score, ims, cfg.PIXEL_MEANS, preffix, sample_dir)
save_rois(labels_oh, im_score, roi_score, ims, rois, cfg.PIXEL_MEANS,
preffix, '', sample_dir)
# continue
if cfg.WEBLY.ENTROPY:
pass
else:
continue
class_weight = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, prefix + 'rois_class_weight'))
rois_pred_hatE = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, prefix + 'rois_pred_hatE'))
rois_pred_E = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, prefix + 'rois_pred_E'))
y_logN__logy = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, prefix + 'rois_pred_y_logN__logy'))
save_entropy(labels_oh, im_score, class_weight, roi_score, ims, rois,
cfg.PIXEL_MEANS, preffix, '', sample_dir, rois_pred_hatE,
rois_pred_E, y_logN__logy)
def save_im(labels_oh, im_score, ims, pixel_means, prefix, output_dir):
batch_size, num_classes = im_score.shape
for b in range(batch_size):
for c in range(num_classes):
# if labels_oh[b][c] == 0.0:
# continue
if im_score[b][c] < 0.1:
continue
im = ims[b, :, :, :].copy()
channel_swap = (1, 2, 0)
im = im.transpose(channel_swap)
im += pixel_means
im = im.astype(np.uint8)
file_name = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '.png')
cv2.imwrite(file_name, im)
def save_rois(labels_oh, im_score, roi_score, ims, rois, pixel_means, prefix,
suffix, output_dir):
num_rois, num_classes = roi_score.shape
batch_size, _, height, weight = ims.shape
has_bg = False
num_rois_this = min(500, num_rois)
for b in range(batch_size):
for c in range(num_classes):
# if labels_oh[b][c] == 0.0:
# continue
if im_score[b][c] < 0.1:
if has_bg:
continue
has_bg = True
im = ims[b, :, :, :].copy()
channel_swap = (1, 2, 0)
im = im.transpose(channel_swap)
im += pixel_means
im = im.astype(np.uint8)
im_S = im.copy()
im_A = im.copy()
argsort = np.argsort(-np.abs(roi_score[:, c]))
argsort = argsort[:num_rois_this]
argsort = argsort[::-1]
if im_score[b][c] < 0.1:
scale_p = 1.0
else:
scale_p = 1.0 / roi_score[:, c].max()
for n in range(num_rois_this):
roi = rois[argsort[n]]
if roi[0] != b:
continue
if roi_score[argsort[n]][c] * scale_p < 0.4:
thickness = 3
else:
thickness = 6
jet = gray2jet(roi_score[argsort[n]][c] * scale_p)
cv2.rectangle(im_S, (roi[1], roi[2]), (roi[3], roi[4]), jet, thickness)
file_name = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '_' +
suffix + '.png')
cv2.imwrite(file_name, im_S)
continue
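            # NOTE: everything below in this loop body is intentionally unreachable
            # because of the continue above; it relies on `anchor_argmax`, whose
            # fetch is commented out earlier in vis_training.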
num_anchors = anchor_argmax.shape[0]
for n in range(num_rois):
roi = rois[n]
if roi[0] != b:
continue
for a in range(num_anchors):
if anchor_argmax[a][n] == 1.0:
break
jet = gray2jet(1.0 * a / num_anchors)
cv2.rectangle(im_A, (roi[1], roi[2]), (roi[3], roi[4]), jet, 1)
file_name = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '_A_' +
suffix + '.png')
cv2.imwrite(file_name, im_A)
def save_entropy(labels_oh, im_score, class_weight, roi_score, ims, rois,
pixel_means, prefix, suffix, output_dir, rois_pred_hatE,
rois_pred_E, y_logN__logy):
num_rois, num_classes = roi_score.shape
batch_size, _, height, weight = ims.shape
rois_pred_E_sum = np.sum(rois_pred_E, axis=0).reshape(1, -1)
E_sum_norm = np.true_divide(rois_pred_E_sum, y_logN__logy)
E_sum_norm = np.where(E_sum_norm > 1., 1., E_sum_norm)
E_class_weight = 1 - E_sum_norm
for b in range(batch_size):
for c in range(num_classes):
if labels_oh[b][c] == 0.0 and im_score[b][c] < 0.1:
continue
im = ims[b, :, :, :].copy()
channel_swap = (1, 2, 0)
im = im.transpose(channel_swap)
im += pixel_means
im = im.astype(np.uint8)
im_S = im.copy()
im_A = im.copy()
im_hatE = im.copy()
im_E = im.copy()
_NUM = 10
argsort_roi = np.argsort(roi_score[:, c])[::-1]
argsort_hatE = np.argsort(rois_pred_hatE[:, c])[::-1]
argsort_E = np.argsort(rois_pred_E[:, c])[::-1]
if len(argsort_roi) >= _NUM:
_NUM = 10
else:
_NUM = len(argsort_roi)
argsort_roi = argsort_roi[:_NUM][::-1]
argsort_hatE = argsort_hatE[:_NUM][::-1]
argsort_E = argsort_E[:_NUM][::-1]
argsort_hatE = argsort_roi
argsort_E = argsort_roi
scale_p = 1.0 / roi_score[:, c].max()
scale_p = 1.0
for n in range(_NUM):
roi = rois[argsort_roi[n]]
hatE_roi = rois[argsort_hatE[n]]
E_roi = rois[argsort_E[n]]
if roi[0] != b:
continue
# draw roi
jet = gray2jet(roi_score[argsort_roi[n]][c] * scale_p)
bgr = jet
rgb = (jet[2], jet[1], jet[0])
# roi location
cv2.rectangle(im_S, (roi[1], roi[2]), (roi[3], roi[4]),
bgr,
2,
lineType=cv2.LINE_AA)
text = "{:.4f}".format(roi_score[argsort_roi[n]][c])
im_S = putText_with_TNR(im_S, int(roi[1]), int(roi[2]), 15,
jet, rgb, text)
if hatE_roi[0] != b:
continue
# draw rois_pred_hatE
# jet = gray2jet(rois_pred_hatE[argsort_hatE[n]][c] * scale_p)
# bgr = jet
# rgb = (jet[2], jet[1], jet[0])
# roi location
cv2.rectangle(im_hatE, (hatE_roi[1], hatE_roi[2]),
(hatE_roi[3], hatE_roi[4]),
bgr,
2,
lineType=cv2.LINE_AA)
# put Text hat_E
text = "{:.4f}".format(rois_pred_hatE[argsort_hatE[n]][c])
im_hatE = putText_with_TNR(im_hatE, int(hatE_roi[1]),
int(hatE_roi[2]), 15, jet, rgb,
text)
if E_roi[0] != b:
continue
# draw rois_pred_E
# jet = gray2jet(rois_pred_E[argsort_E[n]][c] * scale_p)
# bgr = jet
# rgb = (jet[2], jet[1], jet[0])
# roi location
cv2.rectangle(im_E, (E_roi[1], E_roi[2]), (E_roi[3], E_roi[4]),
bgr,
2,
lineType=cv2.LINE_AA)
# put Text E
text = "{:.4f}".format(rois_pred_E[argsort_E[n]][c])
im_E = putText_with_TNR(im_E, int(E_roi[1]), int(E_roi[2]), 15,
jet, rgb, text)
# write im_score
text = "{:.4f}".format(im_score[b][c])
im_S = putText_with_TNR(im_S, 0, 0, 20, (0, 140, 255),
(255, 255, 255), text)
# write class_weight
text = "{:.4f}".format(class_weight[b][c])
im_hatE = putText_with_TNR(im_hatE, 0, 0, 20, (0, 140, 255),
(255, 255, 255), text)
# write class_weight
text = "{:.4f}".format(E_class_weight[b][c])
im_E = putText_with_TNR(im_E, 0, 0, 20, (0, 140, 255),
(255, 255, 255), text)
file_name_roi = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '_roi' +
suffix + '.png')
cv2.imwrite(file_name_roi, im_S)
file_name_hatE = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) +
'_hatE' + suffix + '.png')
cv2.imwrite(file_name_hatE, im_hatE)
file_name_E = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '_E' +
suffix + '.png')
cv2.imwrite(file_name_E, im_E)
def dump_proto_files(model, output_dir):
"""Save prototxt descriptions of the training network and parameter
initialization network."""
with open(os.path.join(output_dir, model.net.Proto().name), 'w') as fid:
fid.write(str(model.net.Proto()))
with open(os.path.join(output_dir,
model.param_init_net.Proto().name), 'w') as fid:
fid.write(str(model.param_init_net.Proto()))
def gray2jet(f):
# plot short rainbow RGB
a = f / 0.25 # invert and group
X = math.floor(a) # this is the integer part
Y = math.floor(255 * (a - X)) # fractional part from 0 to 255
Z = math.floor(128 * (a - X)) # fractional part from 0 to 128
if X == 0:
r = 0
g = Y
b = 128 - Z
elif X == 1:
r = Y
g = 255
b = 0
elif X == 2:
r = 255
g = 255 - Z
b = 0
elif X == 3:
r = 255
g = 128 - Z
b = 0
elif X == 4:
r = 255
g = 0
b = 0
# opencv is bgr, not rgb
return (b, g, r)
def putText_with_TNR(img, x, y, size, fontColor, bgColor, string):
thickness = 2
font_scale = 1.1
font = cv2.FONT_HERSHEY_SIMPLEX
s = cv2.getTextSize(string, font, font_scale, thickness)
cv2.rectangle(
img,
(x + thickness, y + thickness),
(x + thickness + s[0][0] + 2, y + thickness + s[0][1] + 2),
# (0, 140, 255),
fontColor,
cv2.FILLED,
lineType=cv2.LINE_AA)
position = (x + thickness + 1, y + thickness + s[0][1] + 1)
cv2.putText(img, string, position, font, font_scale, (255, 255, 255),
thickness, cv2.LINE_AA)
return img
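    # NOTE: the PIL/TrueType rendering below is unreachable; the function has
    # already returned via the pure-OpenCV path above.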
# from OpenCV to PIL
font = "/home/chenzhiwei/Documents/myFonts/timesnewroman.ttf"
img_PIL = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
font = ImageFont.truetype(font, size)
position = (x + 3, y - 2)
draw = ImageDraw.Draw(img_PIL)
offsetx, offsety = font.getoffset(string)
width, height = font.getsize(string)
draw.rectangle((offsetx + x + 2, offsety + y - 3, offsetx + x + width + 3,
offsety + y + height - 3),
fill=bgColor)
draw.text(position, string, font=font, fill=fontColor)
# back to OpenCV type
img_OpenCV = cv2.cvtColor(np.asarray(img_PIL), cv2.COLOR_RGB2BGR)
return img_OpenCV
|
8150
|
def save_form(form, actor=None):
"""Allows storing a form with a passed actor. Normally, Form.save() does not accept an actor, but if you require
this to be passed (is not handled by middleware), you can use this to replace form.save().
Requires you to use the audit.Model model as the actor is passed to the object's save method.
"""
obj = form.save(commit=False)
obj.save(actor=actor)
form.save_m2m()
return obj
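# A minimal usage sketch (hypothetical view code, not part of this module).
# `ArticleForm` and `request` are assumed names; the model behind the form is
# assumed to subclass the audit model so that its save() accepts `actor`.
#
# def edit_article(request):
#     form = ArticleForm(request.POST)
#     if form.is_valid():
#         article = save_form(form, actor=request.user)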
#def intermediate_save(instance, actor=None):
# """Allows saving of an instance, without storing the changes, but keeping the history. This allows you to perform
# intermediate saves:
#
# obj.value1 = 1
# intermediate_save(obj)
# obj.value2 = 2
# obj.save()
# <value 1 and value 2 are both stored in the database>
# """
# if hasattr(instance, '_audit_changes'):
# tmp = instance._audit_changes
# if actor:
# instance.save(actor=actor)
# else:
# instance.save()
# instance._audit_changes = tmp
# else:
# if actor:
# instance.save(actor=actor)
# else:
# instance.save()
|
8173
|
from django.db import models
class SiteSettings(models.Model):
site_name = models.CharField(max_length=200 , verbose_name='Site Name')
site_url = models.CharField(max_length=200 , verbose_name='Site URL')
site_address = models.CharField(max_length=300 , verbose_name='Site Address')
site_phone = models.CharField(max_length=100 , null=True , blank=True , verbose_name='Site Phone')
site_fax = models.CharField(max_length=200 , null=True , blank=True , verbose_name='Site Fax')
site_email = models.EmailField(max_length=200 , null=True , blank=True , verbose_name='Site Email')
about_us_text = models.TextField(verbose_name='About Us Text')
site_copy_right = models.TextField(verbose_name='Copyright Text')
site_logo = models.ImageField(upload_to='images/site-setting/' , verbose_name='Site Logo')
is_main_setting = models.BooleanField(verbose_name='Site Main Settings')
def __str__(self) -> str:
super(SiteSettings , self).__str__()
return self.site_name
class Meta:
verbose_name = 'Site Setting'
verbose_name_plural = 'Site Settings'
class FooterLinkBox(models.Model):
title = models.CharField(max_length=200 , verbose_name='Title')
def __str__(self) -> str:
super(FooterLinkBox , self).__str__()
return self.title
class Meta:
verbose_name = 'Footer Link Setting'
verbose_name_plural = 'Footer Link Settings'
class FooterLink(models.Model):
title = models.CharField(max_length=200 , verbose_name='Title')
url = models.URLField(max_length=500 , verbose_name='Links')
footer_link_box = models.ForeignKey(to=FooterLinkBox , verbose_name='Category' , on_delete=models.CASCADE)
def __str__(self) -> str:
super(FooterLink , self).__str__()
return self.title
class Meta:
verbose_name = 'Footer Link'
verbose_name_plural = 'Footer Links'
class Slider(models.Model):
title = models.CharField(max_length=200 , verbose_name='Title')
description = models.TextField(verbose_name='Slider Description')
url_title = models.CharField(max_length=200 , verbose_name='URL Title')
url = models.URLField(max_length=200 , verbose_name='URL Address')
image = models.ImageField(upload_to='images/sliders' , verbose_name='Slider Image')
is_active = models.BooleanField(default=False , verbose_name='Active / Inactive')
def __str__(self) -> str:
super(Slider , self).__str__()
return self.title
class Meta:
verbose_name = 'Slider'
verbose_name_plural = 'Sliders'
|
8207
|
from __future__ import absolute_import, division, print_function, unicode_literals
import torch.nn.qat as nnqat
import torch.nn.intrinsic
import torch.nn.functional as F
class LinearReLU(nnqat.Linear):
r"""
A LinearReLU module fused from Linear and ReLU modules, attached with
FakeQuantize modules for output activation and weight, used in
quantization aware training.
We adopt the same interface as :class:`torch.nn.Linear`.
Similar to `torch.nn.intrinsic.LinearReLU`, with FakeQuantize modules initialized to
default.
Attributes:
activation_post_process: fake quant module for output activation
weight: fake quant module for weight
Examples::
>>> m = nn.qat.LinearReLU(20, 30)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 30])
"""
_FLOAT_MODULE = torch.nn.intrinsic.LinearReLU
def __init__(self, in_features, out_features, bias=True,
qconfig=None):
super(LinearReLU, self).__init__(in_features, out_features, bias, qconfig)
def forward(self, input):
return self.activation_post_process(F.relu(
F.linear(input, self.weight_fake_quant(self.weight), self.bias)))
@classmethod
def from_float(cls, mod, qconfig=None):
return super(LinearReLU, cls).from_float(mod, qconfig)
|
8219
|
import unittest
import astar
class BasicTests(unittest.TestCase):
def test_bestpath(self):
"""ensure that we take the shortest path, and not the path with less elements.
the path with less elements is A -> B with a distance of 100
the shortest path is A -> C -> D -> B with a distance of 60
"""
nodes = {'A': [('B', 100), ('C', 20)],
'C': [('D', 20)], 'D': [('B', 20)]}
def neighbors(n):
for n1, d in nodes[n]:
yield n1
def distance(n1, n2):
for n, d in nodes[n1]:
if n == n2:
return d
def cost(n, goal):
return 1
path = list(astar.find_path('A', 'B', neighbors_fnct=neighbors,
heuristic_cost_estimate_fnct=cost, distance_between_fnct=distance))
self.assertEqual(4, len(path))
for i, n in enumerate('ACDB'):
self.assertEqual(n, path[i])
if __name__ == '__main__':
unittest.main()
|
8251
|
import numpy as np
from collections import defaultdict, Counter
import random
import json
from tqdm import tqdm
def transX(dataset):
rel2id = json.load(open(dataset + '/relation2ids'))
ent2id = json.load(open(dataset + '/ent2ids'))
with open('../Fast-TransX/' + dataset + '_base/entity2id.txt', 'w') as g1:
num_ents = len(ent2id.keys())
g1.write(str(num_ents) + '\n')
for k, v in ent2id.items():
g1.write(k + '\t' + str(v) + '\n')
with open('../Fast-TransX/' + dataset + '_base/relation2id.txt', 'w') as g1:
num_rels = len(rel2id.keys())
g1.write(str(num_rels) + '\n')
for k, v in rel2id.items():
g1.write(k + '\t' + str(v) + '\n')
file_name = dataset + '/path_graph'
train_triples = []
with open(file_name) as f:
lines = f.readlines()
for line in tqdm(lines):
e1 = line.split('\t')[0]
e2 = line.rstrip().split('\t')[2]
rel = line.split('\t')[1]
train_triples.append([e1,rel,e2])
train_triples.append([e2,rel+'_inv',e1])
with open('../Fast-TransX/' + dataset + '_base/train2id.txt', 'w') as g3:
num_triples = len(train_triples)
g3.write(str(num_triples) + '\n')
for triple in train_triples:
e1, rel, e2 = triple
g3.write(str(ent2id[e1]) + '\t' + str(ent2id[e2]) + '\t' + str(rel2id[rel]) + '\n')
if __name__ == '__main__':
transX('Wiki')
|
8281
|
import os
from subprocess import call
from . import glob2
pwd = os.path.dirname(__file__)
def get_files_from_path(path, ext):
# use set to remove duplicate files. weird...but it happens
if os.path.isfile(path): return set([os.path.abspath(path)])
else: # i.e., folder
files = glob2.glob(os.path.abspath(os.path.join(path, "**/*.{}".format(ext))))
return set(sorted(files)) # to guarantee the order of files read
"""
handling javajskparser AST
"""
def toAST(files, ext, add_libs):
prg_files = []
for f in files:
prg_files.extend(get_files_from_path(f, "java"))
if not prg_files: exit('jskparser.util: File(s) not found!')
java_in = os.path.abspath(os.path.join(pwd, '../tests/ir_asts/API.java'))
json_out = os.path.abspath(os.path.join(pwd, '../tests/ir_asts/java.json'))
if add_libs:
obj_path = os.path.abspath(os.path.join(pwd, '../../model/lang/Object.java'))
str_path = os.path.abspath(os.path.join(pwd, '../../model/lang/String.java'))
num_path = os.path.abspath(os.path.join(pwd, '../../model/lang/Number.java'))
int_path = os.path.abspath(os.path.join(pwd, '../../model/lang/Integer.java'))
char_path = os.path.abspath(os.path.join(pwd, '../../model/lang/Character.java'))
itbl_path = os.path.abspath(os.path.join(pwd, '../../model/lang/Iterable.java'))
iter_path = os.path.abspath(os.path.join(pwd, '../../model/util/Iterator.java'))
arr_path = os.path.abspath(os.path.join(pwd, '../../model/util/Arrays.java'))
list_path = os.path.abspath(os.path.join(pwd, '../../model/util/List.java'))
alist_path = os.path.abspath(os.path.join(pwd, '../../model/util/ArrayList.java'))
llist_path = os.path.abspath(os.path.join(pwd, '../../model/util/LinkedList.java'))
hmap_path = os.path.abspath(os.path.join(pwd, '../../model/util/HashMap.java'))
hset_path = os.path.abspath(os.path.join(pwd, '../../model/util/HashSet.java'))
if obj_path not in prg_files: prg_files.append(obj_path)
if str_path not in prg_files: prg_files.append(str_path)
if num_path not in prg_files: prg_files.append(num_path)
if int_path not in prg_files: prg_files.append(int_path)
if char_path not in prg_files: prg_files.append(char_path)
if itbl_path not in prg_files: prg_files.append(itbl_path)
if iter_path not in prg_files: prg_files.append(iter_path)
if arr_path not in prg_files: prg_files.append(arr_path)
if list_path not in prg_files: prg_files.append(list_path)
if alist_path not in prg_files: prg_files.append(alist_path)
if llist_path not in prg_files: prg_files.append(llist_path)
if hmap_path not in prg_files: prg_files.append(hmap_path)
if hset_path not in prg_files: prg_files.append(hset_path)
api = ""
for fname in prg_files:
with open(fname, 'r') as fd:
api += fd.read()
with open(java_in, 'w') as fd:
fd.write(api)
# this classpath stuff seems awful. Jsonify is hardcoded, passing a
# single string to subprocess.call is platform dependant, and shell=True
# can be a security vulnerability (if allowed to take user input).
# This just got a whole lot nastier
cmd = 'cd ' + pwd + '/..; /usr/bin/java -cp .:javaparser/javaparser-core/target/classes:$HOME/.m2/repository/com/cedarsoftware/json-io/4.3.0/json-io-4.3.0.jar jskparser.Jsonify ' + java_in + ' ' + json_out
ret = call(cmd, shell=True)
if ret != 0: exit('Problem parsing.')
return json_out
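# A hedged sketch of how the same invocation could avoid shell=True and the
# shelled `cd`: pass an argument list and use subprocess's cwd= instead. The
# classpath entries and main class are copied from the command above; whether
# the build layout supports being called this way is an assumption.
#
# import subprocess
# classpath = ':'.join([
#     '.',
#     'javaparser/javaparser-core/target/classes',
#     os.path.expanduser('~/.m2/repository/com/cedarsoftware/json-io/4.3.0/json-io-4.3.0.jar'),
# ])
# ret = subprocess.call(
#     ['/usr/bin/java', '-cp', classpath, 'jskparser.Jsonify', java_in, json_out],
#     cwd=os.path.join(pwd, '..'))
# if ret != 0: exit('Problem parsing.')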
|
8295
|
import argparse
import operator
import os
import re
import shutil
import spacy
import tempfile
from nerds.utils import spans_to_tokens, get_logger
def segment_text_to_sentences(text_file, sentence_splitter):
""" Segment text into sentences. Text is provided by BRAT in .txt
file.
Args:
text_file (str): the full path to the BRAT .txt file.
sentence_splitter (spacy LM): SpaCy EN language model.
Returns:
sentences (list((int, int, str))): list of sentence spans.
Spans are triples of (start_offset, end_offset, text),
where offset is relative to the text.
"""
sentences = []
ftext = open(text_file, "r")
for line in ftext:
splits = sentence_splitter(line.strip())
for sent in splits.sents:
sentences.append((sent.start_char, sent.end_char, sent.text))
ftext.close()
return sentences
def parse_text_annotations(ann_file):
""" Parses BRAT annotations provided in the .ann file and converts them
to annotation spans of (start_position, end_position, entity_class).
Args:
ann_file (str): full path to the BRAT .ann file.
Returns:
annotations (list((int, int, str))): list of annotation spans.
Spans are triples of (start_offset, end_offset, entity_class)
where offset is relative to the text.
"""
annots = []
fann = open(ann_file, "r")
for line in fann:
cols = re.split(r"\s+", line.strip())
if not cols[0].startswith("T"):
continue
annots.append((int(cols[2]), int(cols[3]), cols[1]))
fann.close()
return annots
def apply_annotations(sentences, annotations, tokenizer):
""" Apply annotation spans to the sentence spans to create a list of tokens
and tags.
Args:
sentences (list((int, int, str))): list of sentence spans.
annotations (list((int, int, str))): list of annotation spans.
tokenizer (spacy LM): SpaCy EN language model.
Returns:
tokens_tags_list (list((list(str), list(str)))): list of list of token
tag pairs. Each list of token-tag pairs corresponds to a single
sentence.
"""
tokens_tags_list = []
for sent_start, sent_end, sent_text in sentences:
sent_annots = [a for a in annotations if a[0] >= sent_start and a[1] <= sent_end]
# convert document offsets to sentence offsets
sent_annots = [(s[0] - sent_start, s[1] - sent_start, s[2]) for s in sent_annots]
tokens, tags = spans_to_tokens(sent_text, sent_annots, tokenizer)
tokens_tags_list.append(zip(tokens, tags))
return tokens_tags_list
def convert_brat_to_iob(input_dir, output_file, nlp):
""" Convenience Convertor function.
Args:
input_dir (str): the directory where the BRAT .txt and .ann files
are located.
output_file (str): the full path name of file to write output in
IOB format to.
nlp (SpaCy LM): reference to the SpaCy EN model.
Returns:
None.
"""
fout = open(output_file, "w")
for text_file in os.listdir(input_dir):
# only process .txt and .ann pairs in specified directory
if not text_file.endswith(".txt"):
continue
annot_file = text_file[:-4] + ".ann"
if not os.path.exists(os.path.join(input_dir, annot_file)):
# do not process file if no corresponding .ann file
continue
# process file pair
logger.info("Processing file: {:s}".format(text_file))
sentences = segment_text_to_sentences(os.path.join(input_dir, text_file), nlp)
annotations = parse_text_annotations(os.path.join(input_dir, annot_file))
tokens_tags_list = apply_annotations(sentences, annotations, nlp)
for tokens_tags in tokens_tags_list:
for token, tag in tokens_tags:
fout.write("{:s}\t{:s}\n".format(token, tag))
fout.write("\n")
fout.close()
def do_self_test(nlp):
""" Simple self-test with small dataset to prove that this works okay. """
text = "<NAME>, 61 years old, will join the board as a nonexecutive director, Nov. 29. Mr. Vinken is chairman of Elsevier N.V., the Dutch publishing group."
annotations = [
"T1 PER 0 13 <NAME>",
"T2 PER 86 96 Mr. Vinken",
"T3 DATE 15 27 61 years old",
"T4 DATE 77 84 Nov. 29",
"T5 ORG 112 125 Elsevier N.V.",
"T6 NORP 131 136 Dutch"
]
input_dir = tempfile.mkdtemp(dir="/tmp")
ftext = open(os.path.join(input_dir, "test.txt"), "w")
ftext.write(text)
ftext.close()
fann = open(os.path.join(input_dir, "test.ann"), "w")
for line in annotations:
fann.write(line + "\n")
fann.close()
output_file = os.path.join(input_dir, "test.iob")
convert_brat_to_iob(input_dir, output_file, nlp)
fout = open(output_file, "r")
for line in fout:
logger.warn(line.strip())
shutil.rmtree(input_dir)
################################ main ################################
#
# usage: brat2iob.py [-h] [-i INPUT_DIR] [-o OUTPUT_FILE] [-t]
# Script to convert BRAT annotations to IOB (NERDS) format.
# optional arguments:
# -h, --help show this help message and exit
# -i INPUT_DIR, --input_dir INPUT_DIR
# Directory to store BRAT .txt and .ann files.
# -o OUTPUT_FILE, --output_file OUTPUT_FILE
# Output file to write IOB output to.
# -t, --test Runs self test.
######################################################################
parser = argparse.ArgumentParser(
description="Script to convert BRAT annotations to IOB (NERDS) format.")
parser.add_argument("-i", "--input_dir", help="Directory to store BRAT .txt and .ann files.")
parser.add_argument("-o", "--output_file", help="Output file to write IOB output to.")
parser.add_argument("-t", "--test", help="Runs self test.", action="store_true")
args = parser.parse_args()
logger = get_logger()
input_dir = args.input_dir
output_file = args.output_file
self_test = args.test
nlp = spacy.load("en")
if self_test:
logger.info("Executing self test...")
do_self_test(nlp)
else:
logger.info("Reading BRAT .txt and .ann files from: {:s}".format(input_dir))
logger.info("Writing IOB tokens/tags to file: {:s}".format(output_file))
convert_brat_to_iob(input_dir, output_file, nlp)
|
8297
|
import sys
sys.path.insert(0,'..')
from data.whale_data import exchnage_accounts
from data.html_helper import check_if_address_name_exists
from data.whale_eth_tx_data import *
from data.whale_token_tx_data import identify_investor_type_token
holding_account = "holding_account"
deposit_account = 'deposit_account'
withdraw_account = "withdraw_account"
in_type = "IN"
out_type = "OUT"
all_acc_types = dict()
for acc in exchnage_accounts:
all_acc_types[acc] = exchange_type
def update_y_array(X,y,timestamp,amount):
target_index = 0
for i in range(len(X)):
x_time = X[i]
if timestamp < x_time:
target_index = i
break
for i in range(target_index,len(y)):
y[i] += amount
return y
def perform_bfs_on_accounts(out_txs,top_holder_type,acc,m_type='OUT'):
print("\t"+m_type)
unique_out = set()
for out in out_txs:
unique_out.add(out[3])
unique_out = list(unique_out)[:5]
for out in unique_out:
print("\t"+out)
if out not in all_acc_types:
investor_type = identify_investor_type(out)
if investor_type == affliate_type:
investor_type = identify_investor_type_token(out)
print("\t\t{}".format(investor_type))
else:
investor_type = all_acc_types[out]
if investor_type == exchange_type:
top_holder_type[acc] = deposit_account if m_type == "OUT" else withdraw_account
all_acc_types[out] = investor_type
if acc not in top_holder_type:
top_holder_type[acc] = holding_account
return top_holder_type
def calculate_holding_amount(X,escape_accounts,txs):
top_holder_type = dict()
for acc in txs:
tx = txs[acc]
if acc in escape_accounts:
continue
        # If this account has never transferred tokens out, ignore it
out_txs = [item for item in tx if item[2] == 'OUT']
if len(out_txs) == 0:
print("\tholding account")
top_holder_type[acc] = holding_account
continue
    # build all trace Y: holding_amount, deposit_amount, withdraw_amount
amount_trace_y = [0] * len(X)
for holder in txs:
if holder in escape_accounts:
continue
if holder not in top_holder_type:
print("{} not identified! ".format(holder))
continue
holder_type = top_holder_type[holder]
holder_txs = txs[holder]
print("{} {}".format(holder,holder_type))
for tx in holder_txs:
[timestamp,from_a,tx_type,to_a,amount] = tx
if holder_type == holding_account:
if tx_type == in_type:
amount_trace_y = update_y_array(X,amount_trace_y,timestamp,amount)
else:
amount_trace_y = update_y_array(X,amount_trace_y,timestamp,-amount)
return amount_trace_y
|
8316
|
from baremetal import *
from math import pi, sin, cos
import sys
from scale import scale
from settings import *
from ssb import ssb_polar
def modulator(clk, audio, audio_stb, settings):
audio_bits = audio.subtype.bits
#AM modulation
am_mag = Unsigned(12).constant(0) + audio + 2048
am_phase = Signed(32).constant(0)
am_stb = audio_stb
#FM modulation
fm_mag = Unsigned(12).constant(4095)
frequency = Signed(32).constant(0) + audio
nfm_scaled_frequency = frequency * (2**(32-audio_bits) * 5 / 50)
nfm_phase = nfm_scaled_frequency.subtype.register(clk, en=audio_stb, init=0)
nfm_phase.d(nfm_phase + nfm_scaled_frequency)
scaled_frequency = frequency * (2**(32-audio_bits) * 8 / 50)
fm_phase = scaled_frequency.subtype.register(clk, en=audio_stb, init=0)
fm_phase.d(fm_phase + scaled_frequency)
fm_stb = Boolean().register(clk, d=audio_stb, init=0)
#ssb
ssb_mag, ssb_phase, ssb_stb = ssb_polar(clk, audio, audio_stb, settings.mode==LSB)
ssb_mag <<= 1
ssb_phase = Signed(32).constant(0) + ssb_phase
ssb_phase <<= (32 - audio_bits)
#cw modulation
cw_mag = Unsigned(12).constant(0)
cw_phase = Signed(32).constant(0)
cw_stb = audio_stb
#mode switching
magnitude = Unsigned(12).select(settings.mode, am_mag, fm_mag, fm_mag, ssb_mag, ssb_mag, cw_mag)
phase = Signed(32).select(settings.mode, am_phase, nfm_phase, fm_phase, ssb_phase, ssb_phase, cw_phase)
stb = Boolean().select(settings.mode, am_stb, fm_stb, fm_stb, ssb_stb, ssb_stb, cw_stb)
    return magnitude, phase, stb
import numpy as np
from matplotlib import pyplot as plt
def test_modulator(stimulus, mode):
settings = Settings()
settings.mode = Unsigned(3).input("filter_mode")
clk = Clock("clk")
audio_in = Signed(12).input("i_data_in")
audio_stb_in = Boolean().input("stb_in")
i, q, stb = modulator(clk, audio_in, audio_stb_in, settings)
#simulate
clk.initialise()
settings.mode.set(mode)
response = []
for data in stimulus:
for j in range(200):
audio_stb_in.set(j==199)
audio_in.set(data)
clk.tick()
if stb.get():
                print(i.get(), q.get())
if i.get() is None or q.get() is None:
continue
response.append(i.get()*(2**20)+1j*q.get())
response = np.array(response)
plt.title("Modulator")
plt.xlabel("Time (samples)")
plt.ylabel("Value")
a, = plt.plot(np.real(response), label="I")
b, = plt.plot(np.imag(response), label="Q")
c, = plt.plot(stimulus*(2**20), label="Audio Input")
plt.legend(handles=[a, b, c])
plt.show()
if __name__ == "__main__" and "sim" in sys.argv:
    # two-tone audio test stimulus (the calls below select the modulation mode)
stimulus=(
np.sin(np.arange(1000)*2.0*pi*0.02)*1023+
np.sin(np.arange(1000)*2.0*pi*0.03)*1023
)
#test_modulator(stimulus, FM)
#test_modulator(stimulus, FM)
#test_modulator(stimulus, NBFM)
test_modulator(stimulus, USB)
|
8402
|
import _ast
from peon.src.project.file.function_def.function import FunctionLint
class ReflectionAtLineFixture:
empty_node = _ast.Pass
is_instance_at_first_lvl = _ast.FunctionDef(id='isinstance', lineno=1)
type_at_first_lvl = _ast.FunctionDef(id='type', lineno=1)
is_instance_at_second_lvl = _ast.FunctionDef(body=[_ast.Expr(id='isinstance', lineno=2)], lineno=1)
type_at_second_lvl = _ast.FunctionDef(body=[_ast.Expr(id='type', lineno=2)], lineno=1)
def test_empty_node():
assert FunctionLint(
definition=ReflectionAtLineFixture.empty_node,
).reflection_at_line() == tuple()
def test_is_instance_at_first_lvl():
assert FunctionLint(
definition=ReflectionAtLineFixture.is_instance_at_first_lvl,
).reflection_at_line() == (1,)
def test_type_at_first_lvl():
assert FunctionLint(
definition=ReflectionAtLineFixture.type_at_first_lvl,
).reflection_at_line() == (1,)
def test_is_instance_at_second_lvl():
assert FunctionLint(
definition=ReflectionAtLineFixture.is_instance_at_second_lvl,
).reflection_at_line() == (2,)
def test_type_at_second_lvl():
assert FunctionLint(
definition=ReflectionAtLineFixture.type_at_second_lvl,
).reflection_at_line() == (2,)
|
8410
|
import math
class Schedule():
def __init__(self, total, batch_size):
self._batch_size = batch_size
self._state = ""
self.total = total
self.scheduled = 0
self.finished = 0
@property
def _batch(self):
return math.ceil(self.scheduled / self._batch_size)
@property
def _batches(self):
return math.ceil(self.total / self._batch_size)
@property
def _percentage(self):
_percentage = self.scheduled / self.total * 100
return "%.1f%%" % _percentage
def suffix(self, string):
return " ".join((
string,
"#%d/%d %s" %
(
self._batch,
self._batches,
self._percentage
)
))
def completed(self):
if self.finished != self.total:
raise ValueError(self.finished, self.total)
def __iter__(self):
return self
def __next__(self):
if self.scheduled >= self.total:
self._state = "pending, waiting for completion,"
raise StopIteration()
self.scheduled += self._batch_size
if self.scheduled > self.total:
self.scheduled = self.total
self._state = self.suffix("running, on batch") + ","
return self._batch
def __str__(self):
return " ".join(f"""
<Schedule {"done" if self.finished >= self.total else self._state}
total={self.total} scheduled={self.scheduled} finished={self.finished}>
""".split())
def test_01():
schedule = Schedule(100, 10)
for batch in schedule:
print(batch)
print(schedule)
def test_02():
schedule = Schedule(25, 10)
for batch in schedule:
print(batch)
print(schedule)
print(schedule.suffix("Task"))
def test_03():
schedule = Schedule(0, 10)
for batch in schedule:
print(batch)
print(schedule)
print(schedule.suffix("Task"))
def test_04():
schedule = Schedule(1, 10)
for batch in schedule:
print(batch)
print(schedule)
print(schedule.suffix("Task"))
if __name__ == "__main__":
test_02()
|
8413
|
from abc import ABCMeta, abstractmethod
class DataWrapper:
"""Interface for access to datasets."""
__metaclass__ = ABCMeta
@abstractmethod
def next(self):
"""Returns next minibatch for training."""
        raise NotImplementedError
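# A minimal concrete sketch (illustrative only, not part of the original
# interface): serves fixed-size minibatches from an in-memory Python list.
class ListDataWrapper(DataWrapper):
    """Toy DataWrapper that cycles through a list in fixed-size minibatches."""
    def __init__(self, data, batch_size):
        self.data = data
        self.batch_size = batch_size
        self._cursor = 0
    def next(self):
        batch = self.data[self._cursor:self._cursor + self.batch_size]
        self._cursor = (self._cursor + self.batch_size) % max(len(self.data), 1)
        return batch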
|
8524
|
import setuptools
from hugdatafast.__init__ import __version__
with open("README.md", "r") as fh:
long_description = fh.read()
REQUIRED_PKGS = [
'fastai>=2.0.8',
'fastscore>=1.0.1', # change of store_attr api
'datasets',
]
setuptools.setup(
name="hugdatafast",
version=__version__,
author="<NAME>",
author_email="<EMAIL>",
description="The elegant bridge between hugginface data and fastai",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/richarddwang/hugdatafast",
license='Apache 2.0',
packages=setuptools.find_packages(),
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
python_requires='>=3.6',
install_requires=REQUIRED_PKGS,
keywords='datasets machine learning datasets metrics fastai huggingface',
)
|
8683
|
from attrdict import AttrDefault
import asyncio
class StepperMotor:
def __init__(self):
        self.keys = ['a', 'b', 'aa', 'bb', 'common']
self.required_keys = ['a', 'b', 'aa', 'bb']
self._step_instructions = AttrDefault(bool,
{
'1': [[0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 0, 1], [1, 1, 1, 0]],
'2': [[0, 0, 1, 1], [1, 0, 0, 1], [1, 1, 0, 0], [0, 1, 1, 0]],
'1-2': [[0, 1, 1, 1], [0, 0, 1, 1], [1, 0, 1, 1], [1, 0, 0, 1], [1, 1, 0, 1], [1, 1, 0, 0], [1, 1, 1, 0], [0, 1, 1, 0]]
}
)
self.type = None
self.current_step = 0
self._step_type = '2'
self.frequency = 100
self.rotation_step_count = 100
self.milli_meter_step_count = 1
@staticmethod
def info():
return AttrDefault(bool, {'name': 'StepperMotor'})
def wired(self, obniz):
self.obniz = obniz
if obniz.is_valid_io(*[self.params.common]):
self.common = obniz.get_io(*[self.params.common])
self.common.output(*[True])
self.type = 'unipolar'
else:
self.type = 'bipolar'
self.ios = []
self.ios.append(*[obniz.get_io(*[self.params.a])])
self.ios.append(*[obniz.get_io(*[self.params.b])])
self.ios.append(*[obniz.get_io(*[self.params.aa])])
self.ios.append(*[obniz.get_io(*[self.params.bb])])
async def step_wait(self, step_count):
        if not isinstance(step_count, (int, float)):
raise Exception('must provide number')
step_count = round(*[step_count])
if step_count == 0:
return
step_count_abs = abs(*[step_count])
instructions = self._get_step_instructions(*[])
instruction_length = len(instructions)
array = []
current_phase = self.current_step % instruction_length
if current_phase < 0:
current_phase = (instruction_length - current_phase * -1)
if step_count > 0:
for i in range(0, len(instructions), 1):
current_phase += 1
if current_phase >= instruction_length:
current_phase = 0
array.append(*[instructions[current_phase]])
else:
for i in range(0, len(instructions), 1):
current_phase -= 1
if current_phase < 0:
current_phase = (instruction_length - 1)
array.append(*[instructions[current_phase]])
msec = 1000 / self.frequency
msec = int(*[msec])
if msec < 1:
msec = 1
def anonymous0(index):
instruction = array[index]
for i in range(0, len(self.ios), 1):
self.ios[i].output(*[instruction[i]])
state = anonymous0
states = []
for i in range(0, instruction_length, 1):
states.append(*[AttrDefault(bool, {'duration': msec, 'state': state})])
await self.obniz.io.repeat_wait(*[states, step_count_abs])
self.current_step += step_count
async def step_to_wait(self, destination):
mustmove = (destination - self.current_step)
await self.step_wait(*[mustmove])
async def hold_wait(self):
instructions = self._get_step_instructions(*[])
instruction_length = len(instructions)
current_phase = self.current_step % instruction_length
if current_phase < 0:
current_phase = (instruction_length - current_phase * -1)
for i in range(0, len(self.ios), 1):
self.ios[i].output(*[instructions[current_phase][i]])
await self.obniz.ping_wait(*[])
async def free_wait(self):
for i in range(0, len(self.ios), 1):
self.ios[i].output(*[True])
await self.obniz.ping_wait(*[])
def step_type(self, step_type):
new_type = self._step_instructions[step_type]
if not new_type:
raise Exception('unknown step type ' + str(step_type))
self._step_type = step_type
def speed(self, step_per_sec):
self.frequency = step_per_sec
def current_rotation(self):
return self.current_step / self.rotation_step_count * 360
def current_angle(self):
angle = int(*[self.current_rotation(*[]) * 1000]) % 360000 / 1000
if angle < 0:
angle = (360 - angle)
return angle
async def rotate_wait(self, rotation):
rotation /= 360
needed = rotation * self.rotation_step_count
await self.step_wait(*[needed])
async def rotate_to_wait(self, angle):
needed = (angle - self.current_angle(*[]))
if abs(*[needed]) > 180:
needed = (needed - 360) if needed > 0 else (360 + needed)
needed = needed / 360 * self.rotation_step_count
await self.step_wait(*[needed])
def current_distance(self):
return self.current_step / self.milli_meter_step_count
async def move_wait(self, distance):
needed = distance * self.milli_meter_step_count
await self.step_wait(*[needed])
async def move_to_wait(self, destination):
needed = (destination - self.current_distance(*[])) * self.milli_meter_step_count
await self.step_wait(*[needed])
def _get_step_instructions(self):
return self._step_instructions[self._step_type]
|
8692
|
import requests
import ssbio.utils
import os.path as op
# #### PDB stats
# Request flexibility data about one particular PDB.
#
# http://pdbflex.org/php/api/PDBStats.php?pdbID=1a50&chainID=A
#
# pdbID of structure you are interested in
# chainID of chain you are interested in
#
# [{"pdbID":"1a50",
# "chainID":"A",
# "parentClusterID":"4hn4A",
# "avgRMSD":"0.538",
# "maxRMSD":"2.616",
# "flexibilityLabel":"Low",
# "otherClusterMembers":["4hn4A","4hpjA","4hpxA","4kkxA",...],
# "PDBFlexLink":"http:\/\/pdbflex.org\/cluster.html#!\/4hn4A\/20987\/1a50A"}]
#
# Note: you can omit the chainID and PDBFlex will return information for all chains.
#
# #### RMSD profile
# Request RMSD array used for local flexibility plots
#
# http://pdbflex.org/php/api/rmsdProfile.php?pdbID=1a50&chainID=A
#
# pdbID PDB ID of structure you are interested in
# chainID Chain ID of chain you are interested in
#
# {"queryPDB":"1a50A",
# "clusterName":"4hn4A",
# "profile":"[0.616,0.624,0.624,0.624,0.624,0.624,0.029,0.013,0.016,0.023,0.025,0.028,0.030,0.034,0.035,0.035,0.035,0.035,0.036,0.033,0.027,0.023,0.017...]"}
#
# #### PDB representatives
# Request representatives for a PDB's own cluster. Returns a list of chains that represent the most distinct structures in the cluster.
#
# http://pdbflex.org/php/api/representatives.php?pdbID=1a50&chainID=A
#
# pdbID PDB ID of structure you are interested in
# chainID Chain ID of chain you are interested in
#
# ["2trsA","3pr2A","1kfjA"]
def get_pdbflex_info(pdb_id, chain_id, outdir, force_rerun=False):
outfile = '{}{}_pdbflex_stats.json'.format(pdb_id, chain_id)
pdbflex_link = 'http://pdbflex.org/php/api/PDBStats.php?pdbID={}&chainID={}'.format(pdb_id,
chain_id)
infolist = ssbio.utils.request_json(link=pdbflex_link, outfile=outfile, outdir=outdir, force_rerun_flag=force_rerun)
# TODO: will running with chain ID always return a single item list?
assert len(infolist) == 1
newdict = {}
for k, v in infolist[0].items():
if k == 'avgRMSD' and v:
newdict[k] = float(v)
elif k == 'maxRMSD' and v:
newdict[k] = float(v)
else:
newdict[k] = v
return newdict
def get_pdbflex_rmsd_profile(pdb_id, chain_id, outdir, force_rerun=False):
outfile = '{}{}_pdbflex_rmsdprofile.json'.format(pdb_id, chain_id)
pdbflex_link = 'http://pdbflex.org/php/api/rmsdProfile.php?pdbID={}&chainID={}'.format(pdb_id,
chain_id)
infodict = ssbio.utils.request_json(link=pdbflex_link, outfile=outfile, outdir=outdir, force_rerun_flag=force_rerun)
infodict['profile'] = [float(x) for x in infodict['profile'].strip('[]').split(',')]
return infodict
def get_pdbflex_representatives(pdb_id, chain_id, outdir, force_rerun=False):
outfile = '{}{}_pdbflex_representatives.json'.format(pdb_id, chain_id)
pdbflex_link = 'http://pdbflex.org/php/api/representatives.php?pdbID={}&chainID={}'.format(pdb_id,
chain_id)
infolist = ssbio.utils.request_json(link=pdbflex_link, outfile=outfile, outdir=outdir, force_rerun_flag=force_rerun)
# infolist = [str(x) for x in infolist.strip('[]').split(',')]
return infolist
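# A minimal usage sketch (assumes network access to pdbflex.org and a writable
# output directory; '1a50'/'A' are the example identifiers from the comments above):
#
# stats = get_pdbflex_info('1a50', 'A', outdir='/tmp')
# profile = get_pdbflex_rmsd_profile('1a50', 'A', outdir='/tmp')['profile']
# representatives = get_pdbflex_representatives('1a50', 'A', outdir='/tmp')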
|