| column | type | range / size |
|---|---|---|
| max_stars_repo_path | string | lengths 4 to 286 |
| max_stars_repo_name | string | lengths 5 to 119 |
| max_stars_count | int64 | 0 to 191k |
| id | string | lengths 1 to 7 |
| content | string | lengths 6 to 1.03M |
| content_cleaned | string | lengths 6 to 1.03M |
| language | string | 111 classes |
| language_score | float64 | 0.03 to 1 |
| comments | string | lengths 0 to 556k |
| edu_score | float64 | 0.32 to 5.03 |
| edu_int_score | int64 | 0 to 5 |
library/math_tool_box.py | brianchiang-tw/Python | 0 | 6630251 | import functools
import random
import math
class StatMaker:
container = []
size = 0
def __init__(self, new_list):
self.container = new_list
self.size = len(self.container)
# Get the minimum value of a series
def get_min(self):
min_value = functools.reduce( lambda smallest, x: smallest if smallest < x else x, self.container, self.container[0] )
return min_value
# Get the maximum value of a series
def get_max(self):
max_value = functools.reduce( lambda largest, x: largest if largest > x else x, self.container, self.container[0] )
return max_value
# Get the summation of a series
def get_sum(self):
sum_value = functools.reduce( lambda sum, x: sum + x, self.container, 0 )
return sum_value
# Get the average of a series
def get_avg(self):
avg = self.get_sum() / self.size
return avg
# Get the standard deviation of a series
def get_std(self):
# Recall:
# var = { ( sigma[ (Xi - avg )^2 ] ) / (N-1) }
# = { ( sigma[ Xi^2 ] - N * avg^2 ) / (N-1) }
# std = sqrt(var)
sum_of_element_square = functools.reduce( lambda sum, x: sum + x**2, self.container, 0 )
N_of_avg_square = self.size * self.get_avg()**2
var = ( sum_of_element_square - N_of_avg_square) / ( self.size-1 )
std = var**( 1/2 )
return std
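# Worked check of the identity above on the series [1, 2, 3, 4, 5]:
#   sigma[ Xi^2 ] = 1 + 4 + 9 + 16 + 25 = 55
#   N * avg^2     = 5 * 3.0^2           = 45
#   var           = (55 - 45) / (5 - 1) = 2.5
#   std           = sqrt(2.5)          ~= 1.5811388300841898
# which matches the std value printed in the tutorial below.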
### Tutorial:
# list_test = list( range(1,6) )
# [1, 2, 3, 4, 5]
# print( list_test )
# random.shuffle( list_test )
# example output:
# [3, 1, 2, 5, 4]
# print( list_test )
# stat_info = StatMaker(list_test)
# min_value = stat_info.get_min()
# max_value = stat_info.get_max()
# sum_value = stat_info.get_sum()
# avg_value = stat_info.get_avg()
# std_value = stat_info.get_std()
# 1
# print( min_value )
# 5
# print( max_value )
# 15
# print( sum_value )
# 3.0
# print( avg_value )
# 1.5811388300841898
# print( std_value )
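A quick cross-check of `StatMaker` against the standard library (a sketch; it assumes the file is importable as `math_tool_box`, per the path above). Since `get_std()` implements the sample (N-1) standard deviation, it should agree with `statistics.stdev`:

```python
import statistics
from math_tool_box import StatMaker  # assumed import path for library/math_tool_box.py

data = [1, 2, 3, 4, 5]
stats = StatMaker(data)

assert stats.get_min() == 1 and stats.get_max() == 5
assert stats.get_sum() == 15
assert stats.get_avg() == statistics.mean(data)               # 3.0
assert abs(stats.get_std() - statistics.stdev(data)) < 1e-12  # ~1.58113883
```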
| language: en | language_score: 0.482989 | edu_score: 3.6024 | edu_int_score: 4 |
robopose/third_party/craves/heatmap_utils.py | lesteve/robopose | 43 | 6630252 | <filename>robopose/third_party/craves/heatmap_utils.py
import torch
import numpy as np
def heatmap_from_keypoints(bbox, pts2d):
scale_factor = 60.0
out_res = 64
nparts = 17
sigma = 1
label_type = 'Gaussian'
bbox = bbox.cpu().numpy()
pts2d = pts2d.cpu().numpy()
x0, y0, x1, y1 = bbox
c = np.array([(x0+x1), (y0+y1)])/2
s = np.sqrt((y1-y0)*(x1-x0)) / scale_factor
r = 0
tpts = np.asarray(pts2d).copy()
target = torch.zeros(nparts, out_res, out_res)
for i in range(nparts):
# if tpts[i, 2] > 0: # This is evil!!
if tpts[i, 1] > 0:
tpts[i, 0:2] = to_torch(transform(tpts[i, 0:2], c, s, [out_res, out_res], rot=r))
target[i] = draw_labelmap(target[i], tpts[i], sigma, type=label_type)
return target
def to_numpy(tensor):
if torch.is_tensor(tensor):
return tensor.cpu().numpy()
elif type(tensor).__module__ != 'numpy':
raise ValueError("Cannot convert {} to numpy array"
.format(type(tensor)))
return tensor
def to_torch(ndarray):
if type(ndarray).__module__ == 'numpy':
return torch.from_numpy(ndarray)
elif not torch.is_tensor(ndarray):
raise ValueError("Cannot convert {} to torch tensor"
.format(type(ndarray)))
return ndarray
def draw_labelmap(img, pt, sigma, type='Gaussian'):
# Draw a 2D gaussian
# Adopted from https://github.com/anewell/pose-hg-train/blob/master/src/pypose/draw.py
img = to_numpy(img)
# Check that any part of the gaussian is in-bounds
ul = [int(pt[0] - 3 * sigma), int(pt[1] - 3 * sigma)]
br = [int(pt[0] + 3 * sigma + 1), int(pt[1] + 3 * sigma + 1)]
if (ul[0] >= img.shape[1] or ul[1] >= img.shape[0] or
br[0] < 0 or br[1] < 0):
# If not, just return the image as is
return to_torch(img)
# Generate gaussian
size = 6 * sigma + 1
x = np.arange(0, size, 1, float)
y = x[:, np.newaxis]
x0 = y0 = size // 2
# The gaussian is not normalized, we want the center value to equal 1
if type == 'Gaussian':
g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
elif type == 'Cauchy':
g = sigma / (((x - x0) ** 2 + (y - y0) ** 2 + sigma ** 2) ** 1.5)
# Usable gaussian range
g_x = max(0, -ul[0]), min(br[0], img.shape[1]) - ul[0]
g_y = max(0, -ul[1]), min(br[1], img.shape[0]) - ul[1]
# Image range
img_x = max(0, ul[0]), min(br[0], img.shape[1])
img_y = max(0, ul[1]), min(br[1], img.shape[0])
img[img_y[0]:img_y[1], img_x[0]:img_x[1]] = g[g_y[0]:g_y[1], g_x[0]:g_x[1]]
return to_torch(img)
def transform(pt, center, scale, res, invert=0, rot=0):
# Transform pixel location to different reference
# print(scale)
t = get_transform(center, scale, res, rot=rot)
if invert:
t = np.linalg.inv(t)
new_pt = np.array([pt[0] - 1, pt[1] - 1, 1.]).T
#new_pt = np.array([pt[0], pt[1], 1.]).T
new_pt = np.dot(t, new_pt)
# return new_pt[:2].astype(int) + 1
return (new_pt[:2] + 0.5).astype(int)
def get_transform(center, scale, res, rot=0):
"""
General image processing functions
"""
# Generate transformation matrix
h = 200 * scale
t = np.zeros((3, 3))
t[0, 0] = float(res[1]) / h
t[1, 1] = float(res[0]) / h
t[0, 2] = res[1] * (-float(center[0]) / h + .5)
t[1, 2] = res[0] * (-float(center[1]) / h + .5)
t[2, 2] = 1
if not rot == 0:
rot = -rot # To match direction of rotation from cropping
rot_mat = np.zeros((3,3))
rot_rad = rot * np.pi / 180
sn,cs = np.sin(rot_rad), np.cos(rot_rad)
rot_mat[0,:2] = [cs, -sn]
rot_mat[1,:2] = [sn, cs]
rot_mat[2,2] = 1
# Need to rotate around center
t_mat = np.eye(3)
t_mat[0,2] = -res[1]/2
t_mat[1,2] = -res[0]/2
t_inv = t_mat.copy()
t_inv[:2,2] *= -1
t = np.dot(t_inv,np.dot(rot_mat,np.dot(t_mat,t)))
return t
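Two properties of the helpers above are worth spelling out: `get_transform` scales so that the 64x64 output covers a square of side `200 * scale` input pixels, centered (up to rounding) on the bounding-box center, and `draw_labelmap` writes an unnormalized Gaussian whose peak value is exactly 1 at the keypoint, silently skipping keypoints whose window falls outside the map. A minimal check of the latter (a sketch, not code from the original repository):

```python
import torch

# The peak of the Gaussian label map lands on the requested pixel with value 1.
hm = draw_labelmap(torch.zeros(64, 64), pt=(32, 32), sigma=1)
assert float(hm[32, 32]) == 1.0
assert hm.shape == (64, 64)

# Keypoints whose 3-sigma window misses the map leave it untouched.
empty = draw_labelmap(torch.zeros(64, 64), pt=(500, 500), sigma=1)
assert float(empty.sum()) == 0.0
```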
| language: en | language_score: 0.741739 | edu_score: 2.328275 | edu_int_score: 2 |
miwell-flask-app/tests/functional_tests/test_pages/test_main_pages/test_about_page.py | joshuahigginson1/DevOps-Assessment-1 | 1 | 6630253 | # Contains the code to test our about page.
# Imports --------------------------------------------------------------------------------
from tests.functional_test_framework import LiveServerTestCase
# Tests ----------------------------------------------------------------------------------
class TestAboutPage(LiveServerTestCase):
pass
| language: en | language_score: 0.253894 | edu_score: 1.526103 | edu_int_score: 2 |
tensorflow-mnist-code/admm_pruning.py | KaiqiZhang/admm-pruning | 90 | 6630254 | <gh_stars>10-100
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A deep MNIST classifier using convolutional layers.
See extensive documentation at
https://www.tensorflow.org/get_started/mnist/pros
"""
# Disable linter warnings to maintain consistency with tutorial.
# pylint: disable=invalid-name
# pylint: disable=g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tempfile
from model import create_model
from solver import create_admm_solver
from tensorflow.examples.tutorials.mnist import input_data
from prune_utility import apply_prune_on_grads,apply_prune,get_configuration,projection
import tensorflow as tf
import numpy as np
from numpy import linalg as LA
FLAGS = None
# pruning ratio
prune_configuration = get_configuration()
dense_w = {}
P1 = prune_configuration.P1
P2 = prune_configuration.P2
P3 = prune_configuration.P3
P4 = prune_configuration.P4
prune_configuration.display()
def main(_):
# Import data
mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
model = create_model()
x = model.x
y_ = model.y_
cross_entropy = model.cross_entropy
layers = model.layers
logits = model.logits
solver = create_admm_solver(model)
keep_prob = model.keep_prob
train_step = solver.train_step
train_step1 = solver.train_step1
W_conv1 = model.W_conv1
W_conv2 = model.W_conv2
W_fc1 = model.W_fc1
W_fc2 = model.W_fc2
A = solver.A
B = solver.B
C = solver.C
D = solver.D
E = solver.E
F = solver.F
G = solver.G
H = solver.H
my_trainer = tf.train.AdamOptimizer(1e-3)
grads = my_trainer.compute_gradients(cross_entropy)
with tf.name_scope('accuracy'):
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y_, 1))
correct_prediction = tf.cast(correct_prediction, tf.float32)
accuracy = tf.reduce_mean(correct_prediction)
graph_location = tempfile.mkdtemp()
print('Saving graph to: %s' % graph_location)
train_writer = tf.summary.FileWriter(graph_location)
train_writer.add_graph(tf.get_default_graph())
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(20000):
batch = mnist.train.next_batch(50)
if i % 100 == 0:
train_accuracy = accuracy.eval(feed_dict={
x: batch[0], y_: batch[1], keep_prob: 1.0})
print('step %d, training accuracy %g' % (i, train_accuracy))
train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
print('test accuracy %g' % accuracy.eval(feed_dict={
x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
Z1 = sess.run(W_conv1)
Z1 = projection(Z1, percent=P1)
U1 = np.zeros_like(Z1)
Z2 = sess.run(W_conv2)
Z2 = projection(Z2, percent=P2)
U2 = np.zeros_like(Z2)
Z3 = sess.run(W_fc1)
Z3 = projection(Z3, percent=P3)
U3 = np.zeros_like(Z3)
Z4 = sess.run(W_fc2)
Z4 = projection(Z4, percent=P4)
U4 = np.zeros_like(Z4)
for j in range(30):
for i in range(5000):
batch = mnist.train.next_batch(50)
if i % 100 == 0:
train_accuracy = accuracy.eval(feed_dict={
x: batch[0], y_: batch[1], keep_prob: 1.0})
print('step %d, training accuracy %g' % (i, train_accuracy))
train_step1.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0, A:Z1, B:U1, C:Z2, D:U2, E:Z3, F:U3, G:Z4, H:U4})
Z1 = sess.run(W_conv1) + U1
Z1 = projection(Z1, percent=P1)
U1 = U1 + sess.run(W_conv1) - Z1
Z2 = sess.run(W_conv2) + U2
Z2 = projection(Z2, percent=P2)
U2 = U2 + sess.run(W_conv2) - Z2
Z3 = sess.run(W_fc1) + U3
Z3 = projection(Z3, percent=P3)
U3 = U3 + sess.run(W_fc1) - Z3
Z4 = sess.run(W_fc2) + U4
Z4 = projection(Z4, percent=P4)
U4 = U4 + sess.run(W_fc2) - Z4
print('test accuracy %g' % accuracy.eval(feed_dict={
x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
print(LA.norm(sess.run(W_conv1) - Z1))
print(LA.norm(sess.run(W_conv2) - Z2))
print(LA.norm(sess.run(W_fc1) - Z3))
print(LA.norm(sess.run(W_fc2) - Z4))
dense_w['conv1/W_conv1'] = W_conv1
dense_w['conv2/W_conv2'] = W_conv2
dense_w['fc1/W_fc1'] = W_fc1
dense_w['fc2/W_fc2'] = W_fc2
dict_nzidx = apply_prune(dense_w,sess)
print ("checking space dictionary")
print (dict_nzidx.keys())
grads = apply_prune_on_grads(grads,dict_nzidx)
apply_gradient_op = my_trainer.apply_gradients(grads)
for var in tf.global_variables():
if tf.is_variable_initialized(var).eval() == False:
sess.run(tf.variables_initializer([var]))
print ("start retraining after pruning")
for i in range(20000):
batch = mnist.train.next_batch(50)
if i % 100 == 0:
train_accuracy = accuracy.eval(feed_dict={
x: batch[0], y_: batch[1], keep_prob: 1.0})
print('step %d, training accuracy %g' % (i, train_accuracy))
apply_gradient_op.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
print('test accuracy %g' % accuracy.eval(feed_dict={
x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
print(np.sum(sess.run(W_conv1)!=0))
print(np.sum(sess.run(W_conv2) != 0))
print(np.sum(sess.run(W_fc1) != 0))
print(np.sum(sess.run(W_fc2) != 0))
# do the saving.
saver = tf.train.Saver()
saver.save(sess,"./lenet_5_pruned_model.ckpt")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str,
default='/tmp/tensorflow/mnist/input_data',
help='Directory for storing input data')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
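The alternating loop above is the ADMM part: `train_step1` updates the weights against the augmented objective built by `create_admm_solver`, the `projection(...)` call is the Z-update (as in the ADMM weight-pruning formulation, a Euclidean projection onto the sparsity constraint, i.e. keep the largest-magnitude entries and zero the rest), and `U = U + W - Z` is the scaled dual update. `projection` itself lives in `prune_utility`, which is not included in this row; the sketch below is an assumed stand-in for what such a projection does (here `percent` is read as the fraction of weights to prune), not the repository's actual implementation:

```python
import numpy as np

def magnitude_projection(weights: np.ndarray, percent: float) -> np.ndarray:
    """Zero the smallest-magnitude `percent` fraction of entries, keep the rest."""
    flat = np.abs(weights).ravel()
    k = int(flat.size * percent)              # number of entries to prune
    if k == 0:
        return weights.copy()
    threshold = np.partition(flat, k - 1)[k - 1]
    return np.where(np.abs(weights) > threshold, weights, 0.0)

W = np.array([[0.10, -0.90, 0.05, 0.70],
              [-0.30, 0.02, 0.80, -0.60]])
print(magnitude_projection(W, percent=0.5))   # keeps -0.9, 0.7, 0.8, -0.6
```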
| language: en | language_score: 0.747761 | edu_score: 2.251371 | edu_int_score: 2 |
examples/optionsdata.py | victorjourne/ezibpy | 296 | 6630255 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# ezIBpy: a Pythonic Client for Interactive Brokers API
# https://github.com/ranaroussi/ezibpy
#
# Copyright 2015 <NAME>
#
# Licensed under the GNU Lesser General Public License, v3.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.gnu.org/licenses/lgpl-3.0.en.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ezibpy
import time
# initialize ezIBpy
ibConn = ezibpy.ezIBpy()
# connect to IB (7496/7497 = TWS, 4001 = IBGateway)
ibConn.connect(clientId=100, host="localhost", port=4001)
# create some contracts using dedicated methods
put = ibConn.createOptionContract("AAPL", expiry="20161021", strike=117.0, otype="PUT")
call = ibConn.createOptionContract("AAPL", expiry="20161021", strike=117.0, otype="CALL")
# request market data for all created contracts
ibConn.requestMarketData()
# wait 30 seconds
time.sleep(30)
# print market data
print("Options Data")
print(ibConn.optionsData)
# cancel market data request & disconnect
ibConn.cancelMarketData()
ibConn.disconnect()
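A small variation on the script above, reusing only the calls it already demonstrates, to sketch how a short strike ladder could be requested (the strikes and expiry are illustrative placeholders):

```python
# assumes ibConn is connected as in the script above
strikes = [115.0, 116.0, 117.0, 118.0]
for strike in strikes:
    for otype in ("PUT", "CALL"):
        ibConn.createOptionContract("AAPL", expiry="20161021", strike=strike, otype=otype)

ibConn.requestMarketData()   # requests data for every contract created so far
time.sleep(30)
print(ibConn.optionsData)
```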
| language: en | language_score: 0.792779 | edu_score: 2.354981 | edu_int_score: 2 |
src/vsc/rand_obj.py | fvutils/pyvsc | 54 | 6630256 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Created on Jul 23, 2019
#
# @author: ballance
import inspect
from vsc.impl.randobj_int import RandObjInt
from vsc.constraints import constraint_t, dynamic_constraint_t
from vsc.impl.ctor import push_constraint_scope, pop_constraint_scope, \
clear_exprs, push_srcinfo_mode, pop_srcinfo_mode, in_srcinfo_mode
from vsc.impl.generator_int import GeneratorInt
from vsc.impl.expr_mode import _expr_mode, get_expr_mode, expr_mode, get_expr_mode_depth, \
enter_expr_mode, leave_expr_mode, is_raw_mode, is_expr_mode
from vsc.model.field_composite_model import FieldCompositeModel
from vsc.model.constraint_block_model import ConstraintBlockModel
from vsc.model.randomizer import Randomizer
from vsc.model.field_scalar_model import FieldScalarModel
from vsc.model.source_info import SourceInfo
from vsc.types import type_base, field_info, list_t
from vsc.model.solve_failure import SolveFailure
from vsc.impl.constraint_proxy import ConstraintProxy
class _randobj:
"""Mark a class as randomizable"""
def __init__(self, kwargs):
self.srcinfo = False
for kw in kwargs.keys():
if kw == "srcinfo":
self.srcinfo = kwargs[kw]
else:
raise Exception("Unknown randobj kwarg: %s" % kw)
def __call__(self, T):
srcinfo = self.srcinfo
class randobj_interposer(T):
def __init__(self, *args, **kwargs):
ro_i = self._get_ro_int()
ro_i.srcinfo = srcinfo
# Capture the instantiation location
frame = inspect.stack()[1]
ro_i.srcinfo_inst = SourceInfo(frame.filename, frame.lineno)
# Initialize the field_info member before going deeper
if ro_i.ctor_level == 0:
self.tname = T.__qualname__
self._int_field_info = field_info()
# Decide whether to record sourceinfo for this class
push_srcinfo_mode(srcinfo)
# Call the user's constructor
ro_i.ctor_level += 1
super().__init__(*args, **kwargs)
ro_i.ctor_level -= 1
if ro_i.ctor_level == 0:
self.build_field_model(None)
pop_srcinfo_mode()
# Add the interposer class
ret = type(T.__name__, (randobj_interposer,), dict())
if not hasattr(T, "_ro_init"):
def __getattribute__(self, a):
ret = object.__getattribute__(self, a)
if isinstance(ret, type_base) and not is_raw_mode():
# We're not in an expression, so the user
# wants the value of this field
ret = ret.get_val()
elif a == "rand_mode":
ret = self._int_rand_info.rand_mode
elif isinstance(ret, (constraint_t,dynamic_constraint_t)):
if not is_expr_mode():
# The constraint_t wrapper is per-type. In regular
# procedural code we need to return a reference
# to the instance object. The proxy provides a
# way to do so.
model = object.__getattribute__(self, "get_model")()
cm = model.get_constraint(a)
ret = ConstraintProxy(cm)
return ret
def __setattr__(self, field, val):
try:
# Retrieve the field object so we can check if it's
# a type_base object. This will throw an exception
# if the field doesn't exist
fo = object.__getattribute__(self, field)
except:
object.__setattr__(self, field, val)
else:
if isinstance(fo, type_base):
if not is_raw_mode():
# We're not in an expression context, so the
# user really wants us to set the actual value
# of the field
if isinstance(val, type_base):
# Looks like we're re-assigning it.
if self._get_ro_int().ctor_level > 0:
object.__setattr__(self, field, val)
else:
raise Exception("Cannot re-construct field")
else:
fo.set_val(val)
else:
raise Exception("Attempting to use '=' in a constraint")
elif isinstance(fo, list_t):
fo.clear()
for i in val:
fo.append(i)
elif field == "rand_mode":
self._int_rand_info.rand_mode = bool(val)
else:
object.__setattr__(self, field, val)
def randomize(self,
debug=0,
lint=0,
solve_fail_debug=0):
frame = inspect.stack()[1]
model = self.get_model()
try:
Randomizer.do_randomize(
SourceInfo(frame.filename, frame.lineno),
[model],
debug=debug,
lint=lint,
solve_fail_debug=solve_fail_debug)
except SolveFailure as e:
print(e.diagnostics)
raise e
def build_field_model(self, name):
if self._int_field_info.model is None:
model = FieldCompositeModel(name, self._int_field_info.is_rand, self)
model.typename = T.__qualname__
self._int_field_info.model = model
# Iterate through the fields and constraints
# First, assign IDs to each of the randomized fields
with expr_mode():
for f in dir(self):
if not f.startswith("__") and not f.startswith("_int"):
fo = getattr(self, f)
if hasattr(fo, "_int_field_info"):
if fo._int_field_info.model is None:
fo._int_field_info.model = fo.build_field_model(f)
else:
# Some fields may already be created, and will
# have been given a placeholder name. Back-annotate
# the proper name now
fo._int_field_info.model.name = f
fo._int_field_info.parent = self._int_field_info
model.add_field(fo._int_field_info.model)
# Now, elaborate the constraints
for f in dir(self):
if not f.startswith("__") and not f.startswith("_int"):
fo = getattr(self, f)
if isinstance(fo, constraint_t):
clear_exprs()
block = ConstraintBlockModel(f)
block.srcinfo = fo.srcinfo
push_constraint_scope(block)
try:
fo.c(self)
except Exception as e:
print("Exception while processing constraint: " + str(e))
raise e
fo.set_model(pop_constraint_scope())
model.add_constraint(fo.model)
clear_exprs()
elif isinstance(fo, dynamic_constraint_t):
clear_exprs()
block = ConstraintBlockModel(f)
block.srcinfo = fo.srcinfo
push_constraint_scope(block)
try:
fo.c(self)
except Exception as e:
print("Exception while processing constraint: " + str(e))
raise e
fo.set_model(pop_constraint_scope())
fo.model.is_dynamic = True
model.add_dynamic_constraint(fo.model)
clear_exprs()
self._int_field_info.model.name = name
return self._int_field_info.model
def get_model(self):
with expr_mode():
if self._int_field_info.model is None:
self._int_field_info.model = self.build_field_model(None)
return self._int_field_info.model
def _get_ro_int(self):
if not hasattr(self, "_ro_int"):
self._ro_int = RandObjInt()
return self._ro_int
def __enter__(self):
ro_i = self._get_ro_int()
enter_expr_mode()
self.get_model() # Ensure model is constructed
push_srcinfo_mode(ro_i.srcinfo)
push_constraint_scope(ConstraintBlockModel("inline"))
return self
def __exit__(self, t, v, tb):
frame = inspect.stack()[1]
c = pop_constraint_scope()
leave_expr_mode()
pop_srcinfo_mode()
model = self.get_model() # Ensure model is constructed
try:
Randomizer.do_randomize(
SourceInfo(frame.filename, frame.lineno),
[model],
[c],
debug=self.debug,
lint=self.lint,
solve_fail_debug=self.solve_fail_debug)
except SolveFailure as e:
print(e.diagnostics)
raise e
def randomize_with(self,
debug=0,
lint=0,
solve_fail_debug=0):
# Ensure the 'model' data structures have been built
self.get_model()
self.debug = debug
self.lint = lint
self.solve_fail_debug = solve_fail_debug
return self
def do_pre_randomize(self):
if hasattr(self, "pre_randomize"):
self.pre_randomize()
def do_post_randomize(self):
if hasattr(self, "post_randomize"):
self.post_randomize()
def _id_fields(self, it, parent):
"""Apply an ID to all fields so they can be
referenced using indexed expressions
"""
it._int_field_info.parent = parent
fid = 0
for fn in dir(it):
fo = getattr(it, fn)
if hasattr(fo, "_int_field_info"):
fi = fo._int_field_info
fi.id = fid
fi.parent = it._int_field_info
fid += 1
if fi.is_composite:
self._id_fields(fo, fi)
setattr(T, "__getattribute__", __getattribute__)
setattr(T, "__setattr__", __setattr__)
setattr(T, "randomize", randomize)
setattr(T, "randomize_with", randomize_with)
setattr(T, "build_field_model", build_field_model)
setattr(T, "get_model", get_model)
setattr(T, "_get_ro_int", _get_ro_int)
setattr(T, "__enter__", __enter__)
setattr(T, "__exit__", __exit__)
setattr(T, "do_pre_randomize", do_pre_randomize)
setattr(T, "do_post_randomize", do_post_randomize)
setattr(T, "_id_fields", _id_fields)
setattr(T, "_ro_init", True)
return ret
def randobj(*args, **kwargs):
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
# Called without arguments
obj = _randobj({})
return obj(args[0])
else:
obj = _randobj(kwargs)
return obj
def generator(T):
"""Mark a class as a generator"""
class generator_interposer(T):
def __init__(self, *args, **kwargs):
gen_i = self._get_int()
# Capture the instantiation location
frame = inspect.stack()[1]
gen_i.srcinfo_inst = SourceInfo(frame.filename, frame.lineno)
# Call the user's constructor
with gen_i:
super().__init__(*args, **kwargs)
self._int_field_info = field_info()
if gen_i.ctor_level == 0:
self.build_model()
pass
# Add the interposer class
ret = type(T.__name__, (generator_interposer,), dict())
if not hasattr(T, "_gen_init"):
def __getattribute__(self, a):
ret = object.__getattribute__(self, a)
if isinstance(ret, type_base) and not is_raw_mode():
# We're not in an expression, so the user
# wants the value of this field
ret = ret.get_val()
return ret
def __setattr__(self, field, val):
try:
# Retrieve the field object so we can check if it's
# a type_base object. This will throw an exception
# if the field doesn't exist
fo = object.__getattribute__(self, field)
except:
object.__setattr__(self, field, val)
else:
if isinstance(fo, type_base):
if not is_raw_mode():
# We're not in an expression context, so the
# user really wants us to set the actual value
# of the field
fo.set_val(val)
else:
raise Exception("Attempting to use '=' in a constraint")
else:
object.__setattr__(self, field, val)
def randomize(self):
model = self.get_model()
Randomizer.do_randomize([model])
def build_field_model(self, name):
if self._int_field_info.model is None:
model = FieldCompositeModel(name, self._int_field_info.is_rand, self)
model.typename = T.__name__
self._int_field_info.model = model
# Iterate through the fields and constraints
# First, assign IDs to each of the randomized fields
with expr_mode():
for f in dir(self):
if not f.startswith("__") and not f.startswith("_int"):
fo = getattr(self, f)
if hasattr(fo, "_int_field_info"):
if fo._int_field_info.model is None:
fo._int_field_info.model = fo.build_field_model(f)
model.add_field(fo._int_field_info.model)
# Now, elaborate the constraints
for f in dir(self):
if not f.startswith("__") and not f.startswith("_int"):
fo = getattr(self, f)
if isinstance(fo, constraint_t):
clear_exprs()
block = ConstraintBlockModel(f)
block.srcinfo = fo.srcinfo
push_constraint_scope(block)
try:
fo.c(self)
except Exception as e:
print("Exception while processing constraint: " + str(e))
raise e
fo.set_model(pop_constraint_scope())
model.add_constraint(fo.model)
clear_exprs()
self._int_field_info.model.name = name
return self._int_field_info.model
def get_model(self):
with expr_mode():
if self._int_field_info.model is None:
self._int_field_info.model = self.build_field_model(None)
return self._int_field_info.model
def _get_int(self):
if not hasattr(self, "_gen_int"):
self._gen_int = GeneratorInt()
return self._gen_int
setattr(T, "__getattribute__", __getattribute__)
setattr(T, "__setattr__", __setattr__)
setattr(T, "randomize", randomize)
# setattr(T, "randomize_with", randomize_with)
setattr(T, "build_field_model", build_field_model)
setattr(T, "get_model", get_model)
# setattr(T, "__enter__", __enter__)
# setattr(T, "__exit__", __exit__)
# setattr(T, "do_pre_randomize", do_pre_randomize)
# setattr(T, "do_post_randomize", do_post_randomize)
setattr(T, "_int_field_info", field_info(True))
setattr(T, "_get_int", _get_int)
setattr(T, "_ro_init", True)
return ret
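A short usage sketch of the `randobj` decorator defined above, written against the public `vsc` package API (the field helper `rand_bit_t` and the `vsc.constraint` decorator are assumed to be exported by the package, as in the pyvsc documentation):

```python
import vsc

@vsc.randobj
class my_item(object):
    def __init__(self):
        self.a = vsc.rand_bit_t(8)
        self.b = vsc.rand_bit_t(8)

    @vsc.constraint
    def ab_c(self):
        self.a < self.b

item = my_item()
item.randomize()          # builds the field/constraint model, then solves it
assert item.a < item.b    # reading item.a outside a constraint returns its value

with item.randomize_with() as it:   # inline constraints go through __enter__/__exit__
    it.a == 5
assert item.a == 5
```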
| language: en | language_score: 0.843287 | edu_score: 1.452931 | edu_int_score: 1 |
tests/pyre.pkg/filesystem/virtual_info.py | avalentino/pyre | 25 | 6630257 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# <NAME>. aïvázis
# orthologue
# (c) 1998-2021 all rights reserved
#
"""
Verify that the metadata associated with node are maintained properly
"""
def test():
# support
import pyre.primitives
# my package
import pyre.filesystem
# build a virtual filesystem
root = pyre.filesystem.virtual()
# and a couple of nodes
root['home/users'] = root.folder()
root['home/users/mga'] = root.folder()
# check their uris
assert str(root['home/users'].uri) == '/home/users'
assert str(root['home/users/mga'].uri) == '/home/users/mga'
# all done
return root
# main
if __name__ == "__main__":
# skip pyre initialization since we don't rely on the executive
pyre_noboot = True
# do...
test()
# end of file
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# <NAME>. aïvázis
# orthologue
# (c) 1998-2021 all rights reserved
#
"""
Verify that the metadata associated with node are maintained properly
"""
def test():
# support
import pyre.primitives
# my package
import pyre.filesystem
# build a virtual filesystem
root = pyre.filesystem.virtual()
# and a couple of nodes
root['home/users'] = root.folder()
root['home/users/mga'] = root.folder()
# check their uris
assert str(root['home/users'].uri) == '/home/users'
assert str(root['home/users/mga'].uri) == '/home/users/mga'
# all done
return root
# main
if __name__ == "__main__":
# skip pyre initialization since we don't rely on the executive
pyre_noboot = True
# do...
test()
# end of file
| en | 0.849946 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- # # <NAME>. aïvázis # orthologue # (c) 1998-2021 all rights reserved # Verify that the metadata associated with node are maintained properly # support # my package # build a virtual filesystem # and a couple of nodes # check their uris # all done # main # skip pyre initialization since we don't rely on the executive # do... # end of file | 2.157557 | 2 |
challenges/week_1/checker.py | Eric-Njoroge/python | 6 | 6630258 | import bus_fare_challenge as solution
import datetime
import unittest
class TestBusFareChallenge(unittest.TestCase):
def setUp(self) -> None:
self.date = datetime.datetime.now().date()
self.day = self.date.strftime("%a")
self.charts = {
"Mon": 100,
"Tue": 100,
"Wed": 100,
"Thu": 100,
"Fri": 100,
"Sat": 60,
"Sun": 80,
}
def test_date(self) -> None:
"""
Tests whether the date returned by the program is correct.
"""
actual = self.date
given = solution.date
self.assertEqual(actual, given, f"Today's date is Wrong by {given - actual}!")
def test_day(self) -> None:
"""
Tests whether the day returned by the program is correct.
"""
actual = self.day
given = solution.day
self.assertEqual(
actual, given, f"Today is wrong, expexted {actual} but got {given}!"
)
def test_fare(self) -> None:
"""
Tests whether the fare returned by the program is correct.
"""
actual = self.charts[self.day]
given = solution.fare
self.assertEqual(
actual, given, f"Fare is wrong, expected {actual} but got {given}!"
)
if __name__ == "__main__":
print("=========================================================================")
print("=========================================================================")
print("===== Start: Checking Return Values For Today's Date, Day and Fare =====")
unittest.main(exit=False)
print("===== End: Checking Return Values For Today's Date, Day and Fare =======")
print("=========================================================================")
| import bus_fare_challenge as solution
import datetime
import unittest
class TestBusFareChallenge(unittest.TestCase):
def setUp(self) -> None:
self.date = datetime.datetime.now().date()
self.day = self.date.strftime("%a")
self.charts = {
"Mon": 100,
"Tue": 100,
"Wed": 100,
"Thu": 100,
"Fri": 100,
"Sat": 60,
"Sun": 80,
}
def test_date(self) -> None:
"""
Tests whether the date returned by the program is correct.
"""
actual = self.date
given = solution.date
self.assertEqual(actual, given, f"Today's date is Wrong by {given - actual}!")
def test_day(self) -> None:
"""
Tests whether the day returned by the program is correct.
"""
actual = self.day
given = solution.day
self.assertEqual(
actual, given, f"Today is wrong, expexted {actual} but got {given}!"
)
def test_fare(self) -> None:
"""
Tests whether the fare returned by the program is correct.
"""
actual = self.charts[self.day]
given = solution.fare
self.assertEqual(
actual, given, f"Fare is wrong, expected {actual} but got {given}!"
)
if __name__ == "__main__":
print("=========================================================================")
print("=========================================================================")
print("===== Start: Checking Return Values For Today's Date, Day and Fare =====")
unittest.main(exit=False)
print("===== End: Checking Return Values For Today's Date, Day and Fare =======")
print("=========================================================================")
| en | 0.960739 | Tests whether the date returned by the program is correct. Tests whether the day returned by the program is correct. Tests whether the fare returned by the program is correct. | 3.77603 | 4 |
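The tests above import a bus_fare_challenge module that must expose module-level date, day and fare values matching the chart in setUp (100 on weekdays, 60 on Saturday, 80 on Sunday). The original solution module is not included here; a minimal sketch that would satisfy the checks could be:

# bus_fare_challenge.py (sketch; only the names date, day and fare are taken from the tests)
import datetime

date = datetime.datetime.now().date()
day = date.strftime("%a")
fare = {"Sat": 60, "Sun": 80}.get(day, 100)  # weekdays fall through to 100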
kaori/__init__.py | austinpray/kaori | 3 | 6630259 | from pathlib import Path
from .support.config import get_config
_test_config_path = Path(__file__).parent.joinpath('../config/kaori_test.py').absolute()
test_config = get_config(str(_test_config_path))
__all__ = ['test_config']
| from pathlib import Path
from .support.config import get_config
_test_config_path = Path(__file__).parent.joinpath('../config/kaori_test.py').absolute()
test_config = get_config(str(_test_config_path))
__all__ = ['test_config']
| none | 1 | 1.797832 | 2 |
|
ch10/errorExample.py | rfreiberger/Automate-the-Boring-Stuff | 0 | 6630260 | <filename>ch10/errorExample.py<gh_stars>0
def spam():
bacon()
def bacon():
raise Exception('This is the error message.')
spam() | <filename>ch10/errorExample.py<gh_stars>0
def spam():
bacon()
def bacon():
raise Exception('This is the error message.')
spam() | none | 1 | 1.895998 | 2 |
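The snippet above exists to show an unhandled exception propagating up the call stack. As a companion (not part of the original file), the same call can be wrapped so the error is reported without terminating the program:

import traceback

def spam():
    bacon()

def bacon():
    raise Exception('This is the error message.')

try:
    spam()
except Exception:
    traceback.print_exc()  # prints the full traceback but lets the script continue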
|
exam_practice/encapsulation.py | IroniX2/python-exercises | 0 | 6630261 | # Bad way
class Testing:
def __init__(self, x, y):
self.__set_x(x)
self.__set_y(y)
# Getters and Setters have to be private or else we have two ways -
# of doing something (not pythonic)
def __get_x(self):
return self.__x
def __set_x(self, x):
if x < 0:
self.__x = 0
elif x > 1000:
self.__x = 1000
else:
self.__x = x
def __get_y(self):
return self.__y
def __set_y(self, y):
self.__y = y
# Fix for bad setup to be able to call var.x -
# instead of var.get_x()
x = property(__get_x, __set_x)
y = property(__get_y, __set_y)
testing = Testing(10001, 2)
print("Testing:", testing.x, testing.y)
# Proper Properties way
class Tester:
def __init__(self, x, y):
self.x = x
self.y = y
@property
def x(self):
return self.__x
@x.setter
def x(self, x):
if x < 0:
self.__x = 0
elif x > 1000:
self.__x = 1000
else:
self.__x = x
@property
def y(self):
return self.__y
@y.setter
def y(self, y):
self.__y = y
tester = Tester(10001, 2)
print(type(tester.x))
print("Tester:", tester.x, tester.y)
| # Bad way
class Testing:
def __init__(self, x, y):
self.__set_x(x)
self.__set_y(y)
# Getters and Setters have to be private or else we have two ways -
# of doing something (not pythonic)
def __get_x(self):
return self.__x
def __set_x(self, x):
if x < 0:
self.__x = 0
elif x > 1000:
self.__x = 1000
else:
self.__x = x
def __get_y(self):
return self.__y
def __set_y(self, y):
self.__y = y
# Fix for bad setup to be able to call var.x -
# instead of var.get_x()
x = property(__get_x, __set_x)
y = property(__get_y, __set_y)
testing = Testing(10001, 2)
print("Testing:", testing.x, testing.y)
# Proper Properties way
class Tester:
def __init__(self, x, y):
self.x = x
self.y = y
@property
def x(self):
return self.__x
@x.setter
def x(self, x):
if x < 0:
self.__x = 0
elif x > 1000:
self.__x = 1000
else:
self.__x = x
@property
def y(self):
return self.__y
@y.setter
def y(self, y):
self.__y = y
tester = Tester(10001, 2)
print(type(tester.x))
print("Tester:", tester.x, tester.y)
| en | 0.836432 | # Bad way # Getters and Setters have to be private or else we have two ways - # of doing something (not pythonic) # Fix for bad setup to be able to call var.x - # instead of var.get_x() # Proper Properties way | 3.798641 | 4 |
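Both variants clamp x to the range 0..1000 inside the setter. A quick check (not in the original file) makes that behaviour of the property-based version explicit:

t = Tester(500, 2)
t.x = -5         # the setter clamps negative values up to 0
assert t.x == 0
t.x = 2500       # and values above 1000 down to 1000
assert t.x == 1000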
bop_toolkit_lib/visibility.py | gist-ailab/bop_toolkit | 201 | 6630262 | <reponame>gist-ailab/bop_toolkit<gh_stars>100-1000
# Author: <NAME> (<EMAIL>)
# Center for Machine Perception, Czech Technical University in Prague
"""Estimation of the visible object surface from depth images."""
import numpy as np
def _estimate_visib_mask(d_test, d_model, delta, visib_mode='bop19'):
"""Estimates a mask of the visible object surface.
:param d_test: Distance image of a scene in which the visibility is estimated.
:param d_model: Rendered distance image of the object model.
:param delta: Tolerance used in the visibility test.
:param visib_mode: Visibility mode:
1) 'bop18' - Object is considered NOT VISIBLE at pixels with missing depth.
2) 'bop19' - Object is considered VISIBLE at pixels with missing depth. This
allows to use the VSD pose error function also on shiny objects, which
are typically not captured well by the depth sensors. A possible problem
with this mode is that some invisible parts can be considered visible.
However, the shadows of missing depth measurements, where this problem is
expected to appear and which are often present at depth discontinuities,
are typically relatively narrow and therefore this problem is less
significant.
:return: Visibility mask.
"""
assert (d_test.shape == d_model.shape)
if visib_mode == 'bop18':
mask_valid = np.logical_and(d_test > 0, d_model > 0)
d_diff = d_model.astype(np.float32) - d_test.astype(np.float32)
visib_mask = np.logical_and(d_diff <= delta, mask_valid)
elif visib_mode == 'bop19':
d_diff = d_model.astype(np.float32) - d_test.astype(np.float32)
visib_mask = np.logical_and(
np.logical_or(d_diff <= delta, d_test == 0), d_model > 0)
else:
raise ValueError('Unknown visibility mode.')
return visib_mask
def estimate_visib_mask_gt(d_test, d_gt, delta, visib_mode='bop19'):
"""Estimates a mask of the visible object surface in the ground-truth pose.
:param d_test: Distance image of a scene in which the visibility is estimated.
:param d_gt: Rendered distance image of the object model in the GT pose.
:param delta: Tolerance used in the visibility test.
:param visib_mode: See _estimate_visib_mask.
:return: Visibility mask.
"""
visib_gt = _estimate_visib_mask(d_test, d_gt, delta, visib_mode)
return visib_gt
def estimate_visib_mask_est(d_test, d_est, visib_gt, delta, visib_mode='bop19'):
"""Estimates a mask of the visible object surface in the estimated pose.
For an explanation of why the visibility mask is calculated differently for
the estimated and the ground-truth pose, see equation (14) and related text in
Hodan et al., On Evaluation of 6D Object Pose Estimation, ECCVW'16.
:param d_test: Distance image of a scene in which the visibility is estimated.
:param d_est: Rendered distance image of the object model in the est. pose.
:param visib_gt: Visibility mask of the object model in the GT pose (from
function estimate_visib_mask_gt).
:param delta: Tolerance used in the visibility test.
:param visib_mode: See _estimate_visib_mask.
:return: Visibility mask.
"""
visib_est = _estimate_visib_mask(d_test, d_est, delta, visib_mode)
visib_est = np.logical_or(visib_est, np.logical_and(visib_gt, d_est > 0))
return visib_est
| # Author: <NAME> (<EMAIL>)
# Center for Machine Perception, Czech Technical University in Prague
"""Estimation of the visible object surface from depth images."""
import numpy as np
def _estimate_visib_mask(d_test, d_model, delta, visib_mode='bop19'):
"""Estimates a mask of the visible object surface.
:param d_test: Distance image of a scene in which the visibility is estimated.
:param d_model: Rendered distance image of the object model.
:param delta: Tolerance used in the visibility test.
:param visib_mode: Visibility mode:
1) 'bop18' - Object is considered NOT VISIBLE at pixels with missing depth.
2) 'bop19' - Object is considered VISIBLE at pixels with missing depth. This
allows to use the VSD pose error function also on shiny objects, which
are typically not captured well by the depth sensors. A possible problem
with this mode is that some invisible parts can be considered visible.
However, the shadows of missing depth measurements, where this problem is
expected to appear and which are often present at depth discontinuities,
are typically relatively narrow and therefore this problem is less
significant.
:return: Visibility mask.
"""
assert (d_test.shape == d_model.shape)
if visib_mode == 'bop18':
mask_valid = np.logical_and(d_test > 0, d_model > 0)
d_diff = d_model.astype(np.float32) - d_test.astype(np.float32)
visib_mask = np.logical_and(d_diff <= delta, mask_valid)
elif visib_mode == 'bop19':
d_diff = d_model.astype(np.float32) - d_test.astype(np.float32)
visib_mask = np.logical_and(
np.logical_or(d_diff <= delta, d_test == 0), d_model > 0)
else:
raise ValueError('Unknown visibility mode.')
return visib_mask
def estimate_visib_mask_gt(d_test, d_gt, delta, visib_mode='bop19'):
"""Estimates a mask of the visible object surface in the ground-truth pose.
:param d_test: Distance image of a scene in which the visibility is estimated.
:param d_gt: Rendered distance image of the object model in the GT pose.
:param delta: Tolerance used in the visibility test.
:param visib_mode: See _estimate_visib_mask.
:return: Visibility mask.
"""
visib_gt = _estimate_visib_mask(d_test, d_gt, delta, visib_mode)
return visib_gt
def estimate_visib_mask_est(d_test, d_est, visib_gt, delta, visib_mode='bop19'):
"""Estimates a mask of the visible object surface in the estimated pose.
For an explanation of why the visibility mask is calculated differently for
the estimated and the ground-truth pose, see equation (14) and related text in
Hodan et al., On Evaluation of 6D Object Pose Estimation, ECCVW'16.
:param d_test: Distance image of a scene in which the visibility is estimated.
:param d_est: Rendered distance image of the object model in the est. pose.
:param visib_gt: Visibility mask of the object model in the GT pose (from
function estimate_visib_mask_gt).
:param delta: Tolerance used in the visibility test.
:param visib_mode: See _estimate_visib_mask.
:return: Visibility mask.
"""
visib_est = _estimate_visib_mask(d_test, d_est, delta, visib_mode)
visib_est = np.logical_or(visib_est, np.logical_and(visib_gt, d_est > 0))
return visib_est | en | 0.863666 | # Author: <NAME> (<EMAIL>) # Center for Machine Perception, Czech Technical University in Prague Estimation of the visible object surface from depth images. Estimates a mask of the visible object surface. :param d_test: Distance image of a scene in which the visibility is estimated. :param d_model: Rendered distance image of the object model. :param delta: Tolerance used in the visibility test. :param visib_mode: Visibility mode: 1) 'bop18' - Object is considered NOT VISIBLE at pixels with missing depth. 2) 'bop19' - Object is considered VISIBLE at pixels with missing depth. This allows to use the VSD pose error function also on shiny objects, which are typically not captured well by the depth sensors. A possible problem with this mode is that some invisible parts can be considered visible. However, the shadows of missing depth measurements, where this problem is expected to appear and which are often present at depth discontinuities, are typically relatively narrow and therefore this problem is less significant. :return: Visibility mask. Estimates a mask of the visible object surface in the ground-truth pose. :param d_test: Distance image of a scene in which the visibility is estimated. :param d_gt: Rendered distance image of the object model in the GT pose. :param delta: Tolerance used in the visibility test. :param visib_mode: See _estimate_visib_mask. :return: Visibility mask. Estimates a mask of the visible object surface in the estimated pose. For an explanation of why the visibility mask is calculated differently for the estimated and the ground-truth pose, see equation (14) and related text in Hodan et al., On Evaluation of 6D Object Pose Estimation, ECCVW'16. :param d_test: Distance image of a scene in which the visibility is estimated. :param d_est: Rendered distance image of the object model in the est. pose. :param visib_gt: Visibility mask of the object model in the GT pose (from function estimate_visib_mask_gt). :param delta: Tolerance used in the visibility test. :param visib_mode: See _estimate_visib_mask. :return: Visibility mask. | 2.791266 | 3 |
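With the functions above in scope, a tiny synthetic example (not from the toolkit) shows the practical difference between the two visibility modes on a pixel with missing depth; distances are in arbitrary units and delta is the tolerance:

import numpy as np

d_test = np.array([[10., 10., 0.]])   # scene depth; the last pixel has no depth measurement
d_gt = np.array([[10., 15., 11.]])    # model rendered in the GT pose

print(estimate_visib_mask_gt(d_test, d_gt, delta=1.0, visib_mode='bop19'))
# [[ True False  True]] - missing depth counts as visible in 'bop19'
print(estimate_visib_mask_gt(d_test, d_gt, delta=1.0, visib_mode='bop18'))
# [[ True False False]] - and as not visible in 'bop18'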
tools.py | n3llo/test_dinasty | 0 | 6630263 | <gh_stars>0
'''
Package for the automatic computing of scores and rankings for the Play.it Dinasty keeper league
<NAME> (<EMAIL>)
'''
import argparse
import os
import tools
def parse_arguments():
'''
Parse arguments
'''
# Initialize parser
parser = argparse.ArgumentParser()
# Parse arguments
parser.add_argument('--games', dest='games', help='Select the range of games whose scores will be retrieved')
parser.add_argument('--retrieve_scores', dest='retrieve_scores', default=False, action='store_true', help='Retrieve scores for the selected games')
parser.add_argument('--print_rankings', dest='print_rankings', default=False, action='store_true', help='Print rankings updated to the last game retrieved')
parser.add_argument('--run_dir', dest='run_dir', default=os.getcwd(), help='Select the directory containing the .py files')
parser.add_argument('--data_dir', dest='data_dir', default=os.getcwd(), help='')
parser.add_argument('--home_bonus_score', dest='home_bonus_score', type=int, default=5, help='')
parser.add_argument('--n_best_scores', dest='n_best_scores', type=int, default=10, help='')
parser.add_argument('--league_id', dest='league_id', type=int, default=170806, help='')
return parser.parse_args()
def set_up_run(args):
'''
Create file and folder names
'''
args.data_file = os.path.join(args.run_dir, 'dinasty.yaml')
args.schedule_file = os.path.join(args.run_dir, 'schedule_2016-17.yaml')
args.league_url = 'http://basketball.sports.ws/game/%d' % args.league_id
args.scores_dir = os.path.join(args.data_dir, 'scores')
args.forum_games_dir = os.path.join(args.data_dir, 'forum-games')
args.games_dir = os.path.join(args.data_dir, 'games')
args.stats_dir = os.path.join(args.data_dir, 'stats')
if args.retrieve_scores:
r = args.games.split(',')
if len(r) == 2:
args.range = range(int(r[0]), int(r[1]) + 1)
tools.make_directory(args.data_dir)
tools.make_directory(args.scores_dir)
tools.make_directory(args.stats_dir)
tools.make_directory(args.forum_games_dir)
tools.make_directory(args.games_dir)
return args
def make_directory(dir_name):
'''
Create directory if it does not exist yet
'''
if not os.path.isdir(dir_name):
os.mkdir(dir_name)
| '''
Package for the automatic computing of scores and rankings for the Play.it Dinasty keeper league
<NAME> (<EMAIL>)
'''
import argparse
import os
import tools
def parse_arguments():
'''
Parse arguments
'''
# Initialize parser
parser = argparse.ArgumentParser()
# Parse arguments
parser.add_argument('--games', dest='games', help='Select the range of games whose scores will be retrieved')
parser.add_argument('--retrieve_scores', dest='retrieve_scores', default=False, action='store_true', help='Retrieve scores for the selected games')
parser.add_argument('--print_rankings', dest='print_rankings', default=False, action='store_true', help='Print rankings updated to the last game retrieved')
parser.add_argument('--run_dir', dest='run_dir', default=os.getcwd(), help='Select the directory containing the .py files')
parser.add_argument('--data_dir', dest='data_dir', default=os.getcwd(), help='')
parser.add_argument('--home_bonus_score', dest='home_bonus_score', type=int, default=5, help='')
parser.add_argument('--n_best_scores', dest='n_best_scores', type=int, default=10, help='')
parser.add_argument('--league_id', dest='league_id', type=int, default=170806, help='')
return parser.parse_args()
def set_up_run(args):
'''
Create file and folder names
'''
args.data_file = os.path.join(args.run_dir, 'dinasty.yaml')
args.schedule_file = os.path.join(args.run_dir, 'schedule_2016-17.yaml')
args.league_url = 'http://basketball.sports.ws/game/%d' % args.league_id
args.scores_dir = os.path.join(args.data_dir, 'scores')
args.forum_games_dir = os.path.join(args.data_dir, 'forum-games')
args.games_dir = os.path.join(args.data_dir, 'games')
args.stats_dir = os.path.join(args.data_dir, 'stats')
if args.retrieve_scores:
r = args.games.split(',')
if len(r) == 2:
args.range = range(int(r[0]), int(r[1]) + 1)
tools.make_directory(args.data_dir)
tools.make_directory(args.scores_dir)
tools.make_directory(args.stats_dir)
tools.make_directory(args.forum_games_dir)
tools.make_directory(args.games_dir)
return args
def make_directory(dir_name):
'''
Create directory if it does not exist yet
'''
if not os.path.isdir(dir_name):
os.mkdir(dir_name) | en | 0.638718 | Package for the automatic computing of scores and rankings for the Play.it Dinasty keeper league <NAME> (<EMAIL>) Parse arguments # Initialize parser # Parse arguments Create file and folder names Create directory if it does not exist yet | 2.99371 | 3 |
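The module's real entry point is not part of this file, so the call site below is an assumption; it only shows how the two helpers above are meant to compose:

args = parse_arguments()
args = set_up_run(args)  # fills in file paths and creates the data directories
print(args.data_file, args.scores_dir)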
examples/request_init_listener.py | clohfink/python-driver | 1,163 | 6630264 | <gh_stars>1000+
#!/usr/bin/env python
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script shows an example "request init listener" which can be registered to track certain request metrics
# for a session. In this case we're just accumulating total request and error counts, as well as some statistics
# about the encoded request size. Note that the counts would be available using the internal 'metrics' tracking --
# this is just demonstrating a way to track a few custom attributes.
from __future__ import print_function
from cassandra.cluster import Cluster
from greplin import scales
import pprint
pp = pprint.PrettyPrinter(indent=2)
class RequestAnalyzer(object):
"""
Class used to track request and error counts for a Session.
Also computes statistics on encoded request size.
"""
requests = scales.PmfStat('request size')
errors = scales.IntStat('errors')
def __init__(self, session):
scales.init(self, '/cassandra')
# each instance will be registered with a session, and receive a callback for each request generated
session.add_request_init_listener(self.on_request)
def on_request(self, rf):
# This callback is invoked each time a request is created, on the thread creating the request.
# We can use this to count events, or add callbacks
rf.add_callbacks(self.on_success, self.on_error, callback_args=(rf,), errback_args=(rf,))
def on_success(self, _, response_future):
# future callback on a successful request; just record the size
self.requests.addValue(response_future.request_encoded_size)
def on_error(self, _, response_future):
# future callback for failed; record size and increment errors
self.requests.addValue(response_future.request_encoded_size)
self.errors += 1
def __str__(self):
# just extracting request count from the size stats (which are recorded on all requests)
request_sizes = dict(self.requests)
count = request_sizes.pop('count')
return "%d requests (%d errors)\nRequest size statistics:\n%s" % (count, self.errors, pp.pformat(request_sizes))
# connect a session
session = Cluster().connect()
# attach a listener to this session
ra = RequestAnalyzer(session)
session.execute("SELECT release_version FROM system.local")
session.execute("SELECT release_version FROM system.local")
print(ra)
# 2 requests (0 errors)
# Request size statistics:
# { '75percentile': 74,
# '95percentile': 74,
# '98percentile': 74,
# '999percentile': 74,
# '99percentile': 74,
# 'max': 74,
# 'mean': 74.0,
# 'median': 74.0,
# 'min': 74,
# 'stddev': 0.0}
try:
# intentional error to show that count increase
session.execute("syntax err")
except Exception as e:
pass
print()
print(ra) # note: the counts are updated, but the stats are not because scales only updates every 20s
# 3 requests (1 errors)
# Request size statistics:
# { '75percentile': 74,
# '95percentile': 74,
# '98percentile': 74,
# '999percentile': 74,
# '99percentile': 74,
# 'max': 74,
# 'mean': 74.0,
# 'median': 74.0,
# 'min': 74,
# 'stddev': 0.0}
| #!/usr/bin/env python
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script shows an example "request init listener" which can be registered to track certain request metrics
# for a session. In this case we're just accumulating total request and error counts, as well as some statistics
# about the encoded request size. Note that the counts would be available using the internal 'metrics' tracking --
# this is just demonstrating a way to track a few custom attributes.
from __future__ import print_function
from cassandra.cluster import Cluster
from greplin import scales
import pprint
pp = pprint.PrettyPrinter(indent=2)
class RequestAnalyzer(object):
"""
Class used to track request and error counts for a Session.
Also computes statistics on encoded request size.
"""
requests = scales.PmfStat('request size')
errors = scales.IntStat('errors')
def __init__(self, session):
scales.init(self, '/cassandra')
# each instance will be registered with a session, and receive a callback for each request generated
session.add_request_init_listener(self.on_request)
def on_request(self, rf):
# This callback is invoked each time a request is created, on the thread creating the request.
# We can use this to count events, or add callbacks
rf.add_callbacks(self.on_success, self.on_error, callback_args=(rf,), errback_args=(rf,))
def on_success(self, _, response_future):
# future callback on a successful request; just record the size
self.requests.addValue(response_future.request_encoded_size)
def on_error(self, _, response_future):
# future callback for failed; record size and increment errors
self.requests.addValue(response_future.request_encoded_size)
self.errors += 1
def __str__(self):
# just extracting request count from the size stats (which are recorded on all requests)
request_sizes = dict(self.requests)
count = request_sizes.pop('count')
return "%d requests (%d errors)\nRequest size statistics:\n%s" % (count, self.errors, pp.pformat(request_sizes))
# connect a session
session = Cluster().connect()
# attach a listener to this session
ra = RequestAnalyzer(session)
session.execute("SELECT release_version FROM system.local")
session.execute("SELECT release_version FROM system.local")
print(ra)
# 2 requests (0 errors)
# Request size statistics:
# { '75percentile': 74,
# '95percentile': 74,
# '98percentile': 74,
# '999percentile': 74,
# '99percentile': 74,
# 'max': 74,
# 'mean': 74.0,
# 'median': 74.0,
# 'min': 74,
# 'stddev': 0.0}
try:
# intentional error to show that count increase
session.execute("syntax err")
except Exception as e:
pass
print()
print(ra) # note: the counts are updated, but the stats are not because scales only updates every 20s
# 3 requests (1 errors)
# Request size statistics:
# { '75percentile': 74,
# '95percentile': 74,
# '98percentile': 74,
# '999percentile': 74,
# '99percentile': 74,
# 'max': 74,
# 'mean': 74.0,
# 'median': 74.0,
# 'min': 74,
# 'stddev': 0.0} | en | 0.774112 | #!/usr/bin/env python # Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This script shows an example "request init listener" which can be registered to track certain request metrics # for a session. In this case we're just accumulating total request and error counts, as well as some statistics # about the encoded request size. Note that the counts would be available using the internal 'metrics' tracking -- # this is just demonstrating a way to track a few custom attributes. Class used to track request and error counts for a Session. Also computes statistics on encoded request size. # each instance will be registered with a session, and receive a callback for each request generated # This callback is invoked each time a request is created, on the thread creating the request. # We can use this to count events, or add callbacks # future callback on a successful request; just record the size # future callback for failed; record size and increment errors # just extracting request count from the size stats (which are recorded on all requests) # connect a session # attach a listener to this session # 2 requests (0 errors) # Request size statistics: # { '75percentile': 74, # '95percentile': 74, # '98percentile': 74, # '999percentile': 74, # '99percentile': 74, # 'max': 74, # 'mean': 74.0, # 'median': 74.0, # 'min': 74, # 'stddev': 0.0} # intentional error to show that count increase # note: the counts are updated, but the stats are not because scales only updates every 20s # 3 requests (1 errors) # Request size statistics: # { '75percentile': 74, # '95percentile': 74, # '98percentile': 74, # '999percentile': 74, # '99percentile': 74, # 'max': 74, # 'mean': 74.0, # 'median': 74.0, # 'min': 74, # 'stddev': 0.0} | 2.251937 | 2 |
metachecker/routes/meta.py | art101nft/metachecker | 2 | 6630265 | import requests
from flask import Blueprint, render_template
from flask import redirect, url_for
from flask_login import logout_user, login_required
from metachecker import config
bp = Blueprint('meta', 'meta')
@bp.route('/about')
def about():
return render_template('about.html')
@bp.route('/disconnect')
def disconnect():
logout_user()
return redirect(url_for('collection.index'))
@bp.route('/ipfs/<path:path>')
@login_required
def load_ipfs(path):
ipfs_uri = f'{config.IPFS_SERVER}/ipfs/{path}'
res = requests.get(ipfs_uri, timeout=60)
return res.content
| import requests
from flask import Blueprint, render_template
from flask import redirect, url_for
from flask_login import logout_user, login_required
from metachecker import config
bp = Blueprint('meta', 'meta')
@bp.route('/about')
def about():
return render_template('about.html')
@bp.route('/disconnect')
def disconnect():
logout_user()
return redirect(url_for('collection.index'))
@bp.route('/ipfs/<path:path>')
@login_required
def load_ipfs(path):
ipfs_uri = f'{config.IPFS_SERVER}/ipfs/{path}'
res = requests.get(ipfs_uri, timeout=60)
return res.content
| none | 1 | 2.014569 | 2 |
|
PythonDesafios/d029.py | adaatii/Python-Curso-em-Video- | 0 | 6630266 | <reponame>adaatii/Python-Curso-em-Video-
#Escreva um programa que leia a velocidade de um carro.
# Se ele ultrapassar 80Km/h, mostre uma mensagem dizendo
# que ele foi multado. A multa vai custar R$7,00 por cada
# Km acima do limite.
velo = float(input('Qual a velocidade do carro? '))
if velo > 80:
print('Multado! voce excedeu o limite de 80km/h')
multa = (velo-80)*7
print('Voce deve pagar uma multa de R${:.2f}!'.format(multa))
print('Tenha um bom dia, dirija com segurança!')
| #Escreva um programa que leia a velocidade de um carro.
# Se ele ultrapassar 80Km/h, mostre uma mensagem dizendo
# que ele foi multado. A multa vai custar R$7,00 por cada
# Km acima do limite.
velo = float(input('Qual a velocidade do carro? '))
if velo > 80:
print('Multado! voce excedeu o limite de 80km/h')
multa = (velo-80)*7
print('Voce deve pagar uma multa de R${:.2f}!'.format(multa))
print('Tenha um bom dia, dirija com segurança!') | pt | 0.99544 | #Escreva um programa que leia a velocidade de um carro. # Se ele ultrapassar 80Km/h, mostre uma mensagem dizendo # que ele foi multado. A multa vai custar R$7,00 por cada # Km acima do limite. | 3.873431 | 4 |
qcloudsdkcam/AttachUserPoliciesRequest.py | f3n9/qcloudcli | 0 | 6630267 | # -*- coding: utf-8 -*-
from qcloudsdkcore.request import Request
class AttachUserPoliciesRequest(Request):
def __init__(self):
super(AttachUserPoliciesRequest, self).__init__(
'cam', 'qcloudcliV1', 'AttachUserPolicies', 'cam.api.qcloud.com')
def get_uin(self):
return self.get_params().get('uin')
def set_uin(self, uin):
self.add_param('uin', uin)
| # -*- coding: utf-8 -*-
from qcloudsdkcore.request import Request
class AttachUserPoliciesRequest(Request):
def __init__(self):
super(AttachUserPoliciesRequest, self).__init__(
'cam', 'qcloudcliV1', 'AttachUserPolicies', 'cam.api.qcloud.com')
def get_uin(self):
return self.get_params().get('uin')
def set_uin(self, uin):
self.add_param('uin', uin)
| en | 0.769321 | # -*- coding: utf-8 -*- | 1.841276 | 2 |
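A minimal usage sketch of the request class above; only set_uin() and get_uin() are taken from this file, and the uin value is invented:

req = AttachUserPoliciesRequest()
req.set_uin('100000000001')  # hypothetical account uin
print(req.get_uin())         # -> '100000000001'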
benchmarks/benchmark_files.py | devincornell/sqlitedocuments | 1 | 6630268 | <gh_stars>1-10
#from .doctable import DocTable, DocTableRow
#from .util import Timer
import sys
sys.path.append('..')
import doctable
import pickle
import os
import typing
from dataclasses import dataclass, field
import random
import time
@doctable.schema(require_slots=False)
class TestObjBase:
idx: int = doctable.IDCol()
size: int = 10000000
def __post_init__(self):
if self.data is None:
self.data = [random.randrange(10**12)]*self.size
@dataclass
class TestObj1(TestObjBase):
data: list = doctable.Col(None)
@dataclass
class TestObj2(TestObjBase):
data: list = doctable.Col(None, coltype='picklefile', type_args=dict(folder='tmp'))
def run_benchmark(num_vals = 10):
tmp = doctable.TempFolder('tmp')
timer = doctable.Timer('creating databases', logfile=tmp.joinpath('log.txt'))
db1 = doctable.DocTable(schema=TestObj1, target=tmp.joinpath('1.db'), new_db=True)
db2 = doctable.DocTable(schema=TestObj2, target=tmp.joinpath('2.db'), new_db=True)
db2.clean_col_files('data')
timer.step('creating synthetic data')
data1 = [TestObj1(i) for i in range(num_vals)]
data2 = [TestObj2(i) for i in range(num_vals)]
timer.step('insert into table directly')
db1.insert(data1)
timer.step('insert into a column file')
db2.insert(data2)
timer.step('finished inserting')
print(f'===========================================')
print(f'===== Total took: {timer.total_diff()} =================')
print(f'===========================================')
#timer.print_table()
if __name__ == '__main__':
run_benchmark()
| #from .doctable import DocTable, DocTableRow
#from .util import Timer
import sys
sys.path.append('..')
import doctable
import pickle
import os
import typing
from dataclasses import dataclass, field
import random
import time
@doctable.schema(require_slots=False)
class TestObjBase:
idx: int = doctable.IDCol()
size: int = 10000000
def __post_init__(self):
if self.data is None:
self.data = [random.randrange(10**12)]*self.size
@dataclass
class TestObj1(TestObjBase):
data: list = doctable.Col(None)
@dataclass
class TestObj2(TestObjBase):
data: list = doctable.Col(None, coltype='picklefile', type_args=dict(folder='tmp'))
def run_benchmark(num_vals = 10):
tmp = doctable.TempFolder('tmp')
timer = doctable.Timer('creating databases', logfile=tmp.joinpath('log.txt'))
db1 = doctable.DocTable(schema=TestObj1, target=tmp.joinpath('1.db'), new_db=True)
db2 = doctable.DocTable(schema=TestObj2, target=tmp.joinpath('2.db'), new_db=True)
db2.clean_col_files('data')
timer.step('creating synthetic data')
data1 = [TestObj1(i) for i in range(num_vals)]
data2 = [TestObj2(i) for i in range(num_vals)]
timer.step('insert into table directly')
db1.insert(data1)
timer.step('insert into a column file')
db2.insert(data2)
timer.step('finished inserting')
print(f'===========================================')
print(f'===== Total took: {timer.total_diff()} =================')
print(f'===========================================')
#timer.print_table()
if __name__ == '__main__':
run_benchmark() | fa | 0.095698 | #from .doctable import DocTable, DocTableRow #from .util import Timer #timer.print_table() | 2.373705 | 2 |
backend/app.py | cbaron3/safewalks.io | 1 | 6630269 | <filename>backend/app.py<gh_stars>1-10
from flask import Flask, request, jsonify, make_response
import googlemaps
from datetime import datetime
import secrets
import json
import operator
import opendata
import urllib.request
import requests
import types
# Handling cross origin resource handling
# declare constants
HOST = '0.0.0.0'
PORT = 5000
import polyline
# initialize flask application
app = Flask(__name__)
from flask_cors import CORS
CORS(app, resources={r'/*': {'origins': '*'}})
# Google maps client
gmaps = googlemaps.Client(key=secrets.API_KEY)
# Debug variable
DEBUG = True
# Safest path
# Usage: IP:HOST/api/path?from=LAT,LON&to=LAT,LON
# Example: http://0.0.0.0:5000/api/path?from=43.004663,-81.276361&to=248 Trott Dr
@app.route('/api/path', methods=['GET'])
def safe_path():
print(request.args)
# Grab data from requests
start = request.args.get('from')
#print(start)
end = request.args.get('to')
end = gmaps.geocode(end)
end = (end[0]['geometry']['location']['lat'], end[0]['geometry']['location']['lng'])
if DEBUG:
print('Start coordinates: {}'.format(start))
print('End coordinates: {}'.format(end))
# Query Directions API from start to end
now = datetime.now()
routes = gmaps.directions(origin=start,
destination=end,
mode="walking",
alternatives=True,
departure_time=now)
print('Possible routes: {}'.format(len(routes)))
seen_lights = [None] * len(routes)
tracked_lights = [None] * len(routes)
all_lights = [None] * len(routes)
total_lights = [0] * len(routes)
for index, route in enumerate(routes):
# For every route, calculate the possible lights
ne_bound = (route['bounds']['northeast']['lat'], route['bounds']['northeast']['lng'])
sw_bound = (route['bounds']['southwest']['lat'], route['bounds']['southwest']['lng'])
available_lights = opendata.queryAreaLights(ne_bound, sw_bound, 0.00025)
all_lights[index] = available_lights
# Decode polyline for waypoints
waypoints = polyline.decode(route['overview_polyline']['points'])
# Add start and stop points to waypoints
#print(start)
if type(start) is not list:
start = start.split(',')
waypoints.insert(0, (float(start[0]),float(start[1]) ) )
waypoints.append((float(end[0]),float(end[1]) ) )
print('{} waypoints for route {}'.format(len(waypoints), index+1))
for i in range(len(waypoints) - 1):
point = waypoints[i]
next_point = waypoints[i+1]
in_range = opendata.getSeenLights(point, next_point, available_lights, 0.0005)
for rlight in in_range:
if not seen_lights[index]:
seen_lights[index] = set()
if not tracked_lights[index]:
tracked_lights[index] = list()
# If light has already been tracked for this path, don't track it again
if rlight['id'] not in seen_lights[index]:
# If not tracked, track it and increment the total count of lights for this route
seen_lights[index].add(rlight['id'])
tracked_lights[index].append(rlight)
total_lights[index] += (1 * rlight['head'])
max_light_density = 0
max_index = -1
max_dist = 0
print('Lights for each route: {}'.format(total_lights))
for i in range(len(total_lights)):
dist = routes[i]['legs'][0]['distance']['text']
dist = dist.split(' ')
dist = float( dist[0] )
total_lights[i] = total_lights[i]/dist
# Check lights per km
if total_lights[i] > max_light_density:
max_light_density = total_lights[i]
max_index = i
max_dist = dist
elif total_lights[i] == max_light_density:
# If same amount of light, only change max if new one is shorter
if dist < max_dist:
max_light_density = total_lights[i]
max_index = i
max_dist = dist
print('Weighted lights for each route: {}'.format(total_lights))
print('Best route: {}'.format(max_index+1))
# Return as a response a list of routes with their corresponding tracked lights and their bounding box lights and their safety rating
safety_result = []
for i in range(len(routes)):
ids = []
not_tracked = []
for tlight in tracked_lights[i]:
ids.append(tlight['id'])
for nlight in all_lights[i]:
if nlight['attributes']['OBJECTID'] not in ids:
not_tracked.append(nlight)
print(not_tracked)
safety_result.append( {
'rating': total_lights[i],
'polyline': routes[i]['overview_polyline']['points'],
'area_lights' : not_tracked,
'in_range_lights': tracked_lights[i]
} )
safety_result = json.dumps(safety_result)
resp = make_response(safety_result, 200)
resp.headers['Access-Control-Allow-Origin'] = '*'
resp.headers['Access-Control-Allow-Headers'] = 'Content-Type,Authorization'
return(resp)
if __name__ == '__main__':
app.run(host=HOST,
debug=True,
port=PORT) | <filename>backend/app.py<gh_stars>1-10
from flask import Flask, request, jsonify, make_response
import googlemaps
from datetime import datetime
import secrets
import json
import operator
import opendata
import urllib.request
import requests
import types
# Handling cross origin resource handling
# declare constants
HOST = '0.0.0.0'
PORT = 5000
import polyline
# initialize flask application
app = Flask(__name__)
from flask_cors import CORS
CORS(app, resources={r'/*': {'origins': '*'}})
# Google maps client
gmaps = googlemaps.Client(key=secrets.API_KEY)
# Debug variable
DEBUG = True
# Safest path
# Usage: IP:HOST/api/path?from=LAT,LON&to=LAT,LON
# Example: http://0.0.0.0:5000/api/path?from=43.004663,-81.276361&to=248 Trott Dr
@app.route('/api/path', methods=['GET'])
def safe_path():
print(request.args)
# Grab data from requests
start = request.args.get('from')
#print(start)
end = request.args.get('to')
end = gmaps.geocode(end)
end = (end[0]['geometry']['location']['lat'], end[0]['geometry']['location']['lng'])
if DEBUG:
print('Start coordinates: {}'.format(start))
print('End coordinates: {}'.format(end))
# Query Directions API from start to end
now = datetime.now()
routes = gmaps.directions(origin=start,
destination=end,
mode="walking",
alternatives=True,
departure_time=now)
print('Possible routes: {}'.format(len(routes)))
seen_lights = [None] * len(routes)
tracked_lights = [None] * len(routes)
all_lights = [None] * len(routes)
total_lights = [0] * len(routes)
for index, route in enumerate(routes):
# For every route, calculate the possible lights
ne_bound = (route['bounds']['northeast']['lat'], route['bounds']['northeast']['lng'])
sw_bound = (route['bounds']['southwest']['lat'], route['bounds']['southwest']['lng'])
available_lights = opendata.queryAreaLights(ne_bound, sw_bound, 0.00025)
all_lights[index] = available_lights
# Decode polyline for waypoints
waypoints = polyline.decode(route['overview_polyline']['points'])
# Add start and stop points to waypoints
#print(start)
if type(start) is not list:
start = start.split(',')
waypoints.insert(0, (float(start[0]),float(start[1]) ) )
waypoints.append((float(end[0]),float(end[1]) ) )
print('{} waypoints for route {}'.format(len(waypoints), index+1))
for i in range(len(waypoints) - 1):
point = waypoints[i]
next_point = waypoints[i+1]
in_range = opendata.getSeenLights(point, next_point, available_lights, 0.0005)
for rlight in in_range:
if not seen_lights[index]:
seen_lights[index] = set()
if not tracked_lights[index]:
tracked_lights[index] = list()
# If light has already been tracked for this path, don't track it again
if rlight['id'] not in seen_lights[index]:
# If not tracked, track it and increment the total count of lights for this route
seen_lights[index].add(rlight['id'])
tracked_lights[index].append(rlight)
total_lights[index] += (1 * rlight['head'])
max_light_density = 0
max_index = -1
max_dist = 0
print('Lights for each route: {}'.format(total_lights))
for i in range(len(total_lights)):
dist = routes[i]['legs'][0]['distance']['text']
dist = dist.split(' ')
dist = float( dist[0] )
total_lights[i] = total_lights[i]/dist
# Check lights per km
if total_lights[i] > max_light_density:
max_light_density = total_lights[i]
max_index = i
max_dist = dist
elif total_lights[i] == max_light_density:
# If same amount of light, only change max if new one is shorter
if dist < max_dist:
max_light_density = total_lights[i]
max_index = i
max_dist = dist
print('Weighted lights for each route: {}'.format(total_lights))
print('Best route: {}'.format(max_index+1))
# Return as a response a list of routes with their corresponding tracked lights and their bounding box lights and their safety rating
safety_result = []
for i in range(len(routes)):
ids = []
not_tracked = []
for tlight in tracked_lights[i]:
ids.append(tlight['id'])
for nlight in all_lights[i]:
if nlight['attributes']['OBJECTID'] not in ids:
not_tracked.append(nlight)
print(not_tracked)
safety_result.append( {
'rating': total_lights[i],
'polyline': routes[i]['overview_polyline']['points'],
'area_lights' : not_tracked,
'in_range_lights': tracked_lights[i]
} )
safety_result = json.dumps(safety_result)
resp = make_response(safety_result, 200)
resp.headers['Access-Control-Allow-Origin'] = '*'
resp.headers['Access-Control-Allow-Headers'] = 'Content-Type,Authorization'
return(resp)
if __name__ == '__main__':
app.run(host=HOST,
debug=True,
port=PORT) | en | 0.771797 | # Handling cross origin resource handling # declare constants # initialize flask application # Google maps client # Debug variable # Safest path # Usage: IP:HOST/api/path?from=LAT,LON&to=LAT,LON # Example: http://0.0.0.0:5000/api/path?from=43.004663,-81.276361&to=248 Trott Dr # Grab data from requests #print(start) # Query Directions API from start to end # For every route, calculate the possible lights # Decode polyline for waypoints # Add start and stop points to waypoints #print(start) # If light has already been tracked for this path, dont track it again # If not tracked, track it and increment the total count of lights for this route # Check lights per km # If same amount of light, only change max if new one is shorter # Return as a response a list of routes with their corresponding tracked lights and their bounding box lights and their safety rating | 2.779415 | 3 |
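The selection rule buried in the loop above (highest lights per km, ties broken by the shorter route) is easier to see in isolation. This stand-alone helper is not part of the app; it just mirrors that logic with sample numbers:

def pick_best_route(light_densities, distances_km):
    """Return the index with the highest lights/km; on ties prefer the shorter route."""
    best = 0
    for i in range(1, len(light_densities)):
        if (light_densities[i] > light_densities[best]
                or (light_densities[i] == light_densities[best]
                    and distances_km[i] < distances_km[best])):
            best = i
    return best

print(pick_best_route([12.0, 15.5, 15.5], [1.2, 0.9, 0.8]))  # -> 2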
mlcomp/contrib/catalyst/register.py | sUeharaE4/mlcomp | 0 | 6630270 | from catalyst.dl import registry
from catalyst.contrib.models.segmentation import (
Unet, ResnetLinknet, MobileUnet, ResnetUnet, ResnetFPNUnet, ResnetPSPnet,
FPNUnet, Linknet, PSPnet,
ResNetLinknet)
from mlcomp.contrib.criterion import RingLoss
from mlcomp.contrib.catalyst.callbacks.inference import InferBestCallback
from mlcomp.contrib.catalyst.optim import OneCycleCosineAnnealLR
from mlcomp.contrib.model.segmentation_model_pytorch import \
SegmentationModelPytorch
from mlcomp.contrib.model import Pretrained
from mlcomp.contrib.segmentation.deeplabv3.deeplab import DeepLab
def register():
registry.Criterion(RingLoss)
registry.Callback(InferBestCallback)
registry.Scheduler(OneCycleCosineAnnealLR)
# classification
registry.Model(Pretrained)
# segmentation
registry.Model(Unet)
registry.Model(ResnetLinknet)
registry.Model(MobileUnet)
registry.Model(ResnetUnet)
registry.Model(ResnetFPNUnet)
registry.Model(ResnetPSPnet)
registry.Model(FPNUnet)
registry.Model(Linknet)
registry.Model(PSPnet)
registry.Model(ResNetLinknet)
registry.Model(SegmentationModelPytorch)
registry.Model(DeepLab)
__all__ = ['register']
| from catalyst.dl import registry
from catalyst.contrib.models.segmentation import (
Unet, ResnetLinknet, MobileUnet, ResnetUnet, ResnetFPNUnet, ResnetPSPnet,
FPNUnet, Linknet, PSPnet,
ResNetLinknet)
from mlcomp.contrib.criterion import RingLoss
from mlcomp.contrib.catalyst.callbacks.inference import InferBestCallback
from mlcomp.contrib.catalyst.optim import OneCycleCosineAnnealLR
from mlcomp.contrib.model.segmentation_model_pytorch import \
SegmentationModelPytorch
from mlcomp.contrib.model import Pretrained
from mlcomp.contrib.segmentation.deeplabv3.deeplab import DeepLab
def register():
registry.Criterion(RingLoss)
registry.Callback(InferBestCallback)
registry.Scheduler(OneCycleCosineAnnealLR)
# classification
registry.Model(Pretrained)
# segmentation
registry.Model(Unet)
registry.Model(ResnetLinknet)
registry.Model(MobileUnet)
registry.Model(ResnetUnet)
registry.Model(ResnetFPNUnet)
registry.Model(ResnetPSPnet)
registry.Model(FPNUnet)
registry.Model(Linknet)
registry.Model(PSPnet)
registry.Model(ResNetLinknet)
registry.Model(SegmentationModelPytorch)
registry.Model(DeepLab)
__all__ = ['register']
| en | 0.652533 | # classification # segmentation | 1.874459 | 2 |
math/numberTheory/euler.py | snowflying/algorithm-in-python | 1 | 6630271 | #coding: utf-8
''' mbinary
#######################################################################
# File : euler.py
# Author: mbinary
# Mail: <EMAIL>
# Blog: https://mbinary.xyz
# Github: https://github.com/mbinary
# Created Time: 2018-12-16 10:53
# Description:
euler function: phi(n)
perfect num: \sigma (n) = 2n, \sigma (n) is the sum of all factors of n
eg \sigma (9) = 1+3+9 = 13
#######################################################################
'''
from factor import factor
from collections import Counter
from functools import reduce
from operator import mul
def phi(n):
st = set(factor(n))
return round(reduce(mul,(1-1/p for p in st),n))
def sigma(n):
ct = Counter(factor(n))
return reduce(mul,(round((p**(ct[p]+1)-1)/(p-1)) for p in ct),1)
if __name__=='__main__':
while 1:
n = int(input('n: '))
print('phi(n):',phi(n))
print('sigma(n):',sigma(n))
| #coding: utf-8
''' mbinary
#######################################################################
# File : euler.py
# Author: mbinary
# Mail: <EMAIL>
# Blog: https://mbinary.xyz
# Github: https://github.com/mbinary
# Created Time: 2018-12-16 10:53
# Description:
euler function: phi(n)
perfect num: \sigma (n) = 2n, \sigma (n) is the sum of all factors of n
eg \sigma (9) = 1+3+9 = 13
#######################################################################
'''
from factor import factor
from collections import Counter
from functools import reduce
from operator import mul
def phi(n):
st = set(factor(n))
return round(reduce(mul,(1-1/p for p in st),n))
def sigma(n):
ct = Counter(factor(n))
return reduce(mul,(round((p**(ct[p]+1)-1)/(p-1)) for p in ct),1)
if __name__=='__main__':
while 1:
n = int(input('n: '))
print('phi(n):',phi(n))
print('sigma(n):',sigma(n))
| de | 0.370576 | #coding: utf-8 mbinary ####################################################################### # File : euler.py # Author: mbinary # Mail: <EMAIL> # Blog: https://mbinary.xyz # Github: https://github.com/mbinary # Created Time: 2018-12-16 10:53 # Description: euler function: phi(n) perfect num: \sigma (n) = 2n, \sigma (n) is the sum of all factors of n eg \sigma (9) = 3+3+9 = 15 ####################################################################### | 3.464021 | 3 |
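With phi() and sigma() above in scope, a couple of hand-checked values confirm the formulas from the docstring:

# phi(12): prime factors {2, 3}, so 12 * (1 - 1/2) * (1 - 1/3) = 4
# (the integers coprime to 12 are 1, 5, 7 and 11)
assert phi(12) == 4
# sigma(6) = 1 + 2 + 3 + 6 = 12 = 2 * 6, so 6 is a perfect number
assert sigma(6) == 12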
comparator.py | vybhavjain/SPEECH_IMAGE_FINAL | 0 | 6630272 | <reponame>vybhavjain/SPEECH_IMAGE_FINAL
import turtle
import fcomponent as fc
def start(r): # Horizontal Oval
turtle.penup()
turtle.setpos(0,60)
turtle.pendown()
turtle.penup()
turtle.setpos(-(r/1.414),60+(r/1.414))
turtle.pendown()
turtle.write(" Start")
turtle.right(45)
for loop in range(2):
turtle.circle(r,90)
turtle.circle(r/2,90)
turtle.circle(r,45)
turtle.right(90)
turtle.forward(50)
turtle.left(90)
def stop(r): # Horizontal Oval
turtle.penup()
turtle.setpos(-10,-220)
turtle.pendown()
turtle.right(45)
for loop in range(2):
turtle.circle(r,90)
turtle.circle(r/2,90)
turtle.write(" Stop")
def flowchart1():
start(25) # function to print the start inside the oval
turtle.penup() # makes pen disappear in the current block only
turtle.setpos(0,20)
turtle.pendown()
fc.arrow()
turtle.penup() # makes pen disappear in the current block only
turtle.setpos(0,0)
turtle.pendown()
fc.parallelogram(" Get a and b",40)
turtle.setpos(0,-40)
fc.arrow()
fc.rhombus(60) # draws a rhombus for the conditional statement
turtle.penup()
turtle.setpos(-30,-90)
turtle.pendown()
turtle.write(' is a>b?')
# code if comparison is true
turtle.penup()
turtle.setpos(0,-40)
turtle.pendown()
turtle.forward(60)
turtle.left(45)
turtle.forward(70)
turtle.write('yes') # results of comparison is true
turtle.right(90)
turtle.forward(60)
turtle.left(90)
fc.arrow()
turtle.right(90)
turtle.penup()
turtle.forward(20)
turtle.pendown()
turtle.left(90)
fc.parallelogram(" display a ",30)
turtle.right(90)
turtle.forward(55)
turtle.right(90)
turtle.forward(85)
turtle.left(180)
turtle.right(90)
fc.arrow()
turtle.left(90)
# code if comparison is false
turtle.penup()
turtle.setpos(0,-40)
turtle.pendown()
turtle.right(135)
turtle.forward(60)
turtle.right(45)
turtle.forward(70)
turtle.write('no') #results of comparison is false
turtle.left(90)
turtle.forward(60)
turtle.left(90)
fc.arrow()
turtle.right(90)
turtle.penup()
turtle.forward(20)
turtle.pendown()
turtle.left(90)
fc.parallelogram(" display b ",30)
turtle.right(90)
turtle.forward(55)
turtle.left(90)
turtle.forward(100)
turtle.left(90)
fc.arrow()
turtle.right(90)
stop(25) # function to print inside oval stop
turtle.hideturtle()
turtle.done()
| import turtle
import fcomponent as fc
def start(r): # Horizontal Oval
turtle.penup()
turtle.setpos(0,60)
turtle.pendown()
turtle.penup()
turtle.setpos(-(r/1.414),60+(r/1.414))
turtle.pendown()
turtle.write(" Start")
turtle.right(45)
for loop in range(2):
turtle.circle(r,90)
turtle.circle(r/2,90)
turtle.circle(r,45)
turtle.right(90)
turtle.forward(50)
turtle.left(90)
def stop(r): # Horizontal Oval
turtle.penup()
turtle.setpos(-10,-220)
turtle.pendown()
turtle.right(45)
for loop in range(2):
turtle.circle(r,90)
turtle.circle(r/2,90)
turtle.write(" Stop")
def flowchart1():
start(25) # function to print the start inside the oval
turtle.penup() # makes pen disappear in the current block only
turtle.setpos(0,20)
turtle.pendown()
fc.arrow()
turtle.penup() # makes pen disappear in the current block only
turtle.setpos(0,0)
turtle.pendown()
fc.parallelogram(" Get a and b",40)
turtle.setpos(0,-40)
fc.arrow()
fc.rhombus(60) # draws a rhombus for the conditional statement
turtle.penup()
turtle.setpos(-30,-90)
turtle.pendown()
turtle.write(' is a>b?')
# code if comparison is true
turtle.penup()
turtle.setpos(0,-40)
turtle.pendown()
turtle.forward(60)
turtle.left(45)
turtle.forward(70)
turtle.write('yes') # results of comparison is true
turtle.right(90)
turtle.forward(60)
turtle.left(90)
fc.arrow()
turtle.right(90)
turtle.penup()
turtle.forward(20)
turtle.pendown()
turtle.left(90)
fc.parallelogram(" display a ",30)
turtle.right(90)
turtle.forward(55)
turtle.right(90)
turtle.forward(85)
turtle.left(180)
turtle.right(90)
fc.arrow()
turtle.left(90)
# code if comparison is false
turtle.penup()
turtle.setpos(0,-40)
turtle.pendown()
turtle.right(135)
turtle.forward(60)
turtle.right(45)
turtle.forward(70)
turtle.write('no') #results of comparison is false
turtle.left(90)
turtle.forward(60)
turtle.left(90)
fc.arrow()
turtle.right(90)
turtle.penup()
turtle.forward(20)
turtle.pendown()
turtle.left(90)
fc.parallelogram(" display b ",30)
turtle.right(90)
turtle.forward(55)
turtle.left(90)
turtle.forward(100)
turtle.left(90)
fc.arrow()
turtle.right(90)
stop(25) # function to print inside oval stop
turtle.hideturtle()
turtle.done() | en | 0.874365 | # Horizontal Oval # Horizontal Oval # function to print the start inside the oval # makes pen disappear in the current block only # makes pen disappear in the current block only # draws a rhombus for the conditional statement # code if comparison is true # results of comparison is true # code if comparison is flase #results of comparison is false # function to print inside oval stop | 4.01808 | 4 |
pyelf/structs64.py | guilload/pyelf | 3 | 6630273 | from .enums import *
from .flags import *
from .structure import Structure
Elf64_Addr = 'Q'
Elf64_Byte = 'B'
Elf64_Half = 'H'
Elf64_Off = 'Q'
Elf64_SHalf = 'h'
Elf64_Sword = 'i'
Elf64_Sxword = 'q'
Elf64_Word = 'I'
Elf64_Xword = 'Q'
class Elf64_Ehdr(Structure):
"""
File header.
"""
members = ({'name': 'e_type', 'type': Elf64_Half, 'enum': E_TYPE},
{'name': 'e_machine', 'type': Elf64_Half, 'enum': E_MACHINE},
{'name': 'e_version', 'type': Elf64_Word},
{'name': 'e_entry', 'type': Elf64_Addr},
{'name': 'e_phoff', 'type': Elf64_Off},
{'name': 'e_shoff', 'type': Elf64_Off},
{'name': 'e_flags', 'type': Elf64_Word},
{'name': 'e_ehsize', 'type': Elf64_Half},
{'name': 'e_phentsize', 'type': Elf64_Half},
{'name': 'e_phnum', 'type': Elf64_Half},
{'name': 'e_shentsize', 'type': Elf64_Half},
{'name': 'e_shnum', 'type': Elf64_Half},
{'name': 'e_shstrndx', 'type': Elf64_Half},)
class Elf64_Shdr(Structure):
"""
Section header.
"""
members = ({'name': 'sh_name', 'type': Elf64_Word},
{'name': 'sh_type', 'type': Elf64_Word, 'enum': SH_TYPE},
{'name': 'sh_flags', 'type': Elf64_Xword, 'flag': SH_FLAG},
{'name': 'sh_addr', 'type': Elf64_Addr, 'label': 'Address'},
{'name': 'sh_offset', 'type': Elf64_Off},
{'name': 'sh_size', 'type': Elf64_Xword},
{'name': 'sh_link', 'type': Elf64_Word},
{'name': 'sh_info', 'type': Elf64_Word},
{'name': 'sh_addralign', 'type': Elf64_Xword, 'label': 'Align'},
{'name': 'sh_entsize', 'type': Elf64_Xword, 'label': 'Entry size'},
{'name': 'name', 'type': 'property'},
{'name': 'number', 'type': 'property', 'label': 'No.'})
display = ('number',
'name',
'sh_type',
'sh_addr',
'sh_offset',
'sh_size',
'sh_entsize',
'sh_flags',
'sh_link',
'sh_info',
'sh_addralign')
@property
def name(self):
return self.elf.shstrtab[self.sh_name]
@property
def number(self):
return (self.offset - self.elf.header.e_shoff) / self.elf.header.e_shentsize
class Elf64_Phdr(Structure):
"""
Program header.
"""
members = ({'name': 'p_type', 'type': Elf64_Word},
{'name': 'p_flags', 'type': Elf64_Word},
{'name': 'p_offset', 'type': Elf64_Off},
{'name': 'p_vaddr', 'type': Elf64_Addr},
{'name': 'p_paddr', 'type': Elf64_Addr},
{'name': 'p_filesz', 'type': Elf64_Xword},
{'name': 'p_memsz', 'type': Elf64_Xword},
{'name': 'p_align', 'type': Elf64_Xword})
class Elf64_Sym(Structure):
"""
Symbol section entry.
"""
members = ({'name': 'st_name', 'type': Elf64_Word},
{'name': 'st_info', 'type': Elf64_Byte},
{'name': 'st_other', 'type': Elf64_Byte, 'enum': ST_VISIBILITY, 'label': 'VIS'},
{'name': 'st_shndx', 'type': Elf64_Half, 'enum': SH_Nindex},
{'name': 'st_value', 'type': Elf64_Addr},
{'name': 'st_size', 'type': Elf64_Xword},
{'name': 'st_bind', 'type': 'property'},
{'name': 'st_type', 'type': 'property'},
{'name': 'name', 'type': 'property'},
{'name': 'number', 'type': 'property', 'label': 'No.'})
display = ('number',
'st_value',
'st_size',
'st_type',
'st_bind',
'st_other',
'st_shndx',
'name')
@property
def name(self):
symtab = self.elf.sections[self.sheader.sh_link]
return symtab[self.st_name]
@property
def number(self):
return (self.offset - self.sheader.sh_offset) / self.sheader.sh_entsize
@property
def st_bind(self):
return ST_BIND[self.st_info >> 4]
@property
def st_type(self):
return ST_TYPE[self.st_info & 0xf]
class Elf64_Rel(Structure):
"""
'SHT_REL' relocation section entry.
"""
members = ({'name': 'r_offset', 'type': Elf64_Addr},
{'name': 'r_info', 'type': Elf64_Xword},
{'name': 'r_sym', 'type': 'property'},
{'name': 'r_type', 'type': 'property'},)
@property
def r_sym(self):
return (self.r_info >> 32) & 0xffffffff
@property
def r_type(self):
return self.r_info & 0xffffffff
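# Example of the packing handled by the two properties above: for a
# hypothetical entry with r_info = (5 << 32) | 7, r_sym evaluates to 5 and
# r_type to 7; the symbol table index sits in the upper 32 bits and the
# relocation type in the lower 32 bits.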
class Elf64_Rela(Structure):
"""
'SHT_RELA' relocation section entry.
"""
members = ({'name': 'r_offset', 'type': Elf64_Addr},
{'name': 'r_info', 'type': Elf64_Xword},
{'name': 'r_addend', 'type': Elf64_Sxword},
{'name': 'r_sym', 'type': 'property'},
{'name': 'r_type', 'type': 'property'},
{'name': 'name', 'type': 'property', 'label': "Symbol's name + addend"},
{'name': 'value', 'type': 'property', 'label': "Symbol's value"})
display = ('r_offset',
'r_info',
'r_type',
'value',
'name')
@property
def r_sym(self):
return (self.r_info >> 32) & 0xffffffff
@property
def r_type(self):
return R_RELOCATION[self.r_info & 0xffffffff]
@property
def name(self):
if self.r_sym == 0:
return ''
if self.symbol.st_name == 0:
sheader = self.elf.sheaders[self.symbol.st_shndx]
name = sheader.name
else:
name = self.symbol.name
return '{} {} {}'.format(name, '+' if self.r_addend >= 0 else '-',
abs(self.r_addend))
@property
def symbol(self):
symtab = self.elf.sections[self.sheader.sh_link]
symbol = symtab[self.r_sym]
return symbol
@property
def value(self):
if self.r_sym == 0:
return ''
return self.symbol.st_value
class Elf64_Dyn(Structure):
"""
Dynamic section entry.
"""
members = ({'name': 'd_tag', 'type': Elf64_Sxword},
{'name': 'd_val', 'type': Elf64_Xword},
{'name': 'd_type', 'type': 'property'},
{'name': 'name', 'type': 'property', 'label': 'Name or value'})
display = ('d_tag',
'd_type',
'name',)
@property
def d_type(self):
return D_TAG[self.d_tag]
@property
def name(self): # TODO: there're more d_types to deal with
if self.d_type == DT_NEEDED:
symtab = self.elf.sections[self.sheader.sh_link]
return 'Shared library: [{}]'.format(symtab[self.d_val])
else:
return self.d_val
| from .enums import *
from .flags import *
from .structure import Structure
Elf64_Addr = 'Q'
Elf64_Byte = 'B'
Elf64_Half = 'H'
Elf64_Off = 'Q'
Elf64_SHalf = 'h'
Elf64_Sword = 'i'
Elf64_Sxword = 'q'
Elf64_Word = 'I'
Elf64_Xword = 'Q'
class Elf64_Ehdr(Structure):
"""
File header.
"""
members = ({'name': 'e_type', 'type': Elf64_Half, 'enum': E_TYPE},
{'name': 'e_machine', 'type': Elf64_Half, 'enum': E_MACHINE},
{'name': 'e_version', 'type': Elf64_Word},
{'name': 'e_entry', 'type': Elf64_Addr},
{'name': 'e_phoff', 'type': Elf64_Off},
{'name': 'e_shoff', 'type': Elf64_Off},
{'name': 'e_flags', 'type': Elf64_Word},
{'name': 'e_ehsize', 'type': Elf64_Half},
{'name': 'e_phentsize', 'type': Elf64_Half},
{'name': 'e_phnum', 'type': Elf64_Half},
{'name': 'e_shentsize', 'type': Elf64_Half},
{'name': 'e_shnum', 'type': Elf64_Half},
{'name': 'e_shstrndx', 'type': Elf64_Half},)
class Elf64_Shdr(Structure):
"""
Section header.
"""
members = ({'name': 'sh_name', 'type': Elf64_Word},
{'name': 'sh_type', 'type': Elf64_Word, 'enum': SH_TYPE},
{'name': 'sh_flags', 'type': Elf64_Xword, 'flag': SH_FLAG},
{'name': 'sh_addr', 'type': Elf64_Addr, 'label': 'Address'},
{'name': 'sh_offset', 'type': Elf64_Off},
{'name': 'sh_size', 'type': Elf64_Xword},
{'name': 'sh_link', 'type': Elf64_Word},
{'name': 'sh_info', 'type': Elf64_Word},
{'name': 'sh_addralign', 'type': Elf64_Xword, 'label': 'Align'},
{'name': 'sh_entsize', 'type': Elf64_Xword, 'label': 'Entry size'},
{'name': 'name', 'type': 'property'},
{'name': 'number', 'type': 'property', 'label': 'No.'})
display = ('number',
'name',
'sh_type',
'sh_addr',
'sh_offset',
'sh_size',
'sh_entsize',
'sh_flags',
'sh_link',
'sh_info',
'sh_addralign')
@property
def name(self):
return self.elf.shstrtab[self.sh_name]
@property
def number(self):
return (self.offset - self.elf.header.e_shoff) / self.elf.header.e_shentsize
class Elf64_Phdr(Structure):
"""
Program header.
"""
members = ({'name': 'p_type', 'type': Elf64_Word},
{'name': 'p_flags', 'type': Elf64_Word},
{'name': 'p_offset', 'type': Elf64_Off},
{'name': 'p_vaddr', 'type': Elf64_Addr},
{'name': 'p_paddr', 'type': Elf64_Addr},
{'name': 'p_filesz', 'type': Elf64_Xword},
{'name': 'p_memsz', 'type': Elf64_Xword},
{'name': 'p_align', 'type': Elf64_Xword})
class Elf64_Sym(Structure):
"""
Symbol section entry.
"""
members = ({'name': 'st_name', 'type': Elf64_Word},
{'name': 'st_info', 'type': Elf64_Byte},
{'name': 'st_other', 'type': Elf64_Byte, 'enum': ST_VISIBILITY, 'label': 'VIS'},
{'name': 'st_shndx', 'type': Elf64_Half, 'enum': SH_Nindex},
{'name': 'st_value', 'type': Elf64_Addr},
{'name': 'st_size', 'type': Elf64_Xword},
{'name': 'st_bind', 'type': 'property'},
{'name': 'st_type', 'type': 'property'},
{'name': 'name', 'type': 'property'},
{'name': 'number', 'type': 'property', 'label': 'No.'})
display = ('number',
'st_value',
'st_size',
'st_type',
'st_bind',
'st_other',
'st_shndx',
'name')
@property
def name(self):
symtab = self.elf.sections[self.sheader.sh_link]
return symtab[self.st_name]
@property
def number(self):
return (self.offset - self.sheader.sh_offset) / self.sheader.sh_entsize
@property
def st_bind(self):
return ST_BIND[self.st_info >> 4]
@property
def st_type(self):
return ST_TYPE[self.st_info & 0xf]
class Elf64_Rel(Structure):
"""
'SHT_REL' relocation section entry.
"""
members = ({'name': 'r_offset', 'type': Elf64_Addr},
{'name': 'r_info', 'type': Elf64_Xword},
{'name': 'r_sym', 'type': 'property'},
{'name': 'r_type', 'type': 'property'},)
@property
def r_sym(self):
return (self.r_info >> 32) & 0xffffffff
@property
def r_type(self):
return self.r_info & 0xffffffff
class Elf64_Rela(Structure):
"""
'SHT_RELA' relocation section entry.
"""
members = ({'name': 'r_offset', 'type': Elf64_Addr},
{'name': 'r_info', 'type': Elf64_Xword},
{'name': 'r_addend', 'type': Elf64_Sxword},
{'name': 'r_sym', 'type': 'property'},
{'name': 'r_type', 'type': 'property'},
{'name': 'name', 'type': 'property', 'label': "Symbol's name + addend"},
{'name': 'value', 'type': 'property', 'label': "Symbol's value"})
display = ('r_offset',
'r_info',
'r_type',
'value',
'name')
@property
def r_sym(self):
return (self.r_info >> 32) & 0xffffffff
@property
def r_type(self):
return R_RELOCATION[self.r_info & 0xffffffff]
@property
def name(self):
if self.r_sym == 0:
return ''
if self.symbol.st_name == 0:
sheader = self.elf.sheaders[self.symbol.st_shndx]
name = sheader.name
else:
name = self.symbol.name
return '{} {} {}'.format(name, '+' if self.r_addend >= 0 else '-',
abs(self.r_addend))
@property
def symbol(self):
symtab = self.elf.sections[self.sheader.sh_link]
symbol = symtab[self.r_sym]
return symbol
@property
def value(self):
if self.r_sym == 0:
return ''
return self.symbol.st_value
class Elf64_Dyn(Structure):
"""
Dynamic section entry.
"""
members = ({'name': 'd_tag', 'type': Elf64_Sxword},
{'name': 'd_val', 'type': Elf64_Xword},
{'name': 'd_type', 'type': 'property'},
{'name': 'name', 'type': 'property', 'label': 'Name or value'})
display = ('d_tag',
'd_type',
'name',)
@property
def d_type(self):
return D_TAG[self.d_tag]
@property
def name(self): # TODO: there're more d_types to deal with
if self.d_type == DT_NEEDED:
symtab = self.elf.sections[self.sheader.sh_link]
return 'Shared library: [{}]'.format(symtab[self.d_val])
else:
return self.d_val
| en | 0.787254 | File header. Section header. Program header. Symbol section entry. 'SHT_REL' relocation section entry. 'SHT_RELA' relocation section entry. Dynamic section entry. # TODO: there're more d_types to deal with | 2.091531 | 2 |
src/memote/support/biomass.py | Midnighter/memote | 0 | 6630274 | # -*- coding: utf-8 -*-
# Copyright 2017 Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Supporting functions for biomass consistency checks."""
from __future__ import absolute_import
import logging
import re
import numpy as np
from cobra.exceptions import OptimizationError
from future.utils import raise_with_traceback
from six import iteritems
import memote.support.helpers as helpers
__all__ = (
"sum_biomass_weight",
"find_biomass_precursors",
"find_blocked_biomass_precursors",
)
LOGGER = logging.getLogger(__name__)
# 20 Amino Acids, 4 Deoxyribonucleotides, 4 Ribonucleotides,
# 8 Universal Cofactors, and H2O
ESSENTIAL_PRECURSOR_IDS = [
"MNXM94",
"MNXM55",
"MNXM134",
"MNXM76",
"MNXM61",
"MNXM97",
"MNXM53",
"MNXM114",
"MNXM42",
"MNXM142",
"MNXM37",
"MNXM89557",
"MNXM231",
"MNXM70",
"MNXM78",
"MNXM199",
"MNXM140",
"MNXM32",
"MNXM29",
"MNXM147",
# Deoxyribonucleotides
"MNXM286",
"MNXM360",
"MNXM394",
"MNXM344",
# Ribonucleotides
"MNXM3",
"MNXM51",
"MNXM63",
"MNXM121",
# NAD
"MNXM8",
# NADP
"MNXM5",
# S-adenosyl-L-methionine
"MNXM16",
# FAD
"MNXM33",
# Pyridoxal 5'-phosphate
"MNXM161",
# CoA
"MNXM12",
# Thiamine Diphosphate
"MNXM256",
# FMN
"MNXM119",
# H2O
"MNXM2",
]
def sum_biomass_weight(reaction):
"""
Compute the sum of all reaction compounds.
This function expects all metabolites of the biomass reaction to have
formula information assigned.
Parameters
----------
reaction : cobra.core.reaction.Reaction
The biomass reaction of the model under investigation.
Returns
-------
float
The molecular weight of the biomass reaction in units of g/mmol.
"""
return (
sum(
-coef * met.formula_weight
for (met, coef) in iteritems(reaction.metabolites)
)
/ 1000.0
)
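# Hedged usage sketch (numbers and the reaction id are illustrative only):
# a precursor consumed at 0.5 mmol/gDW with a formula weight of 100 g/mol
# contributes -(-0.5) * 100 / 1000 = 0.05 g/mmol to the sum; a well-curated
# lumped biomass reaction is expected to sum to roughly 1 g/mmol.
#
# weight = sum_biomass_weight(model.reactions.get_by_id("BIOMASS")) # hypothetical id
# assert 0.9 < weight < 1.1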
def find_biomass_precursors(model, reaction):
"""
Return a list of all biomass precursors excluding ATP and H2O.
Parameters
----------
reaction : cobra.core.reaction.Reaction
The biomass reaction of the model under investigation.
model : cobra.Model
The metabolic model under investigation.
Returns
-------
list
Metabolite objects that are reactants of the biomass reaction excluding
ATP and H2O.
"""
id_of_main_compartment = helpers.find_compartment_id_in_model(model, "c")
gam_reactants = set()
try:
gam_reactants.update(
[helpers.find_met_in_model(model, "MNXM3", id_of_main_compartment)[0]]
)
except RuntimeError:
pass
try:
gam_reactants.update(
[helpers.find_met_in_model(model, "MNXM2", id_of_main_compartment)[0]]
)
except RuntimeError:
pass
biomass_precursors = set(reaction.reactants) - gam_reactants
return list(biomass_precursors)
def find_blocked_biomass_precursors(reaction, model):
"""
Return a list of all biomass precursors that cannot be produced.
Parameters
----------
reaction : cobra.core.reaction.Reaction
The biomass reaction of the model under investigation.
model : cobra.Model
The metabolic model under investigation.
Returns
-------
list
Metabolite objects that are reactants of the biomass reaction excluding
ATP and H2O that cannot be produced by flux balance analysis.
"""
LOGGER.debug("Finding blocked biomass precursors")
precursors = find_biomass_precursors(model, reaction)
blocked_precursors = list()
_, ub = helpers.find_bounds(model)
for precursor in precursors:
with model:
dm_rxn = model.add_boundary(
precursor, type="safe-demand", reaction_id="safe_demand", lb=0, ub=ub
)
flux = helpers.run_fba(model, dm_rxn.id, direction="max")
if np.isnan(flux) or abs(flux) < 1e-08:
blocked_precursors.append(precursor)
return blocked_precursors
def gam_in_biomass(model, reaction):
"""
    Return whether the biomass reaction includes growth-associated maintenance.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
reaction : cobra.core.reaction.Reaction
The biomass reaction of the model under investigation.
Returns
-------
boolean
True if the biomass reaction includes ATP and H2O as reactants and ADP,
Pi and H as products, False otherwise.
"""
id_of_main_compartment = helpers.find_compartment_id_in_model(model, "c")
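    # The MetaNetX identifiers below are assumed to map to the GAM species
    # named in the docstring: MNXM3 = ATP and MNXM2 = H2O on the reactant
    # side; MNXM7 = ADP, MNXM1 = H(+) and MNXM9 = phosphate on the product
    # side, i.e. the hydrolysis ATP + H2O -> ADP + Pi + H.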
try:
left = {
helpers.find_met_in_model(model, "MNXM3", id_of_main_compartment)[0],
helpers.find_met_in_model(model, "MNXM2", id_of_main_compartment)[0],
}
right = {
helpers.find_met_in_model(model, "MNXM7", id_of_main_compartment)[0],
helpers.find_met_in_model(model, "MNXM1", id_of_main_compartment)[0],
helpers.find_met_in_model(model, "MNXM9", id_of_main_compartment)[0],
}
except RuntimeError:
return False
return left.issubset(set(reaction.reactants)) and right.issubset(
set(reaction.products)
)
def find_direct_metabolites(model, reaction, tolerance=1e-06):
"""
Return list of possible direct biomass precursor metabolites.
The term direct metabolites describes metabolites that are involved only
in either transport and/or boundary reactions, AND the biomass reaction(s),
but not in any purely metabolic reactions.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
reaction : cobra.Reaction
The biomass reaction of the model under investigation.
tolerance : float, optional
Tolerance below which values will be regarded as zero.
Returns
-------
list
Metabolites that qualify as direct metabolites i.e. biomass precursors
that are taken up to be consumed by the biomass reaction only.
"""
biomass_rxns = set(helpers.find_biomass_reaction(model))
tra_bou_bio_rxns = helpers.find_interchange_biomass_reactions(model, biomass_rxns)
try:
precursors = find_biomass_precursors(model, reaction)
main_comp = helpers.find_compartment_id_in_model(model, "c")
ext_space = helpers.find_compartment_id_in_model(model, "e")
except KeyError:
LOGGER.error(
"Failed to properly identify cytosolic and extracellular " "compartments."
)
raise_with_traceback(
KeyError(
"The cytosolic and/or extracellular "
"compartments could not be identified."
)
)
except RuntimeError:
LOGGER.error(
"Failed to properly identify cytosolic and extracellular " "compartments."
)
raise_with_traceback(
RuntimeError(
"The cytosolic and/or extracellular "
"compartments could not be "
"identified."
)
)
else:
tra_bou_bio_mets = [
met for met in precursors if met.reactions.issubset(tra_bou_bio_rxns)
]
rxns_of_interest = set(
[
rxn
for met in tra_bou_bio_mets
for rxn in met.reactions
if rxn not in biomass_rxns
]
)
solution = model.optimize(raise_error=True)
if np.isclose(solution.objective_value, 0, atol=tolerance):
LOGGER.error(
"Failed to generate a non-zero objective value with "
"flux balance analysis."
)
raise OptimizationError(
"The flux balance analysis on this model returned an "
"objective value of zero. Make sure the model can "
"grow! Check if the constraints are not too strict!"
)
tra_bou_bio_fluxes = {r: solution[r.id] for r in rxns_of_interest}
met_flux_sum = {m: 0 for m in tra_bou_bio_mets}
return detect_false_positive_direct_metabolites(
tra_bou_bio_mets,
biomass_rxns,
main_comp,
ext_space,
tra_bou_bio_fluxes,
met_flux_sum,
)
def detect_false_positive_direct_metabolites(
candidates, biomass_reactions, cytosol, extra, reaction_fluxes, metabolite_fluxes
):
"""
Weed out false positive direct metabolites.
    False positives exist in the extracellular compartment, receive flux from
    the cytosolic compartment, and are part of the biomass reaction(s). The
    function sums fluxes positively or negatively depending on whether direct
    metabolites in the extracellular compartment are defined as reactants or
    products in the various reactions.
Parameters
----------
candidates : list of cobra.Metabolite
Candidate direct metabolites.
biomass_reactions : set of cobra.Reaction
The biomass reactions. Usually one or two.
cytosol : str
The identifier of the cytosolic compartment.
extra : str
The identifier of the extracellular compartment.
Returns
-------
list
Definitive list of direct metabolites, i.e., biomass precursors
that are taken up to be consumed by the biomass reaction only.
"""
for met in candidates:
is_internal = met.compartment != extra
for rxn in met.reactions:
if rxn in biomass_reactions:
continue
# Internal metabolites can not be false positives.
if is_internal:
metabolite_fluxes[met] += abs(reaction_fluxes[rxn])
continue
# if the metabolite is in the "e" compartment and a reactant,
# sum the fluxes accordingly (outward=negative, inward=positive)
if met in rxn.reactants:
product_comps = set([p.compartment for p in rxn.products])
# if the reaction has no product (outward flux)
if len(product_comps) == 0:
metabolite_fluxes[met] += -reaction_fluxes[rxn]
# if the reaction has a product in "c" (inward flux)
elif cytosol in product_comps:
metabolite_fluxes[met] += reaction_fluxes[rxn]
# if the metabolite is in the "e" compartment and a product,
# sum the fluxes accordingly (outward=negative, inward=positive)
elif met in rxn.products:
reactant_comps = set([p.compartment for p in rxn.reactants])
# if the reaction has no reactant (inward flux)
if len(reactant_comps) == 0:
metabolite_fluxes[met] += reaction_fluxes[rxn]
# if the reaction has a reactant in "c" (outward flux)
elif cytosol in reactant_comps:
metabolite_fluxes[met] += -reaction_fluxes[rxn]
return [m for m, f in iteritems(metabolite_fluxes) if f > 0]
def bundle_biomass_components(model, reaction):
"""
    Return bundled biomass component reactions if the biomass is not one lumped reaction.
There are two basic ways of specifying the biomass composition. The most
common is a single lumped reaction containing all biomass precursors.
Alternatively, the biomass equation can be split into several reactions
each focusing on a different macromolecular component for instance
a (1 gDW ash) + b (1 gDW phospholipids) + c (free fatty acids)+
d (1 gDW carbs) + e (1 gDW protein) + f (1 gDW RNA) + g (1 gDW DNA) +
h (vitamins/cofactors) + xATP + xH2O-> 1 gDCW biomass + xADP + xH + xPi.
    This function aims to identify whether the given biomass reaction
    'reaction' is a lumped all-in-one reaction, or whether it is just the
    final composing reaction of all macromolecular components. It is
    important to identify which other reactions belong to a given biomass
    reaction to be able to identify universal biomass components or calculate
    detailed precursor stoichiometries.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
reaction : cobra.core.reaction.Reaction
The biomass reaction of the model under investigation.
Returns
-------
list
One or more reactions that qualify as THE biomass equation together.
Notes
-----
Counting H2O, ADP, Pi, H, and ATP, the amount of metabolites in a split
reaction is comparatively low:
    Any reaction with 15 or fewer metabolites can
probably be counted as a split reaction containing Ash, Phospholipids,
Fatty Acids, Carbohydrates (i.e. cell wall components), Protein, RNA,
DNA, Cofactors and Vitamins, and Small Molecules. Any reaction with more
than or equal to 28 metabolites, however, (21 AA + 3 Nucleotides (4-ATP)
+ 4 Deoxy-Nucleotides) can be considered a lumped reaction.
Anything in between will be treated conservatively as a lumped reaction.
For split reactions, after removing any of the metabolites associated with
growth-associated energy expenditure (H2O, ADP, Pi, H, and ATP), the
only remaining metabolites should be generalized macromolecule precursors
e.g. Protein, Phospholipids etc. Each of these have their own composing
reactions. Hence we include the reactions of these metabolites in the
set that ultimately makes up the returned list of reactions that together
make up the biomass equation.
"""
if len(reaction.metabolites) >= 16:
return [reaction]
id_of_main_compartment = helpers.find_compartment_id_in_model(model, "c")
gam_mets = ["MNXM3", "MNXM2", "MNXM7", "MNXM1", "MNXM9"]
try:
gam = set(
[
helpers.find_met_in_model(model, met, id_of_main_compartment)[0]
for met in gam_mets
]
)
except RuntimeError:
gam = set()
regex = re.compile("^{}(_[a-zA-Z]+?)*?$".format("biomass"), re.IGNORECASE)
biomass_metabolite = set(model.metabolites.query(regex))
macromolecules = set(reaction.metabolites) - gam - biomass_metabolite
bundled_reactions = set()
for met in macromolecules:
bundled_reactions = bundled_reactions | set(met.reactions)
return list(bundled_reactions)
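# Hedged illustration of the size heuristic above (equations are hypothetical):
# a lumped equation such as "21 AA + 8 nucleotides + cofactors + GAM -> biomass"
# easily exceeds 16 metabolites and is returned as-is, whereas a split equation
# like "protein + dna + rna + lipid + carbohydrate + GAM -> biomass" stays below
# 16, so the composing reactions of those macromolecule pseudo-metabolites are
# collected and returned together with the biomass reaction itself.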
def essential_precursors_not_in_biomass(model, reaction):
u"""
Return a list of essential precursors missing from the biomass reaction.
There are universal components of life that make up the biomass of all
known organisms. These include all proteinogenic amino acids, deoxy- and
ribonucleotides, water and a range of metabolic cofactors.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
reaction : cobra.core.reaction.Reaction
The biomass reaction of the model under investigation.
Returns
-------
list
IDs of essential metabolites missing from the biomass reaction. The
        IDs will appear in the model's namespace if the metabolite exists, but
        will use the MetaNetX namespace if the metabolite does not exist
in the model.
Notes
-----
"Answering the question of what to include in the core of a biomass
objective function is not always straightforward. One example is different
nucleotide forms, which, although inter-convertible, are essential for
cellular chemistry. We propose here that all essential and irreplaceable
molecules for metabolism should be included in the biomass functions of
genome scale metabolic models. In the special case of cofactors, when two
forms of the same cofactor take part in the same reactions (such as NAD
and NADH), only one form could be included for the sake of simplicity.
When a class of cofactors includes active and non-active interconvertible
forms, the active forms should be preferred. [1]_."
    Please note that [1]_ also suggests counting C1 carriers
(derivatives of tetrahydrofolate(B9) or tetrahydromethanopterin) as
universal cofactors. We have omitted these from this check because there
are many individual compounds that classify as C1 carriers, and it is not
clear a priori which one should be preferred. In a future update, we may
consider identifying these using a chemical ontology.
References
----------
.. [1] <NAME>., <NAME>., & <NAME>. (2017). Integration of
Biomass Formulations of Genome-Scale Metabolic Models with Experimental
Data Reveals Universally Essential Cofactors in Prokaryotes. Metabolic
Engineering, 39(October 2016), 200–208.
http://doi.org/10.1016/j.ymben.2016.12.002
"""
main_comp = helpers.find_compartment_id_in_model(model, "c")
biomass_eq = bundle_biomass_components(model, reaction)
pooled_precursors = set([met for rxn in biomass_eq for met in rxn.metabolites])
missing_essential_precursors = []
for mnx_id in ESSENTIAL_PRECURSOR_IDS:
try:
met = helpers.find_met_in_model(model, mnx_id, main_comp)[0]
if met not in pooled_precursors:
missing_essential_precursors.append(met.id)
except RuntimeError:
missing_essential_precursors.append(mnx_id)
return missing_essential_precursors
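# Hedged usage sketch (model handling and indexing are assumptions, not part
# of this module's API):
# biomass_rxn = helpers.find_biomass_reaction(model)[0]
# missing = essential_precursors_not_in_biomass(model, biomass_rxn)
# print(missing) # e.g. ['MNXM33', 'MNXM119'] if FAD and FMN are absent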
| # -*- coding: utf-8 -*-
# Copyright 2017 Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Supporting functions for biomass consistency checks."""
from __future__ import absolute_import
import logging
import re
import numpy as np
from cobra.exceptions import OptimizationError
from future.utils import raise_with_traceback
from six import iteritems
import memote.support.helpers as helpers
__all__ = (
"sum_biomass_weight",
"find_biomass_precursors",
"find_blocked_biomass_precursors",
)
LOGGER = logging.getLogger(__name__)
# 20 Amino Acids, 4 Deoxyribonucleotides, 4 Ribonucleotides,
# 8 Universal Cofactors, and H2O
ESSENTIAL_PRECURSOR_IDS = [
"MNXM94",
"MNXM55",
"MNXM134",
"MNXM76",
"MNXM61",
"MNXM97",
"MNXM53",
"MNXM114",
"MNXM42",
"MNXM142",
"MNXM37",
"MNXM89557",
"MNXM231",
"MNXM70",
"MNXM78",
"MNXM199",
"MNXM140",
"MNXM32",
"MNXM29",
"MNXM147",
# Deoxyribonucleotides
"MNXM286",
"MNXM360",
"MNXM394",
"MNXM344",
# Ribonucleotides
"MNXM3",
"MNXM51",
"MNXM63",
"MNXM121",
# NAD
"MNXM8",
# NADP
"MNXM5",
# S-adenosyl-L-methionine
"MNXM16",
# FAD
"MNXM33",
# Pyridoxal 5'-phosphate
"MNXM161",
# CoA
"MNXM12",
# Thiamine Diphosphate
"MNXM256",
# FMN
"MNXM119",
# H2O
"MNXM2",
]
def sum_biomass_weight(reaction):
"""
Compute the sum of all reaction compounds.
This function expects all metabolites of the biomass reaction to have
formula information assigned.
Parameters
----------
reaction : cobra.core.reaction.Reaction
The biomass reaction of the model under investigation.
Returns
-------
float
The molecular weight of the biomass reaction in units of g/mmol.
"""
return (
sum(
-coef * met.formula_weight
for (met, coef) in iteritems(reaction.metabolites)
)
/ 1000.0
)
def find_biomass_precursors(model, reaction):
"""
Return a list of all biomass precursors excluding ATP and H2O.
Parameters
----------
reaction : cobra.core.reaction.Reaction
The biomass reaction of the model under investigation.
model : cobra.Model
The metabolic model under investigation.
Returns
-------
list
Metabolite objects that are reactants of the biomass reaction excluding
ATP and H2O.
"""
id_of_main_compartment = helpers.find_compartment_id_in_model(model, "c")
gam_reactants = set()
try:
gam_reactants.update(
[helpers.find_met_in_model(model, "MNXM3", id_of_main_compartment)[0]]
)
except RuntimeError:
pass
try:
gam_reactants.update(
[helpers.find_met_in_model(model, "MNXM2", id_of_main_compartment)[0]]
)
except RuntimeError:
pass
biomass_precursors = set(reaction.reactants) - gam_reactants
return list(biomass_precursors)
def find_blocked_biomass_precursors(reaction, model):
"""
Return a list of all biomass precursors that cannot be produced.
Parameters
----------
reaction : cobra.core.reaction.Reaction
The biomass reaction of the model under investigation.
model : cobra.Model
The metabolic model under investigation.
Returns
-------
list
Metabolite objects that are reactants of the biomass reaction excluding
ATP and H2O that cannot be produced by flux balance analysis.
"""
LOGGER.debug("Finding blocked biomass precursors")
precursors = find_biomass_precursors(model, reaction)
blocked_precursors = list()
_, ub = helpers.find_bounds(model)
for precursor in precursors:
with model:
dm_rxn = model.add_boundary(
precursor, type="safe-demand", reaction_id="safe_demand", lb=0, ub=ub
)
flux = helpers.run_fba(model, dm_rxn.id, direction="max")
if np.isnan(flux) or abs(flux) < 1e-08:
blocked_precursors.append(precursor)
return blocked_precursors
def gam_in_biomass(model, reaction):
"""
    Return whether the biomass reaction includes growth-associated maintenance.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
reaction : cobra.core.reaction.Reaction
The biomass reaction of the model under investigation.
Returns
-------
boolean
True if the biomass reaction includes ATP and H2O as reactants and ADP,
Pi and H as products, False otherwise.
"""
id_of_main_compartment = helpers.find_compartment_id_in_model(model, "c")
try:
left = {
helpers.find_met_in_model(model, "MNXM3", id_of_main_compartment)[0],
helpers.find_met_in_model(model, "MNXM2", id_of_main_compartment)[0],
}
right = {
helpers.find_met_in_model(model, "MNXM7", id_of_main_compartment)[0],
helpers.find_met_in_model(model, "MNXM1", id_of_main_compartment)[0],
helpers.find_met_in_model(model, "MNXM9", id_of_main_compartment)[0],
}
except RuntimeError:
return False
return left.issubset(set(reaction.reactants)) and right.issubset(
set(reaction.products)
)
def find_direct_metabolites(model, reaction, tolerance=1e-06):
"""
Return list of possible direct biomass precursor metabolites.
The term direct metabolites describes metabolites that are involved only
in either transport and/or boundary reactions, AND the biomass reaction(s),
but not in any purely metabolic reactions.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
reaction : cobra.Reaction
The biomass reaction of the model under investigation.
tolerance : float, optional
Tolerance below which values will be regarded as zero.
Returns
-------
list
Metabolites that qualify as direct metabolites i.e. biomass precursors
that are taken up to be consumed by the biomass reaction only.
"""
biomass_rxns = set(helpers.find_biomass_reaction(model))
tra_bou_bio_rxns = helpers.find_interchange_biomass_reactions(model, biomass_rxns)
try:
precursors = find_biomass_precursors(model, reaction)
main_comp = helpers.find_compartment_id_in_model(model, "c")
ext_space = helpers.find_compartment_id_in_model(model, "e")
except KeyError:
LOGGER.error(
"Failed to properly identify cytosolic and extracellular " "compartments."
)
raise_with_traceback(
KeyError(
"The cytosolic and/or extracellular "
"compartments could not be identified."
)
)
except RuntimeError:
LOGGER.error(
"Failed to properly identify cytosolic and extracellular " "compartments."
)
raise_with_traceback(
RuntimeError(
"The cytosolic and/or extracellular "
"compartments could not be "
"identified."
)
)
else:
tra_bou_bio_mets = [
met for met in precursors if met.reactions.issubset(tra_bou_bio_rxns)
]
rxns_of_interest = set(
[
rxn
for met in tra_bou_bio_mets
for rxn in met.reactions
if rxn not in biomass_rxns
]
)
solution = model.optimize(raise_error=True)
if np.isclose(solution.objective_value, 0, atol=tolerance):
LOGGER.error(
"Failed to generate a non-zero objective value with "
"flux balance analysis."
)
raise OptimizationError(
"The flux balance analysis on this model returned an "
"objective value of zero. Make sure the model can "
"grow! Check if the constraints are not too strict!"
)
tra_bou_bio_fluxes = {r: solution[r.id] for r in rxns_of_interest}
met_flux_sum = {m: 0 for m in tra_bou_bio_mets}
return detect_false_positive_direct_metabolites(
tra_bou_bio_mets,
biomass_rxns,
main_comp,
ext_space,
tra_bou_bio_fluxes,
met_flux_sum,
)
def detect_false_positive_direct_metabolites(
candidates, biomass_reactions, cytosol, extra, reaction_fluxes, metabolite_fluxes
):
"""
Weed out false positive direct metabolites.
    False positives exist in the extracellular compartment, receive flux from
    the cytosolic compartment, and are part of the biomass reaction(s). The
    function sums fluxes positively or negatively depending on whether direct
    metabolites in the extracellular compartment are defined as reactants or
    products in the various reactions.
Parameters
----------
candidates : list of cobra.Metabolite
Candidate direct metabolites.
biomass_reactions : set of cobra.Reaction
The biomass reactions. Usually one or two.
cytosol : str
The identifier of the cytosolic compartment.
extra : str
The identifier of the extracellular compartment.
Returns
-------
list
Definitive list of direct metabolites, i.e., biomass precursors
that are taken up to be consumed by the biomass reaction only.
"""
for met in candidates:
is_internal = met.compartment != extra
for rxn in met.reactions:
if rxn in biomass_reactions:
continue
# Internal metabolites can not be false positives.
if is_internal:
metabolite_fluxes[met] += abs(reaction_fluxes[rxn])
continue
# if the metabolite is in the "e" compartment and a reactant,
# sum the fluxes accordingly (outward=negative, inward=positive)
if met in rxn.reactants:
product_comps = set([p.compartment for p in rxn.products])
# if the reaction has no product (outward flux)
if len(product_comps) == 0:
metabolite_fluxes[met] += -reaction_fluxes[rxn]
# if the reaction has a product in "c" (inward flux)
elif cytosol in product_comps:
metabolite_fluxes[met] += reaction_fluxes[rxn]
# if the metabolite is in the "e" compartment and a product,
# sum the fluxes accordingly (outward=negative, inward=positive)
elif met in rxn.products:
reactant_comps = set([p.compartment for p in rxn.reactants])
# if the reaction has no reactant (inward flux)
if len(reactant_comps) == 0:
metabolite_fluxes[met] += reaction_fluxes[rxn]
# if the reaction has a reactant in "c" (outward flux)
elif cytosol in reactant_comps:
metabolite_fluxes[met] += -reaction_fluxes[rxn]
return [m for m, f in iteritems(metabolite_fluxes) if f > 0]
def bundle_biomass_components(model, reaction):
"""
    Return bundled biomass component reactions if the biomass is not one lumped reaction.
There are two basic ways of specifying the biomass composition. The most
common is a single lumped reaction containing all biomass precursors.
Alternatively, the biomass equation can be split into several reactions
each focusing on a different macromolecular component for instance
a (1 gDW ash) + b (1 gDW phospholipids) + c (free fatty acids)+
d (1 gDW carbs) + e (1 gDW protein) + f (1 gDW RNA) + g (1 gDW DNA) +
h (vitamins/cofactors) + xATP + xH2O-> 1 gDCW biomass + xADP + xH + xPi.
    This function aims to identify whether the given biomass reaction
    'reaction' is a lumped all-in-one reaction, or whether it is just the
    final composing reaction of all macromolecular components. It is
    important to identify which other reactions belong to a given biomass
    reaction to be able to identify universal biomass components or calculate
    detailed precursor stoichiometries.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
reaction : cobra.core.reaction.Reaction
The biomass reaction of the model under investigation.
Returns
-------
list
One or more reactions that qualify as THE biomass equation together.
Notes
-----
Counting H2O, ADP, Pi, H, and ATP, the amount of metabolites in a split
reaction is comparatively low:
    Any reaction with 15 or fewer metabolites can
probably be counted as a split reaction containing Ash, Phospholipids,
Fatty Acids, Carbohydrates (i.e. cell wall components), Protein, RNA,
DNA, Cofactors and Vitamins, and Small Molecules. Any reaction with more
than or equal to 28 metabolites, however, (21 AA + 3 Nucleotides (4-ATP)
+ 4 Deoxy-Nucleotides) can be considered a lumped reaction.
Anything in between will be treated conservatively as a lumped reaction.
For split reactions, after removing any of the metabolites associated with
growth-associated energy expenditure (H2O, ADP, Pi, H, and ATP), the
only remaining metabolites should be generalized macromolecule precursors
e.g. Protein, Phospholipids etc. Each of these have their own composing
reactions. Hence we include the reactions of these metabolites in the
set that ultimately makes up the returned list of reactions that together
make up the biomass equation.
"""
if len(reaction.metabolites) >= 16:
return [reaction]
id_of_main_compartment = helpers.find_compartment_id_in_model(model, "c")
gam_mets = ["MNXM3", "MNXM2", "MNXM7", "MNXM1", "MNXM9"]
try:
gam = set(
[
helpers.find_met_in_model(model, met, id_of_main_compartment)[0]
for met in gam_mets
]
)
except RuntimeError:
gam = set()
regex = re.compile("^{}(_[a-zA-Z]+?)*?$".format("biomass"), re.IGNORECASE)
biomass_metabolite = set(model.metabolites.query(regex))
macromolecules = set(reaction.metabolites) - gam - biomass_metabolite
bundled_reactions = set()
for met in macromolecules:
bundled_reactions = bundled_reactions | set(met.reactions)
return list(bundled_reactions)
def essential_precursors_not_in_biomass(model, reaction):
u"""
Return a list of essential precursors missing from the biomass reaction.
There are universal components of life that make up the biomass of all
known organisms. These include all proteinogenic amino acids, deoxy- and
ribonucleotides, water and a range of metabolic cofactors.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
reaction : cobra.core.reaction.Reaction
The biomass reaction of the model under investigation.
Returns
-------
list
IDs of essential metabolites missing from the biomass reaction. The
        IDs will appear in the model's namespace if the metabolite exists, but
        will use the MetaNetX namespace if the metabolite does not exist
in the model.
Notes
-----
"Answering the question of what to include in the core of a biomass
objective function is not always straightforward. One example is different
nucleotide forms, which, although inter-convertible, are essential for
cellular chemistry. We propose here that all essential and irreplaceable
molecules for metabolism should be included in the biomass functions of
genome scale metabolic models. In the special case of cofactors, when two
forms of the same cofactor take part in the same reactions (such as NAD
and NADH), only one form could be included for the sake of simplicity.
When a class of cofactors includes active and non-active interconvertible
forms, the active forms should be preferred. [1]_."
    Please note that [1]_ also suggests counting C1 carriers
(derivatives of tetrahydrofolate(B9) or tetrahydromethanopterin) as
universal cofactors. We have omitted these from this check because there
are many individual compounds that classify as C1 carriers, and it is not
clear a priori which one should be preferred. In a future update, we may
consider identifying these using a chemical ontology.
References
----------
.. [1] <NAME>., <NAME>., & <NAME>. (2017). Integration of
Biomass Formulations of Genome-Scale Metabolic Models with Experimental
Data Reveals Universally Essential Cofactors in Prokaryotes. Metabolic
Engineering, 39(October 2016), 200–208.
http://doi.org/10.1016/j.ymben.2016.12.002
"""
main_comp = helpers.find_compartment_id_in_model(model, "c")
biomass_eq = bundle_biomass_components(model, reaction)
pooled_precursors = set([met for rxn in biomass_eq for met in rxn.metabolites])
missing_essential_precursors = []
for mnx_id in ESSENTIAL_PRECURSOR_IDS:
try:
met = helpers.find_met_in_model(model, mnx_id, main_comp)[0]
if met not in pooled_precursors:
missing_essential_precursors.append(met.id)
except RuntimeError:
missing_essential_precursors.append(mnx_id)
return missing_essential_precursors
| en | 0.858191 | # -*- coding: utf-8 -*- # Copyright 2017 Novo Nordisk Foundation Center for Biosustainability, # Technical University of Denmark. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Supporting functions for biomass consistency checks. # 20 Amino Acids, 4 Deoxyribonucleotides, 4 Ribonucleotides, # 8 Universal Cofactors, and H2O # Deoxyribonucleotides # Ribonucleotides # NAD # NADP # S-adenosyl-L-methionine # FAD # Pyridoxal 5'-phosphate # CoA # Thiamine Diphosphate # FMN # H2O Compute the sum of all reaction compounds. This function expects all metabolites of the biomass reaction to have formula information assigned. Parameters ---------- reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. Returns ------- float The molecular weight of the biomass reaction in units of g/mmol. Return a list of all biomass precursors excluding ATP and H2O. Parameters ---------- reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. model : cobra.Model The metabolic model under investigation. Returns ------- list Metabolite objects that are reactants of the biomass reaction excluding ATP and H2O. Return a list of all biomass precursors that cannot be produced. Parameters ---------- reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. model : cobra.Model The metabolic model under investigation. Returns ------- list Metabolite objects that are reactants of the biomass reaction excluding ATP and H2O that cannot be produced by flux balance analysis. Return boolean if biomass reaction includes growth-associated maintenance. Parameters ---------- model : cobra.Model The metabolic model under investigation. reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. Returns ------- boolean True if the biomass reaction includes ATP and H2O as reactants and ADP, Pi and H as products, False otherwise. Return list of possible direct biomass precursor metabolites. The term direct metabolites describes metabolites that are involved only in either transport and/or boundary reactions, AND the biomass reaction(s), but not in any purely metabolic reactions. Parameters ---------- model : cobra.Model The metabolic model under investigation. reaction : cobra.Reaction The biomass reaction of the model under investigation. tolerance : float, optional Tolerance below which values will be regarded as zero. Returns ------- list Metabolites that qualify as direct metabolites i.e. biomass precursors that are taken up to be consumed by the biomass reaction only. Weed out false positive direct metabolites. False positives exists in the extracellular compartment with flux from the cytosolic compartment and are part of the biomass reaction(s). It sums fluxes positively or negatively depending on if direct metabolites in the extracellular compartment are defined as reactants or products in various reactions. 
Parameters ---------- candidates : list of cobra.Metabolite Candidate direct metabolites. biomass_reactions : set of cobra.Reaction The biomass reactions. Usually one or two. cytosol : str The identifier of the cytosolic compartment. extra : str The identifier of the extracellular compartment. Returns ------- list Definitive list of direct metabolites, i.e., biomass precursors that are taken up to be consumed by the biomass reaction only. # Internal metabolites can not be false positives. # if the metabolite is in the "e" compartment and a reactant, # sum the fluxes accordingly (outward=negative, inward=positive) # if the reaction has no product (outward flux) # if the reaction has a product in "c" (inward flux) # if the metabolite is in the "e" compartment and a product, # sum the fluxes accordingly (outward=negative, inward=positive) # if the reaction has no reactant (inward flux) # if the reaction has a reactant in "c" (outward flux) Return bundle biomass component reactions if it is not one lumped reaction. There are two basic ways of specifying the biomass composition. The most common is a single lumped reaction containing all biomass precursors. Alternatively, the biomass equation can be split into several reactions each focusing on a different macromolecular component for instance a (1 gDW ash) + b (1 gDW phospholipids) + c (free fatty acids)+ d (1 gDW carbs) + e (1 gDW protein) + f (1 gDW RNA) + g (1 gDW DNA) + h (vitamins/cofactors) + xATP + xH2O-> 1 gDCW biomass + xADP + xH + xPi. This function aims to identify if the given biomass reaction 'reaction', is a lumped all-in-one reaction, or whether it is just the final composing reaction of all macromolecular components. It is important to identify which other reaction belong to a given biomass reaction to be able to identify universal biomass components or calculate detailed precursor stoichiometries. Parameters ---------- model : cobra.Model The metabolic model under investigation. reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. Returns ------- list One or more reactions that qualify as THE biomass equation together. Notes ----- Counting H2O, ADP, Pi, H, and ATP, the amount of metabolites in a split reaction is comparatively low: Any reaction with less or equal to 15 metabolites can probably be counted as a split reaction containing Ash, Phospholipids, Fatty Acids, Carbohydrates (i.e. cell wall components), Protein, RNA, DNA, Cofactors and Vitamins, and Small Molecules. Any reaction with more than or equal to 28 metabolites, however, (21 AA + 3 Nucleotides (4-ATP) + 4 Deoxy-Nucleotides) can be considered a lumped reaction. Anything in between will be treated conservatively as a lumped reaction. For split reactions, after removing any of the metabolites associated with growth-associated energy expenditure (H2O, ADP, Pi, H, and ATP), the only remaining metabolites should be generalized macromolecule precursors e.g. Protein, Phospholipids etc. Each of these have their own composing reactions. Hence we include the reactions of these metabolites in the set that ultimately makes up the returned list of reactions that together make up the biomass equation. Return a list of essential precursors missing from the biomass reaction. There are universal components of life that make up the biomass of all known organisms. These include all proteinogenic amino acids, deoxy- and ribonucleotides, water and a range of metabolic cofactors. 
Parameters ---------- model : cobra.Model The metabolic model under investigation. reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. Returns ------- list IDs of essential metabolites missing from the biomass reaction. The IDS will appear in the models namespace if the metabolite exists, but will be using the MetaNetX namespace if the metabolite does not exist in the model. Notes ----- "Answering the question of what to include in the core of a biomass objective function is not always straightforward. One example is different nucleotide forms, which, although inter-convertible, are essential for cellular chemistry. We propose here that all essential and irreplaceable molecules for metabolism should be included in the biomass functions of genome scale metabolic models. In the special case of cofactors, when two forms of the same cofactor take part in the same reactions (such as NAD and NADH), only one form could be included for the sake of simplicity. When a class of cofactors includes active and non-active interconvertible forms, the active forms should be preferred. [1]_." Please note, that [1]_ also suggest to count C1 carriers (derivatives of tetrahydrofolate(B9) or tetrahydromethanopterin) as universal cofactors. We have omitted these from this check because there are many individual compounds that classify as C1 carriers, and it is not clear a priori which one should be preferred. In a future update, we may consider identifying these using a chemical ontology. References ---------- .. [1] <NAME>., <NAME>., & <NAME>. (2017). Integration of Biomass Formulations of Genome-Scale Metabolic Models with Experimental Data Reveals Universally Essential Cofactors in Prokaryotes. Metabolic Engineering, 39(October 2016), 200–208. http://doi.org/10.1016/j.ymben.2016.12.002 | 1.877881 | 2 |
_unittests/ut_onnxrt/test_rt_valid_model_onevsrest_classifier.py | henrywu2019/mlprodict | 1 | 6630275 | """
@brief test log(time=4s)
"""
import unittest
from logging import getLogger
from pandas import DataFrame
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import ExtTestCase
from pyquickhelper.pandashelper import df2rst
from sklearn.exceptions import ConvergenceWarning
try:
from sklearn.utils._testing import ignore_warnings
except ImportError:
from sklearn.utils.testing import ignore_warnings
from skl2onnx import __version__ as skl2onnx_version
from mlprodict.onnxrt.validate import (
enumerate_validated_operator_opsets, summary_report
)
from mlprodict.onnxrt.doc.doc_write_helper import (
split_columns_subsets, build_key_split, filter_rows
)
class TestRtValidateOneVsRestClassifier(ExtTestCase):
@ignore_warnings(category=(UserWarning, ConvergenceWarning, RuntimeWarning))
def test_rt_OneVsRestClassifier_python(self):
fLOG(__file__, self._testMethodName, OutputPrint=__name__ == "__main__")
logger = getLogger('skl2onnx')
logger.disabled = True
verbose = 1 if __name__ == "__main__" else 0
debug = False
buffer = []
def myprint(*args, **kwargs):
buffer.append(" ".join(map(str, args)))
rows = list(enumerate_validated_operator_opsets(
verbose, models={"OneVsRestClassifier"}, opset_min=9,
opset_max=11, fLOG=myprint, benchmark=True,
runtime='python', debug=debug,
filter_exp=lambda m, p: True or 'm-cl' in p))
self.assertGreater(len(rows), 1)
self.assertIn('skl_nop', rows[0])
self.assertIn('onx_size', rows[-1])
piv = summary_report(DataFrame(rows))
self.assertGreater(piv.shape[0], 1)
self.assertGreater(piv.shape[0], 2)
common, subsets = split_columns_subsets(piv)
rst = df2rst(piv, number_format=2,
replacements={'nan': '', 'ERR: 4convert': ''},
split_row=lambda index, dp=piv: build_key_split(
dp.loc[index, "name"], index),
split_col_common=common,
split_col_subsets=subsets,
filter_rows=filter_rows,
column_size={'problem': 25},
label_pattern=".. _lpy-{section}:")
self.assertIn("opset9 | RT/SKL-N=1", rst)
if __name__ == "__main__":
unittest.main()
| """
@brief test log(time=4s)
"""
import unittest
from logging import getLogger
from pandas import DataFrame
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import ExtTestCase
from pyquickhelper.pandashelper import df2rst
from sklearn.exceptions import ConvergenceWarning
try:
from sklearn.utils._testing import ignore_warnings
except ImportError:
from sklearn.utils.testing import ignore_warnings
from skl2onnx import __version__ as skl2onnx_version
from mlprodict.onnxrt.validate import (
enumerate_validated_operator_opsets, summary_report
)
from mlprodict.onnxrt.doc.doc_write_helper import (
split_columns_subsets, build_key_split, filter_rows
)
class TestRtValidateOneVsRestClassifier(ExtTestCase):
@ignore_warnings(category=(UserWarning, ConvergenceWarning, RuntimeWarning))
def test_rt_OneVsRestClassifier_python(self):
fLOG(__file__, self._testMethodName, OutputPrint=__name__ == "__main__")
logger = getLogger('skl2onnx')
logger.disabled = True
verbose = 1 if __name__ == "__main__" else 0
debug = False
buffer = []
def myprint(*args, **kwargs):
buffer.append(" ".join(map(str, args)))
rows = list(enumerate_validated_operator_opsets(
verbose, models={"OneVsRestClassifier"}, opset_min=9,
opset_max=11, fLOG=myprint, benchmark=True,
runtime='python', debug=debug,
filter_exp=lambda m, p: True or 'm-cl' in p))
self.assertGreater(len(rows), 1)
self.assertIn('skl_nop', rows[0])
self.assertIn('onx_size', rows[-1])
piv = summary_report(DataFrame(rows))
self.assertGreater(piv.shape[0], 1)
self.assertGreater(piv.shape[0], 2)
common, subsets = split_columns_subsets(piv)
rst = df2rst(piv, number_format=2,
replacements={'nan': '', 'ERR: 4convert': ''},
split_row=lambda index, dp=piv: build_key_split(
dp.loc[index, "name"], index),
split_col_common=common,
split_col_subsets=subsets,
filter_rows=filter_rows,
column_size={'problem': 25},
label_pattern=".. _lpy-{section}:")
self.assertIn("opset9 | RT/SKL-N=1", rst)
if __name__ == "__main__":
unittest.main()
| en | 0.452621 | @brief test log(time=4s) | 2.420161 | 2 |
kitt/dataloading/mapping.py | spirali/k | 2 | 6630276 | <filename>kitt/dataloading/mapping.py
def create_tuple_mapper(input_fn, output_fn):
"""
Creates a mapping function that receives a tuple (input, output) and uses the two
provided functions to return tuple (input_fn(input), output_fn(output)).
"""
def fun(item):
input, output = item
return input_fn(input), output_fn(output)
return fun
def identity_fn(x):
return x
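# Hedged usage sketch (illustrative values only):
# mapper = create_tuple_mapper(lambda x: x * 2, str)
# mapper((3, 4)) # -> (6, '4')
# create_tuple_mapper(identity_fn, identity_fn)((3, 4)) # -> (3, 4)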
| <filename>kitt/dataloading/mapping.py
def create_tuple_mapper(input_fn, output_fn):
"""
Creates a mapping function that receives a tuple (input, output) and uses the two
provided functions to return tuple (input_fn(input), output_fn(output)).
"""
def fun(item):
input, output = item
return input_fn(input), output_fn(output)
return fun
def identity_fn(x):
return x
| en | 0.611806 | Creates a mapping function that receives a tuple (input, output) and uses the two provided functions to return tuple (input_fn(input), output_fn(output)). | 3.360691 | 3 |
agent_stable_baselines/stable_baselines/ddpg/main.py | Jannkar/doom_actionspace | 1 | 6630277 | import argparse
import time
import os
import gym
import tensorflow as tf
import numpy as np
from mpi4py import MPI
from stable_baselines import logger, bench
from stable_baselines.common.misc_util import set_global_seeds, boolean_flag
from stable_baselines.ddpg.policies import MlpPolicy, LnMlpPolicy
from stable_baselines.ddpg import DDPG
from stable_baselines.ddpg.memory import Memory
from stable_baselines.ddpg.noise import AdaptiveParamNoiseSpec, OrnsteinUhlenbeckActionNoise, NormalActionNoise
def run(env_id, seed, noise_type, layer_norm, evaluation, **kwargs):
"""
run the training of DDPG
:param env_id: (str) the environment ID
:param seed: (int) the initial random seed
    :param noise_type: (str) the desired noise type ('adaptive-param', 'normal' or 'ou'); multiple noise types can be
        combined by separating them with commas
:param layer_norm: (bool) use layer normalization
:param evaluation: (bool) enable evaluation of DDPG training
:param kwargs: (dict) extra keywords for the training.train function
"""
# Configure things.
rank = MPI.COMM_WORLD.Get_rank()
if rank != 0:
logger.set_level(logger.DISABLED)
# Create envs.
env = gym.make(env_id)
env = bench.Monitor(env, logger.get_dir() and os.path.join(logger.get_dir(), str(rank)))
if evaluation and rank == 0:
eval_env = gym.make(env_id)
eval_env = bench.Monitor(eval_env, os.path.join(logger.get_dir(), 'gym_eval'))
env = bench.Monitor(env, None)
else:
eval_env = None
# Parse noise_type
action_noise = None
param_noise = None
nb_actions = env.action_space.shape[-1]
for current_noise_type in noise_type.split(','):
current_noise_type = current_noise_type.strip()
if current_noise_type == 'none':
pass
elif 'adaptive-param' in current_noise_type:
_, stddev = current_noise_type.split('_')
param_noise = AdaptiveParamNoiseSpec(initial_stddev=float(stddev), desired_action_stddev=float(stddev))
elif 'normal' in current_noise_type:
_, stddev = current_noise_type.split('_')
action_noise = NormalActionNoise(mean=np.zeros(nb_actions), sigma=float(stddev) * np.ones(nb_actions))
elif 'ou' in current_noise_type:
_, stddev = current_noise_type.split('_')
action_noise = OrnsteinUhlenbeckActionNoise(mean=np.zeros(nb_actions),
sigma=float(stddev) * np.ones(nb_actions))
else:
raise RuntimeError('unknown noise type "{}"'.format(current_noise_type))
# Seed everything to make things reproducible.
seed = seed + 1000000 * rank
logger.info('rank {}: seed={}, logdir={}'.format(rank, seed, logger.get_dir()))
tf.reset_default_graph()
set_global_seeds(seed)
env.seed(seed)
if eval_env is not None:
eval_env.seed(seed)
# Disable logging for rank != 0 to avoid noise.
start_time = 0
if rank == 0:
start_time = time.time()
if layer_norm:
policy = LnMlpPolicy
else:
policy = MlpPolicy
num_timesteps = kwargs['num_timesteps']
del kwargs['num_timesteps']
model = DDPG(policy=policy, env=env, memory_policy=Memory, eval_env=eval_env, param_noise=param_noise,
action_noise=action_noise, memory_limit=int(1e6), verbose=2, **kwargs)
model.learn(total_timesteps=num_timesteps)
env.close()
if eval_env is not None:
eval_env.close()
if rank == 0:
logger.info('total runtime: {}s'.format(time.time() - start_time))
def parse_args():
"""
parse the arguments for DDPG training
:return: (dict) the arguments
"""
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--env-id', type=str, default='HalfCheetah-v1')
boolean_flag(parser, 'render-eval', default=False)
boolean_flag(parser, 'layer-norm', default=True)
boolean_flag(parser, 'render', default=False)
boolean_flag(parser, 'normalize-returns', default=False)
boolean_flag(parser, 'normalize-observations', default=True)
parser.add_argument('--seed', help='RNG seed', type=int, default=0)
parser.add_argument('--critic-l2-reg', type=float, default=1e-2)
parser.add_argument('--batch-size', type=int, default=64) # per MPI worker
parser.add_argument('--actor-lr', type=float, default=1e-4)
parser.add_argument('--critic-lr', type=float, default=1e-3)
boolean_flag(parser, 'enable-popart', default=False)
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--reward-scale', type=float, default=1.)
parser.add_argument('--clip-norm', type=float, default=None)
parser.add_argument('--nb-train-steps', type=int, default=50) # per epoch cycle and MPI worker
parser.add_argument('--nb-eval-steps', type=int, default=100) # per epoch cycle and MPI worker
parser.add_argument('--nb-rollout-steps', type=int, default=100) # per epoch cycle and MPI worker
# choices are adaptive-param_xx, ou_xx, normal_xx, none
parser.add_argument('--noise-type', type=str, default='adaptive-param_0.2')
parser.add_argument('--num-timesteps', type=int, default=int(1e6))
boolean_flag(parser, 'evaluation', default=False)
args = parser.parse_args()
dict_args = vars(args)
return dict_args
if __name__ == '__main__':
args = parse_args()
if MPI.COMM_WORLD.Get_rank() == 0:
logger.configure()
# Run actual script.
run(**args)
| import argparse
import time
import os
import gym
import tensorflow as tf
import numpy as np
from mpi4py import MPI
from stable_baselines import logger, bench
from stable_baselines.common.misc_util import set_global_seeds, boolean_flag
from stable_baselines.ddpg.policies import MlpPolicy, LnMlpPolicy
from stable_baselines.ddpg import DDPG
from stable_baselines.ddpg.memory import Memory
from stable_baselines.ddpg.noise import AdaptiveParamNoiseSpec, OrnsteinUhlenbeckActionNoise, NormalActionNoise
def run(env_id, seed, noise_type, layer_norm, evaluation, **kwargs):
"""
run the training of DDPG
:param env_id: (str) the environment ID
:param seed: (int) the initial random seed
:param noise_type: (str) the wanted noises ('adaptive-param', 'normal' or 'ou'), can use multiple noise type by
        separating them with commas
:param layer_norm: (bool) use layer normalization
:param evaluation: (bool) enable evaluation of DDPG training
:param kwargs: (dict) extra keywords for the training.train function
"""
# Configure things.
rank = MPI.COMM_WORLD.Get_rank()
if rank != 0:
logger.set_level(logger.DISABLED)
# Create envs.
env = gym.make(env_id)
env = bench.Monitor(env, logger.get_dir() and os.path.join(logger.get_dir(), str(rank)))
if evaluation and rank == 0:
eval_env = gym.make(env_id)
eval_env = bench.Monitor(eval_env, os.path.join(logger.get_dir(), 'gym_eval'))
env = bench.Monitor(env, None)
else:
eval_env = None
# Parse noise_type
action_noise = None
param_noise = None
nb_actions = env.action_space.shape[-1]
for current_noise_type in noise_type.split(','):
current_noise_type = current_noise_type.strip()
if current_noise_type == 'none':
pass
elif 'adaptive-param' in current_noise_type:
_, stddev = current_noise_type.split('_')
param_noise = AdaptiveParamNoiseSpec(initial_stddev=float(stddev), desired_action_stddev=float(stddev))
elif 'normal' in current_noise_type:
_, stddev = current_noise_type.split('_')
action_noise = NormalActionNoise(mean=np.zeros(nb_actions), sigma=float(stddev) * np.ones(nb_actions))
elif 'ou' in current_noise_type:
_, stddev = current_noise_type.split('_')
action_noise = OrnsteinUhlenbeckActionNoise(mean=np.zeros(nb_actions),
sigma=float(stddev) * np.ones(nb_actions))
else:
raise RuntimeError('unknown noise type "{}"'.format(current_noise_type))
# Seed everything to make things reproducible.
seed = seed + 1000000 * rank
logger.info('rank {}: seed={}, logdir={}'.format(rank, seed, logger.get_dir()))
tf.reset_default_graph()
set_global_seeds(seed)
env.seed(seed)
if eval_env is not None:
eval_env.seed(seed)
# Disable logging for rank != 0 to avoid noise.
start_time = 0
if rank == 0:
start_time = time.time()
if layer_norm:
policy = LnMlpPolicy
else:
policy = MlpPolicy
num_timesteps = kwargs['num_timesteps']
del kwargs['num_timesteps']
model = DDPG(policy=policy, env=env, memory_policy=Memory, eval_env=eval_env, param_noise=param_noise,
action_noise=action_noise, memory_limit=int(1e6), verbose=2, **kwargs)
model.learn(total_timesteps=num_timesteps)
env.close()
if eval_env is not None:
eval_env.close()
if rank == 0:
logger.info('total runtime: {}s'.format(time.time() - start_time))
def parse_args():
"""
parse the arguments for DDPG training
:return: (dict) the arguments
"""
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--env-id', type=str, default='HalfCheetah-v1')
boolean_flag(parser, 'render-eval', default=False)
boolean_flag(parser, 'layer-norm', default=True)
boolean_flag(parser, 'render', default=False)
boolean_flag(parser, 'normalize-returns', default=False)
boolean_flag(parser, 'normalize-observations', default=True)
parser.add_argument('--seed', help='RNG seed', type=int, default=0)
parser.add_argument('--critic-l2-reg', type=float, default=1e-2)
parser.add_argument('--batch-size', type=int, default=64) # per MPI worker
parser.add_argument('--actor-lr', type=float, default=1e-4)
parser.add_argument('--critic-lr', type=float, default=1e-3)
boolean_flag(parser, 'enable-popart', default=False)
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--reward-scale', type=float, default=1.)
parser.add_argument('--clip-norm', type=float, default=None)
parser.add_argument('--nb-train-steps', type=int, default=50) # per epoch cycle and MPI worker
parser.add_argument('--nb-eval-steps', type=int, default=100) # per epoch cycle and MPI worker
parser.add_argument('--nb-rollout-steps', type=int, default=100) # per epoch cycle and MPI worker
# choices are adaptive-param_xx, ou_xx, normal_xx, none
parser.add_argument('--noise-type', type=str, default='adaptive-param_0.2')
parser.add_argument('--num-timesteps', type=int, default=int(1e6))
boolean_flag(parser, 'evaluation', default=False)
args = parser.parse_args()
dict_args = vars(args)
return dict_args
if __name__ == '__main__':
args = parse_args()
if MPI.COMM_WORLD.Get_rank() == 0:
logger.configure()
# Run actual script.
run(**args)
| en | 0.68398 | run the training of DDPG
:param env_id: (str) the environment ID
:param seed: (int) the initial random seed
:param noise_type: (str) the wanted noises ('adaptive-param', 'normal' or 'ou'), can use multiple noise type by
separating them with commas
:param layer_norm: (bool) use layer normalization
:param evaluation: (bool) enable evaluation of DDPG training
:param kwargs: (dict) extra keywords for the training.train function # Configure things. # Create envs. # Parse noise_type # Seed everything to make things reproducible. # Disable logging for rank != 0 to avoid noise. parse the arguments for DDPG training
:return: (dict) the arguments # per MPI worker # per epoch cycle and MPI worker # per epoch cycle and MPI worker # per epoch cycle and MPI worker # choices are adaptive-param_xx, ou_xx, normal_xx, none # Run actual script. | 2.220142 | 2 |
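A short sketch of the noise objects that noise_type strings such as 'normal_0.1' or 'ou_0.2' are parsed into by the loop above; the action dimension of 2 is an assumption for illustration, in the real script it comes from env.action_space.
import numpy as np
from stable_baselines.ddpg.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise

nb_actions = 2  # assumed action dimension
# 'normal_0.1' -> independent Gaussian noise with stddev 0.1 on each action dimension
normal_noise = NormalActionNoise(mean=np.zeros(nb_actions), sigma=0.1 * np.ones(nb_actions))
# 'ou_0.2' -> temporally correlated Ornstein-Uhlenbeck noise with stddev 0.2
ou_noise = OrnsteinUhlenbeckActionNoise(mean=np.zeros(nb_actions), sigma=0.2 * np.ones(nb_actions))
print(normal_noise())  # one sampled noise vector
print(ou_noise())      # successive calls are correlated over time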
synapse/storage/schema/main/delta/61/03recreate_min_depth.py | mlakkadshaw/synapse | 9,945 | 6630278 | # Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This migration handles the process of changing the type of `room_depth.min_depth` to
a BIGINT.
"""
from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine
from synapse.storage.types import Cursor
def run_create(cur: Cursor, database_engine: BaseDatabaseEngine, *args, **kwargs):
if not isinstance(database_engine, PostgresEngine):
# this only applies to postgres - sqlite does not distinguish between big and
# little ints.
return
# First add a new column to contain the bigger min_depth
cur.execute("ALTER TABLE room_depth ADD COLUMN min_depth2 BIGINT")
# Create a trigger which will keep it populated.
cur.execute(
"""
CREATE OR REPLACE FUNCTION populate_min_depth2() RETURNS trigger AS $BODY$
BEGIN
new.min_depth2 := new.min_depth;
RETURN NEW;
END;
$BODY$ LANGUAGE plpgsql
"""
)
cur.execute(
"""
CREATE TRIGGER populate_min_depth2_trigger BEFORE INSERT OR UPDATE ON room_depth
FOR EACH ROW
EXECUTE PROCEDURE populate_min_depth2()
"""
)
# Start a bg process to populate it for old rooms
cur.execute(
"""
INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
(6103, 'populate_room_depth_min_depth2', '{}')
"""
)
# and another to switch them over once it completes.
cur.execute(
"""
INSERT INTO background_updates (ordering, update_name, progress_json, depends_on) VALUES
(6103, 'replace_room_depth_min_depth', '{}', 'populate_room_depth2')
"""
)
def run_upgrade(cur: Cursor, database_engine: BaseDatabaseEngine, *args, **kwargs):
pass
| # Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This migration handles the process of changing the type of `room_depth.min_depth` to
a BIGINT.
"""
from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine
from synapse.storage.types import Cursor
def run_create(cur: Cursor, database_engine: BaseDatabaseEngine, *args, **kwargs):
if not isinstance(database_engine, PostgresEngine):
# this only applies to postgres - sqlite does not distinguish between big and
# little ints.
return
# First add a new column to contain the bigger min_depth
cur.execute("ALTER TABLE room_depth ADD COLUMN min_depth2 BIGINT")
# Create a trigger which will keep it populated.
cur.execute(
"""
CREATE OR REPLACE FUNCTION populate_min_depth2() RETURNS trigger AS $BODY$
BEGIN
new.min_depth2 := new.min_depth;
RETURN NEW;
END;
$BODY$ LANGUAGE plpgsql
"""
)
cur.execute(
"""
CREATE TRIGGER populate_min_depth2_trigger BEFORE INSERT OR UPDATE ON room_depth
FOR EACH ROW
EXECUTE PROCEDURE populate_min_depth2()
"""
)
# Start a bg process to populate it for old rooms
cur.execute(
"""
INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
(6103, 'populate_room_depth_min_depth2', '{}')
"""
)
# and another to switch them over once it completes.
cur.execute(
"""
INSERT INTO background_updates (ordering, update_name, progress_json, depends_on) VALUES
(6103, 'replace_room_depth_min_depth', '{}', 'populate_room_depth2')
"""
)
def run_upgrade(cur: Cursor, database_engine: BaseDatabaseEngine, *args, **kwargs):
pass
| en | 0.714647 | # Copyright 2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. This migration handles the process of changing the type of `room_depth.min_depth` to a BIGINT. # this only applies to postgres - sqlite does not distinguish between big and # little ints. # First add a new column to contain the bigger min_depth # Create a trigger which will keep it populated. CREATE OR REPLACE FUNCTION populate_min_depth2() RETURNS trigger AS $BODY$ BEGIN new.min_depth2 := new.min_depth; RETURN NEW; END; $BODY$ LANGUAGE plpgsql CREATE TRIGGER populate_min_depth2_trigger BEFORE INSERT OR UPDATE ON room_depth FOR EACH ROW EXECUTE PROCEDURE populate_min_depth2() # Start a bg process to populate it for old rooms INSERT INTO background_updates (ordering, update_name, progress_json) VALUES (6103, 'populate_room_depth_min_depth2', '{}') # and another to switch them over once it completes. INSERT INTO background_updates (ordering, update_name, progress_json, depends_on) VALUES (6103, 'replace_room_depth_min_depth', '{}', 'populate_room_depth2') | 2.163694 | 2 |
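A standalone, hedged illustration of the dual-write pattern this migration relies on (a new BIGINT column kept in sync by a trigger), run directly through psycopg2 against a scratch Postgres database; the DSN, the simplified room_depth table, and the rollback at the end are placeholders, and this is not how Synapse itself drives the migration.
import psycopg2

conn = psycopg2.connect("dbname=scratch user=postgres")  # assumed connection string
cur = conn.cursor()
# Scratch stand-in for the real table so the trigger has something to act on.
cur.execute("CREATE TABLE IF NOT EXISTS room_depth (room_id TEXT, min_depth INTEGER)")
cur.execute("ALTER TABLE room_depth ADD COLUMN IF NOT EXISTS min_depth2 BIGINT")
cur.execute("""
    CREATE OR REPLACE FUNCTION populate_min_depth2() RETURNS trigger AS $BODY$
    BEGIN
        new.min_depth2 := new.min_depth;
        RETURN NEW;
    END;
    $BODY$ LANGUAGE plpgsql
""")
cur.execute("""
    CREATE TRIGGER populate_min_depth2_trigger BEFORE INSERT OR UPDATE ON room_depth
    FOR EACH ROW EXECUTE PROCEDURE populate_min_depth2()
""")
cur.execute("INSERT INTO room_depth (room_id, min_depth) VALUES ('!room:example.org', 7)")
cur.execute("SELECT min_depth, min_depth2 FROM room_depth")
print(cur.fetchall())  # min_depth2 mirrors min_depth thanks to the trigger
conn.rollback()        # scratch-only run: discard all DDL and data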
API/moviepiapi/CastingList.py | theoarmengou/MoviePi | 1 | 6630279 | <filename>API/moviepiapi/CastingList.py<gh_stars>1-10
##
# EPITECH PROJECT, 2019
# MoviePi
# File description:
# actorList.py
##
from flask_restful import Resource
from moviepiapi.utils import fill_return_packet, db
from flask import request
###############################################################################
# CASTING LIST #
# DOC : DOCUMENTATION/CASTINGLIST.MD #
###############################################################################
class CastingList(Resource):
def get(self, film_id):
if not film_id:
return fill_return_packet(0, "Aucune ID n'est detecté", None)
result = db.request(
"SELECT fk_actors FROM films_casting WHERE fk_films=%s", str(film_id))
if not result:
return fill_return_packet(0, "Le film n'a aucun casting", None)
casting_list = result[0]['fk_actors']
query = "SELECT id, name, image FROM actors WHERE id IN(" + \
casting_list + ")"
result = db.request(query)
if not result:
return fill_return_packet(0, "KO", None)
return fill_return_packet(1, "OK", result)
| <filename>API/moviepiapi/CastingList.py<gh_stars>1-10
##
# EPITECH PROJECT, 2019
# MoviePi
# File description:
# actorList.py
##
from flask_restful import Resource
from moviepiapi.utils import fill_return_packet, db
from flask import request
###############################################################################
# CASTING LIST #
# DOC : DOCUMENTATION/CASTINGLIST.MD #
###############################################################################
class CastingList(Resource):
def get(self, film_id):
if not film_id:
return fill_return_packet(0, "Aucune ID n'est detecté", None)
result = db.request(
"SELECT fk_actors FROM films_casting WHERE fk_films=%s", str(film_id))
if not result:
return fill_return_packet(0, "Le film n'a aucun casting", None)
casting_list = result[0]['fk_actors']
query = "SELECT id, name, image FROM actors WHERE id IN(" + \
casting_list + ")"
result = db.request(query)
if not result:
return fill_return_packet(0, "KO", None)
return fill_return_packet(1, "OK", result)
| de | 0.533493 | ## # EPITECH PROJECT, 2019 # MoviePi # File description: # actorList.py ## ############################################################################### # CASTING LIST # # DOC : DOCUMENTATION/CASTINGLIST.MD # ############################################################################### | 2.954164 | 3 |
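A sketch of how a Flask-RESTful resource such as CastingList is typically registered; the route, the app setup, and the import path are assumptions, since the project's routing module and database configuration are not shown here.
from flask import Flask
from flask_restful import Api
from moviepiapi.CastingList import CastingList  # assumed import path for the resource above

app = Flask(__name__)
api = Api(app)
# <int:film_id> is passed through to CastingList.get(self, film_id)
api.add_resource(CastingList, "/casting/<int:film_id>")

if __name__ == "__main__":
    # Requires the project's db helper from moviepiapi.utils to be configured.
    app.run(debug=True)  # GET /casting/42 would return the casting of film 42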
implementations/python/mzlib/tools/cli.py | wulongict/mzSpecLib | 0 | 6630280 | import click
from mzlib.spectrum_library import SpectrumLibrary
from mzlib.index import MemoryIndex, SQLIndex
from mzlib.backends.text import TextSpectralLibraryWriter
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.group(context_settings=CONTEXT_SETTINGS)
def main():
'''A collection of utilities for inspecting and manipulating
spectral libraries.
'''
pass
@main.command("describe", short_help=("Produce a minimal textual description"
" of a spectral library"))
@click.argument('path', type=click.Path(exists=True))
@click.option("-d", "--diagnostics", is_flag=True,
help="Run more diagnostics, greatly increasing runtime but producing additional information")
def describe(path, diagnostics=False):
'''Produces a minimal textual description of a spectral library.
'''
click.echo("Describing \"%s\"" % (path,))
if SQLIndex.exists(path):
index_type = SQLIndex
else:
index_type = MemoryIndex
library = SpectrumLibrary(filename=path, index_type=index_type)
click.echo(f"Format: {library.format}")
click.echo(f"Size: {library.__len__()}")
fh = click.open_file("-", 'wt')
TextSpectralLibraryWriter(fh).write_header(library.backend)
@main.command("convert", short_help=("Convert a spectral library from one format to another"))
@click.argument('inpath', type=click.Path(exists=True))
@click.argument("outpath", type=click.Path())
@click.option("-f", "--format", type=click.Choice(["text", "json"]), default="text")
def convert(inpath, outpath, format=None):
'''Convert a spectral library from one format to another. If `outpath` is `-`,
instead of writing to file, data will instead be sent to STDOUT.
'''
if format is None:
format = "text"
if SQLIndex.exists(inpath):
index_type = SQLIndex
else:
index_type = MemoryIndex
library = SpectrumLibrary(filename=inpath, index_type=index_type)
fh = click.open_file(outpath, mode='w')
library.write(fh, format)
if __name__ == "__main__":
main()
| import click
from mzlib.spectrum_library import SpectrumLibrary
from mzlib.index import MemoryIndex, SQLIndex
from mzlib.backends.text import TextSpectralLibraryWriter
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.group(context_settings=CONTEXT_SETTINGS)
def main():
'''A collection of utilities for inspecting and manipulating
spectral libraries.
'''
pass
@main.command("describe", short_help=("Produce a minimal textual description"
" of a spectral library"))
@click.argument('path', type=click.Path(exists=True))
@click.option("-d", "--diagnostics", is_flag=True,
help="Run more diagnostics, greatly increasing runtime but producing additional information")
def describe(path, diagnostics=False):
'''Produces a minimal textual description of a spectral library.
'''
click.echo("Describing \"%s\"" % (path,))
if SQLIndex.exists(path):
index_type = SQLIndex
else:
index_type = MemoryIndex
library = SpectrumLibrary(filename=path, index_type=index_type)
click.echo(f"Format: {library.format}")
click.echo(f"Size: {library.__len__()}")
fh = click.open_file("-", 'wt')
TextSpectralLibraryWriter(fh).write_header(library.backend)
@main.command("convert", short_help=("Convert a spectral library from one format to another"))
@click.argument('inpath', type=click.Path(exists=True))
@click.argument("outpath", type=click.Path())
@click.option("-f", "--format", type=click.Choice(["text", "json"]), default="text")
def convert(inpath, outpath, format=None):
'''Convert a spectral library from one format to another. If `outpath` is `-`,
instead of writing to file, data will instead be sent to STDOUT.
'''
if format is None:
format = "text"
if SQLIndex.exists(inpath):
index_type = SQLIndex
else:
index_type = MemoryIndex
library = SpectrumLibrary(filename=inpath, index_type=index_type)
fh = click.open_file(outpath, mode='w')
library.write(fh, format)
if __name__ == "__main__":
main()
| en | 0.801284 | A collection of utilities for inspecting and manipulating spectral libraries. Produces a minimal textual description of a spectral library. Convert a spectral library from one format to another. If `outpath` is `-`, instead of writing to file, data will instead be sent to STDOUT. | 2.271386 | 2 |
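One way to exercise the click group above without a shell, using click's built-in test runner; the import path and the library file paths are placeholders, and the .msp path must point at an existing spectral library because describe validates it with click.Path(exists=True).
from click.testing import CliRunner
from mzlib.tools.cli import main  # assumed import path for the click group above

runner = CliRunner()
# Same as invoking the describe command from a shell on a library file.
result = runner.invoke(main, ["describe", "path/to/library.msp"])
print(result.exit_code, result.output)

# Same as invoking convert with JSON output.
result = runner.invoke(main, ["convert", "path/to/library.msp", "converted.json", "-f", "json"])
print(result.exit_code)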
databricks_utils/vega.py | e2fyi/databricks-utils | 1 | 6630281 |
"""
Basic vega functions to plot vega charts in databricks or jupyter notebooks.
.. moduleauthor:: <EMAIL>
"""
import json
DEFAULT_VEGA_OPTS = dict(theme="quartz",
defaultStyle=True,
actions=dict(export=True,
source=True,
editor=False,
renderer="canvas"))
"""Default settings for `vega-embed` (See `https://github.com/vega/vega-embed`)."""
def vega_embed(spec, display=None, **kwargs):
"""
Display a vega chart. Also return the HTML to display the vega chart.
:param display: Callable to render the resultant HTML (e.g. displayHTML).
:param kwargs: See `https://github.com/vega/vega-embed` for the vega
embed settings.
"""
tmp = dict()
tmp.update(DEFAULT_VEGA_OPTS)
tmp.update(kwargs)
conf = json.dumps(tmp)
if isinstance(spec, dict):
spec = json.dumps(spec)
html = """
<!DOCTYPE html>
<html>
<head>
<script src="https://cdn.jsdelivr.net/npm/vega@3"></script>
<script src="https://cdn.jsdelivr.net/npm/vega-lite@2"></script>
<script src="https://cdn.jsdelivr.net/npm/vega-embed@3"></script>
<script src="https://cdn.jsdelivr.net/npm/vega-themes@2"></script>
</head>
<body>
<div id="vis"></div>
<script type="text/javascript">
var spec = """+spec+""";
vegaEmbed('#vis', spec, """+conf+""").catch(console.error);
</script>
</body>
</html>"""
if callable(display):
display(html) # pylint: disable=undefined-variable
return html
|
"""
Basic vega functions to plot vega charts in databricks or jupyter notebooks.
.. moduleauthor:: <EMAIL>
"""
import json
DEFAULT_VEGA_OPTS = dict(theme="quartz",
defaultStyle=True,
actions=dict(export=True,
source=True,
editor=False,
renderer="canvas"))
"""Default settings for `vega-embed` (See `https://github.com/vega/vega-embed`)."""
def vega_embed(spec, display=None, **kwargs):
"""
Display a vega chart. Also return the HTML to display the vega chart.
:param display: Callable to render the resultant HTML (e.g. displayHTML).
:param kwargs: See `https://github.com/vega/vega-embed` for the vega
embed settings.
"""
tmp = dict()
tmp.update(DEFAULT_VEGA_OPTS)
tmp.update(kwargs)
conf = json.dumps(tmp)
if isinstance(spec, dict):
spec = json.dumps(spec)
html = """
<!DOCTYPE html>
<html>
<head>
<script src="https://cdn.jsdelivr.net/npm/vega@3"></script>
<script src="https://cdn.jsdelivr.net/npm/vega-lite@2"></script>
<script src="https://cdn.jsdelivr.net/npm/vega-embed@3"></script>
<script src="https://cdn.jsdelivr.net/npm/vega-themes@2"></script>
</head>
<body>
<div id="vis"></div>
<script type="text/javascript">
var spec = """+spec+""";
vegaEmbed('#vis', spec, """+conf+""").catch(console.error);
</script>
</body>
</html>"""
if callable(display):
display(html) # pylint: disable=undefined-variable
return html
| en | 0.394517 | Basic vega functions to plot vega charts in databricks or jupyter notebooks. .. moduleauthor:: <EMAIL> Default settings for `vega-embed` (See `https://github.com/vega/vega-embed`). Display a vega chart. Also return the HTML to display the vega chart. :param display: Callable to render the resultant HTML (e.g. displayHTML). :param kwargs: See `https://github.com/vega/vega-embed` for the vega embed settings. <!DOCTYPE html> <html> <head> <script src="https://cdn.jsdelivr.net/npm/vega@3"></script> <script src="https://cdn.jsdelivr.net/npm/vega-lite@2"></script> <script src="https://cdn.jsdelivr.net/npm/vega-embed@3"></script> <script src="https://cdn.jsdelivr.net/npm/vega-themes@2"></script> </head> <body> <div id="vis"></div> <script type="text/javascript"> var spec = ; vegaEmbed('#vis', spec, ).catch(console.error); </script> </body> </html> # pylint: disable=undefined-variable | 2.827508 | 3 |
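A hedged usage sketch for vega_embed outside Databricks, where displayHTML is unavailable, so the returned HTML is written to a file instead; the import path and the tiny Vega-Lite spec are assumptions.
from databricks_utils.vega import vega_embed  # assumed import path for the module above

# Minimal Vega-Lite v2 spec, matching the vega-lite@2 bundle loaded by the HTML template.
spec = {
    "$schema": "https://vega.github.io/schema/vega-lite/v2.json",
    "data": {"values": [{"x": 1, "y": 2}, {"x": 2, "y": 5}, {"x": 3, "y": 3}]},
    "mark": "line",
    "encoding": {
        "x": {"field": "x", "type": "quantitative"},
        "y": {"field": "y", "type": "quantitative"},
    },
}

# In a Databricks notebook, pass display=displayHTML to render inline instead.
html = vega_embed(spec, actions=dict(export=True, source=False, editor=False))
with open("chart.html", "w") as fout:
    fout.write(html)  # open this file in a browser to view the chart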
hackabot/quiz.py | Bulichek/pepequestbot | 0 | 6630282 | <gh_stars>0
Quizes = [
("Какую долю ваших расходов за месяц занимает косметика?",
["10% (5000 ₽)",
"5% (2500 ₽)",
"21% (10500 ₽)",
"13% (6500 ₽)"],
0),
("Назовите максимальную сумму, которую Вы тратили за раз в одном из магазинов:",
["15200 ₽",
"3840 ₽",
"7340 ₽",
"2710 ₽"],
0),
("На какую категорию товаров Вы потратили больше всего в прошлом месяце:",
["кино",
"фаст фуд",
"супермаркеты",
"искусство"],
2),
("Если ваша цель — просто сохранить свои деньги, оптимальной стратегией будет:",
["Хранить деньги под матрасом",
"Инвестировать в акции/облигации",
"Положить деньги в банк под стандартный процент",
"Часть держать на вкладах, часть — в инвестициях."],
2),
("Что из перечисленного нельзя купить на бирже?",
["акции",
"зерно",
"автомобиль",
"нефть"],
2),
("Правила минимизации валютных рисков заключается в том, чтобы брать кредиты:",
["в рублях",
"в долларах",
"в рублях и долларах",
"в той валюте в которой совершается большая часть расходов и получаются доходы"],
3),
("Предположим, вы положили 10 000 рублей на вклад под 5% годовых. "
"Какая сумма будет на этом вкладе через 10 лет?",
["10 000 * (1+10*0,05)",
"10 000 * (1+0,05)^10",
"10 000 * 1,05 * 10",
"10 000 * 10 / 1,05"],
1),
("Допустим, вы положили 100 000 рублей под 5% годовых на один год. "
"Инфляция за это время составила 3%. Сколько вы заработали на самом деле?",
["Всё просто, 100 000 * 1,05!",
"100 000 * 1,03",
"100 000 * (1,05-1,03)",
"100 000 * 1,05 * 0,97"],
2),
("Вы положили 50 000 рублей под 4% годовых на три года. "
"В первый год инфляция составила 2,2%, во второй — 6%, в третий — 3,9%. "
"Выгодным ли отказался вклад?",
["Да",
"Нет"],
1),
]
QuizPrompts = [
"Вот тебе квиз от Олега :)",
"Олег призывает тебя к ответу",
"А вот и квиз подъехал",
"Такс такс что тут у нас квиз от Олега наконец-тА",
"Квиз... (кродёться)",
]
QuizSuccess = [
"Верно!",
"Красава",
"Молодец, Олег гордится тобой",
"Олег гордится тобой",
"Молодец, кэшбэк твой",
]
QuizFail = [
"Неверно",
"Иди, учись",
"Мог бы и лучше ответить",
"Постарайся в следующий раз",
"И ты ещё хотел участвовать в квестах?"
]
| Quizes = [
("Какую долю ваших расходов за месяц занимает косметика?",
["10% (5000 ₽)",
"5% (2500 ₽)",
"21% (10500 ₽)",
"13% (6500 ₽)"],
0),
("Назовите максимальную сумму, которую Вы тратили за раз в одном из магазинов:",
["15200 ₽",
"3840 ₽",
"7340 ₽",
"2710 ₽"],
0),
("На какую категорию товаров Вы потратили больше всего в прошлом месяце:",
["кино",
"фаст фуд",
"супермаркеты",
"искусство"],
2),
("Если ваша цель — просто сохранить свои деньги, оптимальной стратегией будет:",
["Хранить деньги под матрасом",
"Инвестировать в акции/облигации",
"Положить деньги в банк под стандартный процент",
"Часть держать на вкладах, часть — в инвестициях."],
2),
("Что из перечисленного нельзя купить на бирже?",
["акции",
"зерно",
"автомобиль",
"нефть"],
2),
("Правила минимизации валютных рисков заключается в том, чтобы брать кредиты:",
["в рублях",
"в долларах",
"в рублях и долларах",
"в той валюте в которой совершается большая часть расходов и получаются доходы"],
3),
("Предположим, вы положили 10 000 рублей на вклад под 5% годовых. "
"Какая сумма будет на этом вкладе через 10 лет?",
["10 000 * (1+10*0,05)",
"10 000 * (1+0,05)^10",
"10 000 * 1,05 * 10",
"10 000 * 10 / 1,05"],
1),
("Допустим, вы положили 100 000 рублей под 5% годовых на один год. "
"Инфляция за это время составила 3%. Сколько вы заработали на самом деле?",
["Всё просто, 100 000 * 1,05!",
"100 000 * 1,03",
"100 000 * (1,05-1,03)",
"100 000 * 1,05 * 0,97"],
2),
("Вы положили 50 000 рублей под 4% годовых на три года. "
"В первый год инфляция составила 2,2%, во второй — 6%, в третий — 3,9%. "
"Выгодным ли отказался вклад?",
["Да",
"Нет"],
1),
]
QuizPrompts = [
"Вот тебе квиз от Олега :)",
"Олег призывает тебя к ответу",
"А вот и квиз подъехал",
"Такс такс что тут у нас квиз от Олега наконец-тА",
"Квиз... (кродёться)",
]
QuizSuccess = [
"Верно!",
"Красава",
"Молодец, Олег гордится тобой",
"Олег гордится тобой",
"Молодец, кэшбэк твой",
]
QuizFail = [
"Неверно",
"Иди, учись",
"Мог бы и лучше ответить",
"Постарайся в следующий раз",
"И ты ещё хотел участвовать в квестах?"
] | none | 1 | 2.231864 | 2 |
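A small sketch of how the (question, options, correct-index) tuples and the prompt/response lists above could be consumed; the import path and the console-printing handler are assumptions, since the actual Telegram bot wiring is not shown.
import random
from hackabot.quiz import Quizes, QuizPrompts, QuizSuccess, QuizFail  # assumed import path

def ask_random_quiz(answer_index):
    """Pick a random quiz, print it, and grade the given zero-based answer."""
    question, options, correct = random.choice(Quizes)
    print(random.choice(QuizPrompts))
    print(question)
    for i, option in enumerate(options, start=1):
        print(f"{i}. {option}")
    verdict = QuizSuccess if answer_index == correct else QuizFail
    print(random.choice(verdict))

ask_random_quiz(0)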
|
Chess Bot.py | life-elevated/ChessBot | 0 | 6630283 | <reponame>life-elevated/ChessBot<gh_stars>0
#minimax
import pygame, sys, time
from pygame.locals import *
import time
import pygame.gfxdraw
boardString = "" # 1=whitespace 2=blackspace 3=user 4=computer 5=selected
sizeBase = 600
rowcount = 10 # Adjust this to set the size of the board, must be divisible by 2
tileMeasurements = sizeBase/rowcount
if tileMeasurements > sizeBase/rowcount:
tileMeasurements = sizeBase/rowcount
active_player = 'user' # Set who goes first here. This is changed by switch_player() after every turn.
game_score = {'user':0, 'computer':0} # The scorecard
tileX = 0
tileY = 0
offset = False
pygame.init()
size = (sizeBase,sizeBase)
white = (255,255,255)
computer = (255,0,0)
user = (250,250,250)
black = (0,0,0)
screen = pygame.display.set_mode(size)
screen.fill(white)
pygame.display.set_caption("Checkers Bot")
black_square = pygame.Surface((tileMeasurements, tileMeasurements))
pygame.draw.rect(black_square, black, (0,0,tileMeasurements,tileMeasurements),0)
black_circle = pygame.Surface((tileMeasurements,tileMeasurements), pygame.SRCALPHA, 32)
black_circle.convert_alpha()
x = black_circle.get_rect()
pygame.draw.circle(black_circle,black,x.center,9)
user_circle = pygame.Surface((tileMeasurements,tileMeasurements), pygame.SRCALPHA, 32)
user_circle.convert_alpha()
x = user_circle.get_rect()
pygame.draw.circle(user_circle,user,x.center,9)
computer_circle = pygame.Surface((tileMeasurements,tileMeasurements), pygame.SRCALPHA, 32)
computer_circle.convert_alpha()
x = computer_circle.get_rect()
pygame.draw.circle(computer_circle,computer,x.center,9)
#pygame.draw.circle(black_circle,black,(x.centerx+tileMeasurements,x.centery/2),15)
playing = True
playtiles = pygame.sprite.Group()
gamepiece_group = pygame.sprite.Group()
class Board():
def __init__(self):
pass
def get_board(self):
pass
class Tile(pygame.sprite.Sprite):
def __init__(self, color, name, posX, posY, width, height, occupied=False, owner=None, tile_location={}):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface((tileMeasurements,tileMeasurements))
pygame.draw.rect(self.image, black,(0,0,width,height),0)
self.rect = self.image.get_rect()
self.rect.x = posX
self.rect.y = posY
self.name = name
self.occupied = occupied
self.owner = owner
self.tile_location = tile_location
tileSize = tileMeasurements
class gamePiece(pygame.sprite.Sprite):
def __init__(self, color, tile_location, name, posX, posY, radius, direction):
pygame.sprite.Sprite.__init__(self)
self.originalimage = pygame.Surface((tileMeasurements,tileMeasurements), pygame.SRCALPHA, 32)
self.originalimage.convert_alpha()
pygame.draw.circle(self.originalimage, color, (self.originalimage.get_width()/2,self.originalimage.get_height()/2), radius, 0)
self.rect = self.originalimage.get_rect()
self.image = self.originalimage
self.rect.x = posX
self.rect.y = posY
self.name = name
self.tile_location = tile_location
self.selected = False
self.direction = direction
def createBoard(screenSizeXY, tileSize):
global tileX, tileY, offset, boardString, tiles
print("Creating board in a "+str(int(screenSizeXY/tileSize))+"x"+str(int(screenSizeXY/tileSize))+" grid with each tile being "+str(int(tileSize))+" pixels in length and height ("+str(int(((screenSizeXY/tileSize)*(screenSizeXY/tileSize))/2))+" playable/unplayable tiles)")
tileX = int(tileMeasurements)
for i in range(int(((screenSizeXY/tileSize)*(screenSizeXY/tileSize))/2)):
if tileY <= 2*tileSize:
tile_location = {'row': int(tileY/(screenSizeXY/(screenSizeXY/tileSize))+1), 'col': int(tileX/(screenSizeXY/(screenSizeXY/tileSize))+1)}
playtiles.add(Tile(black, "Tile", tileX, tileY, tileSize, tileSize, occupied=True, owner='computer', tile_location=tile_location))
gamepiece_group.add(gamePiece(computer, tile_location, 'computer', tileX, tileY, int(tileSize*0.4), 'down'))
if offset:
boardString = boardString + "41"
else:
boardString = boardString + "14"
elif tileY >= ((screenSizeXY/tileSize)-3)*tileSize:
tile_location = {'row': int(tileY/(screenSizeXY/(screenSizeXY/tileSize))+1), 'col': int(tileX/(screenSizeXY/(screenSizeXY/tileSize))+1)}
playtiles.add(Tile(black, "Tile", tileX, tileY, tileSize, tileSize, occupied=True, owner='user', tile_location=tile_location))
gamepiece_group.add(gamePiece(user, tile_location, 'user', tileX, tileY, int(tileSize*0.4), 'up'))
if offset:
boardString = boardString + "31"
else:
boardString = boardString + "13"
else:
tile_location = {'row': int(tileY/(screenSizeXY/(screenSizeXY/tileSize))+1), 'col': int(tileX/(screenSizeXY/(screenSizeXY/tileSize))+1)}
newtile = Tile(black, "Tile", tileX, tileY, tileSize, tileSize, occupied=False, tile_location=tile_location)
playtiles.add(newtile)
if offset:
boardString = boardString + "21"
else:
boardString = boardString + "12"
if tileX >= screenSizeXY - (tileMeasurements*2):
tileY = tileY+tileSize
if offset:
offset = False
tileX = int(tileMeasurements)
else:
offset = True
tileX = 0
boardString = boardString + "\n"
else:
tileX = tileX+(tileSize*2)
def remove_highlight():
if active_player == 'user':
circle = user_circle
else:
circle = computer_circle
screen.blit(circle, (selected_piece.rect.x, selected_piece.rect.y))
def add_highlight():
screen.blit(black_circle, (selected_piece.rect.x, selected_piece.rect.y))
def switch_player():
global active_player, move_pending, selected_piece
if active_player == 'user':
active_player = 'computer'
else:
active_player = 'user'
if move_pending:
move_pending = False
selected_piece = None
print('USER TURN IS OVER')
def check_game_score():
global playing
user = game_score['user']
computer = game_score['computer']
winner = None
print('\nUser score: {}\nComputer score: {}'.format(user,computer))
print('Must reach {} to win the game'.format(winning_score))
if user == winning_score:
playing = False
winner = 'User'
elif computer == winning_score:
playing = False
winner = 'Computer'
if winner:
print('\n\n\n{} HAS WON THE GAME!!'.format(winner))
def check_if_piece_clicked(mouse):
global selected_piece, move_pending
for piece in gamepiece_group:
if piece.rect.collidepoint(mouse): # Find the game piece that is being clicked
if piece.name == active_player and not move_pending: # Only select a piece if it belongs to the active player and move is not pending
if selected_piece:
remove_highlight()
piece.selected = True
selected_piece = piece
add_highlight()
def check_if_tile_clicked(mouse):
global destination_tile
for tile in playtiles:
if tile.rect.collidepoint(mouse):
if selected_piece and not tile.occupied: # Only move if a piece is selected and destination is not occupied
destination_tile = tile
return True
def make_move():
# POSSIBLY DO SOME AI STUFF HERE WHEN DETERMINING
# all possible moves for the computer player.
if check_for_valid_move(): # If this returns true then proceed with the move.
_move()
def check_for_valid_move(): # Check if the move is valid and return True if it is. Otherwise returns implicitly as None.
global selected_piece, gamepiece, origin_tile, user_score, computer_score, middle_tile, move_pending
for tile in playtiles:
if tile.rect.x == selected_piece.rect.x and tile.rect.y == selected_piece.rect.y:
origin_tile = tile # This is the tile where the moving piece came from
direction = selected_piece.direction
origin_row = origin_tile.tile_location['row']
origin_column = origin_tile.tile_location['col']
destination_row = destination_tile.tile_location['row']
destination_column = destination_tile.tile_location['col']
if destination_row == origin_row + 1: # Piece is trying to move down 1 row
if move_pending:
return
if direction == 'down' or direction == 'both':
if destination_column == origin_column + 1 or destination_column == origin_column - 1: # Piece is trying to move 1 tile to the left or right
if not destination_tile.occupied: # Only allow if destination is not occupied
return True
elif destination_row == origin_row - 1: # Piece is trying to move up 1 row
if move_pending:
return
if direction == 'up' or direction == 'both':
if destination_column == origin_column + 1 or destination_column == origin_column - 1:
if not destination_tile.occupied:
return True
elif destination_row == origin_row + 2: # Piece is trying to move down 2 rows, possible jump
if direction == 'down' or direction == 'both':
if destination_column == origin_column + 2 or destination_column == origin_column - 2: # Piece is trying to move 2 tiles to the left or right
if destination_column == origin_column + 2: # jumping right
for tile in playtiles:
if tile.tile_location['row'] == origin_row + 1 and tile.tile_location['col'] == origin_column + 1:
if tile.owner and not tile.owner == active_player:
if not destination_tile.occupied:
game_score[active_player] += 1
middle_tile = tile
tile.owner = None
tile.occupied = False
if active_player == 'user':
move_pending = True
return True
elif destination_column == origin_column - 2: # jumping to left
for tile in playtiles: # Looping the tiles to find the tile that's being jumped.
if tile.tile_location['row'] == origin_row + 1 and tile.tile_location['col'] == origin_column - 1: # This is the tile being jumped
if tile.owner and not tile.owner == active_player: # Make sure the owner of the tile is the opponent so you don't jump your own piece
if not destination_tile.occupied:
game_score[active_player] += 1
middle_tile = tile
tile.owner = None # Remove the owner of the tile that was just jumped
tile.occupied = False # Mark the tile as unoccupied
if active_player == 'user':
move_pending = True
return True
elif destination_row == origin_row - 2: # Piece is trying to move up 2 rows, possible jump
if direction == 'up' or direction == 'both':
if destination_column == origin_column + 2 or destination_column == origin_column - 2:
if destination_column == origin_column + 2: # jumping to right
for tile in playtiles:
if tile.tile_location['row'] == origin_row - 1 and tile.tile_location['col'] == origin_column + 1:
if tile.owner and not tile.owner == active_player:
if not destination_tile.occupied:
game_score[active_player] += 1
middle_tile = tile
tile.owner = None
tile.occupied = False
if active_player == 'user':
move_pending = True #This is a valid jump so I set this to True to prevent selecting a different piece after a jump.
return True
elif destination_column == origin_column - 2: # jumping to left
for tile in playtiles:
if tile.tile_location['row'] == origin_row - 1 and tile.tile_location['col'] == origin_column - 1:
if tile.owner and not tile.owner == active_player:
if not destination_tile.occupied:
game_score[active_player] += 1
middle_tile = tile
tile.owner = None
tile.occupied = False
if active_player == 'user':
move_pending = True
return True
def _move(): # This should really only be called by make_move()
global selected_piece, gamepiece, origin_tile, middle_tile, jumped_piece
destination_tile.occupied = True # Sets the destination tile as being occupied
destination_tile.owner = active_player # Set the destination tile's owner as the active player
origin_tile.occupied = False
origin_tile.owner = None
screen.blit(black_square, (origin_tile.rect.x, origin_tile.rect.y))
selected_piece.rect.x = destination_tile.rect.x
selected_piece.rect.y = destination_tile.rect.y
gamepiece_group.draw(screen)
if middle_tile:
for piece in gamepiece_group:
if piece.rect.x == middle_tile.rect.x and piece.rect.y == middle_tile.rect.y:
jumped_piece = piece
break
jumped_piece.kill()
screen.blit(black_square,(middle_tile.rect.x, middle_tile.rect.y))
middle_tile = None
jumped_piece = None
check_game_score() # Check the game score and end the game if there is a winner
# WE NEED TO CHECK RIGHT HERE IF THE PIECE HAS REACHED THE
# FAR END OF THE BOARD OF THE OPPONENTS SIDE. THIS PIECE
# SHOULD BECOME A KING PIECE THAT CAN MOVE BOTH DIRECTIONS NOW.
    # If the piece has reached the opponent's far side then
# set selected_piece.direction = 'both'
if not move_pending:
selected_piece=None
switch_player() # Move is done and move_pending is False, switch player.
else:
add_highlight()
createBoard(sizeBase, tileMeasurements)
playtiles.draw(screen)
gamepiece_group.draw(screen)
selected_piece = None # A variable to hold the selected piece that is trying to move.
destination_tile = None # A variable to hold the destination tile that is being moved to.
middle_tile = None # A variable to hold the tile that is between a jumping piece and it's destination
origin_tile = None # A variable to hold the tile where the moving piece originated from
jumped_piece = None # A variable to hold the game piece that was jumped
move_pending = False # A variable to hold whether or not a multi-move is pending. Not currently implemented
winning_score = len(gamepiece_group) / 2 # The score required to win the game.
while playing:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.MOUSEBUTTONUP:
mouse = pygame.mouse.get_pos()
check_if_piece_clicked(mouse) # Check if a game piece was clicked
if check_if_tile_clicked(mouse): # Check if a tile was clicked
make_move() # Attempt the move
if event.type == pygame.KEYUP:
if event.key == pygame.K_SPACE and move_pending:
remove_highlight()
switch_player()
pygame.display.flip()
| #minimax
import pygame, sys, time
from pygame.locals import *
import time
import pygame.gfxdraw
boardString = "" # 1=whitespace 2=blackspace 3=user 4=computer 5=selected
sizeBase = 600
rowcount = 10 # Adjust this to set the size of the board, must be divisible by 2
tileMeasurements = sizeBase/rowcount
if tileMeasurements > sizeBase/rowcount:
tileMeasurements = sizeBase/rowcount
active_player = 'user' # Set who goes first here. This is changed by switch_player() after every turn.
game_score = {'user':0, 'computer':0} # The scorecard
tileX = 0
tileY = 0
offset = False
pygame.init()
size = (sizeBase,sizeBase)
white = (255,255,255)
computer = (255,0,0)
user = (250,250,250)
black = (0,0,0)
screen = pygame.display.set_mode(size)
screen.fill(white)
pygame.display.set_caption("Checkers Bot")
black_square = pygame.Surface((tileMeasurements, tileMeasurements))
pygame.draw.rect(black_square, black, (0,0,tileMeasurements,tileMeasurements),0)
black_circle = pygame.Surface((tileMeasurements,tileMeasurements), pygame.SRCALPHA, 32)
black_circle.convert_alpha()
x = black_circle.get_rect()
pygame.draw.circle(black_circle,black,x.center,9)
user_circle = pygame.Surface((tileMeasurements,tileMeasurements), pygame.SRCALPHA, 32)
user_circle.convert_alpha()
x = user_circle.get_rect()
pygame.draw.circle(user_circle,user,x.center,9)
computer_circle = pygame.Surface((tileMeasurements,tileMeasurements), pygame.SRCALPHA, 32)
computer_circle.convert_alpha()
x = computer_circle.get_rect()
pygame.draw.circle(computer_circle,computer,x.center,9)
#pygame.draw.circle(black_circle,black,(x.centerx+tileMeasurements,x.centery/2),15)
playing = True
playtiles = pygame.sprite.Group()
gamepiece_group = pygame.sprite.Group()
class Board():
def __init__(self):
pass
def get_board(self):
pass
class Tile(pygame.sprite.Sprite):
def __init__(self, color, name, posX, posY, width, height, occupied=False, owner=None, tile_location={}):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface((tileMeasurements,tileMeasurements))
pygame.draw.rect(self.image, black,(0,0,width,height),0)
self.rect = self.image.get_rect()
self.rect.x = posX
self.rect.y = posY
self.name = name
self.occupied = occupied
self.owner = owner
self.tile_location = tile_location
tileSize = tileMeasurements
class gamePiece(pygame.sprite.Sprite):
def __init__(self, color, tile_location, name, posX, posY, radius, direction):
pygame.sprite.Sprite.__init__(self)
self.originalimage = pygame.Surface((tileMeasurements,tileMeasurements), pygame.SRCALPHA, 32)
self.originalimage.convert_alpha()
pygame.draw.circle(self.originalimage, color, (self.originalimage.get_width()/2,self.originalimage.get_height()/2), radius, 0)
self.rect = self.originalimage.get_rect()
self.image = self.originalimage
self.rect.x = posX
self.rect.y = posY
self.name = name
self.tile_location = tile_location
self.selected = False
self.direction = direction
def createBoard(screenSizeXY, tileSize):
global tileX, tileY, offset, boardString, tiles
print("Creating board in a "+str(int(screenSizeXY/tileSize))+"x"+str(int(screenSizeXY/tileSize))+" grid with each tile being "+str(int(tileSize))+" pixels in length and height ("+str(int(((screenSizeXY/tileSize)*(screenSizeXY/tileSize))/2))+" playable/unplayable tiles)")
tileX = int(tileMeasurements)
for i in range(int(((screenSizeXY/tileSize)*(screenSizeXY/tileSize))/2)):
if tileY <= 2*tileSize:
tile_location = {'row': int(tileY/(screenSizeXY/(screenSizeXY/tileSize))+1), 'col': int(tileX/(screenSizeXY/(screenSizeXY/tileSize))+1)}
playtiles.add(Tile(black, "Tile", tileX, tileY, tileSize, tileSize, occupied=True, owner='computer', tile_location=tile_location))
gamepiece_group.add(gamePiece(computer, tile_location, 'computer', tileX, tileY, int(tileSize*0.4), 'down'))
if offset:
boardString = boardString + "41"
else:
boardString = boardString + "14"
elif tileY >= ((screenSizeXY/tileSize)-3)*tileSize:
tile_location = {'row': int(tileY/(screenSizeXY/(screenSizeXY/tileSize))+1), 'col': int(tileX/(screenSizeXY/(screenSizeXY/tileSize))+1)}
playtiles.add(Tile(black, "Tile", tileX, tileY, tileSize, tileSize, occupied=True, owner='user', tile_location=tile_location))
gamepiece_group.add(gamePiece(user, tile_location, 'user', tileX, tileY, int(tileSize*0.4), 'up'))
if offset:
boardString = boardString + "31"
else:
boardString = boardString + "13"
else:
tile_location = {'row': int(tileY/(screenSizeXY/(screenSizeXY/tileSize))+1), 'col': int(tileX/(screenSizeXY/(screenSizeXY/tileSize))+1)}
newtile = Tile(black, "Tile", tileX, tileY, tileSize, tileSize, occupied=False, tile_location=tile_location)
playtiles.add(newtile)
if offset:
boardString = boardString + "21"
else:
boardString = boardString + "12"
if tileX >= screenSizeXY - (tileMeasurements*2):
tileY = tileY+tileSize
if offset:
offset = False
tileX = int(tileMeasurements)
else:
offset = True
tileX = 0
boardString = boardString + "\n"
else:
tileX = tileX+(tileSize*2)
def remove_highlight():
if active_player == 'user':
circle = user_circle
else:
circle = computer_circle
screen.blit(circle, (selected_piece.rect.x, selected_piece.rect.y))
def add_highlight():
screen.blit(black_circle, (selected_piece.rect.x, selected_piece.rect.y))
def switch_player():
global active_player, move_pending, selected_piece
if active_player == 'user':
active_player = 'computer'
else:
active_player = 'user'
if move_pending:
move_pending = False
selected_piece = None
print('USER TURN IS OVER')
def check_game_score():
global playing
user = game_score['user']
computer = game_score['computer']
winner = None
print('\nUser score: {}\nComputer score: {}'.format(user,computer))
print('Must reach {} to win the game'.format(winning_score))
if user == winning_score:
playing = False
winner = 'User'
elif computer == winning_score:
playing = False
winner = 'Computer'
if winner:
print('\n\n\n{} HAS WON THE GAME!!'.format(winner))
def check_if_piece_clicked(mouse):
global selected_piece, move_pending
for piece in gamepiece_group:
if piece.rect.collidepoint(mouse): # Find the game piece that is being clicked
if piece.name == active_player and not move_pending: # Only select a piece if it belongs to the active player and move is not pending
if selected_piece:
remove_highlight()
piece.selected = True
selected_piece = piece
add_highlight()
def check_if_tile_clicked(mouse):
global destination_tile
for tile in playtiles:
if tile.rect.collidepoint(mouse):
if selected_piece and not tile.occupied: # Only move if a piece is selected and destination is not occupied
destination_tile = tile
return True
def make_move():
# POSSIBLY DO SOME AI STUFF HERE WHEN DETERMINING
# all possible moves for the computer player.
if check_for_valid_move(): # If this returns true then proceed with the move.
_move()
def check_for_valid_move(): # Check if the move is valid and return True if it is. Otherwise returns implicitly as None.
global selected_piece, gamepiece, origin_tile, user_score, computer_score, middle_tile, move_pending
for tile in playtiles:
if tile.rect.x == selected_piece.rect.x and tile.rect.y == selected_piece.rect.y:
origin_tile = tile # This is the tile where the moving piece came from
direction = selected_piece.direction
origin_row = origin_tile.tile_location['row']
origin_column = origin_tile.tile_location['col']
destination_row = destination_tile.tile_location['row']
destination_column = destination_tile.tile_location['col']
if destination_row == origin_row + 1: # Piece is trying to move down 1 row
if move_pending:
return
if direction == 'down' or direction == 'both':
if destination_column == origin_column + 1 or destination_column == origin_column - 1: # Piece is trying to move 1 tile to the left or right
if not destination_tile.occupied: # Only allow if destination is not occupied
return True
elif destination_row == origin_row - 1: # Piece is trying to move up 1 row
if move_pending:
return
if direction == 'up' or direction == 'both':
if destination_column == origin_column + 1 or destination_column == origin_column - 1:
if not destination_tile.occupied:
return True
elif destination_row == origin_row + 2: # Piece is trying to move down 2 rows, possible jump
if direction == 'down' or direction == 'both':
if destination_column == origin_column + 2 or destination_column == origin_column - 2: # Piece is trying to move 2 tiles to the left or right
if destination_column == origin_column + 2: # jumping right
for tile in playtiles:
if tile.tile_location['row'] == origin_row + 1 and tile.tile_location['col'] == origin_column + 1:
if tile.owner and not tile.owner == active_player:
if not destination_tile.occupied:
game_score[active_player] += 1
middle_tile = tile
tile.owner = None
tile.occupied = False
if active_player == 'user':
move_pending = True
return True
elif destination_column == origin_column - 2: # jumping to left
for tile in playtiles: # Looping the tiles to find the tile that's being jumped.
if tile.tile_location['row'] == origin_row + 1 and tile.tile_location['col'] == origin_column - 1: # This is the tile being jumped
if tile.owner and not tile.owner == active_player: # Make sure the owner of the tile is the opponent so you don't jump your own piece
if not destination_tile.occupied:
game_score[active_player] += 1
middle_tile = tile
tile.owner = None # Remove the owner of the tile that was just jumped
tile.occupied = False # Mark the tile as unoccupied
if active_player == 'user':
move_pending = True
return True
elif destination_row == origin_row - 2: # Piece is trying to move up 2 rows, possible jump
if direction == 'up' or direction == 'both':
if destination_column == origin_column + 2 or destination_column == origin_column - 2:
if destination_column == origin_column + 2: # jumping to right
for tile in playtiles:
if tile.tile_location['row'] == origin_row - 1 and tile.tile_location['col'] == origin_column + 1:
if tile.owner and not tile.owner == active_player:
if not destination_tile.occupied:
game_score[active_player] += 1
middle_tile = tile
tile.owner = None
tile.occupied = False
if active_player == 'user':
move_pending = True #This is a valid jump so I set this to True to prevent selecting a different piece after a jump.
return True
elif destination_column == origin_column - 2: # jumping to left
for tile in playtiles:
if tile.tile_location['row'] == origin_row - 1 and tile.tile_location['col'] == origin_column - 1:
if tile.owner and not tile.owner == active_player:
if not destination_tile.occupied:
game_score[active_player] += 1
middle_tile = tile
tile.owner = None
tile.occupied = False
if active_player == 'user':
move_pending = True
return True
def _move(): # This should really only be called by make_move()
global selected_piece, gamepiece, origin_tile, middle_tile, jumped_piece
destination_tile.occupied = True # Sets the destination tile as being occupied
destination_tile.owner = active_player # Set the destination tile's owner as the active player
origin_tile.occupied = False
origin_tile.owner = None
screen.blit(black_square, (origin_tile.rect.x, origin_tile.rect.y))
selected_piece.rect.x = destination_tile.rect.x
selected_piece.rect.y = destination_tile.rect.y
gamepiece_group.draw(screen)
if middle_tile:
for piece in gamepiece_group:
if piece.rect.x == middle_tile.rect.x and piece.rect.y == middle_tile.rect.y:
jumped_piece = piece
break
jumped_piece.kill()
screen.blit(black_square,(middle_tile.rect.x, middle_tile.rect.y))
middle_tile = None
jumped_piece = None
check_game_score() # Check the game score and end the game if there is a winner
# WE NEED TO CHECK RIGHT HERE IF THE PIECE HAS REACHED THE
# FAR END OF THE BOARD OF THE OPPONENTS SIDE. THIS PIECE
# SHOULD BECOME A KING PIECE THAT CAN MOVE BOTH DIRECTIONS NOW.
    # If the piece has reached the opponent's far side then
# set selected_piece.direction = 'both'
if not move_pending:
selected_piece=None
switch_player() # Move is done and move_pending is False, switch player.
else:
add_highlight()
createBoard(sizeBase, tileMeasurements)
playtiles.draw(screen)
gamepiece_group.draw(screen)
selected_piece = None # A variable to hold the selected piece that is trying to move.
destination_tile = None # A variable to hold the destination tile that is being moved to.
middle_tile = None # A variable to hold the tile that is between a jumping piece and it's destination
origin_tile = None # A variable to hold the tile where the moving piece originated from
jumped_piece = None # A variable to hold the game piece that was jumped
move_pending = False # A variable to hold whether or not a multi-move is pending. Not currently implemented
winning_score = len(gamepiece_group) / 2 # The score required to win the game.
while playing:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.MOUSEBUTTONUP:
mouse = pygame.mouse.get_pos()
check_if_piece_clicked(mouse) # Check if a game piece was clicked
if check_if_tile_clicked(mouse): # Check if a tile was clicked
make_move() # Attempt the move
if event.type == pygame.KEYUP:
if event.key == pygame.K_SPACE and move_pending:
remove_highlight()
switch_player()
pygame.display.flip() | en | 0.930689 | #minimax # 1=whitespace 2=blackspace 3=user 4=computer 5=selected # Adjust this to set the size of the board, must be divisible by 2 # Set who goes first here. This is changed by switch_player() after every turn. # The scorecard #pygame.draw.circle(black_circle,black,(x.centerx+tileMeasurements,x.centery/2),15) # Find the game piece that is being clicked # Only select a piece if it belongs to the active player and move is not pending # Only move if a piece is selected and destination is not occupied # POSSIBLY DO SOME AI STUFF HERE WHEN DETERMINING # all possible moves for the computer player. # If this returns true then proceed with the move. # Check if the move is valid and return True if it is. Otherwise returns implicitly as None. # This is the tile where the moving piece came from # Piece is trying to move down 1 row # Piece is trying to move 1 tile to the left or right # Only allow if destination is not occupied # Piece is trying to move up 1 row # Piece is trying to move down 2 rows, possible jump # Piece is trying to move 2 tiles to the left or right # jumping right # jumping to left # Looping the tiles to find the tile that's being jumped. # This is the tile being jumped # Make sure the owner of the tile is the opponent so you don't jump your own piece # Remove the owner of the tile that was just jumped # Mark the tile as unoccupied # Piece is trying to move up 2 rows, possible jump # jumping to right #This is a valid jump so I set this to True to prevent selecting a different piece after a jump. # jumping to left # This should really only be called by make_move() # Sets the destination tile as being occupied # Set the destination tile's owner as the active player # Check the game score and end the game if there is a winner # WE NEED TO CHECK RIGHT HERE IF THE PIECE HAS REACHED THE # FAR END OF THE BOARD OF THE OPPONENTS SIDE. THIS PIECE # SHOULD BECOME A KING PIECE THAT CAN MOVE BOTH DIRECTIONS NOW. # If the piece has reached the oppenents far side then # set selected_piece.direction = 'both' # Move is done and move_pending is False, switch player. # A variable to hold the selected piece that is trying to move. # A variable to hold the destination tile that is being moved to. # A variable to hold the tile that is between a jumping piece and it's destination # A variable to hold the tile where the moving piece originated from # A variable to hold the game piece that was jumped # A variable to hold whether or not a multi-move is pending. Not curently implemented # The score required to win the game. # Check if a game piece was clicked # Check if a tile was clicked # Attempt the move | 3.054881 | 3 |
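A standalone sketch of the row/column arithmetic behind plain one-square moves in check_for_valid_move above; it needs no pygame, and the function name, board_size default, and sample origin are assumptions for illustration.
def simple_move_targets(origin, direction, board_size=10):
    """Squares a piece at `origin` could step to (ignoring occupancy), using the
    same 1-indexed row/col convention and direction rules as the game code."""
    row, col = origin["row"], origin["col"]
    row_steps = []
    if direction in ("down", "both"):
        row_steps.append(1)   # 'down' means the row number increases
    if direction in ("up", "both"):
        row_steps.append(-1)
    targets = []
    for dr in row_steps:
        for dc in (-1, 1):    # diagonal step to the left or to the right
            r, c = row + dr, col + dc
            if 1 <= r <= board_size and 1 <= c <= board_size:
                targets.append({"row": r, "col": c})
    return targets

print(simple_move_targets({"row": 8, "col": 3}, "up"))  # -> [{'row': 7, 'col': 2}, {'row': 7, 'col': 4}]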
examples/applications/semantic_search_quora_annoy.py | zhangxieyang2/sentence-transformers | 1 | 6630284 | <filename>examples/applications/semantic_search_quora_annoy.py
"""
This example uses Approximate Nearest Neighbor Search (ANN) with Annoy (https://github.com/spotify/annoy).
Searching a large corpus with Millions of embeddings can be time-consuming. To speed this up,
ANN can index the existing vectors. For a new query vector, this index can be used to find the nearest neighbors.
This nearest neighbor search is not perfect, i.e., it might not perfectly find all top-k nearest neighbors.
In this example, we use Annoy. It learns a tree that partitions the embeddings into smaller sections. For our query embeddings,
we can efficiently check which section matches and only search that section for nearest neighbor.
Selecting the n_trees parameter is quite important. With more trees, we get a better recall, but a worse run-time.
This script will compare the results from ANN with exact nearest neighbor search and output a Recall@k value
as well as the missing results in the top-k hits list.
See the Annoy repository for how to install Annoy.
For details on how Annoy works, see: https://erikbern.com/2015/10/01/nearest-neighbors-and-vector-models-part-2-how-to-search-in-high-dimensional-spaces.html
As dataset, we use the Quora Duplicate Questions dataset, which contains about 500k questions:
https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs
Questions are embedded and Annoy is used for (approximate) semantic similarity search.
"""
from sentence_transformers import SentenceTransformer, util
import os
import csv
import pickle
import time
import torch
from annoy import AnnoyIndex
if __name__ == '__main__':
model_name = 'distilbert-base-nli-stsb-quora-ranking'
model = SentenceTransformer(model_name)
url = "http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv"
dataset_path = "quora_duplicate_questions.tsv"
max_corpus_size = 100000
n_trees = 256 #Number of trees used for Annoy. More trees => better recall, worse run-time
embedding_size = 768 #Size of embeddings
top_k_hits = 10 #Output k hits
annoy_index_path = 'quora-embeddings-{}-size-{}-annoy_index-trees-{}.ann'.format(model_name.replace('/', '_'), max_corpus_size,n_trees)
embedding_cache_path = 'quora-embeddings-{}-size-{}.pkl'.format(model_name.replace('/', '_'), max_corpus_size)
#Check if embedding cache path exists
if not os.path.exists(embedding_cache_path):
# Check if the dataset exists. If not, download and extract
# Download dataset if needed
if not os.path.exists(dataset_path):
print("Download dataset")
util.http_get(url, dataset_path)
# Get all unique sentences from the file
corpus_sentences = set()
with open(dataset_path, encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_MINIMAL)
for row in reader:
corpus_sentences.add(row['question1'])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences.add(row['question2'])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences = list(corpus_sentences)
print("Encode the corpus. This might take a while")
corpus_embeddings = model.encode(corpus_sentences, show_progress_bar=True, convert_to_numpy=True, num_workers=2)
print("Store file on disc")
with open(embedding_cache_path, "wb") as fOut:
pickle.dump({'sentences': corpus_sentences, 'embeddings': corpus_embeddings}, fOut)
else:
print("Load pre-computed embeddings from disc")
with open(embedding_cache_path, "rb") as fIn:
cache_data = pickle.load(fIn)
corpus_sentences = cache_data['sentences']
corpus_embeddings = cache_data['embeddings']
if not os.path.exists(annoy_index_path):
# Create Annoy Index
print("Create Annoy index with {} trees. This can take some time.".format(n_trees))
annoy_index = AnnoyIndex(embedding_size, 'angular')
for i in range(len(corpus_embeddings)):
annoy_index.add_item(i, corpus_embeddings[i])
annoy_index.build(n_trees)
annoy_index.save(annoy_index_path)
else:
#Load Annoy Index from disc
annoy_index = AnnoyIndex(embedding_size, 'angular')
annoy_index.load(annoy_index_path)
corpus_embeddings = torch.from_numpy(corpus_embeddings)
######### Search in the index ###########
print("Corpus loaded with {} sentences / embeddings".format(len(corpus_sentences)))
while True:
inp_question = input("Please enter a question: ")
start_time = time.time()
question_embedding = model.encode(inp_question)
corpus_ids, scores = annoy_index.get_nns_by_vector(question_embedding, top_k_hits, include_distances=True)
hits = []
for id, score in zip(corpus_ids, scores):
hits.append({'corpus_id': id, 'score': 1-((score**2) / 2)})
end_time = time.time()
print("Input question:", inp_question)
print("Results (after {:.3f} seconds):".format(end_time-start_time))
for hit in hits[0:top_k_hits]:
print("\t{:.3f}\t{}".format(hit['score'], corpus_sentences[hit['corpus_id']]))
# Approximate Nearest Neighbor (ANN) is not exact, it might miss entries with high cosine similarity
# Here, we compute the recall of ANN compared to the exact results
correct_hits = util.semantic_search(question_embedding, corpus_embeddings, top_k=top_k_hits)[0]
correct_hits_ids = set([hit['corpus_id'] for hit in correct_hits])
#Compute recall
ann_corpus_ids = set(corpus_ids)
if len(ann_corpus_ids) != len(correct_hits_ids):
print("Approximate Nearest Neighbor returned a different number of results than expected")
recall = len(ann_corpus_ids.intersection(correct_hits_ids)) / len(correct_hits_ids)
print("\nApproximate Nearest Neighbor Recall@{}: {:.2f}".format(top_k_hits, recall * 100))
if recall < 1:
print("Missing results:")
for hit in correct_hits[0:top_k_hits]:
if hit['corpus_id'] not in ann_corpus_ids:
print("\t{:.3f}\t{}".format(hit['score'], corpus_sentences[hit['corpus_id']]))
print("\n\n========\n")
| <filename>examples/applications/semantic_search_quora_annoy.py
"""
This example uses Approximate Nearest Neighbor Search (ANN) with Annoy (https://github.com/spotify/annoy).
Searching a large corpus with Millions of embeddings can be time-consuming. To speed this up,
ANN can index the existent vectors. For a new query vector, this index can be used to find the nearest neighbors.
This nearest neighbor search is not perfect, i.e., it might not perfectly find all top-k nearest neighbors.
In this example, we use Annoy. It learns to a tree that partitions embeddings into smaller sections. For our query embeddings,
we can efficiently check which section matches and only search that section for nearest neighbor.
Selecting the n_trees parameter is quite important. With more trees, we get a better recall, but a worse run-time.
This script will compare the result from ANN with exact nearest neighbor search and output a Recall@k value
as well as the missing results in the top-k hits list.
See the Annoy repository, how to install Annoy.
For details how Annoy works, see: https://erikbern.com/2015/10/01/nearest-neighbors-and-vector-models-part-2-how-to-search-in-high-dimensional-spaces.html
As dataset, we use the Quora Duplicate Questions dataset, which contains about 500k questions:
https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs
Questions are embedded and Annoy is used for (approximate) semantic similarity search.
"""
from sentence_transformers import SentenceTransformer, util
import os
import csv
import pickle
import time
import torch
from annoy import AnnoyIndex
if __name__ == '__main__':
model_name = 'distilbert-base-nli-stsb-quora-ranking'
model = SentenceTransformer(model_name)
url = "http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv"
dataset_path = "quora_duplicate_questions.tsv"
max_corpus_size = 100000
n_trees = 256 #Number of trees used for Annoy. More trees => better recall, worse run-time
embedding_size = 768 #Size of embeddings
top_k_hits = 10 #Output k hits
annoy_index_path = 'quora-embeddings-{}-size-{}-annoy_index-trees-{}.ann'.format(model_name.replace('/', '_'), max_corpus_size,n_trees)
embedding_cache_path = 'quora-embeddings-{}-size-{}.pkl'.format(model_name.replace('/', '_'), max_corpus_size)
#Check if embedding cache path exists
if not os.path.exists(embedding_cache_path):
# Check if the dataset exists. If not, download and extract
# Download dataset if needed
if not os.path.exists(dataset_path):
print("Download dataset")
util.http_get(url, dataset_path)
# Get all unique sentences from the file
corpus_sentences = set()
with open(dataset_path, encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_MINIMAL)
for row in reader:
corpus_sentences.add(row['question1'])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences.add(row['question2'])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences = list(corpus_sentences)
print("Encode the corpus. This might take a while")
corpus_embeddings = model.encode(corpus_sentences, show_progress_bar=True, convert_to_numpy=True, num_workers=2)
print("Store file on disc")
with open(embedding_cache_path, "wb") as fOut:
pickle.dump({'sentences': corpus_sentences, 'embeddings': corpus_embeddings}, fOut)
else:
print("Load pre-computed embeddings from disc")
with open(embedding_cache_path, "rb") as fIn:
cache_data = pickle.load(fIn)
corpus_sentences = cache_data['sentences']
corpus_embeddings = cache_data['embeddings']
if not os.path.exists(annoy_index_path):
# Create Annoy Index
print("Create Annoy index with {} trees. This can take some time.".format(n_trees))
annoy_index = AnnoyIndex(embedding_size, 'angular')
for i in range(len(corpus_embeddings)):
annoy_index.add_item(i, corpus_embeddings[i])
annoy_index.build(n_trees)
annoy_index.save(annoy_index_path)
else:
#Load Annoy Index from disc
annoy_index = AnnoyIndex(embedding_size, 'angular')
annoy_index.load(annoy_index_path)
corpus_embeddings = torch.from_numpy(corpus_embeddings)
######### Search in the index ###########
print("Corpus loaded with {} sentences / embeddings".format(len(corpus_sentences)))
while True:
inp_question = input("Please enter a question: ")
start_time = time.time()
question_embedding = model.encode(inp_question)
corpus_ids, scores = annoy_index.get_nns_by_vector(question_embedding, top_k_hits, include_distances=True)
hits = []
for id, score in zip(corpus_ids, scores):
hits.append({'corpus_id': id, 'score': 1-((score**2) / 2)})
end_time = time.time()
print("Input question:", inp_question)
print("Results (after {:.3f} seconds):".format(end_time-start_time))
for hit in hits[0:top_k_hits]:
print("\t{:.3f}\t{}".format(hit['score'], corpus_sentences[hit['corpus_id']]))
# Approximate Nearest Neighbor (ANN) is not exact, it might miss entries with high cosine similarity
# Here, we compute the recall of ANN compared to the exact results
correct_hits = util.semantic_search(question_embedding, corpus_embeddings, top_k=top_k_hits)[0]
correct_hits_ids = set([hit['corpus_id'] for hit in correct_hits])
#Compute recall
ann_corpus_ids = set(corpus_ids)
if len(ann_corpus_ids) != len(correct_hits_ids):
print("Approximate Nearest Neighbor returned a different number of results than expected")
recall = len(ann_corpus_ids.intersection(correct_hits_ids)) / len(correct_hits_ids)
print("\nApproximate Nearest Neighbor Recall@{}: {:.2f}".format(top_k_hits, recall * 100))
if recall < 1:
print("Missing results:")
for hit in correct_hits[0:top_k_hits]:
if hit['corpus_id'] not in ann_corpus_ids:
print("\t{:.3f}\t{}".format(hit['score'], corpus_sentences[hit['corpus_id']]))
print("\n\n========\n")
| en | 0.843108 | This example uses Approximate Nearest Neighbor Search (ANN) with Annoy (https://github.com/spotify/annoy). Searching a large corpus with Millions of embeddings can be time-consuming. To speed this up, ANN can index the existent vectors. For a new query vector, this index can be used to find the nearest neighbors. This nearest neighbor search is not perfect, i.e., it might not perfectly find all top-k nearest neighbors. In this example, we use Annoy. It learns to a tree that partitions embeddings into smaller sections. For our query embeddings, we can efficiently check which section matches and only search that section for nearest neighbor. Selecting the n_trees parameter is quite important. With more trees, we get a better recall, but a worse run-time. This script will compare the result from ANN with exact nearest neighbor search and output a Recall@k value as well as the missing results in the top-k hits list. See the Annoy repository, how to install Annoy. For details how Annoy works, see: https://erikbern.com/2015/10/01/nearest-neighbors-and-vector-models-part-2-how-to-search-in-high-dimensional-spaces.html As dataset, we use the Quora Duplicate Questions dataset, which contains about 500k questions: https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs Questions are embedded and Annoy is used for (approximate) semantic similarity search. #Number of trees used for Annoy. More trees => better recall, worse run-time #Size of embeddings #Output k hits #Check if embedding cache path exists # Check if the dataset exists. If not, download and extract # Download dataset if needed # Get all unique sentences from the file # Create Annoy Index #Load Annoy Index from disc ######### Search in the index ########### # Approximate Nearest Neighbor (ANN) is not exact, it might miss entries with high cosine similarity # Here, we compute the recall of ANN compared to the exact results #Compute recall | 3.041238 | 3 |
packages/python/plotly/plotly/validators/carpet/baxis/__init__.py | sgn/plotly.py | 3 | 6630285 | <reponame>sgn/plotly.py
import _plotly_utils.basevalidators
class TypeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="type", parent_name="carpet.baxis", **kwargs):
super(TypeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", ["-", "linear", "date", "category"]),
**kwargs
)
import _plotly_utils.basevalidators
class TitleValidator(_plotly_utils.basevalidators.TitleValidator):
def __init__(self, plotly_name="title", parent_name="carpet.baxis", **kwargs):
super(TitleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Title"),
data_docs=kwargs.pop(
"data_docs",
"""
font
Sets this axis' title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
offset
An additional amount by which to offset the
title from the tick labels, given in pixels.
Note that this used to be set by the now
deprecated `titleoffset` attribute.
text
Sets the title of this axis. Note that before
the existence of `title.text`, the title's
contents used to be defined as the `title`
attribute itself. This behavior has been
deprecated.
""",
),
**kwargs
)
import _plotly_utils.basevalidators
class TickvalssrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="tickvalssrc", parent_name="carpet.baxis", **kwargs):
super(TickvalssrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class TickvalsValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name="tickvals", parent_name="carpet.baxis", **kwargs):
super(TickvalsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "data"),
**kwargs
)
import _plotly_utils.basevalidators
class TicktextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="ticktextsrc", parent_name="carpet.baxis", **kwargs):
super(TicktextsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class TicktextValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name="ticktext", parent_name="carpet.baxis", **kwargs):
super(TicktextValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "data"),
**kwargs
)
import _plotly_utils.basevalidators
class TicksuffixValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="ticksuffix", parent_name="carpet.baxis", **kwargs):
super(TicksuffixValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class TickprefixValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="tickprefix", parent_name="carpet.baxis", **kwargs):
super(TickprefixValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class TickmodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="tickmode", parent_name="carpet.baxis", **kwargs):
super(TickmodeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", ["linear", "array"]),
**kwargs
)
import _plotly_utils.basevalidators
class TickformatstopValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name="tickformatstopdefaults", parent_name="carpet.baxis", **kwargs
):
super(TickformatstopValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Tickformatstop"),
data_docs=kwargs.pop(
"data_docs",
"""
""",
),
**kwargs
)
import _plotly_utils.basevalidators
class TickformatstopsValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
def __init__(
self, plotly_name="tickformatstops", parent_name="carpet.baxis", **kwargs
):
super(TickformatstopsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Tickformatstop"),
data_docs=kwargs.pop(
"data_docs",
"""
dtickrange
range [*min*, *max*], where "min", "max" -
dtick values which describe some zoom level, it
is possible to omit "min" or "max" value by
passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level,
the same as "tickformat"
""",
),
**kwargs
)
import _plotly_utils.basevalidators
class TickformatValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="tickformat", parent_name="carpet.baxis", **kwargs):
super(TickformatValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class TickfontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="tickfont", parent_name="carpet.baxis", **kwargs):
super(TickfontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Tickfont"),
data_docs=kwargs.pop(
"data_docs",
"""
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include "Arial",
"Balto", "Courier New", "Droid Sans",, "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
""",
),
**kwargs
)
import _plotly_utils.basevalidators
class TickangleValidator(_plotly_utils.basevalidators.AngleValidator):
def __init__(self, plotly_name="tickangle", parent_name="carpet.baxis", **kwargs):
super(TickangleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class Tick0Validator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="tick0", parent_name="carpet.baxis", **kwargs):
super(Tick0Validator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class StartlinewidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="startlinewidth", parent_name="carpet.baxis", **kwargs
):
super(StartlinewidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class StartlinecolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="startlinecolor", parent_name="carpet.baxis", **kwargs
):
super(StartlinecolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class StartlineValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(self, plotly_name="startline", parent_name="carpet.baxis", **kwargs):
super(StartlineValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class SmoothingValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="smoothing", parent_name="carpet.baxis", **kwargs):
super(SmoothingValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
max=kwargs.pop("max", 1.3),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class ShowticksuffixValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="showticksuffix", parent_name="carpet.baxis", **kwargs
):
super(ShowticksuffixValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["all", "first", "last", "none"]),
**kwargs
)
import _plotly_utils.basevalidators
class ShowtickprefixValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="showtickprefix", parent_name="carpet.baxis", **kwargs
):
super(ShowtickprefixValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["all", "first", "last", "none"]),
**kwargs
)
import _plotly_utils.basevalidators
class ShowticklabelsValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="showticklabels", parent_name="carpet.baxis", **kwargs
):
super(ShowticklabelsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["start", "end", "both", "none"]),
**kwargs
)
import _plotly_utils.basevalidators
class ShowlineValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(self, plotly_name="showline", parent_name="carpet.baxis", **kwargs):
super(ShowlineValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class ShowgridValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(self, plotly_name="showgrid", parent_name="carpet.baxis", **kwargs):
super(ShowgridValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class ShowexponentValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="showexponent", parent_name="carpet.baxis", **kwargs
):
super(ShowexponentValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["all", "first", "last", "none"]),
**kwargs
)
import _plotly_utils.basevalidators
class SeparatethousandsValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name="separatethousands", parent_name="carpet.baxis", **kwargs
):
super(SeparatethousandsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class RangemodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="rangemode", parent_name="carpet.baxis", **kwargs):
super(RangemodeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["normal", "tozero", "nonnegative"]),
**kwargs
)
import _plotly_utils.basevalidators
class RangeValidator(_plotly_utils.basevalidators.InfoArrayValidator):
def __init__(self, plotly_name="range", parent_name="carpet.baxis", **kwargs):
super(RangeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
items=kwargs.pop(
"items",
[
{"valType": "any", "editType": "calc"},
{"valType": "any", "editType": "calc"},
],
),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class NticksValidator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(self, plotly_name="nticks", parent_name="carpet.baxis", **kwargs):
super(NticksValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class MinorgridwidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="minorgridwidth", parent_name="carpet.baxis", **kwargs
):
super(MinorgridwidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class MinorgridcountValidator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(
self, plotly_name="minorgridcount", parent_name="carpet.baxis", **kwargs
):
super(MinorgridcountValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class MinorgridcolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="minorgridcolor", parent_name="carpet.baxis", **kwargs
):
super(MinorgridcolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class LinewidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="linewidth", parent_name="carpet.baxis", **kwargs):
super(LinewidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class LinecolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(self, plotly_name="linecolor", parent_name="carpet.baxis", **kwargs):
super(LinecolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class LabelsuffixValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="labelsuffix", parent_name="carpet.baxis", **kwargs):
super(LabelsuffixValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class LabelprefixValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="labelprefix", parent_name="carpet.baxis", **kwargs):
super(LabelprefixValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class LabelpaddingValidator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(
self, plotly_name="labelpadding", parent_name="carpet.baxis", **kwargs
):
super(LabelpaddingValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class GridwidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="gridwidth", parent_name="carpet.baxis", **kwargs):
super(GridwidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class GridcolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(self, plotly_name="gridcolor", parent_name="carpet.baxis", **kwargs):
super(GridcolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class FixedrangeValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(self, plotly_name="fixedrange", parent_name="carpet.baxis", **kwargs):
super(FixedrangeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class ExponentformatValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="exponentformat", parent_name="carpet.baxis", **kwargs
):
super(ExponentformatValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["none", "e", "E", "power", "SI", "B"]),
**kwargs
)
import _plotly_utils.basevalidators
class EndlinewidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="endlinewidth", parent_name="carpet.baxis", **kwargs
):
super(EndlinewidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class EndlinecolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="endlinecolor", parent_name="carpet.baxis", **kwargs
):
super(EndlinecolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class EndlineValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(self, plotly_name="endline", parent_name="carpet.baxis", **kwargs):
super(EndlineValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class DtickValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="dtick", parent_name="carpet.baxis", **kwargs):
super(DtickValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(self, plotly_name="color", parent_name="carpet.baxis", **kwargs):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class CheatertypeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="cheatertype", parent_name="carpet.baxis", **kwargs):
super(CheatertypeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", ["index", "value"]),
**kwargs
)
import _plotly_utils.basevalidators
class CategoryorderValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="categoryorder", parent_name="carpet.baxis", **kwargs
):
super(CategoryorderValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
values=kwargs.pop(
"values",
["trace", "category ascending", "category descending", "array"],
),
**kwargs
)
import _plotly_utils.basevalidators
class CategoryarraysrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="categoryarraysrc", parent_name="carpet.baxis", **kwargs
):
super(CategoryarraysrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class CategoryarrayValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(
self, plotly_name="categoryarray", parent_name="carpet.baxis", **kwargs
):
super(CategoryarrayValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "data"),
**kwargs
)
import _plotly_utils.basevalidators
class AutorangeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="autorange", parent_name="carpet.baxis", **kwargs):
super(AutorangeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", [True, False, "reversed"]),
**kwargs
)
import _plotly_utils.basevalidators
class Arraytick0Validator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(self, plotly_name="arraytick0", parent_name="carpet.baxis", **kwargs):
super(Arraytick0Validator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class ArraydtickValidator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(self, plotly_name="arraydtick", parent_name="carpet.baxis", **kwargs):
super(ArraydtickValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 1),
role=kwargs.pop("role", "info"),
**kwargs
)
| import _plotly_utils.basevalidators
class TypeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="type", parent_name="carpet.baxis", **kwargs):
super(TypeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", ["-", "linear", "date", "category"]),
**kwargs
)
import _plotly_utils.basevalidators
class TitleValidator(_plotly_utils.basevalidators.TitleValidator):
def __init__(self, plotly_name="title", parent_name="carpet.baxis", **kwargs):
super(TitleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Title"),
data_docs=kwargs.pop(
"data_docs",
"""
font
Sets this axis' title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
offset
An additional amount by which to offset the
title from the tick labels, given in pixels.
Note that this used to be set by the now
deprecated `titleoffset` attribute.
text
Sets the title of this axis. Note that before
the existence of `title.text`, the title's
contents used to be defined as the `title`
attribute itself. This behavior has been
deprecated.
""",
),
**kwargs
)
import _plotly_utils.basevalidators
class TickvalssrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="tickvalssrc", parent_name="carpet.baxis", **kwargs):
super(TickvalssrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class TickvalsValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name="tickvals", parent_name="carpet.baxis", **kwargs):
super(TickvalsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "data"),
**kwargs
)
import _plotly_utils.basevalidators
class TicktextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="ticktextsrc", parent_name="carpet.baxis", **kwargs):
super(TicktextsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class TicktextValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name="ticktext", parent_name="carpet.baxis", **kwargs):
super(TicktextValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "data"),
**kwargs
)
import _plotly_utils.basevalidators
class TicksuffixValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="ticksuffix", parent_name="carpet.baxis", **kwargs):
super(TicksuffixValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class TickprefixValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="tickprefix", parent_name="carpet.baxis", **kwargs):
super(TickprefixValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class TickmodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="tickmode", parent_name="carpet.baxis", **kwargs):
super(TickmodeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", ["linear", "array"]),
**kwargs
)
import _plotly_utils.basevalidators
class TickformatstopValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name="tickformatstopdefaults", parent_name="carpet.baxis", **kwargs
):
super(TickformatstopValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Tickformatstop"),
data_docs=kwargs.pop(
"data_docs",
"""
""",
),
**kwargs
)
import _plotly_utils.basevalidators
class TickformatstopsValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
def __init__(
self, plotly_name="tickformatstops", parent_name="carpet.baxis", **kwargs
):
super(TickformatstopsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Tickformatstop"),
data_docs=kwargs.pop(
"data_docs",
"""
dtickrange
range [*min*, *max*], where "min", "max" -
dtick values which describe some zoom level, it
is possible to omit "min" or "max" value by
passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level,
the same as "tickformat"
""",
),
**kwargs
)
import _plotly_utils.basevalidators
class TickformatValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="tickformat", parent_name="carpet.baxis", **kwargs):
super(TickformatValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class TickfontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="tickfont", parent_name="carpet.baxis", **kwargs):
super(TickfontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Tickfont"),
data_docs=kwargs.pop(
"data_docs",
"""
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include "Arial",
"Balto", "Courier New", "Droid Sans",, "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
""",
),
**kwargs
)
import _plotly_utils.basevalidators
class TickangleValidator(_plotly_utils.basevalidators.AngleValidator):
def __init__(self, plotly_name="tickangle", parent_name="carpet.baxis", **kwargs):
super(TickangleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class Tick0Validator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="tick0", parent_name="carpet.baxis", **kwargs):
super(Tick0Validator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class StartlinewidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="startlinewidth", parent_name="carpet.baxis", **kwargs
):
super(StartlinewidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class StartlinecolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="startlinecolor", parent_name="carpet.baxis", **kwargs
):
super(StartlinecolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class StartlineValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(self, plotly_name="startline", parent_name="carpet.baxis", **kwargs):
super(StartlineValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class SmoothingValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="smoothing", parent_name="carpet.baxis", **kwargs):
super(SmoothingValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
max=kwargs.pop("max", 1.3),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class ShowticksuffixValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="showticksuffix", parent_name="carpet.baxis", **kwargs
):
super(ShowticksuffixValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["all", "first", "last", "none"]),
**kwargs
)
import _plotly_utils.basevalidators
class ShowtickprefixValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="showtickprefix", parent_name="carpet.baxis", **kwargs
):
super(ShowtickprefixValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["all", "first", "last", "none"]),
**kwargs
)
import _plotly_utils.basevalidators
class ShowticklabelsValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="showticklabels", parent_name="carpet.baxis", **kwargs
):
super(ShowticklabelsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["start", "end", "both", "none"]),
**kwargs
)
import _plotly_utils.basevalidators
class ShowlineValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(self, plotly_name="showline", parent_name="carpet.baxis", **kwargs):
super(ShowlineValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class ShowgridValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(self, plotly_name="showgrid", parent_name="carpet.baxis", **kwargs):
super(ShowgridValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class ShowexponentValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="showexponent", parent_name="carpet.baxis", **kwargs
):
super(ShowexponentValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["all", "first", "last", "none"]),
**kwargs
)
import _plotly_utils.basevalidators
class SeparatethousandsValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name="separatethousands", parent_name="carpet.baxis", **kwargs
):
super(SeparatethousandsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class RangemodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="rangemode", parent_name="carpet.baxis", **kwargs):
super(RangemodeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["normal", "tozero", "nonnegative"]),
**kwargs
)
import _plotly_utils.basevalidators
class RangeValidator(_plotly_utils.basevalidators.InfoArrayValidator):
def __init__(self, plotly_name="range", parent_name="carpet.baxis", **kwargs):
super(RangeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
items=kwargs.pop(
"items",
[
{"valType": "any", "editType": "calc"},
{"valType": "any", "editType": "calc"},
],
),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class NticksValidator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(self, plotly_name="nticks", parent_name="carpet.baxis", **kwargs):
super(NticksValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class MinorgridwidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="minorgridwidth", parent_name="carpet.baxis", **kwargs
):
super(MinorgridwidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class MinorgridcountValidator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(
self, plotly_name="minorgridcount", parent_name="carpet.baxis", **kwargs
):
super(MinorgridcountValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class MinorgridcolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="minorgridcolor", parent_name="carpet.baxis", **kwargs
):
super(MinorgridcolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class LinewidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="linewidth", parent_name="carpet.baxis", **kwargs):
super(LinewidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class LinecolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(self, plotly_name="linecolor", parent_name="carpet.baxis", **kwargs):
super(LinecolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class LabelsuffixValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="labelsuffix", parent_name="carpet.baxis", **kwargs):
super(LabelsuffixValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class LabelprefixValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="labelprefix", parent_name="carpet.baxis", **kwargs):
super(LabelprefixValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class LabelpaddingValidator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(
self, plotly_name="labelpadding", parent_name="carpet.baxis", **kwargs
):
super(LabelpaddingValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class GridwidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="gridwidth", parent_name="carpet.baxis", **kwargs):
super(GridwidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class GridcolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(self, plotly_name="gridcolor", parent_name="carpet.baxis", **kwargs):
super(GridcolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class FixedrangeValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(self, plotly_name="fixedrange", parent_name="carpet.baxis", **kwargs):
super(FixedrangeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class ExponentformatValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="exponentformat", parent_name="carpet.baxis", **kwargs
):
super(ExponentformatValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["none", "e", "E", "power", "SI", "B"]),
**kwargs
)
import _plotly_utils.basevalidators
class EndlinewidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="endlinewidth", parent_name="carpet.baxis", **kwargs
):
super(EndlinewidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class EndlinecolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="endlinecolor", parent_name="carpet.baxis", **kwargs
):
super(EndlinecolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class EndlineValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(self, plotly_name="endline", parent_name="carpet.baxis", **kwargs):
super(EndlineValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class DtickValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="dtick", parent_name="carpet.baxis", **kwargs):
super(DtickValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(self, plotly_name="color", parent_name="carpet.baxis", **kwargs):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class CheatertypeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="cheatertype", parent_name="carpet.baxis", **kwargs):
super(CheatertypeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", ["index", "value"]),
**kwargs
)
import _plotly_utils.basevalidators
class CategoryorderValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="categoryorder", parent_name="carpet.baxis", **kwargs
):
super(CategoryorderValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
values=kwargs.pop(
"values",
["trace", "category ascending", "category descending", "array"],
),
**kwargs
)
import _plotly_utils.basevalidators
class CategoryarraysrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="categoryarraysrc", parent_name="carpet.baxis", **kwargs
):
super(CategoryarraysrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class CategoryarrayValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(
self, plotly_name="categoryarray", parent_name="carpet.baxis", **kwargs
):
super(CategoryarrayValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "data"),
**kwargs
)
import _plotly_utils.basevalidators
class AutorangeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="autorange", parent_name="carpet.baxis", **kwargs):
super(AutorangeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", [True, False, "reversed"]),
**kwargs
)
import _plotly_utils.basevalidators
class Arraytick0Validator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(self, plotly_name="arraytick0", parent_name="carpet.baxis", **kwargs):
super(Arraytick0Validator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class ArraydtickValidator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(self, plotly_name="arraydtick", parent_name="carpet.baxis", **kwargs):
super(ArraydtickValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 1),
role=kwargs.pop("role", "info"),
**kwargs
) | en | 0.803403 | font Sets this axis' title font. Note that the title's font used to be set by the now deprecated `titlefont` attribute. offset An additional amount by which to offset the title from the tick labels, given in pixels. Note that this used to be set by the now deprecated `titleoffset` attribute. text Sets the title of this axis. Note that before the existence of `title.text`, the title's contents used to be defined as the `title` attribute itself. This behavior has been deprecated. dtickrange range [*min*, *max*], where "min", "max" - dtick values which describe some zoom level, it is possible to omit "min" or "max" value by passing "null" enabled Determines whether or not this stop is used. If `false`, this stop is ignored even within its `dtickrange`. name When used in a template, named items are created in the output figure in addition to any items the figure already has in this array. You can modify these items in the output figure by making your own item with `templateitemname` matching this `name` alongside your modifications (including `visible: false` or `enabled: false` to hide it). Has no effect outside of a template. templateitemname Used to refer to a named item in this array in the template. Named items from the template will be created even without a matching item in the input figure, but you can modify one by making an item with `templateitemname` matching its `name`, alongside your modifications (including `visible: false` or `enabled: false` to hide it). If there is no template or no matching item, this item will be hidden unless you explicitly show it with `visible: true`. value string - dtickformat for described zoom level, the same as "tickformat" color family HTML font family - the typeface that will be applied by the web browser. The web browser will only be able to apply a font if it is available on the system which it operates. Provide multiple font families, separated by commas, to indicate the preference in which to apply fonts if they aren't available on the system. The plotly service (at https://plot.ly or on-premise) generates images on a server, where only a select number of fonts are installed and supported. These include "Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman". size | 2.333595 | 2 |
pysen/factory.py | linshoK/pysen | 423 | 6630286 | <gh_stars>100-1000
import dataclasses
import pathlib
from typing import Dict, List, Optional
from .black import Black, BlackSetting
from .component import ComponentBase
from .flake8 import Flake8, Flake8Setting
from .isort import Isort, IsortSectionName, IsortSetting
from .mypy import (
Mypy,
MypyFollowImports,
MypyPlugin,
MypyPreset,
MypySetting,
MypyTarget,
)
from .py_version import PythonVersion
from .source import Source
@dataclasses.dataclass
class MypyModuleOption:
preset: Optional[MypyPreset] = None
ignore_errors: bool = False
follow_imports: Optional[MypyFollowImports] = None
def __post_init__(self) -> None:
if self.preset is not None and self.ignore_errors:
raise ValueError("cannot specify both preset and ignore_errors")
def get_setting(self) -> MypySetting:
if self.ignore_errors:
return MypySetting(ignore_errors=True, follow_imports=self.follow_imports)
preset: MypyPreset
if self.preset is not None:
preset = self.preset
else:
preset = MypyPreset.STRICT
return preset.get_setting(follow_imports=self.follow_imports)
@dataclasses.dataclass
class ConfigureLintOptions:
enable_black: Optional[bool] = None
enable_flake8: Optional[bool] = None
enable_isort: Optional[bool] = None
enable_mypy: Optional[bool] = None
mypy_preset: Optional[MypyPreset] = None
mypy_modules: Optional[Dict[str, MypyModuleOption]] = None
source: Optional[Source] = None
line_length: Optional[int] = None
py_version: Optional[PythonVersion] = None
isort_known_third_party: Optional[List[str]] = None
isort_known_first_party: Optional[List[str]] = None
isort_default_section: Optional[IsortSectionName] = None
mypy_path: Optional[List[pathlib.Path]] = None
mypy_plugins: Optional[List[MypyPlugin]] = None
mypy_targets: Optional[List[MypyTarget]] = None
def configure_lint(options: ConfigureLintOptions) -> List[ComponentBase]:
components: List[ComponentBase] = []
python_version: PythonVersion
if options.py_version is not None:
python_version = options.py_version
else:
python_version = PythonVersion(3, 7)
line_length = options.line_length or 88
# NOTE: `isort` may format code in a way that violates `black` rules
# Apply `isort` after `black` to avoid such violation
if options.enable_isort:
isort_setting = IsortSetting.default()
isort_setting.line_length = line_length
isort_setting.default_section = (
options.isort_default_section or IsortSectionName.THIRDPARTY
)
if options.isort_known_third_party is not None:
isort_setting.known_third_party = set(options.isort_known_third_party)
if options.isort_known_first_party is not None:
isort_setting.known_first_party = set(options.isort_known_first_party)
if options.enable_black:
isort_setting = isort_setting.to_black_compatible()
isort = Isort(setting=isort_setting, source=options.source)
components.append(isort)
if options.enable_black:
black_setting = BlackSetting.default(python_version)
black_setting.line_length = line_length
black = Black(setting=black_setting, source=options.source)
components.append(black)
if options.enable_flake8:
flake8_setting = Flake8Setting.default()
flake8_setting.max_line_length = line_length
if options.enable_black:
flake8_setting = flake8_setting.to_black_compatible()
flake8 = Flake8(setting=flake8_setting, source=options.source)
components.append(flake8)
if options.enable_mypy:
if options.mypy_preset is not None:
mypy_setting = options.mypy_preset.get_setting()
else:
mypy_setting = MypySetting.strict()
mypy_setting.python_version = python_version
if options.mypy_path is not None:
mypy_setting.mypy_path = list(options.mypy_path)
if options.mypy_plugins is not None:
mypy_setting.plugins = list(options.mypy_plugins)
mypy_module_settings: Dict[str, MypySetting] = {}
if options.mypy_modules is not None:
for module_name, module_option in options.mypy_modules.items():
mypy_module_settings[module_name] = module_option.get_setting()
mypy = Mypy(
setting=mypy_setting,
module_settings=mypy_module_settings,
mypy_targets=options.mypy_targets,
)
components.append(mypy)
return components
| import dataclasses
import pathlib
from typing import Dict, List, Optional
from .black import Black, BlackSetting
from .component import ComponentBase
from .flake8 import Flake8, Flake8Setting
from .isort import Isort, IsortSectionName, IsortSetting
from .mypy import (
Mypy,
MypyFollowImports,
MypyPlugin,
MypyPreset,
MypySetting,
MypyTarget,
)
from .py_version import PythonVersion
from .source import Source
@dataclasses.dataclass
class MypyModuleOption:
preset: Optional[MypyPreset] = None
ignore_errors: bool = False
follow_imports: Optional[MypyFollowImports] = None
def __post_init__(self) -> None:
if self.preset is not None and self.ignore_errors:
raise ValueError("cannot specify both preset and ignore_errors")
def get_setting(self) -> MypySetting:
if self.ignore_errors:
return MypySetting(ignore_errors=True, follow_imports=self.follow_imports)
preset: MypyPreset
if self.preset is not None:
preset = self.preset
else:
preset = MypyPreset.STRICT
return preset.get_setting(follow_imports=self.follow_imports)
@dataclasses.dataclass
class ConfigureLintOptions:
enable_black: Optional[bool] = None
enable_flake8: Optional[bool] = None
enable_isort: Optional[bool] = None
enable_mypy: Optional[bool] = None
mypy_preset: Optional[MypyPreset] = None
mypy_modules: Optional[Dict[str, MypyModuleOption]] = None
source: Optional[Source] = None
line_length: Optional[int] = None
py_version: Optional[PythonVersion] = None
isort_known_third_party: Optional[List[str]] = None
isort_known_first_party: Optional[List[str]] = None
isort_default_section: Optional[IsortSectionName] = None
mypy_path: Optional[List[pathlib.Path]] = None
mypy_plugins: Optional[List[MypyPlugin]] = None
mypy_targets: Optional[List[MypyTarget]] = None
def configure_lint(options: ConfigureLintOptions) -> List[ComponentBase]:
components: List[ComponentBase] = []
python_version: PythonVersion
if options.py_version is not None:
python_version = options.py_version
else:
python_version = PythonVersion(3, 7)
line_length = options.line_length or 88
# NOTE: `isort` may format code in a way that violates `black` rules
# Apply `isort` after `black` to avoid such violation
if options.enable_isort:
isort_setting = IsortSetting.default()
isort_setting.line_length = line_length
isort_setting.default_section = (
options.isort_default_section or IsortSectionName.THIRDPARTY
)
if options.isort_known_third_party is not None:
isort_setting.known_third_party = set(options.isort_known_third_party)
if options.isort_known_first_party is not None:
isort_setting.known_first_party = set(options.isort_known_first_party)
if options.enable_black:
isort_setting = isort_setting.to_black_compatible()
isort = Isort(setting=isort_setting, source=options.source)
components.append(isort)
if options.enable_black:
black_setting = BlackSetting.default(python_version)
black_setting.line_length = line_length
black = Black(setting=black_setting, source=options.source)
components.append(black)
if options.enable_flake8:
flake8_setting = Flake8Setting.default()
flake8_setting.max_line_length = line_length
if options.enable_black:
flake8_setting = flake8_setting.to_black_compatible()
flake8 = Flake8(setting=flake8_setting, source=options.source)
components.append(flake8)
if options.enable_mypy:
if options.mypy_preset is not None:
mypy_setting = options.mypy_preset.get_setting()
else:
mypy_setting = MypySetting.strict()
mypy_setting.python_version = python_version
if options.mypy_path is not None:
mypy_setting.mypy_path = list(options.mypy_path)
if options.mypy_plugins is not None:
mypy_setting.plugins = list(options.mypy_plugins)
mypy_module_settings: Dict[str, MypySetting] = {}
if options.mypy_modules is not None:
for module_name, module_option in options.mypy_modules.items():
mypy_module_settings[module_name] = module_option.get_setting()
mypy = Mypy(
setting=mypy_setting,
module_settings=mypy_module_settings,
mypy_targets=options.mypy_targets,
)
components.append(mypy)
return components | en | 0.817744 | # NOTE: `isort` may format code in a way that violates `black` rules # Apply `isort` after `black` to avoid such violation | 1.991251 | 2 |
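# Example sketch (not from the repository above): calling configure_lint() as defined in
# pysen/factory.py. The absolute import paths and the option values are illustrative assumptions.
from pysen.factory import ConfigureLintOptions, configure_lint
from pysen.py_version import PythonVersion

options = ConfigureLintOptions(
    enable_black=True,
    enable_isort=True,
    enable_flake8=True,
    enable_mypy=False,
    line_length=100,
    py_version=PythonVersion(3, 8),
)
components = configure_lint(options)  # with these flags: [Isort, Black, Flake8] components, in that order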
server_lifecycle.py | pauliacomi/separation-explorer | 11 | 6630287 | from threading import Thread
import src.datastore
def on_server_loaded(server_context):
''' If present, this function is called when the server first starts. '''
t = Thread(target=src.datastore.load, args=())
t.setDaemon(True)
t.start()
def on_server_unloaded(server_context):
''' If present, this function is called when the server shuts down. '''
pass
def on_session_created(session_context):
''' If present, this function is called when a session is created. '''
pass
def on_session_destroyed(session_context):
''' If present, this function is called when a session is closed. '''
pass
| from threading import Thread
import src.datastore
def on_server_loaded(server_context):
''' If present, this function is called when the server first starts. '''
t = Thread(target=src.datastore.load, args=())
t.setDaemon(True)
t.start()
def on_server_unloaded(server_context):
''' If present, this function is called when the server shuts down. '''
pass
def on_session_created(session_context):
''' If present, this function is called when a session is created. '''
pass
def on_session_destroyed(session_context):
''' If present, this function is called when a session is closed. '''
pass
| en | 0.939666 | If present, this function is called when the server first starts. If present, this function is called when the server shuts down. If present, this function is called when a session is created. If present, this function is called when a session is closed. | 2.968932 | 3 |
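# Note and sketch (not from the file above): a server_lifecycle.py next to main.py is picked up
# automatically by `bokeh serve` in a directory-style app, and each hook above fires at the matching
# lifecycle event. A hook body is ordinary Python, e.g. (illustrative only):
# def on_session_created(session_context):
#     print("session created:", session_context.id)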
INBa/2014/MOCHALOV_V_V/task_2_15.py | YukkaSarasti/pythonintask | 0 | 6630288 | <filename>INBa/2014/MOCHALOV_V_V/task_2_15.py
# Task 2. Variant 15.
# Write a program that prints to the screen the quotation by Plutarch that you like best. Do not forget that the author must be mentioned on a separate line.
# Mochalov V. V.
# 02.03.2016
print("Два основных достояния человеческой природы - это ум и рассуждения.")
print(" Плутарх.")
input("\n\nНажмите Enter для выхода.")
| <filename>INBa/2014/MOCHALOV_V_V/task_2_15.py
# Task 2. Variant 15.
# Write a program that prints to the screen the quotation by Plutarch that you like best. Do not forget that the author must be mentioned on a separate line.
# Mochalov V. V.
# 02.03.2016
print("Два основных достояния человеческой природы - это ум и рассуждения.")
print(" Плутарх.")
input("\n\nНажмите Enter для выхода.")
 | ru | 0.997321 | # Task 2. Variant 15. # Write a program that prints to the screen the quotation by Plutarch that you like best. Do not forget that the author must be mentioned on a separate line. # Mochalov V. V. # 02.03.2016 | 2.225796 | 2 |
authenticate/views.py | ockibagusp/cloud-platform | 1 | 6630289 | import jwt
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from authenticate.forms import SuperNodeAuthForm, UserAuthForm
from authenticate.utils import supernode_jwt_payload_handler, user_jwt_payload_handler
from authenticate.serializers import UserSerializer
from supernodes.serializers import SuperNodesSerializer
from cloud_platform import settings
class UserTokenCreator(APIView):
"""
Create token if user credentials was provided and valid.
"""
def post(self, request, format=None):
form = UserAuthForm(request.data)
if form.is_valid():
return Response({
'user': UserSerializer(form.user).data,
'token': self.create_token(form.user)
})
return Response(form.errors, status=status.HTTP_400_BAD_REQUEST)
@staticmethod
def create_token(user):
payload = user_jwt_payload_handler(user)
token = jwt.encode(payload, settings.SECRET_KEY)
return token.decode('unicode_escape')
class NodeTokenCreator(APIView):
"""
Create token if node credentials was provided and valid.
"""
def post(self, request, format=None):
form = SuperNodeAuthForm(request.data)
if form.is_valid():
return Response({
'supernode': SuperNodesSerializer(form.supernode, context={'request': request}).data,
'token': self.create_token(form.supernode)
})
return Response(form.errors, status=status.HTTP_400_BAD_REQUEST)
@staticmethod
def create_token(node):
payload = supernode_jwt_payload_handler(node)
token = jwt.encode(payload, settings.SECRET_KEY)
return token.decode('unicode_escape')
| import jwt
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from authenticate.forms import SuperNodeAuthForm, UserAuthForm
from authenticate.utils import supernode_jwt_payload_handler, user_jwt_payload_handler
from authenticate.serializers import UserSerializer
from supernodes.serializers import SuperNodesSerializer
from cloud_platform import settings
class UserTokenCreator(APIView):
"""
Create token if user credentials was provided and valid.
"""
def post(self, request, format=None):
form = UserAuthForm(request.data)
if form.is_valid():
return Response({
'user': UserSerializer(form.user).data,
'token': self.create_token(form.user)
})
return Response(form.errors, status=status.HTTP_400_BAD_REQUEST)
@staticmethod
def create_token(user):
payload = user_jwt_payload_handler(user)
token = jwt.encode(payload, settings.SECRET_KEY)
return token.decode('unicode_escape')
class NodeTokenCreator(APIView):
"""
Create token if node credentials was provided and valid.
"""
def post(self, request, format=None):
form = SuperNodeAuthForm(request.data)
if form.is_valid():
return Response({
'supernode': SuperNodesSerializer(form.supernode, context={'request': request}).data,
'token': self.create_token(form.supernode)
})
return Response(form.errors, status=status.HTTP_400_BAD_REQUEST)
@staticmethod
def create_token(node):
payload = supernode_jwt_payload_handler(node)
token = jwt.encode(payload, settings.SECRET_KEY)
return token.decode('unicode_escape')
| en | 0.937162 | Create token if user credentials was provided and valid. Create token if node credentials was provided and valid. | 2.307822 | 2 |
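# Example sketch (not from the file above): round-tripping a token with PyJWT the same way
# create_token() encodes it. The secret and payload below are placeholders.
import jwt
SECRET = "example-secret"                   # stands in for settings.SECRET_KEY
token = jwt.encode({"user_id": 1}, SECRET)  # mirrors jwt.encode(payload, settings.SECRET_KEY) above
print(jwt.decode(token, SECRET, algorithms=["HS256"]))  # -> {'user_id': 1}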
practica_kmedias/esqueleto_kmeans.py | binary-hideout/sistemas-adaptativos | 0 | 6630290 | <filename>practica_kmedias/esqueleto_kmeans.py
'''
INSTRUCTIONS: Complete the first iteration of k-means. To do so, use the following information and the skeleton presented below.
'''
from math import sqrt
from sys import float_info
from random import randint
def calcularDistanciaEuclideana(puntoA, puntoB):
    ''' (A) First function to complete
    Inputs: puntoA and puntoB -- numeric lists of any length (both lists must have the same length).
    Output: Euclidean distance between the lists.'''
suma = 0
for A, B in zip(puntoA, puntoB):
suma += (B - A) ** 2
return sqrt(suma)
def actualizarCentroide(datos, grupos, indiceCentroide):
    ''' (B) Second function to complete
    Inputs:
    datos -- nested list where each sublist is a feature vector
    grupos -- numeric list that contains, for each vector in datos, the group it belongs to
    indiceCentroide -- centroid to update
    Output: list containing the new values for the centroid whose index is indiceCentroide
'''
nuevo_centroide = list()
dimension = len(datos[0])
for d in range(dimension):
suma = 0.0
cantidad = 0
for indice, muestra in enumerate(datos):
if grupos[indice] == indiceCentroide:
suma += muestra[d]
cantidad += 1
nuevo_centroide.append(suma / cantidad)
return tuple(nuevo_centroide)
def centroideMasCercano(centroides, muestra):
    '''Receives a 'muestra' holding one element of a data collection and 'centroides' holding a collection of centroids.
    Returns 'k', the position of the closest centroid.
'''
menor = float_info.max
for indice, centroide in enumerate(centroides):
dist = calcularDistanciaEuclideana(muestra, centroide)
if dist < menor:
menor = dist
cercano = indice
return cercano
def agrupar(datos, centroides):
    '''Groups the data and updates the centroids.
'''
print('Centroides originales:')
print(centroides)
grupos = list()
    # (C) Code block: distance computation and group assignment
for muestra in datos:
pertenencia = centroideMasCercano(centroides, muestra)
grupos.append(pertenencia)
print('Grupos de pertenencia:')
print(grupos)
    # (D) Code block: centroid update
for i in range(len(centroides)):
centroides[i] = actualizarCentroide(datos, grupos, i)
print('Centroides actualizados:')
    print(centroides) # Print updated centroids
print('\n-----Colores-----')
datos = ((153, 51, 255), (121, 236, 221), (209, 236, 121), (240, 164, 76), (240, 98, 76), (76, 93, 240), (50, 239, 94))
centroides = [(255, 0, 0), (0, 255, 0), (0, 0, 255)] # Centroid initialization
agrupar(datos, centroides)
print('\n-----Archivo de datos-----')
data = list()
with open('datos_nivel_bonus.txt', 'r') as file:
for line in file.readlines():
temp = [int(num) for num in line.split()]
data.append(temp)
centers = [data[randint(0, 149)] for i in range(5)]
agrupar(data, centers) | <filename>practica_kmedias/esqueleto_kmeans.py
'''
INSTRUCTIONS: Complete the first iteration of k-means. To do so, use the following information and the skeleton presented below.
'''
from math import sqrt
from sys import float_info
from random import randint
def calcularDistanciaEuclideana(puntoA, puntoB):
    ''' (A) First function to complete
    Inputs: puntoA and puntoB -- numeric lists of any length (both lists must have the same length).
    Output: Euclidean distance between the lists.'''
suma = 0
for A, B in zip(puntoA, puntoB):
suma += (B - A) ** 2
return sqrt(suma)
def actualizarCentroide(datos, grupos, indiceCentroide):
    ''' (B) Second function to complete
    Inputs:
    datos -- nested list where each sublist is a feature vector
    grupos -- numeric list that contains, for each vector in datos, the group it belongs to
    indiceCentroide -- centroid to update
    Output: list containing the new values for the centroid whose index is indiceCentroide
'''
nuevo_centroide = list()
dimension = len(datos[0])
for d in range(dimension):
suma = 0.0
cantidad = 0
for indice, muestra in enumerate(datos):
if grupos[indice] == indiceCentroide:
suma += muestra[d]
cantidad += 1
nuevo_centroide.append(suma / cantidad)
return tuple(nuevo_centroide)
def centroideMasCercano(centroides, muestra):
    '''Receives a 'muestra' holding one element of a data collection and 'centroides' holding a collection of centroids.
    Returns 'k', the position of the closest centroid.
'''
menor = float_info.max
for indice, centroide in enumerate(centroides):
dist = calcularDistanciaEuclideana(muestra, centroide)
if dist < menor:
menor = dist
cercano = indice
return cercano
def agrupar(datos, centroides):
    '''Groups the data and updates the centroids.
'''
print('Centroides originales:')
print(centroides)
grupos = list()
    # (C) Code block: distance computation and group assignment
for muestra in datos:
pertenencia = centroideMasCercano(centroides, muestra)
grupos.append(pertenencia)
print('Grupos de pertenencia:')
print(grupos)
    # (D) Code block: centroid update
for i in range(len(centroides)):
centroides[i] = actualizarCentroide(datos, grupos, i)
print('Centroides actualizados:')
    print(centroides) # Print updated centroids
print('\n-----Colores-----')
datos = ((153, 51, 255), (121, 236, 221), (209, 236, 121), (240, 164, 76), (240, 98, 76), (76, 93, 240), (50, 239, 94))
centroides = [(255, 0, 0), (0, 255, 0), (0, 0, 255)] # Centroid initialization
agrupar(datos, centroides)
print('\n-----Archivo de datos-----')
data = list()
with open('datos_nivel_bonus.txt', 'r') as file:
for line in file.readlines():
temp = [int(num) for num in line.split()]
data.append(temp)
centers = [data[randint(0, 149)] for i in range(5)]
agrupar(data, centers) | es | 0.930229 | INSTRUCTIONS: Complete the first iteration of k-means. To do so, use the following information and the skeleton presented below. (A) First function to complete Inputs: puntoA and puntoB -- numeric lists of any length (both lists must have the same length). Output: Euclidean distance between the lists. (B) Second function to complete Inputs: datos -- nested list where each sublist is a feature vector grupos -- numeric list that contains, for each vector in datos, the group it belongs to indiceCentroide -- centroid to update Output: list containing the new values for the centroid whose index is indiceCentroide Receives a 'muestra' holding one element of a data collection and 'centroides' holding a collection of centroids. Returns 'k', the position of the closest centroid. Groups the data and updates the centroids. # (C) Code block: distance computation and group assignment # (D) Code block: centroid update # Print updated centroids # Centroid initialization | 3.151297 | 3 |
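# Example sketch (not part of the original exercise above): one assignment/update cycle
# using the helpers defined above on a tiny 2-D data set.
puntos = [(0, 0), (0, 1), (10, 10), (10, 11)]
centros = [(0, 0), (10, 10)]
grupos = [centroideMasCercano(centros, p) for p in puntos]                 # -> [0, 0, 1, 1]
centros = [actualizarCentroide(puntos, grupos, i) for i in range(len(centros))]
print(centros)                                                             # -> [(0.0, 0.5), (10.0, 10.5)]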
app/models/adapters/helpers/node.py | mobile2015/neoPyth | 0 | 6630291 | <filename>app/models/adapters/helpers/node.py
__author__ = 'rikkt0r'
class Node:
def __init__(self, node):
self.node = node
@property
def serialize(self):
return {
"id": self.node.id,
"name": self.node.name
} | <filename>app/models/adapters/helpers/node.py
__author__ = 'rikkt0r'
class Node:
def __init__(self, node):
self.node = node
@property
def serialize(self):
return {
"id": self.node.id,
"name": self.node.name
} | none | 1 | 2.313192 | 2 |
|
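# Example sketch (not from the file above): Node only needs an object exposing .id and .name,
# so a simple stand-in works in place of a real graph-database node.
class _FakeGraphNode:              # illustrative stand-in, not part of the original module
    id = 1
    name = "example"
print(Node(_FakeGraphNode()).serialize)   # -> {'id': 1, 'name': 'example'}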
wallet/test/helpers/metamask.py | EYBlockchain/nightfall_3 | 107 | 6630292 | from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from .find_elements import *
from time import sleep
def initializeMetamask(driver, findElements, metamaskConfig):
# Load metamask, check if the account is already set up
firstTimeButton = findElements.element_exist_xpath('//button[text()="Get Started"]')
if firstTimeButton:
#######################
# Set up the metamask #
#######################
firstTimeButton.click()
findElements.element_exist_xpath('//button[text()="Import wallet"]').click() # Import wallet
findElements.element_exist_xpath('//button[text()="I Agree"]').click() # Agree terms
sleep(3)
findElements.element_exist_xpath('//input[@placeholder="Paste Secret Recovery Phrase from clipboard"]').send_keys(metamaskConfig['mnemonic']) # Seed phrase
findElements.element_exist_xpath('//*[@id="password"]').send_keys(metamaskConfig['password']) # Password
findElements.element_exist_xpath('//*[@id="confirm-password"]').send_keys(metamaskConfig['password']) # Repeat password
        findElements.element_exist_xpath('//div[contains(@class, "first-time-flow__checkbox first-time-flow__terms")]').click() # Accept the terms checkbox
        findElements.element_exist_xpath('//button[text()="Import"]').click() # Import button
findElements.element_exist_xpath('//button[text()="All Done"]').click() # All Done button
# Accept all the emerging popups of the first metamask login
driver.get('chrome-extension://nkbihfbeogaeaoehlefnkodbefgpgknn/home.html#')
while True:
popupMetamask = findElements.element_exist_xpath('//button[@class="fas fa-times popover-header__button"]')
if popupMetamask:
popupMetamask.click()
driver.get('chrome-extension://nkbihfbeogaeaoehlefnkodbefgpgknn/home.html#')
popupMetamask = findElements.element_exist_xpath('//button[@class="fas fa-times popover-header__button"]')
else:
break
else:
#######################
# Login metamask #
#######################
passwordElement = WebDriverWait(driver, 1000).until(
EC.presence_of_element_located((By.ID, "password"))
)
passwordElement.send_keys(metamaskConfig['password'])
clickElement = driver.find_element_by_class_name("MuiButton-label")
clickElement.click()
def selectNetworkMetamask(driver, findElements, networkConfig):
# Configure network
driver.get('chrome-extension://nkbihfbeogaeaoehlefnkodbefgpgknn/home.html#')
# Select network
findElements.element_exist_xpath('//*[@id="app-content"]/div/div[1]/div/div[2]/div[1]/div/span').click() # Select network
# Find network
networkElement = findElements.element_exist_xpath('(//*[contains(text(), "' + networkConfig['name'] + '")] | //*[@value="' + networkConfig['name'] + '"])')
if not networkElement:
findElements.element_exist_xpath('//button[text()="Add Network"]').click() # Add Network
#findElements.element_exist_xpath('(//*[contains(text(), "' + networkConfig['type'] + '")])').click()
findElements.element_exist_xpath('(//*[contains(@class, "form-field__input")])[1]').send_keys(networkConfig['name'])
findElements.element_exist_xpath('(//*[contains(@class, "form-field__input")])[2]').send_keys(networkConfig['url'])
findElements.element_exist_xpath('(//*[contains(@class, "form-field__input")])[3]').send_keys(networkConfig['chainId'])
#findElements.element_exist_xpath('//input[@id="network-name"]').send_keys(networkConfig['name']) # Name
#findElements.element_exist_xpath('//input[@id="rpc-url"]').send_keys(networkConfig['url']) # URL
#findElements.element_exist_xpath('//input[@id="chainId"]').send_keys(networkConfig['chainId']) # ChainId
#findElements.element_exist_xpath('//input[@id="network-ticker"]').send_keys(networkConfig['ticker']) # ChainId
findElements.element_exist_xpath('//button[text()="Save"]').click() # Save
else:
findElements.element_exist_xpath('(//*[contains(text(), "' + networkConfig['name'] + '")])').click()
def selectTestNetworkMetamask(driver, findElements, networkConfig):
# Configure network
driver.get('chrome-extension://nkbihfbeogaeaoehlefnkodbefgpgknn/home.html#')
# Select network
findElements.element_exist_xpath('//*[@id="app-content"]/div/div[1]/div/div[2]/div[1]/div/span').click() # Select network
# Find network
networkElement = findElements.element_exist_xpath('(//*[contains(text(), "' + networkConfig['name'] + '")] | //*[@value="' + networkConfig['name'] + '"])')
try:
networkElement.click()
except Exception:
findElements.element_exist_xpath('//*[contains(@class, "network-dropdown-content--link")]').click() # Show networks
findElements.element_exist_xpath('//*[@id="app-content"]/div/div[3]/div/div[2]/div[2]/div[2]/div[7]/div[2]/div/div/div[1]/div[2]').click() # Enable test networks
findElements.element_exist_xpath('//*[contains(@class, "settings-page__close-button")]').click() # Save
findElements.element_exist_xpath('//*[@id="app-content"]/div/div[1]/div/div[2]/div[1]/div/span').click() # Select network
findElements.element_exist_xpath('//*[contains(text(), "' + networkConfig['name'] + '")]').click()
def deleteNetworkMetamask(driver, findElements, networkConfig):
# Configure network
driver.get('chrome-extension://nkbihfbeogaeaoehlefnkodbefgpgknn/home.html#')
# Select network
findElements.element_exist_xpath('//*[local-name()="svg"]').click() # Color button
findElements.element_exist_xpath('//div[contains(text(),"Settings")]').click() # Settings
findElements.element_exist_xpath('//div[contains(text(),"Networks")]').click() # Network
networkToDelete = findElements.element_exist_xpath('//div[contains(text(), "' + networkConfig['name'] + '")]')
if networkToDelete:
networkToDelete.click()
findElements.element_exist_xpath('//button[text()="Delete"]').click() # Delete
findElements.element_exist_xpath('//button[text()="Delete"]').click() # Delete
findElements.element_exist_xpath('//div[contains(@class, "close-button")]').click() # Close
def addEthAccountMetamask(driver, findElements, accountParams):
# Configure network
driver.get('chrome-extension://nkbihfbeogaeaoehlefnkodbefgpgknn/home.html#')
# Select network
findElements.element_exist_xpath('//*[local-name()="svg"]').click() # Color button
findElements.element_exist_xpath('//div[contains(text(),"Import Account")]').click() # Import account
findElements.element_exist_xpath('//input[@id="private-key-box"]').send_keys(accountParams['privateKey']) # Private Key
findElements.element_exist_xpath('//button[text()="Import"]').click() # Import
def selectEthAccountMetamask(driver, findElements, accountParams):
# Configure network
driver.get('chrome-extension://nkbihfbeogaeaoehlefnkodbefgpgknn/home.html#')
# Select network
findElements.element_exist_xpath('//*[local-name()="svg"]').click() # Color button
findElements.element_exist_xpath('//div[contains(text(), "' + accountParams['name'] + '")]').click()
def addTokenMetamask(tokenAddress, findElements):
# Add DAI token
findElements.element_exist_xpath('//button[@class="button btn-secondary btn--rounded add-token-button__button"]').click() # Add token button
findElements.element_exist_xpath('//*[@id="custom-address"]').send_keys(tokenAddress) # Address textbox
# Check if the token is already added
isTokenAdded = findElements.element_exist_xpath("//*[contains(text(), 'Token has already been added')]")
if not isTokenAdded:
findElements.element_exist_xpath('//*[@id="app-content"]/div/div[4]/div/div[2]/div[2]/footer/button[2]').click() # Next
findElements.button_clickable_xpath('//*[@id="app-content"]/div/div[4]/div/div[3]/footer/button[2]').click() # Add Token
def signTransactionMetamask(driver, findElements, stop=0):
sleep(5)
activityButton = findElements.element_exist_xpath('//button[text()="Activity"]')
if activityButton:
activityButton.click()
while True:
sleep(4)
pendingTx = findElements.element_exist_xpath('//div[contains(@class, "list-item transaction-list-item transaction-list-item--unconfirmed")]')
approve = findElements.element_exist_xpath('//button[text()="Confirm"]') # Confirm approve
if pendingTx:
pendingTx.click()
elif approve:
approve.click()
else:
break
| from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from .find_elements import *
from time import sleep
def initializeMetamask(driver, findElements, metamaskConfig):
# Load metamask, check if the account is already set up
firstTimeButton = findElements.element_exist_xpath('//button[text()="Get Started"]')
if firstTimeButton:
#######################
# Set up the metamask #
#######################
firstTimeButton.click()
findElements.element_exist_xpath('//button[text()="Import wallet"]').click() # Import wallet
findElements.element_exist_xpath('//button[text()="I Agree"]').click() # Agree terms
sleep(3)
findElements.element_exist_xpath('//input[@placeholder="Paste Secret Recovery Phrase from clipboard"]').send_keys(metamaskConfig['mnemonic']) # Seed phrase
findElements.element_exist_xpath('//*[@id="password"]').send_keys(metamaskConfig['password']) # Password
findElements.element_exist_xpath('//*[@id="confirm-password"]').send_keys(metamaskConfig['password']) # Repeat password
        findElements.element_exist_xpath('//div[contains(@class, "first-time-flow__checkbox first-time-flow__terms")]').click() # Accept the terms checkbox
        findElements.element_exist_xpath('//button[text()="Import"]').click() # Import button
findElements.element_exist_xpath('//button[text()="All Done"]').click() # All Done button
# Accept all the emerging popups of the first metamask login
driver.get('chrome-extension://nkbihfbeogaeaoehlefnkodbefgpgknn/home.html#')
while True:
popupMetamask = findElements.element_exist_xpath('//button[@class="fas fa-times popover-header__button"]')
if popupMetamask:
popupMetamask.click()
driver.get('chrome-extension://nkbihfbeogaeaoehlefnkodbefgpgknn/home.html#')
popupMetamask = findElements.element_exist_xpath('//button[@class="fas fa-times popover-header__button"]')
else:
break
else:
#######################
# Login metamask #
#######################
passwordElement = WebDriverWait(driver, 1000).until(
EC.presence_of_element_located((By.ID, "password"))
)
passwordElement.send_keys(metamaskConfig['password'])
clickElement = driver.find_element_by_class_name("MuiButton-label")
clickElement.click()
def selectNetworkMetamask(driver, findElements, networkConfig):
# Configure network
driver.get('chrome-extension://nkbihfbeogaeaoehlefnkodbefgpgknn/home.html#')
# Select network
findElements.element_exist_xpath('//*[@id="app-content"]/div/div[1]/div/div[2]/div[1]/div/span').click() # Select network
# Find network
networkElement = findElements.element_exist_xpath('(//*[contains(text(), "' + networkConfig['name'] + '")] | //*[@value="' + networkConfig['name'] + '"])')
if not networkElement:
findElements.element_exist_xpath('//button[text()="Add Network"]').click() # Add Network
#findElements.element_exist_xpath('(//*[contains(text(), "' + networkConfig['type'] + '")])').click()
findElements.element_exist_xpath('(//*[contains(@class, "form-field__input")])[1]').send_keys(networkConfig['name'])
findElements.element_exist_xpath('(//*[contains(@class, "form-field__input")])[2]').send_keys(networkConfig['url'])
findElements.element_exist_xpath('(//*[contains(@class, "form-field__input")])[3]').send_keys(networkConfig['chainId'])
#findElements.element_exist_xpath('//input[@id="network-name"]').send_keys(networkConfig['name']) # Name
#findElements.element_exist_xpath('//input[@id="rpc-url"]').send_keys(networkConfig['url']) # URL
#findElements.element_exist_xpath('//input[@id="chainId"]').send_keys(networkConfig['chainId']) # ChainId
#findElements.element_exist_xpath('//input[@id="network-ticker"]').send_keys(networkConfig['ticker']) # ChainId
findElements.element_exist_xpath('//button[text()="Save"]').click() # Save
else:
findElements.element_exist_xpath('(//*[contains(text(), "' + networkConfig['name'] + '")])').click()
def selectTestNetworkMetamask(driver, findElements, networkConfig):
# Configure network
driver.get('chrome-extension://nkbihfbeogaeaoehlefnkodbefgpgknn/home.html#')
# Select network
findElements.element_exist_xpath('//*[@id="app-content"]/div/div[1]/div/div[2]/div[1]/div/span').click() # Select network
# Find network
networkElement = findElements.element_exist_xpath('(//*[contains(text(), "' + networkConfig['name'] + '")] | //*[@value="' + networkConfig['name'] + '"])')
try:
networkElement.click()
except Exception:
findElements.element_exist_xpath('//*[contains(@class, "network-dropdown-content--link")]').click() # Show networks
findElements.element_exist_xpath('//*[@id="app-content"]/div/div[3]/div/div[2]/div[2]/div[2]/div[7]/div[2]/div/div/div[1]/div[2]').click() # Enable test networks
findElements.element_exist_xpath('//*[contains(@class, "settings-page__close-button")]').click() # Save
findElements.element_exist_xpath('//*[@id="app-content"]/div/div[1]/div/div[2]/div[1]/div/span').click() # Select network
findElements.element_exist_xpath('//*[contains(text(), "' + networkConfig['name'] + '")]').click()
def deleteNetworkMetamask(driver, findElements, networkConfig):
# Configure network
driver.get('chrome-extension://nkbihfbeogaeaoehlefnkodbefgpgknn/home.html#')
# Select network
findElements.element_exist_xpath('//*[local-name()="svg"]').click() # Color button
findElements.element_exist_xpath('//div[contains(text(),"Settings")]').click() # Settings
findElements.element_exist_xpath('//div[contains(text(),"Networks")]').click() # Network
networkToDelete = findElements.element_exist_xpath('//div[contains(text(), "' + networkConfig['name'] + '")]')
if networkToDelete:
networkToDelete.click()
findElements.element_exist_xpath('//button[text()="Delete"]').click() # Delete
findElements.element_exist_xpath('//button[text()="Delete"]').click() # Delete
findElements.element_exist_xpath('//div[contains(@class, "close-button")]').click() # Close
def addEthAccountMetamask(driver, findElements, accountParams):
# Configure network
driver.get('chrome-extension://nkbihfbeogaeaoehlefnkodbefgpgknn/home.html#')
# Select network
findElements.element_exist_xpath('//*[local-name()="svg"]').click() # Color button
findElements.element_exist_xpath('//div[contains(text(),"Import Account")]').click() # Import account
findElements.element_exist_xpath('//input[@id="private-key-box"]').send_keys(accountParams['privateKey']) # Private Key
findElements.element_exist_xpath('//button[text()="Import"]').click() # Import
def selectEthAccountMetamask(driver, findElements, accountParams):
# Configure network
driver.get('chrome-extension://nkbihfbeogaeaoehlefnkodbefgpgknn/home.html#')
# Select network
findElements.element_exist_xpath('//*[local-name()="svg"]').click() # Color button
findElements.element_exist_xpath('//div[contains(text(), "' + accountParams['name'] + '")]').click()
def addTokenMetamask(tokenAddress, findElements):
# Add DAI token
findElements.element_exist_xpath('//button[@class="button btn-secondary btn--rounded add-token-button__button"]').click() # Add token button
findElements.element_exist_xpath('//*[@id="custom-address"]').send_keys(tokenAddress) # Address textbox
# Check if the token is already added
isTokenAdded = findElements.element_exist_xpath("//*[contains(text(), 'Token has already been added')]")
if not isTokenAdded:
findElements.element_exist_xpath('//*[@id="app-content"]/div/div[4]/div/div[2]/div[2]/footer/button[2]').click() # Next
findElements.button_clickable_xpath('//*[@id="app-content"]/div/div[4]/div/div[3]/footer/button[2]').click() # Add Token
def signTransactionMetamask(driver, findElements, stop=0):
sleep(5)
activityButton = findElements.element_exist_xpath('//button[text()="Activity"]')
if activityButton:
activityButton.click()
while True:
sleep(4)
pendingTx = findElements.element_exist_xpath('//div[contains(@class, "list-item transaction-list-item transaction-list-item--unconfirmed")]')
approve = findElements.element_exist_xpath('//button[text()="Confirm"]') # Confirm approve
if pendingTx:
pendingTx.click()
elif approve:
approve.click()
else:
break
| en | 0.340234 | # Load metamask, check if the account is already set up ####################### # Set up the metamask # ####################### # Import wallet # Agree terms # Seed phrase # Password # Repeat password # Read agreements ( for sure) # Read agreements ( for sure) # All Done button # Accept all the emerging popups of the first metamask login #') #') ####################### # Login metamask # ####################### # Configure network #') # Select network # Select network # Find network # Add Network #findElements.element_exist_xpath('(//*[contains(text(), "' + networkConfig['type'] + '")])').click() #findElements.element_exist_xpath('//input[@id="network-name"]').send_keys(networkConfig['name']) # Name #findElements.element_exist_xpath('//input[@id="rpc-url"]').send_keys(networkConfig['url']) # URL #findElements.element_exist_xpath('//input[@id="chainId"]').send_keys(networkConfig['chainId']) # ChainId #findElements.element_exist_xpath('//input[@id="network-ticker"]').send_keys(networkConfig['ticker']) # ChainId # Save # Configure network #') # Select network # Select network # Find network # Show networks # Enable test networks # Save # Select network # Configure network #') # Select network # Color button # Settings # Network # Delete # Delete # Close # Configure network #') # Select network # Color button # Import account # Private Key # Import # Configure network #') # Select network # Color button # Add DAI token # Add token button # Address textbox # Check if the token is already added # Next # Add Token # Confirm approve | 2.575806 | 3 |
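# Example sketch (not from the file above): one way the helpers might be chained in a test.
# FindElements and the Chrome options that pre-load the MetaMask extension are assumptions
# about the surrounding test harness, not code shown in this file.
driver = webdriver.Chrome(options=chrome_options_with_metamask_extension)  # assumed to load the extension
finder = FindElements(driver)                      # hypothetical wrapper from .find_elements
initializeMetamask(driver, finder, {"mnemonic": "...", "password": "..."})
selectNetworkMetamask(driver, finder, {"name": "Localhost 8545",
                                       "url": "http://localhost:8545", "chainId": "1337"})
signTransactionMetamask(driver, finder)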
backend-service/visits-service/app/app/db/base.py | abhishek70/python-petclinic-microservices | 2 | 6630293 | <filename>backend-service/visits-service/app/app/db/base.py<gh_stars>1-10
# Import all the models, so that Base has them before being
# imported by Alembic
from ..models.visit import Visit # noqa
from .base_class import Base # noqa
| <filename>backend-service/visits-service/app/app/db/base.py<gh_stars>1-10
# Import all the models, so that Base has them before being
# imported by Alembic
from ..models.visit import Visit # noqa
from .base_class import Base # noqa
| en | 0.951317 | # Import all the models, so that Base has them before being # imported by Alembic # noqa # noqa | 1.361224 | 1 |
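# Example sketch (not from this repository): the usual Alembic env.py wiring that consumes a
# base module like the one above, so autogenerate sees every imported model; the import path
# below is illustrative.
from app.db.base import Base  # noqa
target_metadata = Base.metadata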
frontend.py | milonoir/yaml_rulz_frontend | 0 | 6630294 | from flask import Flask
from flask import request
from flask import render_template
from flask_wtf.csrf import CSRFProtect
from flask_wtf import FlaskForm
from wtforms import TextAreaField
from wtforms.validators import DataRequired
from yaml_rulz.validator import YAMLValidator
app = Flask(__name__)
csrf = CSRFProtect(app)
app.config['SECRET_KEY'] = b'\<KEY>'
class Form(FlaskForm):
schema = TextAreaField('Template', [DataRequired()])
resource = TextAreaField('Resource', [DataRequired()])
@app.route('/', methods=['GET', 'POST'])
def index():
form = Form()
issues = []
if request.method == 'POST' and form.validate():
try:
validator = YAMLValidator(
schema_content=form.schema.data,
resource_content=form.resource.data,
exclusions_content=None
)
except Exception as exc:
issues.append({'severity': 'Fatal', 'message': 'Error: {}'.format(exc)})
else:
_, issues = validator.get_validation_issues()
return render_template('frontend.html', form=form, issues=issues)
| from flask import Flask
from flask import request
from flask import render_template
from flask_wtf.csrf import CSRFProtect
from flask_wtf import FlaskForm
from wtforms import TextAreaField
from wtforms.validators import DataRequired
from yaml_rulz.validator import YAMLValidator
app = Flask(__name__)
csrf = CSRFProtect(app)
app.config['SECRET_KEY'] = b'\<KEY>'
class Form(FlaskForm):
schema = TextAreaField('Template', [DataRequired()])
resource = TextAreaField('Resource', [DataRequired()])
@app.route('/', methods=['GET', 'POST'])
def index():
form = Form()
issues = []
if request.method == 'POST' and form.validate():
try:
validator = YAMLValidator(
schema_content=form.schema.data,
resource_content=form.resource.data,
exclusions_content=None
)
except Exception as exc:
issues.append({'severity': 'Fatal', 'message': 'Error: {}'.format(exc)})
else:
_, issues = validator.get_validation_issues()
return render_template('frontend.html', form=form, issues=issues)
| none | 1 | 2.503997 | 3 |
|
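# Example sketch (not from the file above): driving YAMLValidator directly, mirroring the call
# inside index(). The schema/resource strings are placeholders, not real yaml_rulz rule syntax.
from yaml_rulz.validator import YAMLValidator
validator = YAMLValidator(
    schema_content="key: value",
    resource_content="key: value",
    exclusions_content=None,
)
_, issues = validator.get_validation_issues()
print(issues)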
genome_integration/resources/get_ensembl_gene_information.py | adriaan-vd-graaf/genome_integration | 13 | 6630295 | from genome_integration import gene_regions
import gzip
"""
These classes are intended to make it easy to access and query Ensembl gene information,
but they have other uses as well, so it is possible that they will be joined with other Ensembl resources in the future.
"""
class EnsemblGene(gene_regions.StartEndRegion):
"""
Contains all the standard fields for gene information from ensembl.
Attributes
----------
ensg_id: str
ensembl id
gene_name: str
gene name
strand: str
strand of the gene either `+` or `-`
gc_percent: str
percentage of GC bases.
gene_type: str
gene type
ensembl_version:
ensembl version of the gene.
Methods
-------
None
"""
def __init__(self,
ensg_id,
gene_name,
chromosome,
start,
end,
strand,
gc_percent,
gene_type,
ensembl_version):
super().__init__([chromosome, start, end])
self.ensg_id = ensg_id
self.gene_name = gene_name
self.strand = strand
self.gc_percent = gc_percent
self.gene_type = gene_type
self.ensembl_version = ensembl_version
def __repr__(self):
return f"EnsemblGene object: {self.ensg_id}, {self.gene_name}, {self.chromosome}:{self.start}-{self.end},{self.strand}"
def __str__(self):
return f"{self.ensg_id}, {self.gene_name}, {self.chromosome}:{self.start}-{self.end},{self.strand}"
class EnsemblGenes:
"""
EnsemblGene information
This class contains many genes likely with ensembl data.
Attributes
----------
list_of_genes: list
list of EnsemblGenes objects
self.ensg_ids: set of strings
Ensembl gene ids in the set.
    self.gene_names: set
        names of the genes
    self.ensg_to_full: dict
        dict of ensembl ids as keys and associated EnsemblGene information as values
    self.gene_to_full: dict
        dict of gene names as keys and associated EnsemblGene information as values
    self.genes_warned_about : set
        gene names that occur more than once and have already triggered a duplicate warning
self.allow_patch_overlapping_gene_names: bool
allows for overlapping gene names to be available.
Methods
-------
add_gene(ensembl_gene):
add an ensembl_gene object to self.
get_sorted_genes(self)
return sorted genes.
return_overlapping_regions(self, gene_region_to_check):
return the overlapping regions of a StartEndRegion object.
list_to_full(self, list, fail_on_bad_id=True)
        returns a list of EnsemblGene objects for the given gene identifiers.
str_to_full(self, str)
return the Ensembl genes object associated with a certain string.
str_to_gene(self, str):
return the gene name associated with a certain string.
str_to_ensg(self, str):
return the ensembl id associated with a certain string.
"""
def __init__(self, allow_patch_overlapping_gene_names=False):
self.list_of_genes = []
self.ensg_ids = set()
self.gene_names = set()
self.ensg_to_full = {}
self.gene_to_full = {}
self.genes_warned_about = set()
self.allow_patch_overlapping_gene_names = allow_patch_overlapping_gene_names
def add_gene(self, ensembl_gene):
"""
Add an EnsemblGene object to self.
:param ensembl_gene:
:return: None
"""
self.list_of_genes.append(ensembl_gene)
if ensembl_gene.ensg_id in self.ensg_ids:
raise ValueError("ERROR: found duplicate ENSG ID, when adding {}, this should not happen.".format(ensembl_gene.ensg_id))
self.ensg_ids.add(ensembl_gene.ensg_id)
if ensembl_gene.gene_name in self.gene_names and ensembl_gene.gene_name not in self.genes_warned_about:
# print("WARNING: found duplicate gene name, when adding {}, lookups on gene name may be wrong.".format(ensembl_gene.gene_name))
self.genes_warned_about.add(ensembl_gene.gene_name)
self.ensg_to_full[ensembl_gene.ensg_id] = ensembl_gene
        #this ensures that there will never be weird patch genes in the gene name lookup
        if ensembl_gene.gene_name in self.gene_names and (not self.allow_patch_overlapping_gene_names):
            if len(str(ensembl_gene.chromosome)) >= 3: #only keep chromosome names shorter than 3 characters; patch contigs have longer names
                return
self.gene_names.add(ensembl_gene.gene_name)
self.gene_to_full[ensembl_gene.gene_name] = ensembl_gene
def get_sorted_genes(self):
return sorted(self.list_of_genes)
def return_overlapping_regions(self, gene_region_to_check):
"""
This may be a bit slow, as it will iterate over all gene regions here.
:param gene_region_to_check:
:return:
"""
sorted_genes = self.get_sorted_genes()
to_return = EnsemblGenes()
for gene in sorted_genes:
if gene_region_to_check.region_overlaps(gene):
to_return.add_gene(gene)
return to_return
def return_overlapping_regions_based_on_coordinates(self, chromosome, position):
"""
This may be a bit slow, as it will iterate over all gene regions here.
Cool thing though, this is sorted.
        :param chromosome: chromosome of the position to check
        :param position: base pair position to check
:return: EnsemblGenes object with overlapping genes.
"""
sorted_genes = self.get_sorted_genes()
to_return = EnsemblGenes()
for gene in sorted_genes:
if gene.snp_in_region(chromosome, position):
to_return.add_gene(gene)
return to_return
def __str__(self):
return "EnsemblGenes object containing {} genes".format(len(self.gene_names))
def list_to_full(self, list, fail_on_bad_id=True):
"""
turn a list of gene identifiers into a list of ensembl gene information
:param list: list of IDs you want to know all the ensembl information of.
:param fail_on_bad_id: bool,
if bad IDs should fail. Default is True.
        :return: list of ensembl genes information.
"""
if fail_on_bad_id:
return [self.str_to_full(x) for x in list]
else:
return_list = []
for gene in list:
try:
return_list.append(self.str_to_full(gene))
except ValueError:
print(f"Could not find {gene}, but continueing.")
return return_list
def str_to_full(self, str):
if str in self.ensg_to_full.keys():
return self.ensg_to_full[str]
elif str in self.gene_to_full.keys():
return self.gene_to_full[str]
else:
raise ValueError(f"{str} was not convertible to a gene that I know.")
def str_to_gene(self, str):
return self.str_to_full(str).gene_name
def str_to_ensg(self, str):
return self.str_to_full(str).ensg_id
def __iter__(self):
self.ordered_ensembl_info = sorted(self.list_of_genes)
self.ordered_ensg_ids = [x.ensg_id for x in self.ordered_ensembl_info]
self.ordered_gene_names = [x.gene_name for x in self.ordered_ensembl_info]
self.iterator_indice = 0
return self
def __next__(self):
if self.iterator_indice < len(self.ordered_ensg_ids):
self.iterator_indice += 1
return self.ensg_to_full[self.ordered_ensg_ids[self.iterator_indice - 1]]
else:
raise StopIteration()
def read_gene_information():
"""
This loads in the ENSG gene information from the package and returns it.
very handy to have if you want to do a quick check of a certain ENSG ID, or just want gene names of everything.
TODO: Properly handle the Ensembl and human genome versions.
:return:
EnsemblGene object with all the genes that are in the file '2018_05_18_ensembl_gene_information.txt.gz' in the
resource/ensembldata folder of this package.
"""
resource_path = '/'.join(('ensembl_data', '2018_05_18_ensembl_gene_information.txt.gz'))
if len(__file__.split("/")) > 1:
gene_file = "{}/{}".format("/".join(__file__.split("/")[:-1]), resource_path)
else:
gene_file = resource_path
ensembl_genes = EnsemblGenes()
with gzip.open(gene_file, "rb") as f:
f.readline()
for line in f:
split = line.decode("utf8").split()
ensembl_genes.add_gene(
EnsemblGene(
split[0],
split[1],
split[2],
split[3],
split[4],
split[5],
split[6],
split[7],
split[8]
)
)
return ensembl_genes | from genome_integration import gene_regions
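# Example sketch (not from the file above): loading the bundled gene table and querying it with
# the classes defined above; the gene identifiers are illustrative only.
ensembl_genes = read_gene_information()
gene = ensembl_genes.str_to_full("ENSG00000139618")   # or a gene name such as "BRCA2"
print(gene.gene_name, gene.chromosome, gene.start, gene.end)
overlapping = ensembl_genes.return_overlapping_regions_based_on_coordinates(gene.chromosome, gene.start)
print([x.ensg_id for x in overlapping])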
import gzip
"""
These classes are intended to make it easy to access and query Ensembl gene information,
but they have other uses as well, so it is possible that they will be joined with other Ensembl resources in the future.
"""
class EnsemblGene(gene_regions.StartEndRegion):
"""
Contains all the standard fields for gene information from ensembl.
Attributes
----------
ensg_id: str
ensembl id
gene_name: str
gene name
strand: str
strand of the gene either `+` or `-`
gc_percent: str
percentage of GC bases.
gene_type: str
gene type
ensembl_version:
ensembl version of the gene.
Methods
-------
None
"""
def __init__(self,
ensg_id,
gene_name,
chromosome,
start,
end,
strand,
gc_percent,
gene_type,
ensembl_version):
super().__init__([chromosome, start, end])
self.ensg_id = ensg_id
self.gene_name = gene_name
self.strand = strand
self.gc_percent = gc_percent
self.gene_type = gene_type
self.ensembl_version = ensembl_version
def __repr__(self):
return f"EnsemblGene object: {self.ensg_id}, {self.gene_name}, {self.chromosome}:{self.start}-{self.end},{self.strand}"
def __str__(self):
return f"{self.ensg_id}, {self.gene_name}, {self.chromosome}:{self.start}-{self.end},{self.strand}"
class EnsemblGenes:
"""
EnsemblGene information
This class contains many genes likely with ensembl data.
Attributes
----------
list_of_genes: list
list of EnsemblGenes objects
self.ensg_ids: set of strings
Ensembl gene ids in the set.
    self.gene_names: set
        names of the genes
    self.ensg_to_full: dict
        dict of ensembl ids as keys and associated EnsemblGene information as values
    self.ensg_to_gene: dict
        dict of ensembl ids as keys and gene names as values
    self.gene_to_full: dict
        dict of gene names as keys and associated EnsemblGene information as values
self.genes_warned_about : set
genes that are duplicate.
self.allow_patch_overlapping_gene_names: bool
allows for overlapping gene names to be available.
Methods
-------
add_gene(ensembl_gene):
add an ensembl_gene object to self.
get_sorted_genes(self)
return sorted genes.
return_overlapping_regions(self, gene_region_to_check):
return the overlapping regions of a StartEndRegion object.
list_to_full(self, list, fail_on_bad_id=True)
        returns a list of EnsemblGene information for the given identifiers.
str_to_full(self, str)
return the Ensembl genes object associated with a certain string.
str_to_gene(self, str):
return the gene name associated with a certain string.
str_to_ensg(self, str):
return the ensembl id associated with a certain string.
"""
def __init__(self, allow_patch_overlapping_gene_names=False):
self.list_of_genes = []
self.ensg_ids = set()
self.gene_names = set()
self.ensg_to_full = {}
self.gene_to_full = {}
self.genes_warned_about = set()
self.allow_patch_overlapping_gene_names = allow_patch_overlapping_gene_names
def add_gene(self, ensembl_gene):
"""
Add an EnsemblGene object to self.
:param ensembl_gene:
:return: None
"""
self.list_of_genes.append(ensembl_gene)
if ensembl_gene.ensg_id in self.ensg_ids:
raise ValueError("ERROR: found duplicate ENSG ID, when adding {}, this should not happen.".format(ensembl_gene.ensg_id))
self.ensg_ids.add(ensembl_gene.ensg_id)
if ensembl_gene.gene_name in self.gene_names and ensembl_gene.gene_name not in self.genes_warned_about:
# print("WARNING: found duplicate gene name, when adding {}, lookups on gene name may be wrong.".format(ensembl_gene.gene_name))
self.genes_warned_about.add(ensembl_gene.gene_name)
self.ensg_to_full[ensembl_gene.ensg_id] = ensembl_gene
        #this ensures that there will never be weird patch genes in the gene name lookup
if ensembl_gene.gene_name in self.gene_names and (not self.allow_patch_overlapping_gene_names):
            try:
                if len(ensembl_gene.chromosome) >= 3:  # only keep chromosome names smaller than 3 characters (skip patch scaffolds)
                    return
            except TypeError:
                return
self.gene_names.add(ensembl_gene.gene_name)
self.gene_to_full[ensembl_gene.gene_name] = ensembl_gene
def get_sorted_genes(self):
return sorted(self.list_of_genes)
def return_overlapping_regions(self, gene_region_to_check):
"""
This may be a bit slow, as it will iterate over all gene regions here.
:param gene_region_to_check:
:return:
"""
sorted_genes = self.get_sorted_genes()
to_return = EnsemblGenes()
for gene in sorted_genes:
if gene_region_to_check.region_overlaps(gene):
to_return.add_gene(gene)
return to_return
def return_overlapping_regions_based_on_coordinates(self, chromosome, position):
"""
This may be a bit slow, as it will iterate over all gene regions here.
Cool thing though, this is sorted.
:param gene_region_to_check:
:return: EnsemblGenes object with overlapping genes.
"""
sorted_genes = self.get_sorted_genes()
to_return = EnsemblGenes()
for gene in sorted_genes:
if gene.snp_in_region(chromosome, position):
to_return.add_gene(gene)
return to_return
def __str__(self):
return "EnsemblGenes object containing {} genes".format(len(self.gene_names))
def list_to_full(self, list, fail_on_bad_id=True):
"""
turn a list of gene identifiers into a list of ensembl gene information
:param list: list of IDs you want to know all the ensembl information of.
:param fail_on_bad_id: bool,
if bad IDs should fail. Default is True.
        :return: list of ensembl gene information.
"""
if fail_on_bad_id:
return [self.str_to_full(x) for x in list]
else:
return_list = []
for gene in list:
try:
return_list.append(self.str_to_full(gene))
except ValueError:
print(f"Could not find {gene}, but continueing.")
return return_list
def str_to_full(self, str):
if str in self.ensg_to_full.keys():
return self.ensg_to_full[str]
elif str in self.gene_to_full.keys():
return self.gene_to_full[str]
else:
raise ValueError(f"{str} was not convertible to a gene that I know.")
def str_to_gene(self, str):
return self.str_to_full(str).gene_name
def str_to_ensg(self, str):
return self.str_to_full(str).ensg_id
def __iter__(self):
self.ordered_ensembl_info = sorted(self.list_of_genes)
self.ordered_ensg_ids = [x.ensg_id for x in self.ordered_ensembl_info]
self.ordered_gene_names = [x.gene_name for x in self.ordered_ensembl_info]
self.iterator_indice = 0
return self
def __next__(self):
if self.iterator_indice < len(self.ordered_ensg_ids):
self.iterator_indice += 1
return self.ensg_to_full[self.ordered_ensg_ids[self.iterator_indice - 1]]
else:
raise StopIteration()
def read_gene_information():
"""
This loads in the ENSG gene information from the package and returns it.
very handy to have if you want to do a quick check of a certain ENSG ID, or just want gene names of everything.
TODO: Properly handle the Ensembl and human genome versions.
:return:
EnsemblGene object with all the genes that are in the file '2018_05_18_ensembl_gene_information.txt.gz' in the
resource/ensembldata folder of this package.
"""
resource_path = '/'.join(('ensembl_data', '2018_05_18_ensembl_gene_information.txt.gz'))
if len(__file__.split("/")) > 1:
gene_file = "{}/{}".format("/".join(__file__.split("/")[:-1]), resource_path)
else:
gene_file = resource_path
ensembl_genes = EnsemblGenes()
with gzip.open(gene_file, "rb") as f:
f.readline()
for line in f:
split = line.decode("utf8").split()
ensembl_genes.add_gene(
EnsemblGene(
split[0],
split[1],
split[2],
split[3],
split[4],
split[5],
split[6],
split[7],
split[8]
)
)
return ensembl_genes | en | 0.774035 | These classes are intended to easily access and query ensembl genes information, But they have other uses as well, so it is possible that these will be joined with the ensembl Contains all the standard fields for gene information from ensembl. Attributes ---------- ensg_id: str ensembl id gene_name: str gene name strand: str strand of the gene either `+` or `-` gc_percent: str percentage of GC bases. gene_type: str gene type ensembl_version: ensembl version of the gene. Methods ------- None EnsemblGene information This class contains many genes likely with ensembl data. Attributes ---------- list_of_genes: list list of EnsemblGenes objects self.ensg_ids: set of strings Ensembl gene ids in the set. self.gene_names: list names of the genes self.ensg_to_full: dict dict of ensembl ids as keys and associated EnsemblGene information as values self.ensg_to_gene: dict dict of ensembl ids as keys and self.gene_to_full: dict gene to full self.genes_warned_about : set genes that are duplicate. self.allow_patch_overlapping_gene_names: bool allows for overlapping gene names to be available. Methods ------- add_gene(ensembl_gene): add an ensembl_gene object to self. get_sorted_genes(self) return sorted genes. return_overlapping_regions(self, gene_region_to_check): return the overlapping regions of a StartEndRegion object. list_to_full(self, list, fail_on_bad_id=True) returns an ensemblGenes object with only the genes in the files. str_to_full(self, str) return the Ensembl genes object associated with a certain string. str_to_gene(self, str): return the gene name associated with a certain string. str_to_ensg(self, str): return the ensembl id associated with a certain string. Add an EnsemblGene object to self. :param ensembl_gene: :return: None # print("WARNING: found duplicate gene name, when adding {}, lookups on gene name may be wrong.".format(ensembl_gene.gene_name)) #this ensures that there will never be weird patch genes in the #only chromosome names smaller than 3. This may be a bit slow, as it will iterate over all gene regions here. :param gene_region_to_check: :return: This may be a bit slow, as it will iterate over all gene regions here. Cool thing though, this is sorted. :param gene_region_to_check: :return: EnsemblGenes object with overlapping genes. turn a list of gene identifiers into a list of ensembl gene information :param list: list of IDs you want to know all the ensembl information of. :param fail_on_bad_id: bool, if bad IDs should fail. Default is True. :return: list of ensembl genes informaiton. This loads in the ENSG gene information from the package and returns it. very handy to have if you want to do a quick check of a certain ENSG ID, or just want gene names of everything. TODO: Properly handle the Ensembl and human genome versions. :return: EnsemblGene object with all the genes that are in the file '2018_05_18_ensembl_gene_information.txt.gz' in the resource/ensembldata folder of this package. | 2.94819 | 3 |
lib/sedna/algorithms/unseen_task_detect/unseen_task_detect.py | chou-shun/sedna | 0 | 6630296 | <gh_stars>0
# Copyright 2021 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unseen task detection algorithms for Lifelong Learning"""
import abc
from typing import List
import numpy as np
from sedna.algorithms.multi_task_learning.task_jobs.artifact import Task
from sedna.common.class_factory import ClassFactory, ClassType
__all__ = ('ModelProbeFilter', 'TaskAttrFilter')
class BaseFilter(metaclass=abc.ABCMeta):
"""The base class to define unified interface."""
def __call__(self, task: Task = None):
"""predict function, and it must be implemented by
different methods class.
:param task: inference task
:return: `True` means unseen task, `False` means not an unseen task.
"""
raise NotImplementedError
@ClassFactory.register(ClassType.UTD)
class ModelProbeFilter(BaseFilter, abc.ABC):
def __init__(self):
pass
def __call__(self, tasks: List[Task] = None, threshold=0.5, **kwargs):
all_proba = []
for task in tasks:
sample = task.samples
model = task.model
if hasattr(model, "predict_proba"):
proba = model.predict_proba(sample)
all_proba.append(np.max(proba))
return np.mean(all_proba) > threshold if all_proba else True
@ClassFactory.register(ClassType.UTD)
class TaskAttrFilter(BaseFilter, abc.ABC):
def __init__(self):
pass
def __call__(self, tasks: List[Task] = None, **kwargs):
for task in tasks:
model_attr = list(map(list, task.model.meta_attr))
sample_attr = list(map(list, task.samples.meta_attr))
if not (model_attr and sample_attr):
continue
if list(model_attr) == list(sample_attr):
return False
return True
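# Illustrative call of a registered filter (a sketch only; the `tasks` list would
# come from sedna's multi-task inference pipeline, and each Task carries `.samples`
# and `.model`):
#   is_unseen = ModelProbeFilter()(tasks=tasks, threshold=0.5)
# ModelProbeFilter returns True when no model exposes `predict_proba`, or when the
# mean of the per-task maximum predicted probabilities exceeds the threshold.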
| # Copyright 2021 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unseen task detection algorithms for Lifelong Learning"""
import abc
from typing import List
import numpy as np
from sedna.algorithms.multi_task_learning.task_jobs.artifact import Task
from sedna.common.class_factory import ClassFactory, ClassType
__all__ = ('ModelProbeFilter', 'TaskAttrFilter')
class BaseFilter(metaclass=abc.ABCMeta):
"""The base class to define unified interface."""
def __call__(self, task: Task = None):
"""predict function, and it must be implemented by
different methods class.
:param task: inference task
:return: `True` means unseen task, `False` means not an unseen task.
"""
raise NotImplementedError
@ClassFactory.register(ClassType.UTD)
class ModelProbeFilter(BaseFilter, abc.ABC):
def __init__(self):
pass
def __call__(self, tasks: List[Task] = None, threshold=0.5, **kwargs):
all_proba = []
for task in tasks:
sample = task.samples
model = task.model
if hasattr(model, "predict_proba"):
proba = model.predict_proba(sample)
all_proba.append(np.max(proba))
return np.mean(all_proba) > threshold if all_proba else True
@ClassFactory.register(ClassType.UTD)
class TaskAttrFilter(BaseFilter, abc.ABC):
def __init__(self):
pass
def __call__(self, tasks: List[Task] = None, **kwargs):
for task in tasks:
model_attr = list(map(list, task.model.meta_attr))
sample_attr = list(map(list, task.samples.meta_attr))
if not (model_attr and sample_attr):
continue
if list(model_attr) == list(sample_attr):
return False
return True | en | 0.836428 | # Copyright 2021 The KubeEdge Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Unseen task detection algorithms for Lifelong Learning The base class to define unified interface. predict function, and it must be implemented by different methods class. :param task: inference task :return: `True` means unseen task, `False` means not an unseen task. | 2.14575 | 2 |
ee_api/__init__.py | dgketchum/MT_RSense | 0 | 6630297 | import ee
def is_authorized():
try:
ee.Initialize()
print('Authorized')
except Exception as e:
print('You are not authorized: {}'.format(e))
exit(1)
return None
if __name__ == '__main__':
pass
# ========================= EOF ====================================================================
| import ee
def is_authorized():
try:
ee.Initialize()
print('Authorized')
except Exception as e:
print('You are not authorized: {}'.format(e))
exit(1)
return None
if __name__ == '__main__':
pass
# ========================= EOF ====================================================================
| en | 0.354309 | # ========================= EOF ==================================================================== | 2.676602 | 3 |
scripts/check_pipfile_and_toxini.py | BuildJet/agents-aea | 1 | 6630298 | <filename>scripts/check_pipfile_and_toxini.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This script checks that dependencies in tox.ini and Pipfile match."""
import sys
from typing import Dict
# specified in setup.py
WHITELIST = {"base58": ">=1.0.3"}
def get_deps_in_pipfile(file: str = "Pipfile") -> Dict[str, str]:
"""
Get the dependencies of the Pipfile.
:param file: the file to check.
:return: dictionary with dependencies and their versions
"""
result: Dict[str, str] = WHITELIST
with open(file, "r") as f:
is_dev_dependency = False
for line in f:
if line == "[dev-packages]\n":
is_dev_dependency = True
continue
if line == "[packages]\n":
is_dev_dependency = True
continue
if not is_dev_dependency:
continue
try:
package, version = line.split(" = ")
result[package] = version.strip("\n").strip('"')
except Exception: # nosec # pylint: disable=broad-except
pass
return result
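# For reference, the parser above expects Pipfile entries of the form
# `name = "<constraint>"` under the section headers, e.g. (made-up packages):
#
#   [packages]
#   requests = ">=2.22.0"
#
#   [dev-packages]
#   black = "==19.10b0"
#
# which would be collected as {"requests": ">=2.22.0", "black": "==19.10b0"}.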
def check_versions_in_tox_correct(file: str = "tox.ini") -> None:
"""
    Check that the versions in tox.ini match the ones in the Pipfile.
    :param file: the file to check.
    :return: None. Exits the script with status 1 if a version mismatch is found.
"""
dependencies = get_deps_in_pipfile()
with open(file, "r") as f:
for line in f:
for match_type in ["==", ">="]:
if match_type in line:
name_part, version_part = line.split(match_type)
check_match(
name_part.strip(" "),
version_part.strip("\n"),
dependencies,
match_type,
)
def check_match(
name_part: str, version_part: str, dependencies: Dict[str, str], match_type: str
) -> None:
"""Check for a match independencies."""
result = False
for package, version_and_match_type in dependencies.items():
if package == name_part:
if version_and_match_type == f"{match_type}{version_part}":
result = True
break
print(
f"Non-matching versions for package={package}, {name_part}. Expected='{version_and_match_type}', found='{match_type}{version_part}'."
)
sys.exit(1)
if not result:
print(f"Package not found for: {name_part}")
sys.exit(1)
if __name__ == "__main__":
check_versions_in_tox_correct()
print("OK")
sys.exit(0)
| <filename>scripts/check_pipfile_and_toxini.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This script checks that dependencies in tox.ini and Pipfile match."""
import sys
from typing import Dict
# specified in setup.py
WHITELIST = {"base58": ">=1.0.3"}
def get_deps_in_pipfile(file: str = "Pipfile") -> Dict[str, str]:
"""
Get the dependencies of the Pipfile.
:param file: the file to check.
:return: dictionary with dependencies and their versions
"""
result: Dict[str, str] = WHITELIST
with open(file, "r") as f:
is_dev_dependency = False
for line in f:
if line == "[dev-packages]\n":
is_dev_dependency = True
continue
if line == "[packages]\n":
is_dev_dependency = True
continue
if not is_dev_dependency:
continue
try:
package, version = line.split(" = ")
result[package] = version.strip("\n").strip('"')
except Exception: # nosec # pylint: disable=broad-except
pass
return result
def check_versions_in_tox_correct(file: str = "tox.ini") -> None:
"""
    Check that the versions in tox.ini match the ones in the Pipfile.
    :param file: the file to check.
    :return: None. Exits the script with status 1 if a version mismatch is found.
"""
dependencies = get_deps_in_pipfile()
with open(file, "r") as f:
for line in f:
for match_type in ["==", ">="]:
if match_type in line:
name_part, version_part = line.split(match_type)
check_match(
name_part.strip(" "),
version_part.strip("\n"),
dependencies,
match_type,
)
def check_match(
name_part: str, version_part: str, dependencies: Dict[str, str], match_type: str
) -> None:
"""Check for a match independencies."""
result = False
for package, version_and_match_type in dependencies.items():
if package == name_part:
if version_and_match_type == f"{match_type}{version_part}":
result = True
break
print(
f"Non-matching versions for package={package}, {name_part}. Expected='{version_and_match_type}', found='{match_type}{version_part}'."
)
sys.exit(1)
if not result:
print(f"Package not found for: {name_part}")
sys.exit(1)
if __name__ == "__main__":
check_versions_in_tox_correct()
print("OK")
sys.exit(0)
| en | 0.740296 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- # ------------------------------------------------------------------------------ # # Copyright 2018-2019 Fetch.AI Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ------------------------------------------------------------------------------ This script checks that dependencies in tox.ini and Pipfile match. # specified in setup.py Get the dependencies of the Pipfile. :param file: the file to check. :return: dictionary with dependencies and their versions # nosec # pylint: disable=broad-except Check the versions in tox are matching the ones in Pipfile. :param file: the file to check. :param dependencies: the deps in pipfile :return: True if match Check for a match independencies. | 2.182487 | 2 |
scheduled_bots/drugs/pharma/Mixtures.py | turoger/scheduled-bots | 6 | 6630299 | <filename>scheduled_bots/drugs/pharma/Mixtures.py
"""
create drug/product mixtures
Example: https://www.wikidata.org/wiki/Q4663143
"""
import time
from collections import defaultdict
from wikidataintegrator import wdi_core, wdi_login, wdi_helpers
from scheduled_bots.local import WDPASS, WDUSER
def make_ref(rxnorm):
refs = [[
wdi_core.WDItemID(value='Q7383767', prop_nr='P248', is_reference=True), # stated in rxnorm
wdi_core.WDExternalID(value=rxnorm, prop_nr='P3345', is_reference=True), # rxcui
wdi_core.WDTime(time=time.strftime('+%Y-%m-%dT00:00:00Z'), prop_nr='P813', is_reference=True) # retrieved
]]
return refs
class Mixtures:
def __init__(self):
self.login = wdi_login.WDLogin(WDUSER, WDPASS)
self._get_mixtures_in_wd()
rxnorm_qid = wdi_helpers.id_mapper("P3345", return_as_set=True)
rxnorm_qid = {k: list(v)[0] for k, v in rxnorm_qid.items() if len(v) == 1}
self.rxnorm_qid = rxnorm_qid
def _get_mixtures_in_wd(self):
query = """
SELECT distinct ?drug ?compound WHERE {
values ?chemical {wd:Q12140 wd:Q11173 wd:Q79529}
?drug wdt:P527 ?compound .
?drug wdt:P31 ?chemical .
?compound wdt:P652 ?unii
}"""
mixd = defaultdict(set)
r = wdi_core.WDItemEngine.execute_sparql_query(query=query)
for x in r['results']['bindings']:
parent = x['drug']['value'].split("/")[-1]
mixd[parent].add(x['compound']['value'].split("/")[-1])
self.mixture_components = {k: v for k, v in mixd.items() if len(v) > 1}
self.components_mixture = {frozenset(v): k for k, v in self.mixture_components.items()}
# to create, needs: label, ingredients, rxcui
def create(self, label: str, rxcui: str, ingredient_qids: list):
rxcui = str(rxcui)
# check to make sure it doesn't exist
if rxcui in self.rxnorm_qid:
raise ValueError("rxcui {} already exists: {}".format(rxcui, self.rxnorm_qid[rxcui]))
# check by ingredients
qid = self.get_mixture_qid(ingredient_qids)
if qid:
raise ValueError("mixture already exists: {}".format(qid))
# has part
s = [wdi_core.WDItemID(x, 'P527', references=make_ref(rxcui)) for x in ingredient_qids]
# instance of
s.append(wdi_core.WDItemID('Q12140', 'P31', references=make_ref(rxcui))) # drug
s.append(wdi_core.WDItemID('Q79529', 'P31', references=make_ref(rxcui))) # chemical substance
s.append(wdi_core.WDItemID('Q169336', 'P31', references=make_ref(rxcui))) # mixture
# rxnorm
s.append(wdi_core.WDExternalID(rxcui, "P3345", references=make_ref(rxcui)))
item = wdi_core.WDItemEngine(data=s)
if item.create_new_item:
item.set_label(label)
item.set_label(label)
if not item.get_description():
item.set_description("combination drug")
item.write(self.login)
qid = item.wd_item_id
# update cache
self.components_mixture[frozenset(ingredient_qids)] = qid
self.mixture_components[qid] = ingredient_qids
self.rxnorm_qid[rxcui] = qid
return qid
def get_or_create(self, label, rxcui, ingredient_qids):
if rxcui in self.rxnorm_qid:
return self.rxnorm_qid[rxcui]
qid = self.get_mixture_qid(ingredient_qids)
if qid:
return qid
return self.create(label, rxcui, ingredient_qids)
def get_mixture_qid(self, ingredient_qids):
# get the qid for the mixture from the ingredient qids
return self.components_mixture.get(frozenset(ingredient_qids))
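# Minimal usage sketch (the label, RxCUI and ingredient QIDs below are made-up
# placeholders, not real data):
#   mixtures = Mixtures()
#   qid = mixtures.get_or_create(
#       label="drug A / drug B",
#       rxcui="0000000",
#       ingredient_qids=["Q100000001", "Q100000002"],
#   )
# get_or_create() first checks the RxNorm CUI and the ingredient set against the
# cached Wikidata items and only calls create() when no existing mixture is found.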
| <filename>scheduled_bots/drugs/pharma/Mixtures.py
"""
create drug/product mixtures
Example: https://www.wikidata.org/wiki/Q4663143
"""
import time
from collections import defaultdict
from wikidataintegrator import wdi_core, wdi_login, wdi_helpers
from scheduled_bots.local import WDPASS, WDUSER
def make_ref(rxnorm):
refs = [[
wdi_core.WDItemID(value='Q7383767', prop_nr='P248', is_reference=True), # stated in rxnorm
wdi_core.WDExternalID(value=rxnorm, prop_nr='P3345', is_reference=True), # rxcui
wdi_core.WDTime(time=time.strftime('+%Y-%m-%dT00:00:00Z'), prop_nr='P813', is_reference=True) # retrieved
]]
return refs
class Mixtures:
def __init__(self):
self.login = wdi_login.WDLogin(WDUSER, WDPASS)
self._get_mixtures_in_wd()
rxnorm_qid = wdi_helpers.id_mapper("P3345", return_as_set=True)
rxnorm_qid = {k: list(v)[0] for k, v in rxnorm_qid.items() if len(v) == 1}
self.rxnorm_qid = rxnorm_qid
def _get_mixtures_in_wd(self):
query = """
SELECT distinct ?drug ?compound WHERE {
values ?chemical {wd:Q12140 wd:Q11173 wd:Q79529}
?drug wdt:P527 ?compound .
?drug wdt:P31 ?chemical .
?compound wdt:P652 ?unii
}"""
mixd = defaultdict(set)
r = wdi_core.WDItemEngine.execute_sparql_query(query=query)
for x in r['results']['bindings']:
parent = x['drug']['value'].split("/")[-1]
mixd[parent].add(x['compound']['value'].split("/")[-1])
self.mixture_components = {k: v for k, v in mixd.items() if len(v) > 1}
self.components_mixture = {frozenset(v): k for k, v in self.mixture_components.items()}
# to create, needs: label, ingredients, rxcui
def create(self, label: str, rxcui: str, ingredient_qids: list):
rxcui = str(rxcui)
# check to make sure it doesn't exist
if rxcui in self.rxnorm_qid:
raise ValueError("rxcui {} already exists: {}".format(rxcui, self.rxnorm_qid[rxcui]))
# check by ingredients
qid = self.get_mixture_qid(ingredient_qids)
if qid:
raise ValueError("mixture already exists: {}".format(qid))
# has part
s = [wdi_core.WDItemID(x, 'P527', references=make_ref(rxcui)) for x in ingredient_qids]
# instance of
s.append(wdi_core.WDItemID('Q12140', 'P31', references=make_ref(rxcui))) # drug
s.append(wdi_core.WDItemID('Q79529', 'P31', references=make_ref(rxcui))) # chemical substance
s.append(wdi_core.WDItemID('Q169336', 'P31', references=make_ref(rxcui))) # mixture
# rxnorm
s.append(wdi_core.WDExternalID(rxcui, "P3345", references=make_ref(rxcui)))
item = wdi_core.WDItemEngine(data=s)
if item.create_new_item:
item.set_label(label)
item.set_label(label)
if not item.get_description():
item.set_description("combination drug")
item.write(self.login)
qid = item.wd_item_id
# update cache
self.components_mixture[frozenset(ingredient_qids)] = qid
self.mixture_components[qid] = ingredient_qids
self.rxnorm_qid[rxcui] = qid
return qid
def get_or_create(self, label, rxcui, ingredient_qids):
if rxcui in self.rxnorm_qid:
return self.rxnorm_qid[rxcui]
qid = self.get_mixture_qid(ingredient_qids)
if qid:
return qid
return self.create(label, rxcui, ingredient_qids)
def get_mixture_qid(self, ingredient_qids):
# get the qid for the mixture from the ingredient qids
return self.components_mixture.get(frozenset(ingredient_qids))
| en | 0.660315 | create drug/product mixtures Example: https://www.wikidata.org/wiki/Q4663143 # stated in rxnorm # rxcui # retrieved SELECT distinct ?drug ?compound WHERE { values ?chemical {wd:Q12140 wd:Q11173 wd:Q79529} ?drug wdt:P527 ?compound . ?drug wdt:P31 ?chemical . ?compound wdt:P652 ?unii } # to create, needs: label, ingredients, rxcui # check to make sure it doesn't exist # check by ingredients # has part # instance of # drug # chemical substance # mixture # rxnorm # update cache # get the qid for the mixture from the ingredient qids | 2.735621 | 3 |
excepthook.py | wildbeez/SmokeDetector | 0 | 6630300 | # coding=utf-8
from datetime import datetime
import os
import traceback
import threading
import sys
# noinspection PyPackageRequirements
from websocket import WebSocketConnectionClosedException
import requests
from helpers import log, log_exception
from globalvars import GlobalVars
# noinspection PyProtectedMember
def uncaught_exception(exctype, value, tb):
delta = datetime.utcnow() - GlobalVars.startup_utc_date
log_exception(exctype, value, tb)
if delta.total_seconds() < 180 and exctype not in \
{KeyboardInterrupt, SystemExit, requests.ConnectionError, WebSocketConnectionClosedException}:
os._exit(4)
else:
os._exit(1)
def install_thread_excepthook():
"""
Workaround for sys.excepthook thread bug
From
http://spyced.blogspot.com/2007/06/workaround-for-sysexcepthook-bug.html
(https://sourceforge.net/tracker/?func=detail&atid=105470&aid=1230540&group_id=5470).
Call once from __main__ before creating any threads.
If using psyco, call psyco.cannotcompile(threading.Thread.run)
since this replaces a new-style class method.
"""
init_old = threading.Thread.__init__
def init(self, *args, **kwargs):
init_old(self, *args, **kwargs)
run_old = self.run
# noinspection PyBroadException,PyShadowingNames
def run_with_except_hook(*args, **kw):
try:
run_old(*args, **kw)
except Exception: # Broad exception makes sense here
sys.excepthook(*sys.exc_info())
except BaseException: # KeyboardInterrupt and SystemExit
raise
self.run = run_with_except_hook
threading.Thread.__init__ = init
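# Minimal usage sketch (call order as prescribed by the docstring above):
#   import sys
#   import excepthook
#   sys.excepthook = excepthook.uncaught_exception
#   excepthook.install_thread_excepthook()
#   # ...only start worker threads after this point, so threads constructed later
#   # pick up the wrapped run() and report uncaught exceptions through sys.excepthook.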
| # coding=utf-8
from datetime import datetime
import os
import traceback
import threading
import sys
# noinspection PyPackageRequirements
from websocket import WebSocketConnectionClosedException
import requests
from helpers import log, log_exception
from globalvars import GlobalVars
# noinspection PyProtectedMember
def uncaught_exception(exctype, value, tb):
delta = datetime.utcnow() - GlobalVars.startup_utc_date
log_exception(exctype, value, tb)
if delta.total_seconds() < 180 and exctype not in \
{KeyboardInterrupt, SystemExit, requests.ConnectionError, WebSocketConnectionClosedException}:
os._exit(4)
else:
os._exit(1)
def install_thread_excepthook():
"""
Workaround for sys.excepthook thread bug
From
http://spyced.blogspot.com/2007/06/workaround-for-sysexcepthook-bug.html
(https://sourceforge.net/tracker/?func=detail&atid=105470&aid=1230540&group_id=5470).
Call once from __main__ before creating any threads.
If using psyco, call psyco.cannotcompile(threading.Thread.run)
since this replaces a new-style class method.
"""
init_old = threading.Thread.__init__
def init(self, *args, **kwargs):
init_old(self, *args, **kwargs)
run_old = self.run
# noinspection PyBroadException,PyShadowingNames
def run_with_except_hook(*args, **kw):
try:
run_old(*args, **kw)
except Exception: # Broad exception makes sense here
sys.excepthook(*sys.exc_info())
except BaseException: # KeyboardInterrupt and SystemExit
raise
self.run = run_with_except_hook
threading.Thread.__init__ = init
| en | 0.522295 | # coding=utf-8 # noinspection PyPackageRequirements # noinspection PyProtectedMember Workaround for sys.excepthook thread bug From http://spyced.blogspot.com/2007/06/workaround-for-sysexcepthook-bug.html (https://sourceforge.net/tracker/?func=detail&atid=105470&aid=1230540&group_id=5470). Call once from __main__ before creating any threads. If using psyco, call psyco.cannotcompile(threading.Thread.run) since this replaces a new-style class method. # noinspection PyBroadException,PyShadowingNames # Broad exception makes sense here # KeyboardInterrupt and SystemExit | 1.890899 | 2 |
uni_parser/ebnf/ebnf_ast.py | nonemaw/YeTi | 1 | 6630301 | class Ast:
def __init__(self, name: str, token_position: tuple, grammars: list = None,
grammar: str = None):
"""
I am an AST tree, grammars are my children
        `grammars` can be either a spelling string or a list of Ast instances
"""
# matched grammar object name
self.name = name
self.children = grammars
self.child = grammar
# (line_start, char_start, line_end, char_end)
self.position = token_position
def __str__(self):
return f'{self.name} {self.children} {self.print_position()}'
def __repr__(self):
return f'{self.name}'
def __iter__(self):
return iter(self.children)
def __getitem__(self, item):
if self.children:
return self.children.__getitem__(item)
else:
return self.child
def empty(self):
self.children.clear()
def append(self, obj):
self.children.append(obj)
def extend(self, obj):
self.children.extend(obj.children)
def print_position(self) -> str:
return f'{self.position[0]}({self.position[1]})...{self.position[2]}({self.position[3]})'
def format(self, level=4):
indent = ' ' * level
end_indent = ' ' * (level - 4)
# child case (single grammar case, e.g. a matched literal grammar)
if self.child:
child = 'CR' if self.child == '\n' else self.child
return f'{self.name} < {child} >\n'
# children case (a list of various grammars)
else:
next_indent = ' ' * level
children = next_indent.join(
map(lambda ast: ast.format(level + 4), self))
return f'{self.name} {{\n{indent}{children}{end_indent}}}\n'
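# Small illustrative sketch (grammar names and token positions are made up):
#   leaf = Ast('IDENTIFIER', (1, 0, 1, 3), grammar='foo')
#   root = Ast('expression', (1, 0, 1, 3), grammars=[leaf])
#   print(root.format())
# which prints a nested view along the lines of:
#   expression {
#       IDENTIFIER < foo >
#   }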
| class Ast:
def __init__(self, name: str, token_position: tuple, grammars: list = None,
grammar: str = None):
"""
I am an AST tree, grammars are my children
        `grammars` can be either a spelling string or a list of Ast instances
"""
# matched grammar object name
self.name = name
self.children = grammars
self.child = grammar
# (line_start, char_start, line_end, char_end)
self.position = token_position
def __str__(self):
return f'{self.name} {self.children} {self.print_position()}'
def __repr__(self):
return f'{self.name}'
def __iter__(self):
return iter(self.children)
def __getitem__(self, item):
if self.children:
return self.children.__getitem__(item)
else:
return self.child
def empty(self):
self.children.clear()
def append(self, obj):
self.children.append(obj)
def extend(self, obj):
self.children.extend(obj.children)
def print_position(self) -> str:
return f'{self.position[0]}({self.position[1]})...{self.position[2]}({self.position[3]})'
def format(self, level=4):
indent = ' ' * level
end_indent = ' ' * (level - 4)
# child case (single grammar case, e.g. a matched literal grammar)
if self.child:
child = 'CR' if self.child == '\n' else self.child
return f'{self.name} < {child} >\n'
# children case (a list of various grammars)
else:
next_indent = ' ' * level
children = next_indent.join(
map(lambda ast: ast.format(level + 4), self))
return f'{self.name} {{\n{indent}{children}{end_indent}}}\n'
| en | 0.726645 | I am an AST tree, grammars are my children `grammars` can be both a spelling string, or a list of Ast instance # matched grammar object name # (line_start, char_start, line_end, char_end) # child case (single grammar case, e.g. a matched literal grammar) # children case (a list of various grammars) | 3.492293 | 3 |
src/template_finder.py | OppOeds/botty | 0 | 6630302 | import cv2
from screen import Screen
from typing import Tuple, Union, List
import numpy as np
from logger import Logger
import time
import os
from config import Config
from utils.misc import load_template
class TemplateFinder:
def __init__(self, screen: Screen, scale_factor: float = None):
"""
:param screen: Screen object
:param scale_factor: Scale factor that is used for templates. Note: UI and NPC templates will always have scale of 1.0
"""
self.last_score = -1.0
self._screen = screen
self._config = Config()
if scale_factor is None:
scale_factor = 0.7 if self._config.general['res'] == "1920_1080" else 1.0
self._scale_factor = scale_factor
res_str = "" if self._config.general['res'] == "1920_1080" else "_1280_720"
self._templates = {
# Templates for node in A5 Town
"A5_TOWN_0": [load_template(f"assets/templates{res_str}/a5_town/a5_town_0.png", self._scale_factor), self._scale_factor],
"A5_TOWN_0.5": [load_template(f"assets/templates{res_str}/a5_town/a5_town_0.5.png", self._scale_factor), self._scale_factor],
"A5_TOWN_1": [load_template(f"assets/templates{res_str}/a5_town/a5_town_1.png", self._scale_factor), self._scale_factor],
"A5_TOWN_2": [load_template(f"assets/templates{res_str}/a5_town/a5_town_2.png", self._scale_factor), self._scale_factor],
"A5_TOWN_3": [load_template(f"assets/templates{res_str}/a5_town/a5_town_3.png", self._scale_factor), self._scale_factor],
"A5_TOWN_4": [load_template(f"assets/templates{res_str}/a5_town/a5_town_4.png", self._scale_factor), self._scale_factor],
"A5_TOWN_5": [load_template(f"assets/templates{res_str}/a5_town/a5_town_5.png", self._scale_factor), self._scale_factor],
"A5_TOWN_6": [load_template(f"assets/templates{res_str}/a5_town/a5_town_6.png", self._scale_factor), self._scale_factor],
"A5_TOWN_7": [load_template(f"assets/templates{res_str}/a5_town/a5_town_7.png", self._scale_factor), self._scale_factor],
"A5_TOWN_8": [load_template(f"assets/templates{res_str}/a5_town/a5_town_8.png", self._scale_factor), self._scale_factor],
"A5_TOWN_9": [load_template(f"assets/templates{res_str}/a5_town/a5_town_9.png", self._scale_factor), self._scale_factor],
"A5_TOWN_10": [load_template(f"assets/templates{res_str}/a5_town/a5_town_10.png", self._scale_factor), self._scale_factor],
            # Templates for nodes at Pindle
"PINDLE_0": [load_template(f"assets/templates{res_str}/pindle/pindle_0.png", self._scale_factor), self._scale_factor],
"PINDLE_1": [load_template(f"assets/templates{res_str}/pindle/pindle_1.png", self._scale_factor), self._scale_factor],
"PINDLE_2": [load_template(f"assets/templates{res_str}/pindle/pindle_2.png", self._scale_factor), self._scale_factor],
"PINDLE_3": [load_template(f"assets/templates{res_str}/pindle/pindle_3.png", self._scale_factor), self._scale_factor],
"PINDLE_4": [load_template(f"assets/templates{res_str}/pindle/pindle_4.png", self._scale_factor), self._scale_factor],
"PINDLE_5": [load_template(f"assets/templates{res_str}/pindle/pindle_5.png", self._scale_factor), self._scale_factor],
"PINDLE_6": [load_template(f"assets/templates{res_str}/pindle/pindle_6.png", self._scale_factor), self._scale_factor],
"PINDLE_7": [load_template(f"assets/templates{res_str}/pindle/pindle_7.png", self._scale_factor), self._scale_factor],
# Templates for nodes to Eldritch
"ELDRITCH_START": [load_template(f"assets/templates{res_str}/eldritch/eldritch_start.png", self._scale_factor), self._scale_factor],
"ELDRITCH_0": [load_template(f"assets/templates{res_str}/eldritch/eldritch_0.png", self._scale_factor), self._scale_factor],
"ELDRITCH_1": [load_template(f"assets/templates{res_str}/eldritch/eldritch_1.png", self._scale_factor), self._scale_factor],
"ELDRITCH_2": [load_template(f"assets/templates{res_str}/eldritch/eldritch_2.png", self._scale_factor), self._scale_factor],
"ELDRITCH_3": [load_template(f"assets/templates{res_str}/eldritch/eldritch_3.png", self._scale_factor), self._scale_factor],
"ELDRITCH_4": [load_template(f"assets/templates{res_str}/eldritch/eldritch_4.png", self._scale_factor), self._scale_factor],
# Templates for nodes to Shenk (from Eldritch)
"SHENK_0": [load_template(f"assets/templates{res_str}/shenk/shenk_0.png", self._scale_factor), self._scale_factor],
"SHENK_1": [load_template(f"assets/templates{res_str}/shenk/shenk_1.png", self._scale_factor), self._scale_factor],
"SHENK_2": [load_template(f"assets/templates{res_str}/shenk/shenk_2.png", self._scale_factor), self._scale_factor],
"SHENK_3": [load_template(f"assets/templates{res_str}/shenk/shenk_3.png", self._scale_factor), self._scale_factor],
"SHENK_4": [load_template(f"assets/templates{res_str}/shenk/shenk_4.png", self._scale_factor), self._scale_factor],
"SHENK_6": [load_template(f"assets/templates{res_str}/shenk/shenk_6.png", self._scale_factor), self._scale_factor],
"SHENK_7": [load_template(f"assets/templates{res_str}/shenk/shenk_7.png", self._scale_factor), self._scale_factor],
"SHENK_8": [load_template(f"assets/templates{res_str}/shenk/shenk_8.png", self._scale_factor), self._scale_factor],
"SHENK_9": [load_template(f"assets/templates{res_str}/shenk/shenk_9.png", self._scale_factor), self._scale_factor],
"SHENK_10": [load_template(f"assets/templates{res_str}/shenk/shenk_10.png", self._scale_factor), self._scale_factor],
"SHENK_11": [load_template(f"assets/templates{res_str}/shenk/shenk_11.png", self._scale_factor), self._scale_factor],
"SHENK_12": [load_template(f"assets/templates{res_str}/shenk/shenk_12.png", self._scale_factor), self._scale_factor],
"SHENK_13": [load_template(f"assets/templates{res_str}/shenk/shenk_13.png", self._scale_factor), self._scale_factor],
"SHENK_15": [load_template(f"assets/templates{res_str}/shenk/shenk_15.png", self._scale_factor), self._scale_factor],
"SHENK_16": [load_template(f"assets/templates{res_str}/shenk/shenk_16.png", self._scale_factor), self._scale_factor],
"SHENK_17": [load_template(f"assets/templates{res_str}/shenk/shenk_17.png", self._scale_factor), self._scale_factor],
# Template Selectables
"A5_STASH": [load_template(f"assets/templates{res_str}/a5_stash.png", self._scale_factor), self._scale_factor],
"A5_WP": [load_template(f"assets/templates{res_str}/a5_wp.png", self._scale_factor), self._scale_factor],
"A5_RED_PORTAL": [load_template(f"assets/templates{res_str}/a5_red_portal.png", self._scale_factor), self._scale_factor],
"A5_RED_PORTAL_TEXT": [load_template(f"assets/templates{res_str}/a5_red_portal_with_text.png", self._scale_factor), self._scale_factor],
"BLUE_PORTAL": [load_template(f"assets/templates{res_str}/blue_portal.png", self._scale_factor), self._scale_factor],
"BLUE_PORTAL_2": [load_template(f"assets/templates{res_str}/blue_portal_2.png", self._scale_factor), self._scale_factor],
# Template Inventory / UI
"INVENTORY_GOLD_BTN": [load_template(f"assets/templates{res_str}/inventory_gold_btn.png", 1.0), 1.0],
"D2_LOGO_HS": [load_template(f"assets/templates{res_str}/d2_logo_hs.png", 1.0), 1.0],
"LOADING": [load_template(f"assets/templates{res_str}/loading.png", 1.0), 1.0],
"PLAY_BTN": [load_template(f"assets/templates{res_str}/play_btn.png", 1.0), 1.0],
"PLAY_BTN_GRAY": [load_template(f"assets/templates{res_str}/play_btn_gray.png", 1.0), 1.0],
"NORMAL_BTN": [load_template(f"assets/templates{res_str}/normal_btn.png", 1.0), 1.0],
"NIGHTMARE_BTN": [load_template(f"assets/templates{res_str}/nightmare_btn.png", 1.0), 1.0],
"HELL_BTN": [load_template(f"assets/templates{res_str}/hell_btn.png", 1.0), 1.0],
"SAVE_AND_EXIT_NO_HIGHLIGHT": [load_template(f"assets/templates{res_str}/save_and_exit_no_highlight.png", 1.0), 1.0],
"SAVE_AND_EXIT_HIGHLIGHT": [load_template(f"assets/templates{res_str}/save_and_exit_highlight.png", 1.0), 1.0],
"SERVER_ISSUES": [load_template(f"assets/templates{res_str}/server_issues.png", 1.0), 1.0],
"WAYPOINT_MENU": [load_template(f"assets/templates{res_str}/waypoint_menu.png", 1.0), 1.0],
"MERC": [load_template(f"assets/templates{res_str}/merc.png", 1.0), 1.0],
"TELE_ACTIVE": [load_template(f"assets/templates{res_str}/tele_active.png", 1.0), 1.0],
"TELE_INACTIVE": [load_template(f"assets/templates{res_str}/tele_inactive.png", 1.0), 1.0],
"VIGOR": [load_template(f"assets/templates{res_str}/vigor.png", 1.0), 1.0],
"REPAIR_BTN": [load_template(f"assets/templates{res_str}/repair_btn.png", 1.0), 1.0],
"TP_TOMB": [load_template(f"assets/templates{res_str}/tp_tomb.png", 1.0), 1.0],
"SUPER_HEALING_POTION": [load_template(f"assets/templates{res_str}/super_healing_potion.png", 1.0), 1.0],
"SUPER_MANA_POTION": [load_template(f"assets/templates{res_str}/super_mana_potion.png", 1.0), 1.0],
"FULL_REJUV_POTION": [load_template(f"assets/templates{res_str}/full_rejuv_potion.png", 1.0), 1.0],
"REJUV_POTION": [load_template(f"assets/templates{res_str}/rejuv_potion.png", 1.0), 1.0],
# NPC: Qual-Kehk
"QUAL_FRONT": [load_template(f"assets/npc{res_str}/qual_kehk/qual_front.png", 1.0), 1.0],
"QUAL_SIDE": [load_template(f"assets/npc{res_str}/qual_kehk/qual_side.png", 1.0), 1.0],
"QUAL_BACK": [load_template(f"assets/npc{res_str}/qual_kehk/qual_back.png", 1.0), 1.0],
"QUAL_45": [load_template(f"assets/npc{res_str}/qual_kehk/qual_45.png", 1.0), 1.0],
"QUAL_45_2": [load_template(f"assets/npc{res_str}/qual_kehk/qual_45_2.png", 1.0), 1.0],
"QUAL_45_3": [load_template(f"assets/npc{res_str}/qual_kehk/qual_45_3.png", 1.0), 1.0],
"QUAL_NAME_TAG_WHITE": [load_template(f"assets/npc{res_str}/qual_kehk/qual_kehk_white.png", 1.0), 1.0],
"QUAL_NAME_TAG_GOLD": [load_template(f"assets/npc{res_str}/qual_kehk/qual_kehk_gold.png", 1.0), 1.0],
"QUAL_RESURRECT_BTN": [load_template(f"assets/npc{res_str}/qual_kehk/resurrect_btn.png", 1.0), 1.0],
# NPC: Malah
"MALAH_FRONT": [load_template(f"assets/npc{res_str}/malah/malah_front.png", 1.0), 1.0],
"MALAH_BACK": [load_template(f"assets/npc{res_str}/malah/malah_BACK.png", 1.0), 1.0],
"MALAH_45": [load_template(f"assets/npc{res_str}/malah/malah_45.png", 1.0), 1.0],
"MALAH_SIDE": [load_template(f"assets/npc{res_str}/malah/malah_side.png", 1.0), 1.0],
"MALAH_SIDE_2": [load_template(f"assets/npc{res_str}/malah/malah_side_2.png", 1.0), 1.0],
"MALAH_NAME_TAG_WHITE": [load_template(f"assets/npc{res_str}/malah/malah_white.png", 1.0), 1.0],
"MALAH_NAME_TAG_GOLD": [load_template(f"assets/npc{res_str}/malah/malah_gold.png", 1.0), 1.0],
"MALAH_TRADE_BTN": [load_template(f"assets/npc{res_str}/malah/trade_btn.png", 1.0), 1.0],
# NPC: Larzuk
"LARZUK_FRONT": [load_template(f"assets/npc{res_str}/larzuk/larzuk_front.png", 1.0), 1.0],
"LARZUK_BACK": [load_template(f"assets/npc{res_str}/larzuk/larzuk_back.png", 1.0), 1.0],
"LARZUK_SIDE": [load_template(f"assets/npc{res_str}/larzuk/larzuk_side.png", 1.0), 1.0],
"LARZUK_SIDE_2": [load_template(f"assets/npc{res_str}/larzuk/larzuk_side_2.png", 1.0), 1.0],
"LARZUK_SIDE_3": [load_template(f"assets/npc{res_str}/larzuk/larzuk_side_3.png", 1.0), 1.0],
"LARZUK_NAME_TAG_WHITE": [load_template(f"assets/npc{res_str}/larzuk/larzuk_white.png", 1.0), 1.0],
"LARZUK_NAME_TAG_GOLD": [load_template(f"assets/npc{res_str}/larzuk/larzuk_gold.png", 1.0), 1.0],
"LARZUK_TRADE_REPAIR_BTN": [load_template(f"assets/npc{res_str}/larzuk/trade_repair_btn.png", 1.0), 1.0],
# NPC: Anya
"ANYA_FRONT": [load_template(f"assets/npc{res_str}/anya/anya_front.png", 1.0), 1.0],
"ANYA_BACK": [load_template(f"assets/npc{res_str}/anya/anya_back.png", 1.0), 1.0],
"ANYA_SIDE": [load_template(f"assets/npc{res_str}/anya/anya_side.png", 1.0), 1.0],
"ANYA_NAME_TAG_GOLD": [load_template(f"assets/npc{res_str}/anya/anya_gold.png", 1.0), 1.0],
"ANYA_NAME_TAG_WHITE": [load_template(f"assets/npc{res_str}/anya/anya_white.png", 1.0), 1.0],
"ANYA_TRADE_BTN": [load_template(f"assets/npc{res_str}/anya/trade_btn.png", 1.0), 1.0],
}
def get_template(self, key):
return self._templates[key][0]
def search(
self,
ref: Union[str, np.ndarray],
inp_img: np.ndarray,
threshold: float = None,
roi: List[float] = None,
normalize_monitor: bool = False,
) -> Tuple[bool, Tuple[float, float]]:
"""
Search for a template in an image
        :param ref: Either the key of an already loaded template or an image which is used as the template
:param inp_img: Image in which the template will be searched
:param threshold: Threshold which determines if a template is found or not
:param roi: Region of Interest of the inp_img to restrict search area. Format [left, top, width, height]
:return: Returns found flag and the position as [bool, [x, y]]. If not found, position will be None. Position in image space.
"""
threshold = self._config.advanced_options["template_threshold"] if threshold is None else threshold
if roi is None:
# if no roi is provided roi = full inp_img
roi = [0, 0, inp_img.shape[1], inp_img.shape[0]]
rx, ry, rw, rh = roi
inp_img = inp_img[ry:ry + rh, rx:rx + rw]
if type(ref) == str:
template = self._templates[ref][0]
scale = self._templates[ref][1]
else:
template = ref
scale = 1.0
img: np.ndarray = cv2.resize(inp_img, None, fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)
rx *= scale
ry *= scale
rw *= scale
rh *= scale
if img.shape[0] > template.shape[0] and img.shape[1] > template.shape[1]:
res = cv2.matchTemplate(img, template, cv2.TM_CCOEFF_NORMED)
_, max_val, _, max_pos = cv2.minMaxLoc(res)
self.last_score = max_val
if max_val > threshold:
ref_point = (max_pos[0] + int(template.shape[1] * 0.5) + rx, max_pos[1] + int(template.shape[0] * 0.5) + ry)
ref_point = (int(ref_point[0] * (1.0 / scale)), int(ref_point[1] * (1.0 / scale)))
if normalize_monitor:
ref_point = self._screen.convert_screen_to_monitor(ref_point)
return True, ref_point
return False, None
def search_and_wait(
self,
ref: Union[str, List[str]],
roi: List[float] = None,
time_out: float = None,
threshold: float = None,
take_ss: bool = True
) -> Tuple[bool, Tuple[float, float]]:
"""
Helper function that will loop and keep searching for a template
:param ref: Key of template which has been loaded beforehand
:param time_out: After this amount of time the search will stop and it will return [False, None]
:param threshold: Adapt threshold for being found
:param take_ss: Bool value to take screenshot on timeout or not (flag must still be set in params!)
Rest of params same as TemplateFinder.search()
"""
threshold = self._config.advanced_options["template_threshold"] if threshold is None else threshold
Logger.debug(f"Waiting for Template {ref}")
start = time.time()
while 1:
img = self._screen.grab()
is_loading_black_roi = np.average(img[:, 0:self._config.ui_roi["loading_left_black"][2]]) < 1.0
if type(ref) is str:
ref = [ref]
for x in ref:
success, pos = self.search(x, img, roi=roi, threshold=threshold)
if success:
break
if not is_loading_black_roi:
if success:
return True, pos
elif time_out is not None and (time.time() - start) > time_out:
if self._config.general["info_screenshots"] and take_ss:
cv2.imwrite(f"./info_screenshots/info_wait_for_{ref}_time_out_" + time.strftime("%Y%m%d_%H%M%S") + ".png", img)
return False, None
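# Illustrative usage of search_and_wait (names as defined above; the time_out value
# is an arbitrary example):
#   finder = TemplateFinder(Screen(Config().general["monitor"]))
#   found, pos = finder.search_and_wait(["A5_STASH", "A5_WP"], time_out=10)
#   if found:
#       pass  # e.g. move the mouse to pos and interact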
# Testing: Have whatever you want to find on the screen
if __name__ == "__main__":
from screen import Screen
from config import Config
config = Config()
screen = Screen(config.general["monitor"])
template_finder = TemplateFinder(screen)
search_templates = ["ELDRITCH_4", "ELDRITCH_3", "ELDRITCH_2", "ELDRITCH_1"]
scores = {}
while 1:
# img = cv2.imread("")
img = screen.grab()
display_img = img.copy()
for template_name in search_templates:
success, pos = template_finder.search(template_name, img)
scores[template_name] = template_finder.last_score
if success:
cv2.putText(display_img, str(template_name), pos, cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2, cv2.LINE_AA)
cv2.circle(display_img, pos, 7, (255, 0, 0), thickness=5)
display_img = cv2.resize(display_img, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_NEAREST)
print(scores)
cv2.imshow('test', display_img)
key = cv2.waitKey(1)
| import cv2
from screen import Screen
from typing import Tuple, Union, List
import numpy as np
from logger import Logger
import time
import os
from config import Config
from utils.misc import load_template
class TemplateFinder:
def __init__(self, screen: Screen, scale_factor: float = None):
"""
:param screen: Screen object
:param scale_factor: Scale factor that is used for templates. Note: UI and NPC templates will always have scale of 1.0
"""
self.last_score = -1.0
self._screen = screen
self._config = Config()
if scale_factor is None:
scale_factor = 0.7 if self._config.general['res'] == "1920_1080" else 1.0
self._scale_factor = scale_factor
res_str = "" if self._config.general['res'] == "1920_1080" else "_1280_720"
self._templates = {
# Templates for node in A5 Town
"A5_TOWN_0": [load_template(f"assets/templates{res_str}/a5_town/a5_town_0.png", self._scale_factor), self._scale_factor],
"A5_TOWN_0.5": [load_template(f"assets/templates{res_str}/a5_town/a5_town_0.5.png", self._scale_factor), self._scale_factor],
"A5_TOWN_1": [load_template(f"assets/templates{res_str}/a5_town/a5_town_1.png", self._scale_factor), self._scale_factor],
"A5_TOWN_2": [load_template(f"assets/templates{res_str}/a5_town/a5_town_2.png", self._scale_factor), self._scale_factor],
"A5_TOWN_3": [load_template(f"assets/templates{res_str}/a5_town/a5_town_3.png", self._scale_factor), self._scale_factor],
"A5_TOWN_4": [load_template(f"assets/templates{res_str}/a5_town/a5_town_4.png", self._scale_factor), self._scale_factor],
"A5_TOWN_5": [load_template(f"assets/templates{res_str}/a5_town/a5_town_5.png", self._scale_factor), self._scale_factor],
"A5_TOWN_6": [load_template(f"assets/templates{res_str}/a5_town/a5_town_6.png", self._scale_factor), self._scale_factor],
"A5_TOWN_7": [load_template(f"assets/templates{res_str}/a5_town/a5_town_7.png", self._scale_factor), self._scale_factor],
"A5_TOWN_8": [load_template(f"assets/templates{res_str}/a5_town/a5_town_8.png", self._scale_factor), self._scale_factor],
"A5_TOWN_9": [load_template(f"assets/templates{res_str}/a5_town/a5_town_9.png", self._scale_factor), self._scale_factor],
"A5_TOWN_10": [load_template(f"assets/templates{res_str}/a5_town/a5_town_10.png", self._scale_factor), self._scale_factor],
            # Templates for nodes at Pindle
"PINDLE_0": [load_template(f"assets/templates{res_str}/pindle/pindle_0.png", self._scale_factor), self._scale_factor],
"PINDLE_1": [load_template(f"assets/templates{res_str}/pindle/pindle_1.png", self._scale_factor), self._scale_factor],
"PINDLE_2": [load_template(f"assets/templates{res_str}/pindle/pindle_2.png", self._scale_factor), self._scale_factor],
"PINDLE_3": [load_template(f"assets/templates{res_str}/pindle/pindle_3.png", self._scale_factor), self._scale_factor],
"PINDLE_4": [load_template(f"assets/templates{res_str}/pindle/pindle_4.png", self._scale_factor), self._scale_factor],
"PINDLE_5": [load_template(f"assets/templates{res_str}/pindle/pindle_5.png", self._scale_factor), self._scale_factor],
"PINDLE_6": [load_template(f"assets/templates{res_str}/pindle/pindle_6.png", self._scale_factor), self._scale_factor],
"PINDLE_7": [load_template(f"assets/templates{res_str}/pindle/pindle_7.png", self._scale_factor), self._scale_factor],
# Templates for nodes to Eldritch
"ELDRITCH_START": [load_template(f"assets/templates{res_str}/eldritch/eldritch_start.png", self._scale_factor), self._scale_factor],
"ELDRITCH_0": [load_template(f"assets/templates{res_str}/eldritch/eldritch_0.png", self._scale_factor), self._scale_factor],
"ELDRITCH_1": [load_template(f"assets/templates{res_str}/eldritch/eldritch_1.png", self._scale_factor), self._scale_factor],
"ELDRITCH_2": [load_template(f"assets/templates{res_str}/eldritch/eldritch_2.png", self._scale_factor), self._scale_factor],
"ELDRITCH_3": [load_template(f"assets/templates{res_str}/eldritch/eldritch_3.png", self._scale_factor), self._scale_factor],
"ELDRITCH_4": [load_template(f"assets/templates{res_str}/eldritch/eldritch_4.png", self._scale_factor), self._scale_factor],
# Templates for nodes to Shenk (from Eldritch)
"SHENK_0": [load_template(f"assets/templates{res_str}/shenk/shenk_0.png", self._scale_factor), self._scale_factor],
"SHENK_1": [load_template(f"assets/templates{res_str}/shenk/shenk_1.png", self._scale_factor), self._scale_factor],
"SHENK_2": [load_template(f"assets/templates{res_str}/shenk/shenk_2.png", self._scale_factor), self._scale_factor],
"SHENK_3": [load_template(f"assets/templates{res_str}/shenk/shenk_3.png", self._scale_factor), self._scale_factor],
"SHENK_4": [load_template(f"assets/templates{res_str}/shenk/shenk_4.png", self._scale_factor), self._scale_factor],
"SHENK_6": [load_template(f"assets/templates{res_str}/shenk/shenk_6.png", self._scale_factor), self._scale_factor],
"SHENK_7": [load_template(f"assets/templates{res_str}/shenk/shenk_7.png", self._scale_factor), self._scale_factor],
"SHENK_8": [load_template(f"assets/templates{res_str}/shenk/shenk_8.png", self._scale_factor), self._scale_factor],
"SHENK_9": [load_template(f"assets/templates{res_str}/shenk/shenk_9.png", self._scale_factor), self._scale_factor],
"SHENK_10": [load_template(f"assets/templates{res_str}/shenk/shenk_10.png", self._scale_factor), self._scale_factor],
"SHENK_11": [load_template(f"assets/templates{res_str}/shenk/shenk_11.png", self._scale_factor), self._scale_factor],
"SHENK_12": [load_template(f"assets/templates{res_str}/shenk/shenk_12.png", self._scale_factor), self._scale_factor],
"SHENK_13": [load_template(f"assets/templates{res_str}/shenk/shenk_13.png", self._scale_factor), self._scale_factor],
"SHENK_15": [load_template(f"assets/templates{res_str}/shenk/shenk_15.png", self._scale_factor), self._scale_factor],
"SHENK_16": [load_template(f"assets/templates{res_str}/shenk/shenk_16.png", self._scale_factor), self._scale_factor],
"SHENK_17": [load_template(f"assets/templates{res_str}/shenk/shenk_17.png", self._scale_factor), self._scale_factor],
# Template Selectables
"A5_STASH": [load_template(f"assets/templates{res_str}/a5_stash.png", self._scale_factor), self._scale_factor],
"A5_WP": [load_template(f"assets/templates{res_str}/a5_wp.png", self._scale_factor), self._scale_factor],
"A5_RED_PORTAL": [load_template(f"assets/templates{res_str}/a5_red_portal.png", self._scale_factor), self._scale_factor],
"A5_RED_PORTAL_TEXT": [load_template(f"assets/templates{res_str}/a5_red_portal_with_text.png", self._scale_factor), self._scale_factor],
"BLUE_PORTAL": [load_template(f"assets/templates{res_str}/blue_portal.png", self._scale_factor), self._scale_factor],
"BLUE_PORTAL_2": [load_template(f"assets/templates{res_str}/blue_portal_2.png", self._scale_factor), self._scale_factor],
# Template Inventory / UI
"INVENTORY_GOLD_BTN": [load_template(f"assets/templates{res_str}/inventory_gold_btn.png", 1.0), 1.0],
"D2_LOGO_HS": [load_template(f"assets/templates{res_str}/d2_logo_hs.png", 1.0), 1.0],
"LOADING": [load_template(f"assets/templates{res_str}/loading.png", 1.0), 1.0],
"PLAY_BTN": [load_template(f"assets/templates{res_str}/play_btn.png", 1.0), 1.0],
"PLAY_BTN_GRAY": [load_template(f"assets/templates{res_str}/play_btn_gray.png", 1.0), 1.0],
"NORMAL_BTN": [load_template(f"assets/templates{res_str}/normal_btn.png", 1.0), 1.0],
"NIGHTMARE_BTN": [load_template(f"assets/templates{res_str}/nightmare_btn.png", 1.0), 1.0],
"HELL_BTN": [load_template(f"assets/templates{res_str}/hell_btn.png", 1.0), 1.0],
"SAVE_AND_EXIT_NO_HIGHLIGHT": [load_template(f"assets/templates{res_str}/save_and_exit_no_highlight.png", 1.0), 1.0],
"SAVE_AND_EXIT_HIGHLIGHT": [load_template(f"assets/templates{res_str}/save_and_exit_highlight.png", 1.0), 1.0],
"SERVER_ISSUES": [load_template(f"assets/templates{res_str}/server_issues.png", 1.0), 1.0],
"WAYPOINT_MENU": [load_template(f"assets/templates{res_str}/waypoint_menu.png", 1.0), 1.0],
"MERC": [load_template(f"assets/templates{res_str}/merc.png", 1.0), 1.0],
"TELE_ACTIVE": [load_template(f"assets/templates{res_str}/tele_active.png", 1.0), 1.0],
"TELE_INACTIVE": [load_template(f"assets/templates{res_str}/tele_inactive.png", 1.0), 1.0],
"VIGOR": [load_template(f"assets/templates{res_str}/vigor.png", 1.0), 1.0],
"REPAIR_BTN": [load_template(f"assets/templates{res_str}/repair_btn.png", 1.0), 1.0],
"TP_TOMB": [load_template(f"assets/templates{res_str}/tp_tomb.png", 1.0), 1.0],
"SUPER_HEALING_POTION": [load_template(f"assets/templates{res_str}/super_healing_potion.png", 1.0), 1.0],
"SUPER_MANA_POTION": [load_template(f"assets/templates{res_str}/super_mana_potion.png", 1.0), 1.0],
"FULL_REJUV_POTION": [load_template(f"assets/templates{res_str}/full_rejuv_potion.png", 1.0), 1.0],
"REJUV_POTION": [load_template(f"assets/templates{res_str}/rejuv_potion.png", 1.0), 1.0],
# NPC: Qual-Kehk
"QUAL_FRONT": [load_template(f"assets/npc{res_str}/qual_kehk/qual_front.png", 1.0), 1.0],
"QUAL_SIDE": [load_template(f"assets/npc{res_str}/qual_kehk/qual_side.png", 1.0), 1.0],
"QUAL_BACK": [load_template(f"assets/npc{res_str}/qual_kehk/qual_back.png", 1.0), 1.0],
"QUAL_45": [load_template(f"assets/npc{res_str}/qual_kehk/qual_45.png", 1.0), 1.0],
"QUAL_45_2": [load_template(f"assets/npc{res_str}/qual_kehk/qual_45_2.png", 1.0), 1.0],
"QUAL_45_3": [load_template(f"assets/npc{res_str}/qual_kehk/qual_45_3.png", 1.0), 1.0],
"QUAL_NAME_TAG_WHITE": [load_template(f"assets/npc{res_str}/qual_kehk/qual_kehk_white.png", 1.0), 1.0],
"QUAL_NAME_TAG_GOLD": [load_template(f"assets/npc{res_str}/qual_kehk/qual_kehk_gold.png", 1.0), 1.0],
"QUAL_RESURRECT_BTN": [load_template(f"assets/npc{res_str}/qual_kehk/resurrect_btn.png", 1.0), 1.0],
# NPC: Malah
"MALAH_FRONT": [load_template(f"assets/npc{res_str}/malah/malah_front.png", 1.0), 1.0],
"MALAH_BACK": [load_template(f"assets/npc{res_str}/malah/malah_BACK.png", 1.0), 1.0],
"MALAH_45": [load_template(f"assets/npc{res_str}/malah/malah_45.png", 1.0), 1.0],
"MALAH_SIDE": [load_template(f"assets/npc{res_str}/malah/malah_side.png", 1.0), 1.0],
"MALAH_SIDE_2": [load_template(f"assets/npc{res_str}/malah/malah_side_2.png", 1.0), 1.0],
"MALAH_NAME_TAG_WHITE": [load_template(f"assets/npc{res_str}/malah/malah_white.png", 1.0), 1.0],
"MALAH_NAME_TAG_GOLD": [load_template(f"assets/npc{res_str}/malah/malah_gold.png", 1.0), 1.0],
"MALAH_TRADE_BTN": [load_template(f"assets/npc{res_str}/malah/trade_btn.png", 1.0), 1.0],
# NPC: Larzuk
"LARZUK_FRONT": [load_template(f"assets/npc{res_str}/larzuk/larzuk_front.png", 1.0), 1.0],
"LARZUK_BACK": [load_template(f"assets/npc{res_str}/larzuk/larzuk_back.png", 1.0), 1.0],
"LARZUK_SIDE": [load_template(f"assets/npc{res_str}/larzuk/larzuk_side.png", 1.0), 1.0],
"LARZUK_SIDE_2": [load_template(f"assets/npc{res_str}/larzuk/larzuk_side_2.png", 1.0), 1.0],
"LARZUK_SIDE_3": [load_template(f"assets/npc{res_str}/larzuk/larzuk_side_3.png", 1.0), 1.0],
"LARZUK_NAME_TAG_WHITE": [load_template(f"assets/npc{res_str}/larzuk/larzuk_white.png", 1.0), 1.0],
"LARZUK_NAME_TAG_GOLD": [load_template(f"assets/npc{res_str}/larzuk/larzuk_gold.png", 1.0), 1.0],
"LARZUK_TRADE_REPAIR_BTN": [load_template(f"assets/npc{res_str}/larzuk/trade_repair_btn.png", 1.0), 1.0],
# NPC: Anya
"ANYA_FRONT": [load_template(f"assets/npc{res_str}/anya/anya_front.png", 1.0), 1.0],
"ANYA_BACK": [load_template(f"assets/npc{res_str}/anya/anya_back.png", 1.0), 1.0],
"ANYA_SIDE": [load_template(f"assets/npc{res_str}/anya/anya_side.png", 1.0), 1.0],
"ANYA_NAME_TAG_GOLD": [load_template(f"assets/npc{res_str}/anya/anya_gold.png", 1.0), 1.0],
"ANYA_NAME_TAG_WHITE": [load_template(f"assets/npc{res_str}/anya/anya_white.png", 1.0), 1.0],
"ANYA_TRADE_BTN": [load_template(f"assets/npc{res_str}/anya/trade_btn.png", 1.0), 1.0],
}
def get_template(self, key):
return self._templates[key][0]
def search(
self,
ref: Union[str, np.ndarray],
inp_img: np.ndarray,
threshold: float = None,
roi: List[float] = None,
normalize_monitor: bool = False,
) -> Tuple[bool, Tuple[float, float]]:
"""
Search for a template in an image
:param ref: Either the key of an already loaded template or an image which is used as the template
:param inp_img: Image in which the template will be searched
:param threshold: Threshold which determines if a template is found or not
:param roi: Region of Interest of the inp_img to restrict search area. Format [left, top, width, height]
:return: Returns found flag and the position as [bool, [x, y]]. If not found, position will be None. Position in image space.
"""
threshold = self._config.advanced_options["template_threshold"] if threshold is None else threshold
if roi is None:
# if no roi is provided roi = full inp_img
roi = [0, 0, inp_img.shape[1], inp_img.shape[0]]
rx, ry, rw, rh = roi
inp_img = inp_img[ry:ry + rh, rx:rx + rw]
if type(ref) == str:
template = self._templates[ref][0]
scale = self._templates[ref][1]
else:
template = ref
scale = 1.0
img: np.ndarray = cv2.resize(inp_img, None, fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)
rx *= scale
ry *= scale
rw *= scale
rh *= scale
if img.shape[0] > template.shape[0] and img.shape[1] > template.shape[1]:
res = cv2.matchTemplate(img, template, cv2.TM_CCOEFF_NORMED)
_, max_val, _, max_pos = cv2.minMaxLoc(res)
self.last_score = max_val
if max_val > threshold:
ref_point = (max_pos[0] + int(template.shape[1] * 0.5) + rx, max_pos[1] + int(template.shape[0] * 0.5) + ry)
ref_point = (int(ref_point[0] * (1.0 / scale)), int(ref_point[1] * (1.0 / scale)))
if normalize_monitor:
ref_point = self._screen.convert_screen_to_monitor(ref_point)
return True, ref_point
return False, None
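# Example usage (editorial sketch, not part of the original file; the key and threshold are
# illustrative only):
#   finder = TemplateFinder(screen)
#   found, pos = finder.search("A5_WP", screen.grab(), threshold=0.68, normalize_monitor=True)
#   if found:
#       pass  # pos is the template center, converted to monitor coordinates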
def search_and_wait(
self,
ref: Union[str, List[str]],
roi: List[float] = None,
time_out: float = None,
threshold: float = None,
take_ss: bool = True
) -> Tuple[bool, Tuple[float, float]]:
"""
Helper function that will loop and keep searching for a template
:param ref: Key (or list of keys) of templates which have been loaded beforehand
:param time_out: After this amount of time the search will stop and it will return [False, None]
:param threshold: Adapted threshold for deciding whether a template counts as found
:param take_ss: Bool value to take screenshot on timeout or not (flag must still be set in params!)
Rest of params same as TemplateFinder.search()
"""
threshold = self._config.advanced_options["template_threshold"] if threshold is None else threshold
Logger.debug(f"Waiting for Template {ref}")
start = time.time()
while 1:
img = self._screen.grab()
is_loading_black_roi = np.average(img[:, 0:self._config.ui_roi["loading_left_black"][2]]) < 1.0
if type(ref) is str:
ref = [ref]
for x in ref:
success, pos = self.search(x, img, roi=roi, threshold=threshold)
if success:
break
if not is_loading_black_roi:
if success:
return True, pos
elif time_out is not None and (time.time() - start) > time_out:
if self._config.general["info_screenshots"] and take_ss:
cv2.imwrite(f"./info_screenshots/info_wait_for_{ref}_time_out_" + time.strftime("%Y%m%d_%H%M%S") + ".png", img)
return False, None
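# Example usage (editorial sketch, not part of the original file): wait up to 20s for either the
# waypoint or the stash template to appear before giving up.
#   success, pos = finder.search_and_wait(["A5_WP", "A5_STASH"], time_out=20)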
# Testing: Have whatever you want to find on the screen
if __name__ == "__main__":
from screen import Screen
from config import Config
config = Config()
screen = Screen(config.general["monitor"])
template_finder = TemplateFinder(screen)
search_templates = ["ELDRITCH_4", "ELDRITCH_3", "ELDRITCH_2", "ELDRITCH_1"]
scores = {}
while 1:
# img = cv2.imread("")
img = screen.grab()
display_img = img.copy()
for template_name in search_templates:
success, pos = template_finder.search(template_name, img)
scores[template_name] = template_finder.last_score
if success:
cv2.putText(display_img, str(template_name), pos, cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2, cv2.LINE_AA)
cv2.circle(display_img, pos, 7, (255, 0, 0), thickness=5)
display_img = cv2.resize(display_img, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_NEAREST)
print(scores)
cv2.imshow('test', display_img)
key = cv2.waitKey(1)
| en | 0.805381 | :param screen: Screen object :param scale_factor: Scale factor that is used for templates. Note: UI and NPC templates will always have scale of 1.0 # Templates for node in A5 Town # Templates for nod at Pindle # Templates for nodes to Eldritch # Templates for nodes to Shenk (from Eldritch) # Template Selectables # Template Inventory / UI # NPC: Qual-Kehk # NPC: Malah # NPC: Larzuk # NPC: Anya Search for a template in an image :param ref: Either key of a already loaded template or a image which is used as template :param inp_img: Image in which the template will be searched :param threshold: Threshold which determines if a template is found or not :param roi: Region of Interest of the inp_img to restrict search area. Format [left, top, width, height] :return: Returns found flag and the position as [bool, [x, y]]. If not found, position will be None. Position in image space. # if no roi is provided roi = full inp_img Helper function that will loop and keep searching for a template :param ref: Key of template which has been loaded beforehand :param time_out: After this amount of time the search will stop and it will return [False, None] :param threshold: Adapt threshold for being found :param take_ss: Bool value to take screenshot on timeout or not (flag must still be set in params!) Rest of params same as TemplateFinder.search() # Testing: Have whatever you want to find on the screen # img = cv2.imread("") | 2.439086 | 2 |
graphnas/gnn_model_manager.py | GraphNAS/GraphNAS | 94 | 6630303 | import os
import time
import numpy as np
import torch
import torch.nn.functional as F
from dgl import DGLGraph
from dgl.data import load_data
from graphnas.gnn import GraphNet
from graphnas.utils.model_utils import EarlyStop, TopAverage, process_action
def load(args, save_file=".npy"):
save_file = args.dataset + save_file
if os.path.exists(save_file):
return np.load(save_file).tolist()
else:
datas = load_data(args)
np.save(save_file, datas)
return datas
def evaluate(output, labels, mask):
_, indices = torch.max(output, dim=1)
correct = torch.sum(indices[mask] == labels[mask])
return correct.item() * 1.0 / mask.sum().item()
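# Editorial note (not part of the original file): `mask` selects the nodes of the split being
# scored, so the returned value is (#correct predictions on masked nodes) / (#masked nodes),
# e.g. 120 correct out of 140 masked training nodes gives 120 / 140 ~= 0.857.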
# manages the training process of a GNN on citation datasets
class CitationGNNManager(object):
def __init__(self, args):
self.args = args
if hasattr(args, 'dataset') and args.dataset in ["cora", "citeseer", "pubmed"]:
self.data = load(args)
self.args.in_feats = self.in_feats = self.data.features.shape[1]
self.args.num_class = self.n_classes = self.data.num_labels
self.early_stop_manager = EarlyStop(10)
self.reward_manager = TopAverage(10)
self.args = args
self.drop_out = args.in_drop
self.multi_label = args.multi_label
self.lr = args.lr
self.weight_decay = args.weight_decay
self.retrain_epochs = args.retrain_epochs
self.loss_fn = torch.nn.BCELoss()
self.epochs = args.epochs
self.train_graph_index = 0
self.train_set_length = 10
self.param_file = args.param_file
self.shared_params = None
self.loss_fn = torch.nn.functional.nll_loss
def load_param(self):
# don't share param
pass
def save_param(self, model, update_all=False):
# don't share param
pass
# train from scratch
def evaluate(self, actions=None, format="two"):
actions = process_action(actions, format, self.args)
print("train action:", actions)
# create model
model = self.build_gnn(actions)
if self.args.cuda:
model.cuda()
# use optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=self.args.lr, weight_decay=self.args.weight_decay)
try:
model, val_acc, test_acc = self.run_model(model, optimizer, self.loss_fn, self.data, self.epochs,
cuda=self.args.cuda, return_best=True,
half_stop_score=max(self.reward_manager.get_top_average() * 0.7,
0.4))
except RuntimeError as e:
if "cuda" in str(e) or "CUDA" in str(e):
print(e)
val_acc = 0
test_acc = 0
else:
raise e
return val_acc, test_acc
# train from scratch
def train(self, actions=None, format="two"):
origin_action = actions
actions = process_action(actions, format, self.args)
print("train action:", actions)
# create model
model = self.build_gnn(actions)
try:
if self.args.cuda:
model.cuda()
# use optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=self.args.lr, weight_decay=self.args.weight_decay)
model, val_acc = self.run_model(model, optimizer, self.loss_fn, self.data, self.epochs, cuda=self.args.cuda,
half_stop_score=max(self.reward_manager.get_top_average() * 0.7, 0.4))
except RuntimeError as e:
if "cuda" in str(e) or "CUDA" in str(e):
print(e)
val_acc = 0
else:
raise e
reward = self.reward_manager.get_reward(val_acc)
self.save_param(model, update_all=(reward > 0))
self.record_action_info(origin_action, reward, val_acc)
return reward, val_acc
def record_action_info(self, origin_action, reward, val_acc):
with open(self.args.dataset + "_" + self.args.search_mode + self.args.submanager_log_file, "a") as file:
# with open(f'{self.args.dataset}_{self.args.search_mode}_{self.args.format}_manager_result.txt', "a") as file:
file.write(str(origin_action))
file.write(";")
file.write(str(reward))
file.write(";")
file.write(str(val_acc))
file.write("\n")
def build_gnn(self, actions):
model = GraphNet(actions, self.in_feats, self.n_classes, drop_out=self.args.in_drop, multi_label=False,
batch_normal=False)
return model
def retrain(self, actions, format="two"):
return self.train(actions, format)
def test_with_param(self, actions=None, format="two", with_retrain=False):
return self.train(actions, format)
@staticmethod
def run_model(model, optimizer, loss_fn, data, epochs, early_stop=5, tmp_model_file="geo_citation.pkl",
half_stop_score=0, return_best=False, cuda=True, need_early_stop=False, show_info=False):
dur = []
begin_time = time.time()
best_performance = 0
min_val_loss = float("inf")
min_train_loss = float("inf")
model_val_acc = 0
features, g, labels, mask, val_mask, test_mask, n_edges = CitationGNNManager.prepare_data(data, cuda)
for epoch in range(1, epochs + 1):
model.train()
t0 = time.time()
# forward
logits = model(features, g)
logits = F.log_softmax(logits, 1)
loss = loss_fn(logits[mask], labels[mask])
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_loss = loss.item()
# evaluate
model.eval()
logits = model(features, g)
logits = F.log_softmax(logits, 1)
train_acc = evaluate(logits, labels, mask)
dur.append(time.time() - t0)
val_loss = float(loss_fn(logits[val_mask], labels[val_mask]))
val_acc = evaluate(logits, labels, val_mask)
test_acc = evaluate(logits, labels, test_mask)
if val_loss < min_val_loss: # and train_loss < min_train_loss
min_val_loss = val_loss
min_train_loss = train_loss
model_val_acc = val_acc
if test_acc > best_performance:
best_performance = test_acc
if show_info:
print(
"Epoch {:05d} | Loss {:.4f} | Time(s) {:.4f} | acc {:.4f} | val_acc {:.4f} | test_acc {:.4f}".format(
epoch, loss.item(), np.mean(dur), train_acc, val_acc, test_acc))
end_time = time.time()
print("Each Epoch Cost Time: %f " % ((end_time - begin_time) / epoch))
print(f"val_score:{model_val_acc},test_score:{best_performance}")
if return_best:
return model, model_val_acc, best_performance
else:
return model, model_val_acc
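# Editorial note (not part of the original file): run_model() selects on validation loss --
# model_val_acc is the validation accuracy at the epoch with the lowest validation loss, while
# best_performance tracks the highest test accuracy observed over all epochs.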
# @staticmethod
# def run_model(model, optimizer, loss_fn, data, epochs, early_stop=5, tmp_model_file="citation_testing_2.pkl",
# half_stop_score=0, return_best=False, cuda=True, need_early_stop=False):
#
# early_stop_manager = EarlyStop(early_stop)
# # initialize graph
# dur = []
# begin_time = time.time()
# features, g, labels, mask, val_mask, test_mask, n_edges = CitationGNNManager.prepare_data(data, cuda)
# saved = False
# best_performance = 0
# for epoch in range(1, epochs + 1):
# should_break = False
# t0 = time.time()
#
# model.train()
# logits = model(features, g)
# logits = F.log_softmax(logits, 1)
# loss = loss_fn(logits[mask], labels[mask])
# optimizer.zero_grad()
# loss.backward()
# optimizer.step()
#
# model.eval()
# logits = model(features, g)
# logits = F.log_softmax(logits, 1)
# train_acc = evaluate(logits, labels, mask)
# train_loss = float(loss)
# dur.append(time.time() - t0)
#
# val_loss = float(loss_fn(logits[val_mask], labels[val_mask]))
# val_acc = evaluate(logits, labels, val_mask)
# test_acc = evaluate(logits, labels, test_mask)
#
# print(
# "Epoch {:05d} | Loss {:.4f} | Time(s) {:.4f} | acc {:.4f} | val_acc {:.4f} | test_acc {:.4f}".format(
# epoch, loss.item(), np.mean(dur), train_acc, val_acc, test_acc))
#
# end_time = time.time()
# print("Each Epoch Cost Time: %f " % ((end_time - begin_time) / epoch))
# # print("Test Accuracy {:.4f}".format(acc))
# if early_stop_manager.should_save(train_loss, train_acc, val_loss, val_acc):
# saved = True
# torch.save(model.state_dict(), tmp_model_file)
# if test_acc > best_performance:
# best_performance = test_acc
# if need_early_stop and early_stop_manager.should_stop(train_loss, train_acc, val_loss, val_acc):
# should_break = True
# if should_break and epoch > 50:
# print("early stop")
# break
# if half_stop_score > 0 and epoch > (epochs / 2) and val_acc < half_stop_score:
# print("half_stop")
# break
# if saved:
# model.load_state_dict(torch.load(tmp_model_file))
# model.eval()
# val_acc = evaluate(model(features, g), labels, val_mask)
# print(evaluate(model(features, g), labels, test_mask))
# if return_best:
# return model, val_acc, best_performance
# else:
# return model, val_acc
@staticmethod
def prepare_data(data, cuda=True):
features = torch.FloatTensor(data.features)
labels = torch.LongTensor(data.labels)
mask = torch.ByteTensor(data.train_mask)
test_mask = torch.ByteTensor(data.test_mask)
val_mask = torch.ByteTensor(data.val_mask)
n_edges = data.graph.number_of_edges()
# create DGL graph
g = DGLGraph(data.graph)
# add self loop
g.add_edges(g.nodes(), g.nodes())
degs = g.in_degrees().float()
norm = torch.pow(degs, -0.5)
norm[torch.isinf(norm)] = 0
if cuda:
features = features.cuda()
labels = labels.cuda()
norm = norm.cuda()
g.ndata['norm'] = norm.unsqueeze(1)
return features, g, labels, mask, val_mask, test_mask, n_edges
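# Editorial note (not part of the original file): g.ndata['norm'] holds D^{-1/2} per node, so a
# GCN-style layer can realize the symmetric normalization D^{-1/2} * A_hat * D^{-1/2} * H * W by
# multiplying node features with this field before and after message aggregation; how it is
# consumed depends on the GraphNet implementation, which is assumed here.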
| import os
import time
import numpy as np
import torch
import torch.nn.functional as F
from dgl import DGLGraph
from dgl.data import load_data
from graphnas.gnn import GraphNet
from graphnas.utils.model_utils import EarlyStop, TopAverage, process_action
def load(args, save_file=".npy"):
save_file = args.dataset + save_file
if os.path.exists(save_file):
return np.load(save_file).tolist()
else:
datas = load_data(args)
np.save(save_file, datas)
return datas
def evaluate(output, labels, mask):
_, indices = torch.max(output, dim=1)
correct = torch.sum(indices[mask] == labels[mask])
return correct.item() * 1.0 / mask.sum().item()
# manages the training process of a GNN on citation datasets
class CitationGNNManager(object):
def __init__(self, args):
self.args = args
if hasattr(args, 'dataset') and args.dataset in ["cora", "citeseer", "pubmed"]:
self.data = load(args)
self.args.in_feats = self.in_feats = self.data.features.shape[1]
self.args.num_class = self.n_classes = self.data.num_labels
self.early_stop_manager = EarlyStop(10)
self.reward_manager = TopAverage(10)
self.args = args
self.drop_out = args.in_drop
self.multi_label = args.multi_label
self.lr = args.lr
self.weight_decay = args.weight_decay
self.retrain_epochs = args.retrain_epochs
self.loss_fn = torch.nn.BCELoss()
self.epochs = args.epochs
self.train_graph_index = 0
self.train_set_length = 10
self.param_file = args.param_file
self.shared_params = None
self.loss_fn = torch.nn.functional.nll_loss
def load_param(self):
# don't share param
pass
def save_param(self, model, update_all=False):
# don't share param
pass
# train from scratch
def evaluate(self, actions=None, format="two"):
actions = process_action(actions, format, self.args)
print("train action:", actions)
# create model
model = self.build_gnn(actions)
if self.args.cuda:
model.cuda()
# use optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=self.args.lr, weight_decay=self.args.weight_decay)
try:
model, val_acc, test_acc = self.run_model(model, optimizer, self.loss_fn, self.data, self.epochs,
cuda=self.args.cuda, return_best=True,
half_stop_score=max(self.reward_manager.get_top_average() * 0.7,
0.4))
except RuntimeError as e:
if "cuda" in str(e) or "CUDA" in str(e):
print(e)
val_acc = 0
test_acc = 0
else:
raise e
return val_acc, test_acc
# train from scratch
def train(self, actions=None, format="two"):
origin_action = actions
actions = process_action(actions, format, self.args)
print("train action:", actions)
# create model
model = self.build_gnn(actions)
try:
if self.args.cuda:
model.cuda()
# use optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=self.args.lr, weight_decay=self.args.weight_decay)
model, val_acc = self.run_model(model, optimizer, self.loss_fn, self.data, self.epochs, cuda=self.args.cuda,
half_stop_score=max(self.reward_manager.get_top_average() * 0.7, 0.4))
except RuntimeError as e:
if "cuda" in str(e) or "CUDA" in str(e):
print(e)
val_acc = 0
else:
raise e
reward = self.reward_manager.get_reward(val_acc)
self.save_param(model, update_all=(reward > 0))
self.record_action_info(origin_action, reward, val_acc)
return reward, val_acc
def record_action_info(self, origin_action, reward, val_acc):
with open(self.args.dataset + "_" + self.args.search_mode + self.args.submanager_log_file, "a") as file:
# with open(f'{self.args.dataset}_{self.args.search_mode}_{self.args.format}_manager_result.txt', "a") as file:
file.write(str(origin_action))
file.write(";")
file.write(str(reward))
file.write(";")
file.write(str(val_acc))
file.write("\n")
def build_gnn(self, actions):
model = GraphNet(actions, self.in_feats, self.n_classes, drop_out=self.args.in_drop, multi_label=False,
batch_normal=False)
return model
def retrain(self, actions, format="two"):
return self.train(actions, format)
def test_with_param(self, actions=None, format="two", with_retrain=False):
return self.train(actions, format)
@staticmethod
def run_model(model, optimizer, loss_fn, data, epochs, early_stop=5, tmp_model_file="geo_citation.pkl",
half_stop_score=0, return_best=False, cuda=True, need_early_stop=False, show_info=False):
dur = []
begin_time = time.time()
best_performance = 0
min_val_loss = float("inf")
min_train_loss = float("inf")
model_val_acc = 0
features, g, labels, mask, val_mask, test_mask, n_edges = CitationGNNManager.prepare_data(data, cuda)
for epoch in range(1, epochs + 1):
model.train()
t0 = time.time()
# forward
logits = model(features, g)
logits = F.log_softmax(logits, 1)
loss = loss_fn(logits[mask], labels[mask])
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_loss = loss.item()
# evaluate
model.eval()
logits = model(features, g)
logits = F.log_softmax(logits, 1)
train_acc = evaluate(logits, labels, mask)
dur.append(time.time() - t0)
val_loss = float(loss_fn(logits[val_mask], labels[val_mask]))
val_acc = evaluate(logits, labels, val_mask)
test_acc = evaluate(logits, labels, test_mask)
if val_loss < min_val_loss: # and train_loss < min_train_loss
min_val_loss = val_loss
min_train_loss = train_loss
model_val_acc = val_acc
if test_acc > best_performance:
best_performance = test_acc
if show_info:
print(
"Epoch {:05d} | Loss {:.4f} | Time(s) {:.4f} | acc {:.4f} | val_acc {:.4f} | test_acc {:.4f}".format(
epoch, loss.item(), np.mean(dur), train_acc, val_acc, test_acc))
end_time = time.time()
print("Each Epoch Cost Time: %f " % ((end_time - begin_time) / epoch))
print(f"val_score:{model_val_acc},test_score:{best_performance}")
if return_best:
return model, model_val_acc, best_performance
else:
return model, model_val_acc
# @staticmethod
# def run_model(model, optimizer, loss_fn, data, epochs, early_stop=5, tmp_model_file="citation_testing_2.pkl",
# half_stop_score=0, return_best=False, cuda=True, need_early_stop=False):
#
# early_stop_manager = EarlyStop(early_stop)
# # initialize graph
# dur = []
# begin_time = time.time()
# features, g, labels, mask, val_mask, test_mask, n_edges = CitationGNNManager.prepare_data(data, cuda)
# saved = False
# best_performance = 0
# for epoch in range(1, epochs + 1):
# should_break = False
# t0 = time.time()
#
# model.train()
# logits = model(features, g)
# logits = F.log_softmax(logits, 1)
# loss = loss_fn(logits[mask], labels[mask])
# optimizer.zero_grad()
# loss.backward()
# optimizer.step()
#
# model.eval()
# logits = model(features, g)
# logits = F.log_softmax(logits, 1)
# train_acc = evaluate(logits, labels, mask)
# train_loss = float(loss)
# dur.append(time.time() - t0)
#
# val_loss = float(loss_fn(logits[val_mask], labels[val_mask]))
# val_acc = evaluate(logits, labels, val_mask)
# test_acc = evaluate(logits, labels, test_mask)
#
# print(
# "Epoch {:05d} | Loss {:.4f} | Time(s) {:.4f} | acc {:.4f} | val_acc {:.4f} | test_acc {:.4f}".format(
# epoch, loss.item(), np.mean(dur), train_acc, val_acc, test_acc))
#
# end_time = time.time()
# print("Each Epoch Cost Time: %f " % ((end_time - begin_time) / epoch))
# # print("Test Accuracy {:.4f}".format(acc))
# if early_stop_manager.should_save(train_loss, train_acc, val_loss, val_acc):
# saved = True
# torch.save(model.state_dict(), tmp_model_file)
# if test_acc > best_performance:
# best_performance = test_acc
# if need_early_stop and early_stop_manager.should_stop(train_loss, train_acc, val_loss, val_acc):
# should_break = True
# if should_break and epoch > 50:
# print("early stop")
# break
# if half_stop_score > 0 and epoch > (epochs / 2) and val_acc < half_stop_score:
# print("half_stop")
# break
# if saved:
# model.load_state_dict(torch.load(tmp_model_file))
# model.eval()
# val_acc = evaluate(model(features, g), labels, val_mask)
# print(evaluate(model(features, g), labels, test_mask))
# if return_best:
# return model, val_acc, best_performance
# else:
# return model, val_acc
@staticmethod
def prepare_data(data, cuda=True):
features = torch.FloatTensor(data.features)
labels = torch.LongTensor(data.labels)
mask = torch.ByteTensor(data.train_mask)
test_mask = torch.ByteTensor(data.test_mask)
val_mask = torch.ByteTensor(data.val_mask)
n_edges = data.graph.number_of_edges()
# create DGL graph
g = DGLGraph(data.graph)
# add self loop
g.add_edges(g.nodes(), g.nodes())
degs = g.in_degrees().float()
norm = torch.pow(degs, -0.5)
norm[torch.isinf(norm)] = 0
if cuda:
features = features.cuda()
labels = labels.cuda()
norm = norm.cuda()
g.ndata['norm'] = norm.unsqueeze(1)
return features, g, labels, mask, val_mask, test_mask, n_edges
| en | 0.475434 | # manager the train process of GNN on citation dataset # don't share param # don't share param # train from scratch # create model # use optimizer # train from scratch # create model # use optimizer # with open(f'{self.args.dataset}_{self.args.search_mode}_{self.args.format}_manager_result.txt', "a") as file: # forward # evaluate # and train_loss < min_train_loss # @staticmethod # def run_model(model, optimizer, loss_fn, data, epochs, early_stop=5, tmp_model_file="citation_testing_2.pkl", # half_stop_score=0, return_best=False, cuda=True, need_early_stop=False): # # early_stop_manager = EarlyStop(early_stop) # # initialize graph # dur = [] # begin_time = time.time() # features, g, labels, mask, val_mask, test_mask, n_edges = CitationGNNManager.prepare_data(data, cuda) # saved = False # best_performance = 0 # for epoch in range(1, epochs + 1): # should_break = False # t0 = time.time() # # model.train() # logits = model(features, g) # logits = F.log_softmax(logits, 1) # loss = loss_fn(logits[mask], labels[mask]) # optimizer.zero_grad() # loss.backward() # optimizer.step() # # model.eval() # logits = model(features, g) # logits = F.log_softmax(logits, 1) # train_acc = evaluate(logits, labels, mask) # train_loss = float(loss) # dur.append(time.time() - t0) # # val_loss = float(loss_fn(logits[val_mask], labels[val_mask])) # val_acc = evaluate(logits, labels, val_mask) # test_acc = evaluate(logits, labels, test_mask) # # print( # "Epoch {:05d} | Loss {:.4f} | Time(s) {:.4f} | acc {:.4f} | val_acc {:.4f} | test_acc {:.4f}".format( # epoch, loss.item(), np.mean(dur), train_acc, val_acc, test_acc)) # # end_time = time.time() # print("Each Epoch Cost Time: %f " % ((end_time - begin_time) / epoch)) # # print("Test Accuracy {:.4f}".format(acc)) # if early_stop_manager.should_save(train_loss, train_acc, val_loss, val_acc): # saved = True # torch.save(model.state_dict(), tmp_model_file) # if test_acc > best_performance: # best_performance = test_acc # if need_early_stop and early_stop_manager.should_stop(train_loss, train_acc, val_loss, val_acc): # should_break = True # if should_break and epoch > 50: # print("early stop") # break # if half_stop_score > 0 and epoch > (epochs / 2) and val_acc < half_stop_score: # print("half_stop") # break # if saved: # model.load_state_dict(torch.load(tmp_model_file)) # model.eval() # val_acc = evaluate(model(features, g), labels, val_mask) # print(evaluate(model(features, g), labels, test_mask)) # if return_best: # return model, val_acc, best_performance # else: # return model, val_acc # create DGL graph # add self loop | 2.284593 | 2 |
tests/ea/selection/selector/select/select_2/case_data.py | stevenbennett96/stk | 21 | 6630304 | <filename>tests/ea/selection/selector/select/select_2/case_data.py
class CaseData:
"""
A test case.
Attributes
----------
selector : :class:`.Selector`
The selector to test.
population : :class:`tuple` of :class:`.MoleculeRecord`
The population from which batches are selected.
selected : :class:`tuple` of :class:`.Batch`
The batches which should be selected.
"""
def __init__(self, selector, population, selected):
"""
Initialize a :class:`.CaseData` instance.
Parameters
----------
selector : :class:`.Selector`
The selector to test.
population : :class:`tuple` of :class:`.MoleculeRecord`
The population from which batches are selected.
selected : :class:`tuple` of :class:`.Batch`
The batches which should be selected.
"""
self.selector = selector
self.population = population
self.selected = selected
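# Example construction (editorial sketch, not part of the original file; the variable names are
# placeholders for real stk objects):
#   case = CaseData(
#       selector=some_selector,            # an stk Selector instance
#       population=(record_a, record_b),   # MoleculeRecord instances
#       selected=(expected_batch, ),       # Batches the selector is expected to yield
#   )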
| <filename>tests/ea/selection/selector/select/select_2/case_data.py
class CaseData:
"""
A test case.
Attributes
----------
selector : :class:`.Selector`
The selector to test.
population : :class:`tuple` of :class:`.MoleculeRecord`
The population from which batches are selected.
selected : :class:`tuple` of :class:`.Batch`
The batches which should be selected.
"""
def __init__(self, selector, population, selected):
"""
Initialize a :class:`.CaseData` instance.
Parameters
----------
selector : :class:`.Selector`
The selector to test.
population : :class:`tuple` of :class:`.MoleculeRecord`
The population from which batches are selected.
selected : :class:`tuple` of :class:`.Batch`
The batches which should be selected.
"""
self.selector = selector
self.population = population
self.selected = selected
| en | 0.657115 | A test case. Attributes ---------- selector : :class:`.Selector` The selector to test. population : :class:`tuple` of :class:`.MoleculeRecord` The population from which batches are selected. selected : :class:`tuple` of :class:`.Batch` The batches which should be selected. Initialize a :class:`.CaseData` instance. Parameters ---------- selector : :class:`.Selector` The selector to test. population : :class:`tuple` of :class:`.MoleculeRecord` The population from which batches are selected. selected : :class:`tuple` of :class:`.Batch` The batches which should be selected. | 2.802844 | 3 |
testing/mongo_ins_del_loop.py | Rippling/mongoproxy | 19 | 6630305 | <gh_stars>10-100
import pymongo
con = pymongo.MongoClient("mongodb://localhost:27111")
bigcollection = con['test']['bigcollection']
while True:
print "Inserting"
for i in range(1000):
bigcollection.insert_one({ "a": "bbbbbbbbbbbbbbbbbbbb", "b": "CCCCCCCCCCCCCCCCCC"})
print "Removing"
res = bigcollection.delete_many({})
print "deleted:", res.deleted_count
| import pymongo
con = pymongo.MongoClient("mongodb://localhost:27111")
bigcollection = con['test']['bigcollection']
while True:
print "Inserting"
for i in range(1000):
bigcollection.insert_one({ "a": "bbbbbbbbbbbbbbbbbbbb", "b": "CCCCCCCCCCCCCCCCCC"})
print "Removing"
res = bigcollection.delete_many({})
print "deleted:", res.deleted_count | none | 1 | 2.795352 | 3 |
|
tests/commands/autoupdate_test.py | MahmoudHussien/pre-commit | 0 | 6630306 | from __future__ import unicode_literals
import pipes
import pytest
import pre_commit.constants as C
from pre_commit import git
from pre_commit.commands.autoupdate import _check_hooks_still_exist_at_rev
from pre_commit.commands.autoupdate import autoupdate
from pre_commit.commands.autoupdate import RepositoryCannotBeUpdatedError
from pre_commit.commands.autoupdate import RevInfo
from pre_commit.util import cmd_output
from testing.auto_namedtuple import auto_namedtuple
from testing.fixtures import add_config_to_repo
from testing.fixtures import make_config_from_repo
from testing.fixtures import make_repo
from testing.fixtures import modify_manifest
from testing.fixtures import read_config
from testing.fixtures import sample_local_config
from testing.fixtures import write_config
from testing.util import git_commit
@pytest.fixture
def up_to_date(tempdir_factory):
yield make_repo(tempdir_factory, 'python_hooks_repo')
@pytest.fixture
def out_of_date(tempdir_factory):
path = make_repo(tempdir_factory, 'python_hooks_repo')
original_rev = git.head_rev(path)
git_commit(cwd=path)
head_rev = git.head_rev(path)
yield auto_namedtuple(
path=path, original_rev=original_rev, head_rev=head_rev,
)
@pytest.fixture
def tagged(out_of_date):
cmd_output('git', 'tag', 'v1.2.3', cwd=out_of_date.path)
yield out_of_date
@pytest.fixture
def hook_disappearing(tempdir_factory):
path = make_repo(tempdir_factory, 'python_hooks_repo')
original_rev = git.head_rev(path)
with modify_manifest(path) as manifest:
manifest[0]['id'] = 'bar'
yield auto_namedtuple(path=path, original_rev=original_rev)
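# Editorial note (not part of the original file): the fixtures above build throwaway git repos in
# four states -- up to date, out of date (one extra commit), tagged with v1.2.3, and with a hook id
# renamed away -- which the tests below use to exercise RevInfo and autoupdate().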
def test_rev_info_from_config():
info = RevInfo.from_config({'repo': 'repo/path', 'rev': 'v1.2.3'})
assert info == RevInfo('repo/path', 'v1.2.3', None)
def test_rev_info_update_up_to_date_repo(up_to_date):
config = make_config_from_repo(up_to_date)
info = RevInfo.from_config(config)
new_info = info.update(tags_only=False, freeze=False)
assert info == new_info
def test_rev_info_update_out_of_date_repo(out_of_date):
config = make_config_from_repo(
out_of_date.path, rev=out_of_date.original_rev,
)
info = RevInfo.from_config(config)
new_info = info.update(tags_only=False, freeze=False)
assert new_info.rev == out_of_date.head_rev
def test_rev_info_update_non_master_default_branch(out_of_date):
# change the default branch to be not-master
cmd_output('git', '-C', out_of_date.path, 'branch', '-m', 'dev')
test_rev_info_update_out_of_date_repo(out_of_date)
def test_rev_info_update_tags_even_if_not_tags_only(tagged):
config = make_config_from_repo(tagged.path, rev=tagged.original_rev)
info = RevInfo.from_config(config)
new_info = info.update(tags_only=False, freeze=False)
assert new_info.rev == 'v1.2.3'
def test_rev_info_update_tags_only_does_not_pick_tip(tagged):
git_commit(cwd=tagged.path)
config = make_config_from_repo(tagged.path, rev=tagged.original_rev)
info = RevInfo.from_config(config)
new_info = info.update(tags_only=True, freeze=False)
assert new_info.rev == 'v1.2.3'
def test_rev_info_update_freeze_tag(tagged):
git_commit(cwd=tagged.path)
config = make_config_from_repo(tagged.path, rev=tagged.original_rev)
info = RevInfo.from_config(config)
new_info = info.update(tags_only=True, freeze=True)
assert new_info.rev == tagged.head_rev
assert new_info.frozen == 'v1.2.3'
def test_rev_info_update_does_not_freeze_if_already_sha(out_of_date):
config = make_config_from_repo(
out_of_date.path, rev=out_of_date.original_rev,
)
info = RevInfo.from_config(config)
new_info = info.update(tags_only=True, freeze=True)
assert new_info.rev == out_of_date.head_rev
assert new_info.frozen is None
def test_autoupdate_up_to_date_repo(up_to_date, tmpdir, store):
contents = (
'repos:\n'
'- repo: {}\n'
' rev: {}\n'
' hooks:\n'
' - id: foo\n'
).format(up_to_date, git.head_rev(up_to_date))
cfg = tmpdir.join(C.CONFIG_FILE)
cfg.write(contents)
assert autoupdate(str(cfg), store, freeze=False, tags_only=False) == 0
assert cfg.read() == contents
def test_autoupdate_old_revision_broken(tempdir_factory, in_tmpdir, store):
"""In $FUTURE_VERSION, hooks.yaml will no longer be supported. This
asserts that when that day comes, pre-commit will be able to autoupdate
despite not being able to read hooks.yaml in that repository.
"""
path = make_repo(tempdir_factory, 'python_hooks_repo')
config = make_config_from_repo(path, check=False)
cmd_output('git', 'mv', C.MANIFEST_FILE, 'nope.yaml', cwd=path)
git_commit(cwd=path)
# Assume this is the revision the user's old repository was at
rev = git.head_rev(path)
cmd_output('git', 'mv', 'nope.yaml', C.MANIFEST_FILE, cwd=path)
git_commit(cwd=path)
update_rev = git.head_rev(path)
config['rev'] = rev
write_config('.', config)
with open(C.CONFIG_FILE) as f:
before = f.read()
assert autoupdate(C.CONFIG_FILE, store, freeze=False, tags_only=False) == 0
with open(C.CONFIG_FILE) as f:
after = f.read()
assert before != after
assert update_rev in after
def test_autoupdate_out_of_date_repo(out_of_date, tmpdir, store):
fmt = (
'repos:\n'
'- repo: {}\n'
' rev: {}\n'
' hooks:\n'
' - id: foo\n'
)
cfg = tmpdir.join(C.CONFIG_FILE)
cfg.write(fmt.format(out_of_date.path, out_of_date.original_rev))
assert autoupdate(str(cfg), store, freeze=False, tags_only=False) == 0
assert cfg.read() == fmt.format(out_of_date.path, out_of_date.head_rev)
def test_autoupdate_only_one_to_update(up_to_date, out_of_date, tmpdir, store):
fmt = (
'repos:\n'
'- repo: {}\n'
' rev: {}\n'
' hooks:\n'
' - id: foo\n'
'- repo: {}\n'
' rev: {}\n'
' hooks:\n'
' - id: foo\n'
)
cfg = tmpdir.join(C.CONFIG_FILE)
before = fmt.format(
up_to_date, git.head_rev(up_to_date),
out_of_date.path, out_of_date.original_rev,
)
cfg.write(before)
assert autoupdate(str(cfg), store, freeze=False, tags_only=False) == 0
assert cfg.read() == fmt.format(
up_to_date, git.head_rev(up_to_date),
out_of_date.path, out_of_date.head_rev,
)
def test_autoupdate_out_of_date_repo_with_correct_repo_name(
out_of_date, in_tmpdir, store,
):
stale_config = make_config_from_repo(
out_of_date.path, rev=out_of_date.original_rev, check=False,
)
local_config = sample_local_config()
config = {'repos': [stale_config, local_config]}
write_config('.', config)
with open(C.CONFIG_FILE) as f:
before = f.read()
repo_name = 'file://{}'.format(out_of_date.path)
ret = autoupdate(
C.CONFIG_FILE, store, freeze=False, tags_only=False,
repos=(repo_name,),
)
with open(C.CONFIG_FILE) as f:
after = f.read()
assert ret == 0
assert before != after
assert out_of_date.head_rev in after
assert 'local' in after
def test_autoupdate_out_of_date_repo_with_wrong_repo_name(
out_of_date, in_tmpdir, store,
):
config = make_config_from_repo(
out_of_date.path, rev=out_of_date.original_rev, check=False,
)
write_config('.', config)
with open(C.CONFIG_FILE) as f:
before = f.read()
# It will not update it, because the name doesn't match
ret = autoupdate(
C.CONFIG_FILE, store, freeze=False, tags_only=False,
repos=('dne',),
)
with open(C.CONFIG_FILE) as f:
after = f.read()
assert ret == 0
assert before == after
def test_does_not_reformat(tmpdir, out_of_date, store):
fmt = (
'repos:\n'
'- repo: {}\n'
' rev: {} # definitely the version I want!\n'
' hooks:\n'
' - id: foo\n'
' # These args are because reasons!\n'
' args: [foo, bar, baz]\n'
)
cfg = tmpdir.join(C.CONFIG_FILE)
cfg.write(fmt.format(out_of_date.path, out_of_date.original_rev))
assert autoupdate(str(cfg), store, freeze=False, tags_only=False) == 0
expected = fmt.format(out_of_date.path, out_of_date.head_rev)
assert cfg.read() == expected
def test_loses_formatting_when_not_detectable(out_of_date, store, tmpdir):
"""A best-effort attempt is made at updating rev without rewriting
formatting. When the original formatting cannot be detected, this
is abandoned.
"""
config = (
'repos: [\n'
' {{\n'
' repo: {}, rev: {},\n'
' hooks: [\n'
' # A comment!\n'
' {{id: foo}},\n'
' ],\n'
' }}\n'
']\n'.format(
pipes.quote(out_of_date.path), out_of_date.original_rev,
)
)
cfg = tmpdir.join(C.CONFIG_FILE)
cfg.write(config)
assert autoupdate(str(cfg), store, freeze=False, tags_only=False) == 0
expected = (
'repos:\n'
'- repo: {}\n'
' rev: {}\n'
' hooks:\n'
' - id: foo\n'
).format(out_of_date.path, out_of_date.head_rev)
assert cfg.read() == expected
def test_autoupdate_tagged_repo(tagged, in_tmpdir, store):
config = make_config_from_repo(tagged.path, rev=tagged.original_rev)
write_config('.', config)
assert autoupdate(C.CONFIG_FILE, store, freeze=False, tags_only=False) == 0
with open(C.CONFIG_FILE) as f:
assert 'v1.2.3' in f.read()
def test_autoupdate_freeze(tagged, in_tmpdir, store):
config = make_config_from_repo(tagged.path, rev=tagged.original_rev)
write_config('.', config)
assert autoupdate(C.CONFIG_FILE, store, freeze=True, tags_only=False) == 0
with open(C.CONFIG_FILE) as f:
expected = 'rev: {} # frozen: v1.2.3'.format(tagged.head_rev)
assert expected in f.read()
# if we un-freeze it should remove the frozen comment
assert autoupdate(C.CONFIG_FILE, store, freeze=False, tags_only=False) == 0
with open(C.CONFIG_FILE) as f:
assert 'rev: v1.2.3\n' in f.read()
def test_autoupdate_tags_only(tagged, in_tmpdir, store):
# add some commits after the tag
git_commit(cwd=tagged.path)
config = make_config_from_repo(tagged.path, rev=tagged.original_rev)
write_config('.', config)
assert autoupdate(C.CONFIG_FILE, store, freeze=False, tags_only=True) == 0
with open(C.CONFIG_FILE) as f:
assert 'v1.2.3' in f.read()
def test_autoupdate_latest_no_config(out_of_date, in_tmpdir, store):
config = make_config_from_repo(
out_of_date.path, rev=out_of_date.original_rev,
)
write_config('.', config)
cmd_output('git', 'rm', '-r', ':/', cwd=out_of_date.path)
git_commit(cwd=out_of_date.path)
assert autoupdate(C.CONFIG_FILE, store, freeze=False, tags_only=False) == 1
with open(C.CONFIG_FILE) as f:
assert out_of_date.original_rev in f.read()
def test_hook_disappearing_repo_raises(hook_disappearing, store):
config = make_config_from_repo(
hook_disappearing.path,
rev=hook_disappearing.original_rev,
hooks=[{'id': 'foo'}],
)
info = RevInfo.from_config(config).update(tags_only=False, freeze=False)
with pytest.raises(RepositoryCannotBeUpdatedError):
_check_hooks_still_exist_at_rev(config, info, store)
def test_autoupdate_hook_disappearing_repo(hook_disappearing, tmpdir, store):
contents = (
'repos:\n'
'- repo: {}\n'
' rev: {}\n'
' hooks:\n'
' - id: foo\n'
).format(hook_disappearing.path, hook_disappearing.original_rev)
cfg = tmpdir.join(C.CONFIG_FILE)
cfg.write(contents)
assert autoupdate(str(cfg), store, freeze=False, tags_only=False) == 1
assert cfg.read() == contents
def test_autoupdate_local_hooks(in_git_dir, store):
config = sample_local_config()
add_config_to_repo('.', config)
assert autoupdate(C.CONFIG_FILE, store, freeze=False, tags_only=False) == 0
new_config_written = read_config('.')
assert len(new_config_written['repos']) == 1
assert new_config_written['repos'][0] == config
def test_autoupdate_local_hooks_with_out_of_date_repo(
out_of_date, in_tmpdir, store,
):
stale_config = make_config_from_repo(
out_of_date.path, rev=out_of_date.original_rev, check=False,
)
local_config = sample_local_config()
config = {'repos': [local_config, stale_config]}
write_config('.', config)
assert autoupdate(C.CONFIG_FILE, store, freeze=False, tags_only=False) == 0
new_config_written = read_config('.')
assert len(new_config_written['repos']) == 2
assert new_config_written['repos'][0] == local_config
def test_autoupdate_meta_hooks(tmpdir, store):
cfg = tmpdir.join(C.CONFIG_FILE)
cfg.write(
'repos:\n'
'- repo: meta\n'
' hooks:\n'
' - id: check-useless-excludes\n',
)
assert autoupdate(str(cfg), store, freeze=False, tags_only=True) == 0
assert cfg.read() == (
'repos:\n'
'- repo: meta\n'
' hooks:\n'
' - id: check-useless-excludes\n'
)
def test_updates_old_format_to_new_format(tmpdir, capsys, store):
cfg = tmpdir.join(C.CONFIG_FILE)
cfg.write(
'- repo: local\n'
' hooks:\n'
' - id: foo\n'
' name: foo\n'
' entry: ./bin/foo.sh\n'
' language: script\n',
)
assert autoupdate(str(cfg), store, freeze=False, tags_only=True) == 0
contents = cfg.read()
assert contents == (
'repos:\n'
'- repo: local\n'
' hooks:\n'
' - id: foo\n'
' name: foo\n'
' entry: ./bin/foo.sh\n'
' language: script\n'
)
out, _ = capsys.readouterr()
assert out == 'Configuration has been migrated.\n'
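# Editorial note (not part of the original file): these tests are normally collected and run via
# pytest, e.g. `pytest tests/commands/autoupdate_test.py -k autoupdate`.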
| from __future__ import unicode_literals
import pipes
import pytest
import pre_commit.constants as C
from pre_commit import git
from pre_commit.commands.autoupdate import _check_hooks_still_exist_at_rev
from pre_commit.commands.autoupdate import autoupdate
from pre_commit.commands.autoupdate import RepositoryCannotBeUpdatedError
from pre_commit.commands.autoupdate import RevInfo
from pre_commit.util import cmd_output
from testing.auto_namedtuple import auto_namedtuple
from testing.fixtures import add_config_to_repo
from testing.fixtures import make_config_from_repo
from testing.fixtures import make_repo
from testing.fixtures import modify_manifest
from testing.fixtures import read_config
from testing.fixtures import sample_local_config
from testing.fixtures import write_config
from testing.util import git_commit
@pytest.fixture
def up_to_date(tempdir_factory):
yield make_repo(tempdir_factory, 'python_hooks_repo')
@pytest.fixture
def out_of_date(tempdir_factory):
path = make_repo(tempdir_factory, 'python_hooks_repo')
original_rev = git.head_rev(path)
git_commit(cwd=path)
head_rev = git.head_rev(path)
yield auto_namedtuple(
path=path, original_rev=original_rev, head_rev=head_rev,
)
@pytest.fixture
def tagged(out_of_date):
cmd_output('git', 'tag', 'v1.2.3', cwd=out_of_date.path)
yield out_of_date
@pytest.fixture
def hook_disappearing(tempdir_factory):
path = make_repo(tempdir_factory, 'python_hooks_repo')
original_rev = git.head_rev(path)
with modify_manifest(path) as manifest:
manifest[0]['id'] = 'bar'
yield auto_namedtuple(path=path, original_rev=original_rev)
def test_rev_info_from_config():
info = RevInfo.from_config({'repo': 'repo/path', 'rev': 'v1.2.3'})
assert info == RevInfo('repo/path', 'v1.2.3', None)
def test_rev_info_update_up_to_date_repo(up_to_date):
config = make_config_from_repo(up_to_date)
info = RevInfo.from_config(config)
new_info = info.update(tags_only=False, freeze=False)
assert info == new_info
def test_rev_info_update_out_of_date_repo(out_of_date):
config = make_config_from_repo(
out_of_date.path, rev=out_of_date.original_rev,
)
info = RevInfo.from_config(config)
new_info = info.update(tags_only=False, freeze=False)
assert new_info.rev == out_of_date.head_rev
def test_rev_info_update_non_master_default_branch(out_of_date):
# change the default branch to be not-master
cmd_output('git', '-C', out_of_date.path, 'branch', '-m', 'dev')
test_rev_info_update_out_of_date_repo(out_of_date)
def test_rev_info_update_tags_even_if_not_tags_only(tagged):
config = make_config_from_repo(tagged.path, rev=tagged.original_rev)
info = RevInfo.from_config(config)
new_info = info.update(tags_only=False, freeze=False)
assert new_info.rev == 'v1.2.3'
def test_rev_info_update_tags_only_does_not_pick_tip(tagged):
git_commit(cwd=tagged.path)
config = make_config_from_repo(tagged.path, rev=tagged.original_rev)
info = RevInfo.from_config(config)
new_info = info.update(tags_only=True, freeze=False)
assert new_info.rev == 'v1.2.3'
def test_rev_info_update_freeze_tag(tagged):
git_commit(cwd=tagged.path)
config = make_config_from_repo(tagged.path, rev=tagged.original_rev)
info = RevInfo.from_config(config)
new_info = info.update(tags_only=True, freeze=True)
assert new_info.rev == tagged.head_rev
assert new_info.frozen == 'v1.2.3'
def test_rev_info_update_does_not_freeze_if_already_sha(out_of_date):
config = make_config_from_repo(
out_of_date.path, rev=out_of_date.original_rev,
)
info = RevInfo.from_config(config)
new_info = info.update(tags_only=True, freeze=True)
assert new_info.rev == out_of_date.head_rev
assert new_info.frozen is None
def test_autoupdate_up_to_date_repo(up_to_date, tmpdir, store):
contents = (
'repos:\n'
'- repo: {}\n'
' rev: {}\n'
' hooks:\n'
' - id: foo\n'
).format(up_to_date, git.head_rev(up_to_date))
cfg = tmpdir.join(C.CONFIG_FILE)
cfg.write(contents)
assert autoupdate(str(cfg), store, freeze=False, tags_only=False) == 0
assert cfg.read() == contents
def test_autoupdate_old_revision_broken(tempdir_factory, in_tmpdir, store):
"""In $FUTURE_VERSION, hooks.yaml will no longer be supported. This
asserts that when that day comes, pre-commit will be able to autoupdate
despite not being able to read hooks.yaml in that repository.
"""
path = make_repo(tempdir_factory, 'python_hooks_repo')
config = make_config_from_repo(path, check=False)
cmd_output('git', 'mv', C.MANIFEST_FILE, 'nope.yaml', cwd=path)
git_commit(cwd=path)
# Assume this is the revision the user's old repository was at
rev = git.head_rev(path)
cmd_output('git', 'mv', 'nope.yaml', C.MANIFEST_FILE, cwd=path)
git_commit(cwd=path)
update_rev = git.head_rev(path)
config['rev'] = rev
write_config('.', config)
with open(C.CONFIG_FILE) as f:
before = f.read()
assert autoupdate(C.CONFIG_FILE, store, freeze=False, tags_only=False) == 0
with open(C.CONFIG_FILE) as f:
after = f.read()
assert before != after
assert update_rev in after
def test_autoupdate_out_of_date_repo(out_of_date, tmpdir, store):
fmt = (
'repos:\n'
'- repo: {}\n'
' rev: {}\n'
' hooks:\n'
' - id: foo\n'
)
cfg = tmpdir.join(C.CONFIG_FILE)
cfg.write(fmt.format(out_of_date.path, out_of_date.original_rev))
assert autoupdate(str(cfg), store, freeze=False, tags_only=False) == 0
assert cfg.read() == fmt.format(out_of_date.path, out_of_date.head_rev)
def test_autoupdate_only_one_to_update(up_to_date, out_of_date, tmpdir, store):
fmt = (
'repos:\n'
'- repo: {}\n'
' rev: {}\n'
' hooks:\n'
' - id: foo\n'
'- repo: {}\n'
' rev: {}\n'
' hooks:\n'
' - id: foo\n'
)
cfg = tmpdir.join(C.CONFIG_FILE)
before = fmt.format(
up_to_date, git.head_rev(up_to_date),
out_of_date.path, out_of_date.original_rev,
)
cfg.write(before)
assert autoupdate(str(cfg), store, freeze=False, tags_only=False) == 0
assert cfg.read() == fmt.format(
up_to_date, git.head_rev(up_to_date),
out_of_date.path, out_of_date.head_rev,
)
def test_autoupdate_out_of_date_repo_with_correct_repo_name(
out_of_date, in_tmpdir, store,
):
stale_config = make_config_from_repo(
out_of_date.path, rev=out_of_date.original_rev, check=False,
)
local_config = sample_local_config()
config = {'repos': [stale_config, local_config]}
write_config('.', config)
with open(C.CONFIG_FILE) as f:
before = f.read()
repo_name = 'file://{}'.format(out_of_date.path)
ret = autoupdate(
C.CONFIG_FILE, store, freeze=False, tags_only=False,
repos=(repo_name,),
)
with open(C.CONFIG_FILE) as f:
after = f.read()
assert ret == 0
assert before != after
assert out_of_date.head_rev in after
assert 'local' in after
def test_autoupdate_out_of_date_repo_with_wrong_repo_name(
out_of_date, in_tmpdir, store,
):
config = make_config_from_repo(
out_of_date.path, rev=out_of_date.original_rev, check=False,
)
write_config('.', config)
with open(C.CONFIG_FILE) as f:
before = f.read()
# It will not update it, because the name doesn't match
ret = autoupdate(
C.CONFIG_FILE, store, freeze=False, tags_only=False,
repos=('dne',),
)
with open(C.CONFIG_FILE) as f:
after = f.read()
assert ret == 0
assert before == after
def test_does_not_reformat(tmpdir, out_of_date, store):
fmt = (
'repos:\n'
'- repo: {}\n'
' rev: {} # definitely the version I want!\n'
' hooks:\n'
' - id: foo\n'
' # These args are because reasons!\n'
' args: [foo, bar, baz]\n'
)
cfg = tmpdir.join(C.CONFIG_FILE)
cfg.write(fmt.format(out_of_date.path, out_of_date.original_rev))
assert autoupdate(str(cfg), store, freeze=False, tags_only=False) == 0
expected = fmt.format(out_of_date.path, out_of_date.head_rev)
assert cfg.read() == expected
def test_loses_formatting_when_not_detectable(out_of_date, store, tmpdir):
"""A best-effort attempt is made at updating rev without rewriting
formatting. When the original formatting cannot be detected, this
is abandoned.
"""
config = (
'repos: [\n'
' {{\n'
' repo: {}, rev: {},\n'
' hooks: [\n'
' # A comment!\n'
' {{id: foo}},\n'
' ],\n'
' }}\n'
']\n'.format(
pipes.quote(out_of_date.path), out_of_date.original_rev,
)
)
cfg = tmpdir.join(C.CONFIG_FILE)
cfg.write(config)
assert autoupdate(str(cfg), store, freeze=False, tags_only=False) == 0
expected = (
'repos:\n'
'- repo: {}\n'
' rev: {}\n'
' hooks:\n'
' - id: foo\n'
).format(out_of_date.path, out_of_date.head_rev)
assert cfg.read() == expected
def test_autoupdate_tagged_repo(tagged, in_tmpdir, store):
config = make_config_from_repo(tagged.path, rev=tagged.original_rev)
write_config('.', config)
assert autoupdate(C.CONFIG_FILE, store, freeze=False, tags_only=False) == 0
with open(C.CONFIG_FILE) as f:
assert 'v1.2.3' in f.read()
def test_autoupdate_freeze(tagged, in_tmpdir, store):
config = make_config_from_repo(tagged.path, rev=tagged.original_rev)
write_config('.', config)
assert autoupdate(C.CONFIG_FILE, store, freeze=True, tags_only=False) == 0
with open(C.CONFIG_FILE) as f:
expected = 'rev: {} # frozen: v1.2.3'.format(tagged.head_rev)
assert expected in f.read()
# if we un-freeze it should remove the frozen comment
assert autoupdate(C.CONFIG_FILE, store, freeze=False, tags_only=False) == 0
with open(C.CONFIG_FILE) as f:
assert 'rev: v1.2.3\n' in f.read()
def test_autoupdate_tags_only(tagged, in_tmpdir, store):
# add some commits after the tag
git_commit(cwd=tagged.path)
config = make_config_from_repo(tagged.path, rev=tagged.original_rev)
write_config('.', config)
assert autoupdate(C.CONFIG_FILE, store, freeze=False, tags_only=True) == 0
with open(C.CONFIG_FILE) as f:
assert 'v1.2.3' in f.read()
def test_autoupdate_latest_no_config(out_of_date, in_tmpdir, store):
config = make_config_from_repo(
out_of_date.path, rev=out_of_date.original_rev,
)
write_config('.', config)
cmd_output('git', 'rm', '-r', ':/', cwd=out_of_date.path)
git_commit(cwd=out_of_date.path)
assert autoupdate(C.CONFIG_FILE, store, freeze=False, tags_only=False) == 1
with open(C.CONFIG_FILE) as f:
assert out_of_date.original_rev in f.read()
def test_hook_disappearing_repo_raises(hook_disappearing, store):
config = make_config_from_repo(
hook_disappearing.path,
rev=hook_disappearing.original_rev,
hooks=[{'id': 'foo'}],
)
info = RevInfo.from_config(config).update(tags_only=False, freeze=False)
with pytest.raises(RepositoryCannotBeUpdatedError):
_check_hooks_still_exist_at_rev(config, info, store)
def test_autoupdate_hook_disappearing_repo(hook_disappearing, tmpdir, store):
contents = (
'repos:\n'
'- repo: {}\n'
' rev: {}\n'
' hooks:\n'
' - id: foo\n'
).format(hook_disappearing.path, hook_disappearing.original_rev)
cfg = tmpdir.join(C.CONFIG_FILE)
cfg.write(contents)
assert autoupdate(str(cfg), store, freeze=False, tags_only=False) == 1
assert cfg.read() == contents
def test_autoupdate_local_hooks(in_git_dir, store):
config = sample_local_config()
add_config_to_repo('.', config)
assert autoupdate(C.CONFIG_FILE, store, freeze=False, tags_only=False) == 0
new_config_written = read_config('.')
assert len(new_config_written['repos']) == 1
assert new_config_written['repos'][0] == config
def test_autoupdate_local_hooks_with_out_of_date_repo(
out_of_date, in_tmpdir, store,
):
stale_config = make_config_from_repo(
out_of_date.path, rev=out_of_date.original_rev, check=False,
)
local_config = sample_local_config()
config = {'repos': [local_config, stale_config]}
write_config('.', config)
assert autoupdate(C.CONFIG_FILE, store, freeze=False, tags_only=False) == 0
    new_config_written = read_config('.')
    assert len(new_config_written['repos']) == 2
    assert new_config_written['repos'][0] == local_config
def test_autoupdate_meta_hooks(tmpdir, store):
cfg = tmpdir.join(C.CONFIG_FILE)
cfg.write(
'repos:\n'
'- repo: meta\n'
' hooks:\n'
' - id: check-useless-excludes\n',
)
assert autoupdate(str(cfg), store, freeze=False, tags_only=True) == 0
assert cfg.read() == (
'repos:\n'
'- repo: meta\n'
' hooks:\n'
' - id: check-useless-excludes\n'
)
def test_updates_old_format_to_new_format(tmpdir, capsys, store):
cfg = tmpdir.join(C.CONFIG_FILE)
cfg.write(
'- repo: local\n'
' hooks:\n'
' - id: foo\n'
' name: foo\n'
' entry: ./bin/foo.sh\n'
' language: script\n',
)
assert autoupdate(str(cfg), store, freeze=False, tags_only=True) == 0
contents = cfg.read()
assert contents == (
'repos:\n'
'- repo: local\n'
' hooks:\n'
' - id: foo\n'
' name: foo\n'
' entry: ./bin/foo.sh\n'
' language: script\n'
)
out, _ = capsys.readouterr()
assert out == 'Configuration has been migrated.\n'
| en | 0.880524 | # change the default branch to be not-master In $FUTURE_VERSION, hooks.yaml will no longer be supported. This asserts that when that day comes, pre-commit will be able to autoupdate despite not being able to read hooks.yaml in that repository. # Assume this is the revision the user's old repository was at # It will not update it, because the name doesn't match # definitely the version I want!\n' # These args are because reasons!\n' A best-effort attempt is made at updating rev without rewriting formatting. When the original formatting cannot be detected, this is abandoned. # A comment!\n' # frozen: v1.2.3'.format(tagged.head_rev) # if we un-freeze it should remove the frozen comment # add some commits after the tag | 1.877319 | 2 |
03_Day_Operators/21.py | diegofregolente/30-Days-Of-Python | 0 | 6630307 | hours = float(input('Hours: '))
rate = float(input('Rate per hour: '))
pay = hours * rate
print('Weekly earning is ', pay) # 21
| hours = float(input('Hours: '))
rate = float(input('Rate per hour: '))
pay = hours * rate
print('Weekly earning is ', pay) # 21
| none | 1 | 3.956808 | 4 |
|
weakened_algorithm/run_visualizations.py | jessicawang225/caltech-ee148-spring2020-hw02 | 0 | 6630308 | <reponame>jessicawang225/caltech-ee148-spring2020-hw02<filename>weakened_algorithm/run_visualizations.py
import json
import numpy as np
from PIL import Image, ImageDraw
import os
def draw(I, boxes):
for box in boxes:
draw = ImageDraw.Draw(I)
# Draw bounding box in neon yellow
top, left, bottom, right = box[:4]
draw.rectangle([left, top, right, bottom], outline=(204, 255, 0))
del draw
return I
# set the path to the downloaded data:
data_path = './data'
# set a path for saving predictions:
preds_path = './predictions'
# set a path for saving visualizations:
vis_path = './visualizations'
# load splits:
split_path = './splits'
file_names_train = np.load(os.path.join(split_path, 'file_names_train.npy'))
file_names_test = np.load(os.path.join(split_path, 'file_names_test.npy'))
# get bounding boxes
with open(os.path.join(preds_path, 'preds_train.json')) as f:
bounding_boxes_train = json.load(f)
with open(os.path.join(preds_path, 'preds_test.json')) as f:
bounding_boxes_test = json.load(f)
for i in range(len(file_names_train)):
# read image using PIL:
I = Image.open(os.path.join(data_path, file_names_train[i]))
I = draw(I, bounding_boxes_train[file_names_train[i]])
I.save(os.path.join(vis_path, file_names_train[i]))
for i in range(len(file_names_test)):
# read image using PIL:
I = Image.open(os.path.join(data_path, file_names_test[i]))
I = draw(I, bounding_boxes_test[file_names_test[i]])
I.save(os.path.join(vis_path, file_names_test[i])) | import json
import numpy as np
from PIL import Image, ImageDraw
import os
def draw(I, boxes):
for box in boxes:
draw = ImageDraw.Draw(I)
# Draw bounding box in neon yellow
top, left, bottom, right = box[:4]
draw.rectangle([left, top, right, bottom], outline=(204, 255, 0))
del draw
return I
# set the path to the downloaded data:
data_path = './data'
# set a path for saving predictions:
preds_path = './predictions'
# set a path for saving visualizations:
vis_path = './visualizations'
# load splits:
split_path = './splits'
file_names_train = np.load(os.path.join(split_path, 'file_names_train.npy'))
file_names_test = np.load(os.path.join(split_path, 'file_names_test.npy'))
# get bounding boxes
with open(os.path.join(preds_path, 'preds_train.json')) as f:
bounding_boxes_train = json.load(f)
with open(os.path.join(preds_path, 'preds_test.json')) as f:
bounding_boxes_test = json.load(f)
for i in range(len(file_names_train)):
# read image using PIL:
I = Image.open(os.path.join(data_path, file_names_train[i]))
I = draw(I, bounding_boxes_train[file_names_train[i]])
I.save(os.path.join(vis_path, file_names_train[i]))
for i in range(len(file_names_test)):
# read image using PIL:
I = Image.open(os.path.join(data_path, file_names_test[i]))
I = draw(I, bounding_boxes_test[file_names_test[i]])
I.save(os.path.join(vis_path, file_names_test[i])) | en | 0.808476 | # Draw bounding box in neon yellow # set the path to the downloaded data: # set a path for saving predictions: # set a path for saving visualizations: # load splits: # get bounding boxes # read image using PIL: # read image using PIL: | 2.737702 | 3 |
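
A minimal, self-contained sketch of the box layout that draw() above relies on: each prediction is unpacked as [top, left, bottom, right, ...], so an entry in preds_train.json presumably maps an image filename to a list of such boxes. The filename, image size and trailing confidence value below are invented for illustration; only the unpacking order comes from the code above.

from PIL import Image, ImageDraw

preds = {'example.jpg': [[10, 20, 110, 220, 0.93]]}   # hypothetical prediction entry
img = Image.new('RGB', (300, 200))
for box in preds['example.jpg']:
    top, left, bottom, right = box[:4]                # same unpacking as draw() above
    ImageDraw.Draw(img).rectangle([left, top, right, bottom], outline=(204, 255, 0))
img.save('example_with_boxes.png')
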
tf-gnn-samples/utils/add_child_ids.py | tech-srl/bottleneck | 56 | 6630309 | <reponame>tech-srl/bottleneck<filename>tf-gnn-samples/utils/add_child_ids.py
import pickle
from argparse import ArgumentParser
raw_keys = ['Child', 'NextToken', 'ComputedFrom', 'LastUse', 'LastWrite', 'LastLexicalUse', 'FormalArgName', 'GuardedBy', 'GuardedByNegation', 'UsesSubtoken']
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("--edges", dest="edges", required=True)
args = parser.parse_args()
with open(args.edges, 'rb') as file:
raw_edges = pickle.load(file)
parent_to_children = {}
child_to_parent = {}
for s, t in raw_edges['Child']:
if not s in parent_to_children:
parent_to_children[s] = []
parent_to_children[s].append(t)
child_to_parent[t] = s
cur = 0
next_map = {}
for s, t in raw_edges['NextToken']:
next_map[s] = t
prev_map = {t:s for s,t in next_map.items()}
def get_all_next(n):
result = []
cur = n
while cur in next_map:
next_item = next_map[cur]
result.append(next_item)
cur = next_item
return result
def get_all_prev(n):
result = []
cur = n
while cur in prev_map:
prev_item = prev_map[cur]
result.append(prev_item)
cur = prev_item
return result
nodes = child_to_parent.keys()
left_nodes = list(nodes)
parent_to_descendants = {}
def get_parent_to_descendants(p):
desc = set()
for c in parent_to_children[p]:
if c in parent_to_children: # if c is a parent itself
desc.update(get_parent_to_descendants(c))
else:
desc.add(c)
return desc
for p in parent_to_children.keys():
desc = get_parent_to_descendants(p)
parent_to_descendants[p] = desc
roots = set()
for n in nodes:
cur = n
while cur in child_to_parent:
cur = child_to_parent[cur]
roots.add(cur)
print(raw_edges)
| import pickle
from argparse import ArgumentParser
raw_keys = ['Child', 'NextToken', 'ComputedFrom', 'LastUse', 'LastWrite', 'LastLexicalUse', 'FormalArgName', 'GuardedBy', 'GuardedByNegation', 'UsesSubtoken']
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("--edges", dest="edges", required=True)
args = parser.parse_args()
with open(args.edges, 'rb') as file:
raw_edges = pickle.load(file)
parent_to_children = {}
child_to_parent = {}
for s, t in raw_edges['Child']:
if not s in parent_to_children:
parent_to_children[s] = []
parent_to_children[s].append(t)
child_to_parent[t] = s
cur = 0
next_map = {}
for s, t in raw_edges['NextToken']:
next_map[s] = t
prev_map = {t:s for s,t in next_map.items()}
def get_all_next(n):
result = []
cur = n
while cur in next_map:
next_item = next_map[cur]
result.append(next_item)
cur = next_item
return result
def get_all_prev(n):
result = []
cur = n
while cur in prev_map:
prev_item = prev_map[cur]
result.append(prev_item)
cur = prev_item
return result
nodes = child_to_parent.keys()
left_nodes = list(nodes)
parent_to_descendants = {}
def get_parent_to_descendants(p):
desc = set()
for c in parent_to_children[p]:
if c in parent_to_children: # if c is a parent itself
desc.update(get_parent_to_descendants(c))
else:
desc.add(c)
return desc
for p in parent_to_children.keys():
desc = get_parent_to_descendants(p)
parent_to_descendants[p] = desc
roots = set()
for n in nodes:
cur = n
while cur in child_to_parent:
cur = child_to_parent[cur]
roots.add(cur)
print(raw_edges) | en | 0.785429 | # if c is a parent itself | 2.711272 | 3 |
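
The script above builds its maps from a pickled edge dictionary (--edges) that is not part of this record. The sketch below reproduces the same descendant/root logic on a made-up 'Child' edge list (all node ids and edges are invented); it may help make explicit that get_parent_to_descendants() collects only leaf descendants, never intermediate parents.

child_edges = [(0, 1), (0, 2), (1, 3), (1, 4)]        # toy tree: 0 -> {1, 2}, 1 -> {3, 4}
parent_to_children, child_to_parent = {}, {}
for s, t in child_edges:
    parent_to_children.setdefault(s, []).append(t)
    child_to_parent[t] = s

def leaf_descendants(p):                              # mirrors get_parent_to_descendants()
    desc = set()
    for c in parent_to_children[p]:
        desc |= leaf_descendants(c) if c in parent_to_children else {c}
    return desc

roots = set()
for n in child_to_parent:                             # walk up from every child, as the script does
    cur = n
    while cur in child_to_parent:
        cur = child_to_parent[cur]
    roots.add(cur)

print(leaf_descendants(0))                            # {2, 3, 4} -- node 1 is skipped because it has children
print(roots)                                          # {0}
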
python/orthomcl/geneid2cluster.py | lotharwissler/bioinformatics | 10 | 6630310 | #!/usr/bin/python
import os, sys, string
from low import *
from orthomcl import OrthoMCLCluster
# =============================================================================
def usage():
print >> sys.stderr, "prints a mapping between each gene id and its cluster from orthomcl output\n"
print >> sys.stderr, "usage: " + sys.argv[0] + " orthomcl.out"
sys.exit(1)
def plausi():
if len(sys.argv) != 2: usage()
inFile = sys.argv[1]
return inFile
def main():
inFile = plausi()
fo = open(inFile)
for line in fo:
o = OrthoMCLCluster(line.rstrip())
name = o.get_name()
geneHash = o.get_gene_hash()
for geneid, species in geneHash.iteritems(): print geneid + "\t" + name
main()
| #!/usr/bin/python
import os, sys, string
from low import *
from orthomcl import OrthoMCLCluster
# =============================================================================
def usage():
print >> sys.stderr, "prints a mapping between each gene id and its cluster from orthomcl output\n"
print >> sys.stderr, "usage: " + sys.argv[0] + " orthomcl.out"
sys.exit(1)
def plausi():
if len(sys.argv) != 2: usage()
inFile = sys.argv[1]
return inFile
def main():
inFile = plausi()
fo = open(inFile)
for line in fo:
o = OrthoMCLCluster(line.rstrip())
name = o.get_name()
geneHash = o.get_gene_hash()
for geneid, species in geneHash.iteritems(): print geneid + "\t" + name
main()
| fr | 0.317879 | #!/usr/bin/python # ============================================================================= | 2.785269 | 3 |
src/spidery/spider/news/__init__.py | A2Media-id/spidery | 0 | 6630311 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
import re
import traceback
from abc import abstractmethod, ABC
from typing import List
from bs4 import BeautifulSoup
from spidery.spider.engine import BaseCrawl
from spidery.spider.resource import DataNews, DataArticle
class NewsEngine(BaseCrawl, ABC):
_me = __file__
def __init__(self, **kwargs):
super(NewsEngine, self).__init__(**kwargs)
@staticmethod
def _get_all_images(soup: BeautifulSoup) -> List:
results = []
try:
attrs = ['src', 'data-src', 'data-srcset']
datas = soup.find_all('img') or []
added = set()
for i, im in enumerate(datas):
default_text = im.get('alt') or im.text
parent = im.parent
if not default_text and parent:
default_text = parent.string
text = str(default_text).replace('\n', '').strip()
for atr in attrs:
if not im.get(atr):
continue
ims = str(im.get(atr)).split()
for img in ims:
if re.search(r"https?://([A-Za-z_0-9.-]+)(\/[^\s]+)?", img, re.IGNORECASE) and img not in added:
image = re.sub(r"(,(w_\d+|ar_\d+:\d+)|\/w\d+$)", "", str(img).strip(), 0,
re.IGNORECASE | re.VERBOSE)
added.add(img)
results.append((image, text))
except Exception as error:
logging.error(
''.join(traceback.format_exception(etype=type(error), value=error, tb=error.__traceback__)))
finally:
return results
@abstractmethod
def get_detail(self, data: DataNews) -> DataArticle:
pass
@abstractmethod
def get_latest(self) -> List[DataNews]:
pass
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
import re
import traceback
from abc import abstractmethod, ABC
from typing import List
from bs4 import BeautifulSoup
from spidery.spider.engine import BaseCrawl
from spidery.spider.resource import DataNews, DataArticle
class NewsEngine(BaseCrawl, ABC):
_me = __file__
def __init__(self, **kwargs):
super(NewsEngine, self).__init__(**kwargs)
@staticmethod
def _get_all_images(soup: BeautifulSoup) -> List:
results = []
try:
attrs = ['src', 'data-src', 'data-srcset']
datas = soup.find_all('img') or []
added = set()
for i, im in enumerate(datas):
default_text = im.get('alt') or im.text
parent = im.parent
if not default_text and parent:
default_text = parent.string
text = str(default_text).replace('\n', '').strip()
for atr in attrs:
if not im.get(atr):
continue
ims = str(im.get(atr)).split()
for img in ims:
if re.search(r"https?://([A-Za-z_0-9.-]+)(\/[^\s]+)?", img, re.IGNORECASE) and img not in added:
image = re.sub(r"(,(w_\d+|ar_\d+:\d+)|\/w\d+$)", "", str(img).strip(), 0,
re.IGNORECASE | re.VERBOSE)
added.add(img)
results.append((image, text))
except Exception as error:
logging.error(
''.join(traceback.format_exception(etype=type(error), value=error, tb=error.__traceback__)))
finally:
return results
@abstractmethod
def get_detail(self, data: DataNews) -> DataArticle:
pass
@abstractmethod
def get_latest(self) -> List[DataNews]:
pass
| en | 0.308914 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- | 2.445538 | 2 |
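
A standalone illustration of what _get_all_images() above extracts, using the same attribute list and the same two regular expressions on an invented HTML snippet (the URLs and markup are made up; the deduplication set and the parent-text fallback are omitted for brevity):

import re
from bs4 import BeautifulSoup

html = '<p><img alt="caption" data-srcset="https://cdn.example.com/photo/w300 https://cdn.example.com/photo/w800"></p>'
soup = BeautifulSoup(html, 'html.parser')
pairs = []
for im in soup.find_all('img'):
    text = str(im.get('alt') or im.text or '').replace('\n', '').strip()
    for atr in ('src', 'data-src', 'data-srcset'):
        for url in str(im.get(atr) or '').split():
            if re.search(r"https?://([A-Za-z_0-9.-]+)(\/[^\s]+)?", url, re.IGNORECASE):
                # strip trailing size hints such as "/w300", exactly as the class does
                pairs.append((re.sub(r"(,(w_\d+|ar_\d+:\d+)|\/w\d+$)", "", url), text))
print(pairs)   # both URLs collapse to https://cdn.example.com/photo, each paired with "caption"
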
tests/test_edit_contact.py | AlreyQuin/python_training | 0 | 6630312 | # -*- coding: utf-8 -*-
from model.contact import Contact
import random
def test_edit_name(app, db, json_contacts, check_ui):
contact = json_contacts
if len(db.get_contact_list()) == 0:
app.contact.create(contact)
old_contact = db.get_contact_list()
c = random.choice(old_contact)
app.contact.edit_contact_by_id(contact, c.id)
new_contact = db.get_contact_list()
assert len(old_contact) == len(new_contact)
contact.id = c.id
old_contact.remove(c)
old_contact.append(contact)
assert sorted(old_contact, key=Contact.id_or_max) == sorted(new_contact, key=Contact.id_or_max)
if check_ui:
assert sorted(new_contact, key=Contact.id_or_max) == sorted(app.contact.get_group_list(), key=Contact.id_or_max)
| # -*- coding: utf-8 -*-
from model.contact import Contact
import random
def test_edit_name(app, db, json_contacts, check_ui):
contact = json_contacts
if len(db.get_contact_list()) == 0:
app.contact.create(contact)
old_contact = db.get_contact_list()
c = random.choice(old_contact)
app.contact.edit_contact_by_id(contact, c.id)
new_contact = db.get_contact_list()
assert len(old_contact) == len(new_contact)
contact.id = c.id
old_contact.remove(c)
old_contact.append(contact)
assert sorted(old_contact, key=Contact.id_or_max) == sorted(new_contact, key=Contact.id_or_max)
if check_ui:
assert sorted(new_contact, key=Contact.id_or_max) == sorted(app.contact.get_group_list(), key=Contact.id_or_max)
| en | 0.769321 | # -*- coding: utf-8 -*- | 2.628039 | 3 |
mmtbx/conformation_dependent_library/multi_base_class.py | hbrunie/cctbx_project | 2 | 6630313 | <reponame>hbrunie/cctbx_project
from __future__ import absolute_import, division, print_function
from mmtbx.conformation_dependent_library.LinkedResidues import LinkedResidues
from mmtbx.conformation_dependent_library.cdl_utils import \
get_c_ca_n
from six.moves import range
def calc_pseudorotation(t0,t1,t2,t3,t4):
import math
if t0 > 180.0: t0 = t0 - 360.0
if t1 > 180.0: t1 = t1 - 360.0
if t2 > 180.0: t2 = t2 - 360.0
#JC hack
if t2 == 0.0: t2 = 0.1
#/JC
if t3 > 180.0: t3 = t3 - 360.0
if t4 > 180.0: t4 = t4 - 360.0
taus = [t0, t1, t2, t3, t4]
tanP = ((taus[4] + taus[1]) - (taus[3] + taus[0]))/(2 * taus[2] * (math.sin(36.0*math.pi/180.0) + math.sin(72.0*math.pi/180.0)))
P = math.atan(tanP)*180.0/math.pi
if taus[2] < 0: P = P + 180.0
elif tanP < 0: P = P + 360.0
#P = "%.1f" % P
return P
def _get_atoms(atom_group, atom_names):
atoms, outl = get_c_ca_n(atom_group, atom_names)
if atoms is None:
for i in range(len(atom_names)):
atom_names[i] = atom_names[i].replace("'", '*')
atoms, outl = get_c_ca_n(atom_group, atom_names)
return atoms
def get_distance(ag1, ag2, an1, an2):
atoms = _get_atoms(ag1, an1) + _get_atoms(ag2, an2)
# for atom in atoms: print atom.quote()
return atoms[0].distance(atoms[1])
def get_torsion(ag1, ag2, an1, an2, limits='-180-180'):
from scitbx.math import dihedral_angle
atoms = _get_atoms(ag1, an1) + _get_atoms(ag2, an2)
omega = dihedral_angle(sites=[atom.xyz for atom in atoms], deg=True)
if limits=='-180-180':
if omega>180:
print(omega, limits)
assert 0
elif limits=='0-360':
if omega<0:
omega+=360
# for atom in atoms: print atom.quote()
return omega
class TwoNucleicResidues(LinkedResidues):
def show(self):
outl = "%sNucleicResidues" % self.length
for residue in self:
if residue is not None: outl += " %s(%s)" % (residue.resname, residue.resseq)
else: outl += ' "%s"' % residue
outl += " %s" % self.are_linked(return_value=True)
if self.start is not None: outl += " start=T"
if self.end is not None: outl += " end=T"
return outl
@staticmethod
def get_o3prime_p(residue, return_subset=False):
rc = get_c_ca_n(residue, atom_name_list=[' O3', ' P '], return_subset=return_subset)
if rc[0] is None:
rc = get_c_ca_n(residue, atom_name_list=[' O3*', ' P '], return_subset=return_subset)
return rc
def are_linked(self,
return_value=False,
use_distance_always=False,
bond_cut_off=3.5, # Same as link_distance_cutoff of pdb_interpretation
verbose=True,
):
bond_cut_off *= bond_cut_off
for i, residue in enumerate(self):
if i==0: continue
op1, outl1 = self.get_o3prime_p(residue, return_subset=False)
# if self[i-1] is None: # place holder for omega CDL
# return False
op2, outl2 = self.get_o3prime_p(self[i-1], return_subset=False)
# if ccn1 is None:
# for line in outl1:
# if line not in self.errors:
# self.errors.append(line)
# break
# if ccn2 is None:
# for line in outl2:
# if line not in self.errors:
# self.errors.append(line)
# break
p = op1[1]
o3prime = op2[0]
if p is None or o3prime is None: return False
if self.bond_params_table is None:
d2 = distance2(p,o3prime)
if d2<bond_cut_off: bond=True
else: bond=False
else:
bond=self.bond_params_table.lookup(p.i_seq, o3prime.i_seq)
if not bond and use_distance_always:
# needed for situations where atoms are added and the i_seq is updated
if distance2(p,o3prime)<bond_cut_off: bond=True
if not bond:
break
else:
return True
if return_value: return d2
return False
def get_base_types(self):
rc = []
for base in self:
for atom in base.atoms():
if atom.name==' N9 ':
rc.append('R')
break
else:
rc.append('Y')
return rc
def get_id(self):
outl = []
outl.append(self[0].parent().parent().id)
outl.append(self[0].resname.strip())
outl.append(self[0].resseq.strip())
assert not self[0].parent().altloc
outl.append(self[1].resname.strip())
outl.append(self[1].resseq.strip())
assert not self[1].parent().altloc
return '_'.join(outl)
def get_ntc_angles(self):
angles = {
'd' :[[" C5'", " C4'", " C3'", " O3'"],[]], # delta0
'e' :[[" C4'", " C3'", " O3'" ], [" P "]], # epsilon
'z' :[[" C3'", " O3'"], [" P ", " O5'"]], # zeta
'a1':[[" O3'"], [" P ", " O5'", " C5'"]], # alpha
'b1':[[], [" P ", " O5'", " C5'", " C4'"]], # beta
'g1':[[], [" O5'", " C5'", " C4'", " C3'"]], # gamma
'd1':[[], [" C5'", " C4'", " C3'", " O3'"]], # delta1
}
types = self.get_base_types()
if types[0]=='R':
angles['ch'] = [[" O4'", " C1'", " N9 ", " C4 "],[]] # chi0
N0 = ' N9 '
else:
angles['ch'] = [[" O4'", " C1'", " N1 ", " C2 "],[]] # chi0
N0 = ' N1 '
if types[1]=='R':
angles['ch1'] = [[], [" O4'", " C1'", " N9 ", " C4 "]] # chi1
N1 = ' N9 '
else:
angles['ch1'] = [[], [" O4'", " C1'", " N1 ", " C2 "]] # chi1
N1 = ' N1 '
angles['NCCN'] = [[N0, " C1'"], [" C1'", N1]]
rc = {}
for angle, atom_names in angles.items():
rc[angle] = get_torsion(self[0], self[1], atom_names[0], atom_names[1], limits='0-360')
rc['NN'] = get_distance(self[0], self[1], [N0], [N1])
rc['CC'] = get_distance(self[0], self[1], [" C1'"], [" C1'"])
# tau
args1 = []
args2 = []
for atom_names in [
[" C4'", " O4'", " C1'", " C2'"],
[" O4'", " C1'", " C2'", " C3'"],
[" C1'", " C2'", " C3'", " C4'"],
[" C2'", " C3'", " C4'", " O4'"],
[" C3'", " C4'", " O4'", " C1'"],
]:
args1.append(get_torsion(self[0], self[1], atom_names, []))
args2.append(get_torsion(self[0], self[1], [], atom_names))
rc['P'] = calc_pseudorotation(*tuple(args1))
rc['P1'] = calc_pseudorotation(*tuple(args2))
for label, item in rc.items():
# print ' %s : %0.2f' % (label, item)
rc[label] = '%0.1f' % item
rc['step_id'] = self.get_id()
return rc
def get_ntc_coordinates(self):
query = {}
for atom_key in ['C5pa',
'C4pa',
'O4pa',
'C3pa',
'O3pa',
'C2pa',
'C1pa',
'N19a',
'C24a',
'Pb',
'O5pb',
'C5pb',
'C4pb',
'O4pb',
'C3pb',
'O3pb',
'C2pb',
'C1pb',
'N19b',
'C24b',
]:
if atom_key[-1]=='a': atom_group = self[0]
elif atom_key[-1]=='b': atom_group = self[1]
else: assert 0
if atom_key.find('P')>-1: names = [' P ']
elif atom_key.find('N19')>-1: names = [' N1 ', ' N9 ']
elif atom_key.find('C24')>-1: names = [' C2 ', ' C4 ']
else: names = ['%4s' % atom_key[:-1].replace('p',"'")]
for name in names:
atom = atom_group.find_atom_by(name=name)
if atom is None:
atom = atom_group.find_atom_by(name=name.replace("'", '*'))
if atom: break
else:
assert atom
query[atom_key]= ['%s'%atom.xyz[0], '%s'%atom.xyz[1], '%s'%atom.xyz[2]]
query['step_id'] = self.get_id()
return query
| from __future__ import absolute_import, division, print_function
from mmtbx.conformation_dependent_library.LinkedResidues import LinkedResidues
from mmtbx.conformation_dependent_library.cdl_utils import \
get_c_ca_n
from six.moves import range
def calc_pseudorotation(t0,t1,t2,t3,t4):
import math
if t0 > 180.0: t0 = t0 - 360.0
if t1 > 180.0: t1 = t1 - 360.0
if t2 > 180.0: t2 = t2 - 360.0
#JC hack
if t2 == 0.0: t2 = 0.1
#/JC
if t3 > 180.0: t3 = t3 - 360.0
if t4 > 180.0: t4 = t4 - 360.0
taus = [t0, t1, t2, t3, t4]
tanP = ((taus[4] + taus[1]) - (taus[3] + taus[0]))/(2 * taus[2] * (math.sin(36.0*math.pi/180.0) + math.sin(72.0*math.pi/180.0)))
P = math.atan(tanP)*180.0/math.pi
if taus[2] < 0: P = P + 180.0
elif tanP < 0: P = P + 360.0
#P = "%.1f" % P
return P
def _get_atoms(atom_group, atom_names):
atoms, outl = get_c_ca_n(atom_group, atom_names)
if atoms is None:
for i in range(len(atom_names)):
atom_names[i] = atom_names[i].replace("'", '*')
atoms, outl = get_c_ca_n(atom_group, atom_names)
return atoms
def get_distance(ag1, ag2, an1, an2):
atoms = _get_atoms(ag1, an1) + _get_atoms(ag2, an2)
# for atom in atoms: print atom.quote()
return atoms[0].distance(atoms[1])
def get_torsion(ag1, ag2, an1, an2, limits='-180-180'):
from scitbx.math import dihedral_angle
atoms = _get_atoms(ag1, an1) + _get_atoms(ag2, an2)
omega = dihedral_angle(sites=[atom.xyz for atom in atoms], deg=True)
if limits=='-180-180':
if omega>180:
print(omega, limits)
assert 0
elif limits=='0-360':
if omega<0:
omega+=360
# for atom in atoms: print atom.quote()
return omega
class TwoNucleicResidues(LinkedResidues):
def show(self):
outl = "%sNucleicResidues" % self.length
for residue in self:
if residue is not None: outl += " %s(%s)" % (residue.resname, residue.resseq)
else: outl += ' "%s"' % residue
outl += " %s" % self.are_linked(return_value=True)
if self.start is not None: outl += " start=T"
if self.end is not None: outl += " end=T"
return outl
@staticmethod
def get_o3prime_p(residue, return_subset=False):
rc = get_c_ca_n(residue, atom_name_list=[' O3', ' P '], return_subset=return_subset)
if rc[0] is None:
rc = get_c_ca_n(residue, atom_name_list=[' O3*', ' P '], return_subset=return_subset)
return rc
def are_linked(self,
return_value=False,
use_distance_always=False,
bond_cut_off=3.5, # Same as link_distance_cutoff of pdb_interpretation
verbose=True,
):
bond_cut_off *= bond_cut_off
for i, residue in enumerate(self):
if i==0: continue
op1, outl1 = self.get_o3prime_p(residue, return_subset=False)
# if self[i-1] is None: # place holder for omega CDL
# return False
op2, outl2 = self.get_o3prime_p(self[i-1], return_subset=False)
# if ccn1 is None:
# for line in outl1:
# if line not in self.errors:
# self.errors.append(line)
# break
# if ccn2 is None:
# for line in outl2:
# if line not in self.errors:
# self.errors.append(line)
# break
p = op1[1]
o3prime = op2[0]
if p is None or o3prime is None: return False
if self.bond_params_table is None:
d2 = distance2(p,o3prime)
if d2<bond_cut_off: bond=True
else: bond=False
else:
bond=self.bond_params_table.lookup(p.i_seq, o3prime.i_seq)
if not bond and use_distance_always:
# needed for situations where atoms are added and the i_seq is updated
if distance2(p,o3prime)<bond_cut_off: bond=True
if not bond:
break
else:
return True
if return_value: return d2
return False
def get_base_types(self):
rc = []
for base in self:
for atom in base.atoms():
if atom.name==' N9 ':
rc.append('R')
break
else:
rc.append('Y')
return rc
def get_id(self):
outl = []
outl.append(self[0].parent().parent().id)
outl.append(self[0].resname.strip())
outl.append(self[0].resseq.strip())
assert not self[0].parent().altloc
outl.append(self[1].resname.strip())
outl.append(self[1].resseq.strip())
assert not self[1].parent().altloc
return '_'.join(outl)
def get_ntc_angles(self):
angles = {
'd' :[[" C5'", " C4'", " C3'", " O3'"],[]], # delta0
'e' :[[" C4'", " C3'", " O3'" ], [" P "]], # epsilon
'z' :[[" C3'", " O3'"], [" P ", " O5'"]], # zeta
'a1':[[" O3'"], [" P ", " O5'", " C5'"]], # alpha
'b1':[[], [" P ", " O5'", " C5'", " C4'"]], # beta
'g1':[[], [" O5'", " C5'", " C4'", " C3'"]], # gamma
'd1':[[], [" C5'", " C4'", " C3'", " O3'"]], # delta1
}
types = self.get_base_types()
if types[0]=='R':
angles['ch'] = [[" O4'", " C1'", " N9 ", " C4 "],[]] # chi0
N0 = ' N9 '
else:
angles['ch'] = [[" O4'", " C1'", " N1 ", " C2 "],[]] # chi0
N0 = ' N1 '
if types[1]=='R':
angles['ch1'] = [[], [" O4'", " C1'", " N9 ", " C4 "]] # chi1
N1 = ' N9 '
else:
angles['ch1'] = [[], [" O4'", " C1'", " N1 ", " C2 "]] # chi1
N1 = ' N1 '
angles['NCCN'] = [[N0, " C1'"], [" C1'", N1]]
rc = {}
for angle, atom_names in angles.items():
rc[angle] = get_torsion(self[0], self[1], atom_names[0], atom_names[1], limits='0-360')
rc['NN'] = get_distance(self[0], self[1], [N0], [N1])
rc['CC'] = get_distance(self[0], self[1], [" C1'"], [" C1'"])
# tau
args1 = []
args2 = []
for atom_names in [
[" C4'", " O4'", " C1'", " C2'"],
[" O4'", " C1'", " C2'", " C3'"],
[" C1'", " C2'", " C3'", " C4'"],
[" C2'", " C3'", " C4'", " O4'"],
[" C3'", " C4'", " O4'", " C1'"],
]:
args1.append(get_torsion(self[0], self[1], atom_names, []))
args2.append(get_torsion(self[0], self[1], [], atom_names))
rc['P'] = calc_pseudorotation(*tuple(args1))
rc['P1'] = calc_pseudorotation(*tuple(args2))
for label, item in rc.items():
# print ' %s : %0.2f' % (label, item)
rc[label] = '%0.1f' % item
rc['step_id'] = self.get_id()
return rc
def get_ntc_coordinates(self):
query = {}
for atom_key in ['C5pa',
'C4pa',
'O4pa',
'C3pa',
'O3pa',
'C2pa',
'C1pa',
'N19a',
'C24a',
'Pb',
'O5pb',
'C5pb',
'C4pb',
'O4pb',
'C3pb',
'O3pb',
'C2pb',
'C1pb',
'N19b',
'C24b',
]:
if atom_key[-1]=='a': atom_group = self[0]
elif atom_key[-1]=='b': atom_group = self[1]
else: assert 0
if atom_key.find('P')>-1: names = [' P ']
elif atom_key.find('N19')>-1: names = [' N1 ', ' N9 ']
elif atom_key.find('C24')>-1: names = [' C2 ', ' C4 ']
else: names = ['%4s' % atom_key[:-1].replace('p',"'")]
for name in names:
atom = atom_group.find_atom_by(name=name)
if atom is None:
atom = atom_group.find_atom_by(name=name.replace("'", '*'))
if atom: break
else:
assert atom
query[atom_key]= ['%s'%atom.xyz[0], '%s'%atom.xyz[1], '%s'%atom.xyz[2]]
query['step_id'] = self.get_id()
return query | en | 0.602065 | #JC hack #/JC #P = "%.1f" % P # for atom in atoms: print atom.quote() # for atom in atoms: print atom.quote() # Same as link_distance_cutoff of pdb_interpretation # if self[i-1] is None: # place holder for omega CDL # return False # if ccn1 is None: # for line in outl1: # if line not in self.errors: # self.errors.append(line) # break # if ccn2 is None: # for line in outl2: # if line not in self.errors: # self.errors.append(line) # break # needed for situations where atoms are added and the i_seq is updated # delta0 # epsilon # zeta # alpha # beta # gamma # delta1 # chi0 # chi0 # chi1 # chi1 # tau # print ' %s : %0.2f' % (label, item) | 1.843688 | 2 |
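
calc_pseudorotation() above evaluates what looks like the standard Altona–Sundaralingam sugar-pucker phase. After mapping inputs above 180 degrees into the -180..180 range, the code computes

    \tan P = \frac{(\nu_4 + \nu_1) - (\nu_3 + \nu_0)}{2\,\nu_2\,(\sin 36^\circ + \sin 72^\circ)}

and then adds 180 degrees when \nu_2 < 0, or 360 degrees when \tan P < 0, so P always falls in [0, 360). A minimal call, assuming this file is importable as mmtbx.conformation_dependent_library.multi_base_class (i.e. cctbx is installed) and using placeholder torsion values rather than real ribose geometry:

from mmtbx.conformation_dependent_library.multi_base_class import calc_pseudorotation

P = calc_pseudorotation(3.0, -25.0, 36.0, -35.0, 21.0)   # placeholder nu0..nu4 in degrees
print('%.1f' % P)                                        # phase angle P in [0, 360)
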
neutron_tempest_plugin/scenario/test_qos.py | cloudification-io/neutron-tempest-plugin | 0 | 6630314 | <gh_stars>0
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import socket
import time
from neutron_lib.services.qos import constants as qos_consts
from oslo_log import log as logging
from tempest.common import utils as tutils
from tempest.lib import decorators
from neutron_tempest_plugin.api import base as base_api
from neutron_tempest_plugin.common import ssh
from neutron_tempest_plugin.common import utils
from neutron_tempest_plugin import config
from neutron_tempest_plugin.scenario import base
from neutron_tempest_plugin.scenario import constants
from neutron_tempest_plugin.scenario import exceptions as sc_exceptions
CONF = config.CONF
LOG = logging.getLogger(__name__)
def _try_connect(host_ip, port, socket_timeout):
try:
client_socket = socket.socket(socket.AF_INET,
socket.SOCK_STREAM)
client_socket.connect((host_ip, port))
client_socket.settimeout(socket_timeout)
return client_socket
except socket.error as serr:
if serr.errno == errno.ECONNREFUSED:
raise sc_exceptions.SocketConnectionRefused(host=host_ip,
port=port)
else:
raise
def _connect_socket(host, port, socket_timeout):
"""Try to initiate a connection to a host using an ip address and a port.
Trying couple of times until a timeout is reached in case the listening
host is not ready yet.
"""
start = time.time()
while True:
try:
return _try_connect(host, port, socket_timeout)
except sc_exceptions.SocketConnectionRefused:
if time.time() - start > constants.SOCKET_CONNECT_TIMEOUT:
raise sc_exceptions.ConnectionTimeoutException(host=host,
port=port)
class QoSTestMixin(object):
credentials = ['primary', 'admin']
force_tenant_isolation = False
FILE_SIZE = 1024 * 1024
TOLERANCE_FACTOR = 1.5
BUFFER_SIZE = 512
COUNT = FILE_SIZE / BUFFER_SIZE
LIMIT_BYTES_SEC = (constants.LIMIT_KILO_BITS_PER_SECOND * 1024 *
TOLERANCE_FACTOR / 8.0)
FILE_PATH = "/tmp/img"
NC_PORT = 1234
FILE_DOWNLOAD_TIMEOUT = 120
def _create_file_for_bw_tests(self, ssh_client):
cmd = ("(dd if=/dev/zero bs=%(bs)d count=%(count)d of=%(file_path)s) "
% {'bs': self.BUFFER_SIZE, 'count': self.COUNT,
'file_path': self.FILE_PATH})
ssh_client.exec_command(cmd, timeout=5)
cmd = "stat -c %%s %s" % self.FILE_PATH
filesize = ssh_client.exec_command(cmd, timeout=5)
if int(filesize.strip()) != self.FILE_SIZE:
raise sc_exceptions.FileCreationFailedException(
file=self.FILE_PATH)
def _check_bw(self, ssh_client, host, port, expected_bw=LIMIT_BYTES_SEC):
utils.kill_nc_process(ssh_client)
cmd = ("(nc -ll -p %(port)d < %(file_path)s > /dev/null &)" % {
'port': port, 'file_path': self.FILE_PATH})
ssh_client.exec_command(cmd, timeout=5)
# Open TCP socket to remote VM and download big file
start_time = time.time()
socket_timeout = self.FILE_SIZE * self.TOLERANCE_FACTOR / expected_bw
client_socket = _connect_socket(host, port, socket_timeout)
total_bytes_read = 0
try:
while total_bytes_read < self.FILE_SIZE:
data = client_socket.recv(self.BUFFER_SIZE)
total_bytes_read += len(data)
# Calculate and return actual BW + logging result
time_elapsed = time.time() - start_time
bytes_per_second = total_bytes_read / time_elapsed
LOG.debug("time_elapsed = %(time_elapsed).16f, "
"total_bytes_read = %(total_bytes_read)d, "
"bytes_per_second = %(bytes_per_second)d",
{'time_elapsed': time_elapsed,
'total_bytes_read': total_bytes_read,
'bytes_per_second': bytes_per_second})
return bytes_per_second <= expected_bw
except socket.timeout:
LOG.warning('Socket timeout while reading the remote file, bytes '
'read: %s', total_bytes_read)
utils.kill_nc_process(ssh_client)
return False
finally:
client_socket.close()
def _create_ssh_client(self):
return ssh.Client(self.fip['floating_ip_address'],
CONF.validation.image_ssh_user,
pkey=self.keypair['private_key'])
def _test_basic_resources(self):
self.setup_network_and_server()
self.check_connectivity(self.fip['floating_ip_address'],
CONF.validation.image_ssh_user,
self.keypair['private_key'])
rulesets = [{'protocol': 'tcp',
'direction': 'ingress',
'port_range_min': self.NC_PORT,
'port_range_max': self.NC_PORT,
'remote_ip_prefix': '0.0.0.0/0'}]
self.create_secgroup_rules(rulesets,
self.security_groups[-1]['id'])
def _create_qos_policy(self):
policy = self.os_admin.network_client.create_qos_policy(
name='test-policy',
description='test-qos-policy',
shared=True)
return policy['policy']['id']
class QoSTest(QoSTestMixin, base.BaseTempestTestCase):
@classmethod
@tutils.requires_ext(extension="qos", service="network")
@base_api.require_qos_rule_type(qos_consts.RULE_TYPE_BANDWIDTH_LIMIT)
def resource_setup(cls):
super(QoSTest, cls).resource_setup()
@decorators.idempotent_id('00682a0c-b72e-11e8-b81e-8c16450ea513')
def test_qos_basic_and_update(self):
"""This test covers both:
1) Basic QoS functionality
This is a basic test that check that a QoS policy with
a bandwidth limit rule is applied correctly by sending
a file from the instance to the test node.
Then calculating the bandwidth every ~1 sec by the number of bits
received / elapsed time.
2) Update QoS policy
Administrator has the ability to update existing QoS policy,
this test is planned to verify that:
- actual BW is affected as expected after updating QoS policy.
Test scenario:
1) Associating QoS Policy with "Original_bandwidth"
to the test node
2) BW validation - by downloading file on test node.
("Original_bandwidth" is expected)
3) Updating existing QoS Policy to a new BW value
"Updated_bandwidth"
4) BW validation - by downloading file on test node.
("Updated_bandwidth" is expected)
Note:
There are two options to associate QoS policy to VM:
"Neutron Port" or "Network", in this test
both options are covered.
"""
# Setup resources
self._test_basic_resources()
ssh_client = self._create_ssh_client()
# Create QoS policy
bw_limit_policy_id = self._create_qos_policy()
# As admin user create QoS rule
rule_id = self.os_admin.network_client.create_bandwidth_limit_rule(
policy_id=bw_limit_policy_id,
max_kbps=constants.LIMIT_KILO_BITS_PER_SECOND,
max_burst_kbps=constants.LIMIT_KILO_BITS_PER_SECOND)[
'bandwidth_limit_rule']['id']
# Associate QoS to the network
self.os_admin.network_client.update_network(
self.network['id'], qos_policy_id=bw_limit_policy_id)
# Create file on VM
self._create_file_for_bw_tests(ssh_client)
# Basic test, Check that actual BW while downloading file
# is as expected (Original BW)
utils.wait_until_true(lambda: self._check_bw(
ssh_client,
self.fip['floating_ip_address'],
port=self.NC_PORT),
timeout=self.FILE_DOWNLOAD_TIMEOUT,
sleep=1)
# As admin user update QoS rule
self.os_admin.network_client.update_bandwidth_limit_rule(
bw_limit_policy_id,
rule_id,
max_kbps=constants.LIMIT_KILO_BITS_PER_SECOND * 2,
max_burst_kbps=constants.LIMIT_KILO_BITS_PER_SECOND * 2)
# Check that actual BW while downloading file
# is as expected (Update BW)
utils.wait_until_true(lambda: self._check_bw(
ssh_client,
self.fip['floating_ip_address'],
port=self.NC_PORT,
expected_bw=QoSTest.LIMIT_BYTES_SEC * 2),
timeout=self.FILE_DOWNLOAD_TIMEOUT,
sleep=1)
# Create a new QoS policy
bw_limit_policy_id_new = self._create_qos_policy()
# As admin user create a new QoS rule
rule_id_new = self.os_admin.network_client.create_bandwidth_limit_rule(
policy_id=bw_limit_policy_id_new,
max_kbps=constants.LIMIT_KILO_BITS_PER_SECOND,
max_burst_kbps=constants.LIMIT_KILO_BITS_PER_SECOND)[
'bandwidth_limit_rule']['id']
# Associate a new QoS policy to Neutron port
self.os_admin.network_client.update_port(
self.port['id'], qos_policy_id=bw_limit_policy_id_new)
# Check that actual BW while downloading file
# is as expected (Original BW)
utils.wait_until_true(lambda: self._check_bw(
ssh_client,
self.fip['floating_ip_address'],
port=self.NC_PORT),
timeout=self.FILE_DOWNLOAD_TIMEOUT,
sleep=1)
# As admin user update QoS rule
self.os_admin.network_client.update_bandwidth_limit_rule(
bw_limit_policy_id_new,
rule_id_new,
max_kbps=constants.LIMIT_KILO_BITS_PER_SECOND * 3,
max_burst_kbps=constants.LIMIT_KILO_BITS_PER_SECOND * 3)
# Check that actual BW while downloading file
# is as expected (Update BW)
utils.wait_until_true(lambda: self._check_bw(
ssh_client,
self.fip['floating_ip_address'],
port=self.NC_PORT, expected_bw=QoSTest.LIMIT_BYTES_SEC * 3),
timeout=self.FILE_DOWNLOAD_TIMEOUT,
sleep=1)
| # Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import socket
import time
from neutron_lib.services.qos import constants as qos_consts
from oslo_log import log as logging
from tempest.common import utils as tutils
from tempest.lib import decorators
from neutron_tempest_plugin.api import base as base_api
from neutron_tempest_plugin.common import ssh
from neutron_tempest_plugin.common import utils
from neutron_tempest_plugin import config
from neutron_tempest_plugin.scenario import base
from neutron_tempest_plugin.scenario import constants
from neutron_tempest_plugin.scenario import exceptions as sc_exceptions
CONF = config.CONF
LOG = logging.getLogger(__name__)
def _try_connect(host_ip, port, socket_timeout):
try:
client_socket = socket.socket(socket.AF_INET,
socket.SOCK_STREAM)
client_socket.connect((host_ip, port))
client_socket.settimeout(socket_timeout)
return client_socket
except socket.error as serr:
if serr.errno == errno.ECONNREFUSED:
raise sc_exceptions.SocketConnectionRefused(host=host_ip,
port=port)
else:
raise
def _connect_socket(host, port, socket_timeout):
"""Try to initiate a connection to a host using an ip address and a port.
Trying couple of times until a timeout is reached in case the listening
host is not ready yet.
"""
start = time.time()
while True:
try:
return _try_connect(host, port, socket_timeout)
except sc_exceptions.SocketConnectionRefused:
if time.time() - start > constants.SOCKET_CONNECT_TIMEOUT:
raise sc_exceptions.ConnectionTimeoutException(host=host,
port=port)
class QoSTestMixin(object):
credentials = ['primary', 'admin']
force_tenant_isolation = False
FILE_SIZE = 1024 * 1024
TOLERANCE_FACTOR = 1.5
BUFFER_SIZE = 512
COUNT = FILE_SIZE / BUFFER_SIZE
LIMIT_BYTES_SEC = (constants.LIMIT_KILO_BITS_PER_SECOND * 1024 *
TOLERANCE_FACTOR / 8.0)
FILE_PATH = "/tmp/img"
NC_PORT = 1234
FILE_DOWNLOAD_TIMEOUT = 120
def _create_file_for_bw_tests(self, ssh_client):
cmd = ("(dd if=/dev/zero bs=%(bs)d count=%(count)d of=%(file_path)s) "
% {'bs': self.BUFFER_SIZE, 'count': self.COUNT,
'file_path': self.FILE_PATH})
ssh_client.exec_command(cmd, timeout=5)
cmd = "stat -c %%s %s" % self.FILE_PATH
filesize = ssh_client.exec_command(cmd, timeout=5)
if int(filesize.strip()) != self.FILE_SIZE:
raise sc_exceptions.FileCreationFailedException(
file=self.FILE_PATH)
def _check_bw(self, ssh_client, host, port, expected_bw=LIMIT_BYTES_SEC):
utils.kill_nc_process(ssh_client)
cmd = ("(nc -ll -p %(port)d < %(file_path)s > /dev/null &)" % {
'port': port, 'file_path': self.FILE_PATH})
ssh_client.exec_command(cmd, timeout=5)
# Open TCP socket to remote VM and download big file
start_time = time.time()
socket_timeout = self.FILE_SIZE * self.TOLERANCE_FACTOR / expected_bw
client_socket = _connect_socket(host, port, socket_timeout)
total_bytes_read = 0
try:
while total_bytes_read < self.FILE_SIZE:
data = client_socket.recv(self.BUFFER_SIZE)
total_bytes_read += len(data)
# Calculate and return actual BW + logging result
time_elapsed = time.time() - start_time
bytes_per_second = total_bytes_read / time_elapsed
LOG.debug("time_elapsed = %(time_elapsed).16f, "
"total_bytes_read = %(total_bytes_read)d, "
"bytes_per_second = %(bytes_per_second)d",
{'time_elapsed': time_elapsed,
'total_bytes_read': total_bytes_read,
'bytes_per_second': bytes_per_second})
return bytes_per_second <= expected_bw
except socket.timeout:
LOG.warning('Socket timeout while reading the remote file, bytes '
'read: %s', total_bytes_read)
utils.kill_nc_process(ssh_client)
return False
finally:
client_socket.close()
def _create_ssh_client(self):
return ssh.Client(self.fip['floating_ip_address'],
CONF.validation.image_ssh_user,
pkey=self.keypair['private_key'])
def _test_basic_resources(self):
self.setup_network_and_server()
self.check_connectivity(self.fip['floating_ip_address'],
CONF.validation.image_ssh_user,
self.keypair['private_key'])
rulesets = [{'protocol': 'tcp',
'direction': 'ingress',
'port_range_min': self.NC_PORT,
'port_range_max': self.NC_PORT,
'remote_ip_prefix': '0.0.0.0/0'}]
self.create_secgroup_rules(rulesets,
self.security_groups[-1]['id'])
def _create_qos_policy(self):
policy = self.os_admin.network_client.create_qos_policy(
name='test-policy',
description='test-qos-policy',
shared=True)
return policy['policy']['id']
class QoSTest(QoSTestMixin, base.BaseTempestTestCase):
@classmethod
@tutils.requires_ext(extension="qos", service="network")
@base_api.require_qos_rule_type(qos_consts.RULE_TYPE_BANDWIDTH_LIMIT)
def resource_setup(cls):
super(QoSTest, cls).resource_setup()
@decorators.idempotent_id('00682a0c-b72e-11e8-b81e-8c16450ea513')
def test_qos_basic_and_update(self):
"""This test covers both:
1) Basic QoS functionality
This is a basic test that check that a QoS policy with
a bandwidth limit rule is applied correctly by sending
a file from the instance to the test node.
Then calculating the bandwidth every ~1 sec by the number of bits
received / elapsed time.
2) Update QoS policy
Administrator has the ability to update existing QoS policy,
this test is planned to verify that:
- actual BW is affected as expected after updating QoS policy.
Test scenario:
1) Associating QoS Policy with "Original_bandwidth"
to the test node
2) BW validation - by downloading file on test node.
("Original_bandwidth" is expected)
3) Updating existing QoS Policy to a new BW value
"Updated_bandwidth"
4) BW validation - by downloading file on test node.
("Updated_bandwidth" is expected)
Note:
There are two options to associate QoS policy to VM:
"Neutron Port" or "Network", in this test
both options are covered.
"""
# Setup resources
self._test_basic_resources()
ssh_client = self._create_ssh_client()
# Create QoS policy
bw_limit_policy_id = self._create_qos_policy()
# As admin user create QoS rule
rule_id = self.os_admin.network_client.create_bandwidth_limit_rule(
policy_id=bw_limit_policy_id,
max_kbps=constants.LIMIT_KILO_BITS_PER_SECOND,
max_burst_kbps=constants.LIMIT_KILO_BITS_PER_SECOND)[
'bandwidth_limit_rule']['id']
# Associate QoS to the network
self.os_admin.network_client.update_network(
self.network['id'], qos_policy_id=bw_limit_policy_id)
# Create file on VM
self._create_file_for_bw_tests(ssh_client)
# Basic test, Check that actual BW while downloading file
# is as expected (Original BW)
utils.wait_until_true(lambda: self._check_bw(
ssh_client,
self.fip['floating_ip_address'],
port=self.NC_PORT),
timeout=self.FILE_DOWNLOAD_TIMEOUT,
sleep=1)
# As admin user update QoS rule
self.os_admin.network_client.update_bandwidth_limit_rule(
bw_limit_policy_id,
rule_id,
max_kbps=constants.LIMIT_KILO_BITS_PER_SECOND * 2,
max_burst_kbps=constants.LIMIT_KILO_BITS_PER_SECOND * 2)
# Check that actual BW while downloading file
# is as expected (Update BW)
utils.wait_until_true(lambda: self._check_bw(
ssh_client,
self.fip['floating_ip_address'],
port=self.NC_PORT,
expected_bw=QoSTest.LIMIT_BYTES_SEC * 2),
timeout=self.FILE_DOWNLOAD_TIMEOUT,
sleep=1)
# Create a new QoS policy
bw_limit_policy_id_new = self._create_qos_policy()
# As admin user create a new QoS rule
rule_id_new = self.os_admin.network_client.create_bandwidth_limit_rule(
policy_id=bw_limit_policy_id_new,
max_kbps=constants.LIMIT_KILO_BITS_PER_SECOND,
max_burst_kbps=constants.LIMIT_KILO_BITS_PER_SECOND)[
'bandwidth_limit_rule']['id']
# Associate a new QoS policy to Neutron port
self.os_admin.network_client.update_port(
self.port['id'], qos_policy_id=bw_limit_policy_id_new)
# Check that actual BW while downloading file
# is as expected (Original BW)
utils.wait_until_true(lambda: self._check_bw(
ssh_client,
self.fip['floating_ip_address'],
port=self.NC_PORT),
timeout=self.FILE_DOWNLOAD_TIMEOUT,
sleep=1)
# As admin user update QoS rule
self.os_admin.network_client.update_bandwidth_limit_rule(
bw_limit_policy_id_new,
rule_id_new,
max_kbps=constants.LIMIT_KILO_BITS_PER_SECOND * 3,
max_burst_kbps=constants.LIMIT_KILO_BITS_PER_SECOND * 3)
# Check that actual BW while downloading file
# is as expected (Update BW)
utils.wait_until_true(lambda: self._check_bw(
ssh_client,
self.fip['floating_ip_address'],
port=self.NC_PORT, expected_bw=QoSTest.LIMIT_BYTES_SEC * 3),
timeout=self.FILE_DOWNLOAD_TIMEOUT,
sleep=1) | en | 0.887427 | # Copyright 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. Try to initiate a connection to a host using an ip address and a port. Trying couple of times until a timeout is reached in case the listening host is not ready yet. # Open TCP socket to remote VM and download big file # Calculate and return actual BW + logging result This test covers both: 1) Basic QoS functionality This is a basic test that check that a QoS policy with a bandwidth limit rule is applied correctly by sending a file from the instance to the test node. Then calculating the bandwidth every ~1 sec by the number of bits received / elapsed time. 2) Update QoS policy Administrator has the ability to update existing QoS policy, this test is planned to verify that: - actual BW is affected as expected after updating QoS policy. Test scenario: 1) Associating QoS Policy with "Original_bandwidth" to the test node 2) BW validation - by downloading file on test node. ("Original_bandwidth" is expected) 3) Updating existing QoS Policy to a new BW value "Updated_bandwidth" 4) BW validation - by downloading file on test node. ("Updated_bandwidth" is expected) Note: There are two options to associate QoS policy to VM: "Neutron Port" or "Network", in this test both options are covered. # Setup resources # Create QoS policy # As admin user create QoS rule # Associate QoS to the network # Create file on VM # Basic test, Check that actual BW while downloading file # is as expected (Original BW) # As admin user update QoS rule # Check that actual BW while downloading file # is as expected (Update BW) # Create a new QoS policy # As admin user create a new QoS rule # Associate a new QoS policy to Neutron port # Check that actual BW while downloading file # is as expected (Original BW) # As admin user update QoS rule # Check that actual BW while downloading file # is as expected (Update BW) | 1.726985 | 2 |
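
The pass condition in _check_bw() above is bytes_per_second <= expected_bw, where the default threshold is LIMIT_BYTES_SEC = LIMIT_KILO_BITS_PER_SECOND * 1024 * TOLERANCE_FACTOR / 8.0 and the socket timeout is FILE_SIZE * TOLERANCE_FACTOR / expected_bw. The real constant lives in neutron_tempest_plugin.scenario.constants and is not shown in this record, so the value below is an assumption purely to make the arithmetic concrete:

LIMIT_KILO_BITS_PER_SECOND = 1000      # assumed value; the actual constant is defined elsewhere
TOLERANCE_FACTOR = 1.5
FILE_SIZE = 1024 * 1024                # 1 MiB test file, as in the mixin above

limit_bytes_sec = LIMIT_KILO_BITS_PER_SECOND * 1024 * TOLERANCE_FACTOR / 8.0
socket_timeout = FILE_SIZE * TOLERANCE_FACTOR / limit_bytes_sec
print(limit_bytes_sec)                 # 192000.0 bytes/s allowed for a 1000 kbit/s limit (+50% tolerance)
print(socket_timeout)                  # 8.192 s to fetch the file before the client socket times out
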
teamanalysis/water_2018.py | yoojunwoong/miniproject_self | 0 | 6630315 | <filename>teamanalysis/water_2018.py
import pandas as pd;
import numpy as np;
import json
from confing.settings import DATA_DIRS
df = pd.read_excel(DATA_DIRS[0] + '//health_2018.xlsx', engine='openpyxl');
dfh = df.copy();
# 행같은경우 1~18까지 데이터가 광역시별,도의 총통계로 되어있고,
# 열같은경우 물과 관련된 특정 데이터를 추출해야하는데, 특정값을 몰라서 (colunm1~colunm4)로함
#dfc1 = dfc.loc[1:18,['colunm1','colunm2','colunm3','colunm4']];
dfh1 = dfh.loc[1:7,['시도','대형교통사고_사망자수','총인구수','일본뇌염_발생자수']];
dfh2 = dfh.loc[9:17,['시도','대형교통사고_사망자수','총인구수','일본뇌염_발생자수']];
#세종시에 데이터를 제외시키기 위해서, 세종시 위,아래 데이터(dfc1과 dfc2를 concat하였음)
dfh3 = pd.concat([dfh1, dfh2], ignore_index=True,join='outer');
# 특정값(column1)에 대해서 NaN안 경우 값을 0.0으로하였음
dfh3['대형교통사고_사망자수'].replace(np.nan,0.0,inplace=True);
#print(dfh3);
#--------------------------------------------------------------------------------#
df2 = pd.read_excel(DATA_DIRS[0] + '//water_2018.xlsx', engine='openpyxl');
#print(df2);
dfw = df2.copy();
# 데이터를 가져올때, 수자원공사,세종시 데이터는 뺴고 가져옴
# 비교대상 ex)시설용량(㎥/일) 별 과망간산칼륨소비량(기준:10/ 단위:(mg/L)),
# 잔류염소(기준:4/ 단위:(mg/L))으로진행
# 서울,부산,대구,인천,광주,대전,울산
class water:
def p1(self,x):
#<수도 사업자가 서울특별시인 지역에, 잔류염소의 평균값 구하기>
dfw1 = dfw[dfw['수도사업자'].str.contains(x)];
dfwc1 = dfw1['시설용량(㎥/일)'] * dfw1['잔류염소(기준:4/ 단위:(mg/L))'] * 1000 ;
dfwc2 = dfw1['시설용량(㎥/일)'].sum();
dfwc3 = dfwc1.sum();
dfwc4 = (dfwc3 / dfwc2) * 0.001
#2018년도의 서울특별시의 잔류염소 평균값 = 0.46615449~
return(print(dfwc4));
#람다를 써야하나..?ㅠ
dataw = {
'서울특별시' : [0.46615449628127115],
'부산광역시' : [0.6888034488826325],
'대구광역시' : [0.5711069651741293],
'인천광역시' : [0.8074644634880428],
'광주광역시' : [0.6113274336283185],
'대전광역시' : [0.6110416666666666],
'울산광역시' : [0.5801515151515151],
};
dfwc2 = pd.DataFrame(dataw,['2018 광역시별 잔류염소']);
dfwc3 = dfwc2.T;
dfwc4 = dfwc3.reset_index();
#print(dfh3.head(7));
#print(dfwc4);
dfco_18 = pd.concat([dfh3.head(7),dfwc4],ignore_index=False,join='inner',axis=1);
as18 = dfco_18.drop('index', axis=1);
print(as18);
#concat,merge 해봤는데, 겹치는 부분이 안사라짐...
# if __name__ == '__main__':
# water().p1('서울특별시');
# water().p1('부산광역시');
# water().p1('대구광역시');
# water().p1('인천광역시');
# water().p1('광주광역시');
# water().p1('대전광역시');
# water().p1('울산광역시'); | <filename>teamanalysis/water_2018.py
import pandas as pd;
import numpy as np;
import json
from confing.settings import DATA_DIRS
df = pd.read_excel(DATA_DIRS[0] + '//health_2018.xlsx', engine='openpyxl');
dfh = df.copy();
# 행같은경우 1~18까지 데이터가 광역시별,도의 총통계로 되어있고,
# 열같은경우 물과 관련된 특정 데이터를 추출해야하는데, 특정값을 몰라서 (colunm1~colunm4)로함
#dfc1 = dfc.loc[1:18,['colunm1','colunm2','colunm3','colunm4']];
dfh1 = dfh.loc[1:7,['시도','대형교통사고_사망자수','총인구수','일본뇌염_발생자수']];
dfh2 = dfh.loc[9:17,['시도','대형교통사고_사망자수','총인구수','일본뇌염_발생자수']];
#세종시에 데이터를 제외시키기 위해서, 세종시 위,아래 데이터(dfc1과 dfc2를 concat하였음)
dfh3 = pd.concat([dfh1, dfh2], ignore_index=True,join='outer');
# 특정값(column1)에 대해서 NaN안 경우 값을 0.0으로하였음
dfh3['대형교통사고_사망자수'].replace(np.nan,0.0,inplace=True);
#print(dfh3);
#--------------------------------------------------------------------------------#
df2 = pd.read_excel(DATA_DIRS[0] + '//water_2018.xlsx', engine='openpyxl');
#print(df2);
dfw = df2.copy();
# 데이터를 가져올때, 수자원공사,세종시 데이터는 뺴고 가져옴
# 비교대상 ex)시설용량(㎥/일) 별 과망간산칼륨소비량(기준:10/ 단위:(mg/L)),
# 잔류염소(기준:4/ 단위:(mg/L))으로진행
# 서울,부산,대구,인천,광주,대전,울산
class water:
def p1(self,x):
#<수도 사업자가 서울특별시인 지역에, 잔류염소의 평균값 구하기>
dfw1 = dfw[dfw['수도사업자'].str.contains(x)];
dfwc1 = dfw1['시설용량(㎥/일)'] * dfw1['잔류염소(기준:4/ 단위:(mg/L))'] * 1000 ;
dfwc2 = dfw1['시설용량(㎥/일)'].sum();
dfwc3 = dfwc1.sum();
dfwc4 = (dfwc3 / dfwc2) * 0.001
#2018년도의 서울특별시의 잔류염소 평균값 = 0.46615449~
return(print(dfwc4));
#람다를 써야하나..?ㅠ
dataw = {
'서울특별시' : [0.46615449628127115],
'부산광역시' : [0.6888034488826325],
'대구광역시' : [0.5711069651741293],
'인천광역시' : [0.8074644634880428],
'광주광역시' : [0.6113274336283185],
'대전광역시' : [0.6110416666666666],
'울산광역시' : [0.5801515151515151],
};
dfwc2 = pd.DataFrame(dataw,['2018 광역시별 잔류염소']);
dfwc3 = dfwc2.T;
dfwc4 = dfwc3.reset_index();
#print(dfh3.head(7));
#print(dfwc4);
dfco_18 = pd.concat([dfh3.head(7),dfwc4],ignore_index=False,join='inner',axis=1);
as18 = dfco_18.drop('index', axis=1);
print(as18);
#concat,merge 해봤는데, 겹치는 부분이 안사라짐...
# if __name__ == '__main__':
# water().p1('서울특별시');
# water().p1('부산광역시');
# water().p1('대구광역시');
# water().p1('인천광역시');
# water().p1('광주광역시');
# water().p1('대전광역시');
# water().p1('울산광역시'); | ko | 0.896972 | # 행같은경우 1~18까지 데이터가 광역시별,도의 총통계로 되어있고, # 열같은경우 물과 관련된 특정 데이터를 추출해야하는데, 특정값을 몰라서 (colunm1~colunm4)로함 #dfc1 = dfc.loc[1:18,['colunm1','colunm2','colunm3','colunm4']]; #세종시에 데이터를 제외시키기 위해서, 세종시 위,아래 데이터(dfc1과 dfc2를 concat하였음) # 특정값(column1)에 대해서 NaN안 경우 값을 0.0으로하였음 #print(dfh3); #--------------------------------------------------------------------------------# #print(df2); # 데이터를 가져올때, 수자원공사,세종시 데이터는 뺴고 가져옴 # 비교대상 ex)시설용량(㎥/일) 별 과망간산칼륨소비량(기준:10/ 단위:(mg/L)), # 잔류염소(기준:4/ 단위:(mg/L))으로진행 # 서울,부산,대구,인천,광주,대전,울산 #<수도 사업자가 서울특별시인 지역에, 잔류염소의 평균값 구하기> #2018년도의 서울특별시의 잔류염소 평균값 = 0.46615449~ #람다를 써야하나..?ㅠ #print(dfh3.head(7)); #print(dfwc4); #concat,merge 해봤는데, 겹치는 부분이 안사라짐... # if __name__ == '__main__': # water().p1('서울특별시'); # water().p1('부산광역시'); # water().p1('대구광역시'); # water().p1('인천광역시'); # water().p1('광주광역시'); # water().p1('대전광역시'); # water().p1('울산광역시'); | 2.415797 | 2 |
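
The Korean comments in this record say, roughly: rows 1-18 of the health sheet are per-city/province totals; Sejong is excluded by concatenating the rows above and below it; NaN values in the major-traffic-accident deaths column are set to 0.0; water.p1() computes the facility-capacity-weighted average residual chlorine (limit 4, mg/L) for one supplier (e.g. Seoul in 2018 is about 0.466); those per-city averages are then hard-coded into dataw, and the closing comment notes that the overlapping 'index' column left over from concat/merge would not go away. Since the *1000 and *0.001 factors in p1() cancel, the figure is simply a capacity-weighted mean; a hypothetical helper using the same frame and column names as above:

def weighted_chlorine(frame, supplier):
    # capacity-weighted mean of residual chlorine for one supplier (illustrative refactor)
    sub = frame[frame['수도사업자'].str.contains(supplier)]
    w = sub['시설용량(㎥/일)']
    return (w * sub['잔류염소(기준:4/ 단위:(mg/L))']).sum() / w.sum()

cities = ['서울특별시', '부산광역시', '대구광역시', '인천광역시', '광주광역시', '대전광역시', '울산광역시']
dataw = {city: [weighted_chlorine(dfw, city)] for city in cities}

As a possible fix for the leftover 'index' column, renaming it after reset_index() -- e.g. dfwc3.reset_index().rename(columns={'index': '시도'}) -- would allow merging with dfh3 on '시도' instead of relying on positional concat, provided the '시도' labels match the supplier names.
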
Codes/gracekoo/interview_6.py | liuxiaohui1221/algorithm | 256 | 6630316 | # -*- coding: utf-8 -*-
# @Time: 2020/5/25 12:37
# @Author: GraceKoo
# @File: interview_6.py
# @Desc: https://www.nowcoder.com/practice/9f3231a991af4f55b95579b44b7a01ba?tpId=13&tqId=11159&tPage=1&rp=1&ru=/ta/
# coding-interviews&qru=/ta/coding-interviews/question-ranking
class Solution:
def minNumberInRotateArray(self, rotateArray):
# write code here
len_rotatearray = len(rotateArray)
if len_rotatearray <= 1:
return rotateArray
left = 0
right = len_rotatearray - 1
while left < right:
middle = (left + right) // 2
if rotateArray[middle] > rotateArray[right]:
left = middle + 1
else:
right = middle
return rotateArray[left]
so = Solution()
print(so.minNumberInRotateArray([1, 2, 3, 4, 5, 6, 7]))
print(so.minNumberInRotateArray([4, 5, 6, 7, 1, 2, 3]))
print(so.minNumberInRotateArray([6, 7, 1, 2, 3, 4, 5]))
| # -*- coding: utf-8 -*-
# @Time: 2020/5/25 12:37
# @Author: GraceKoo
# @File: interview_6.py
# @Desc: https://www.nowcoder.com/practice/9f3231a991af4f55b95579b44b7a01ba?tpId=13&tqId=11159&tPage=1&rp=1&ru=/ta/
# coding-interviews&qru=/ta/coding-interviews/question-ranking
class Solution:
def minNumberInRotateArray(self, rotateArray):
# write code here
len_rotatearray = len(rotateArray)
if len_rotatearray <= 1:
return rotateArray
left = 0
right = len_rotatearray - 1
while left < right:
middle = (left + right) // 2
if rotateArray[middle] > rotateArray[right]:
left = middle + 1
else:
right = middle
return rotateArray[left]
so = Solution()
print(so.minNumberInRotateArray([1, 2, 3, 4, 5, 6, 7]))
print(so.minNumberInRotateArray([4, 5, 6, 7, 1, 2, 3]))
print(so.minNumberInRotateArray([6, 7, 1, 2, 3, 4, 5]))
| en | 0.47457 | # -*- coding: utf-8 -*- # @Time: 2020/5/25 12:37 # @Author: GraceKoo # @File: interview_6.py # @Desc: https://www.nowcoder.com/practice/9f3231a991af4f55b95579b44b7a01ba?tpId=13&tqId=11159&tPage=1&rp=1&ru=/ta/ # coding-interviews&qru=/ta/coding-interviews/question-ranking # write code here | 3.527322 | 4 |
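
A quick, illustrative self-check of the binary search above: for rotated arrays of distinct values it agrees with min(). Note two limitations visible in the record itself: with duplicates such as [1, 1, 1, 0, 1] this variant can return the wrong element, and the len <= 1 branch returns the list itself rather than a single value.

import random

base = sorted(random.sample(range(100), 10))          # distinct values only
for k in range(1, len(base)):
    rotated = base[k:] + base[:k]
    assert Solution().minNumberInRotateArray(rotated) == min(rotated) == base[0]
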
tests/languages/test_bengali.py | kevinbazira/revscoring | 49 | 6630317 | import pickle
from pytest import mark
from revscoring.datasources import revision_oriented
from revscoring.dependencies import solve
from revscoring.languages import bengali
from .util import compare_extraction
BAD = [
"magi",
"মাগী",
"বাল",
"পর্নো",
"পর্ণো",
"বেশ্যা",
"নষ্টা",
"মগা",
"আবাল",
"পেনিস",
"নিগ্রো",
"পায়খান",
"সেক্সি",
"সেক্স",
"চটি",
]
INFORMAL = [
"কর",
"করবি",
"থাম",
"হাহা",
"হাহাহা",
"হাহাহাহা",
"lol",
"লোল",
"লুল",
"ইউজার",
"ইউজ",
"ব্লা",
"ব্লাব্লা",
"জান",
"বিশ্রী",
"প্লিজ",
"পেত্নী",
]
OTHER = [
"""
সত্যজিৎ রায় একজন ভারতীয় চলচ্চিত্র নির্মাতা ও বিংশ শতাব্দীর অন্যতম শ্রেষ্ঠ
চলচ্চিত্র পরিচালক। কলকাতা শহরে সাহিত্য ও শিল্পের জগতে খ্যাতনামা এক বাঙালি
পরিবারে তাঁর জন্ম হয়। তিনি কলকাতার প্রেসিডেন্সি কলেজ ও শান্তিনিকেতনে
রবীন্দ্রনাথ ঠাকুরের প্রতিষ্ঠিত বিশ্বভারতী বিশ্ববিদ্যালয়ে পড়াশোনা করেন।
সত্যজিতের কর্মজীবন একজন বাণিজ্যিক চিত্রকর হিসেবে শুরু হলেও প্রথমে কলকাতায়
ফরাসী চলচ্চিত্র নির্মাতা জঁ রনোয়ারের সাথে সাক্ষাৎ ও পরে লন্ডন শহরে সফররত
অবস্থায় ইতালীয় নব্য বাস্তবতাবাদী ছবি লাদ্রি দি বিচিক্লেত্তে.
"""
]
r_text = revision_oriented.revision.text
@mark.nottravis
def test_badwords():
compare_extraction(bengali.badwords.revision.datasources.matches,
BAD, OTHER)
assert bengali.badwords == pickle.loads(pickle.dumps(bengali.badwords))
@mark.nottravis
def test_informals():
compare_extraction(bengali.informals.revision.datasources.matches,
INFORMAL, OTHER)
assert bengali.informals == pickle.loads(pickle.dumps(bengali.informals))
'''
def test_dictionary():
cache = {r_text: "দেখার পর তিনি worngly."}
assert_equal(solve(bengali.dictionary.revision.datasources.dict_words,
cache=cache),
['দেখার', 'পর', 'তিনি'])
assert_equal(solve(bengali.dictionary.revision.datasources.non_dict_words,
cache=cache),
["worngly"])
assert_equal(bengali.dictionary, pickle.loads(pickle.dumps(bengali.dictionary)))
'''
@mark.nottravis
def test_stopwords():
cache = {r_text: "আন চলচ্চিত্র."}
assert (solve(bengali.stopwords.revision.datasources.stopwords, cache=cache) ==
["আন"])
assert (solve(bengali.stopwords.revision.datasources.non_stopwords,
cache=cache) ==
['চলচ্চিত্র'])
assert bengali.stopwords == pickle.loads(pickle.dumps(bengali.stopwords))
| import pickle
from pytest import mark
from revscoring.datasources import revision_oriented
from revscoring.dependencies import solve
from revscoring.languages import bengali
from .util import compare_extraction
BAD = [
"magi",
"মাগী",
"বাল",
"পর্নো",
"পর্ণো",
"বেশ্যা",
"নষ্টা",
"মগা",
"আবাল",
"পেনিস",
"নিগ্রো",
"পায়খান",
"সেক্সি",
"সেক্স",
"চটি",
]
INFORMAL = [
"কর",
"করবি",
"থাম",
"হাহা",
"হাহাহা",
"হাহাহাহা",
"lol",
"লোল",
"লুল",
"ইউজার",
"ইউজ",
"ব্লা",
"ব্লাব্লা",
"জান",
"বিশ্রী",
"প্লিজ",
"পেত্নী",
]
OTHER = [
"""
সত্যজিৎ রায় একজন ভারতীয় চলচ্চিত্র নির্মাতা ও বিংশ শতাব্দীর অন্যতম শ্রেষ্ঠ
চলচ্চিত্র পরিচালক। কলকাতা শহরে সাহিত্য ও শিল্পের জগতে খ্যাতনামা এক বাঙালি
পরিবারে তাঁর জন্ম হয়। তিনি কলকাতার প্রেসিডেন্সি কলেজ ও শান্তিনিকেতনে
রবীন্দ্রনাথ ঠাকুরের প্রতিষ্ঠিত বিশ্বভারতী বিশ্ববিদ্যালয়ে পড়াশোনা করেন।
সত্যজিতের কর্মজীবন একজন বাণিজ্যিক চিত্রকর হিসেবে শুরু হলেও প্রথমে কলকাতায়
ফরাসী চলচ্চিত্র নির্মাতা জঁ রনোয়ারের সাথে সাক্ষাৎ ও পরে লন্ডন শহরে সফররত
অবস্থায় ইতালীয় নব্য বাস্তবতাবাদী ছবি লাদ্রি দি বিচিক্লেত্তে.
"""
]
r_text = revision_oriented.revision.text
@mark.nottravis
def test_badwords():
compare_extraction(bengali.badwords.revision.datasources.matches,
BAD, OTHER)
assert bengali.badwords == pickle.loads(pickle.dumps(bengali.badwords))
@mark.nottravis
def test_informals():
compare_extraction(bengali.informals.revision.datasources.matches,
INFORMAL, OTHER)
assert bengali.informals == pickle.loads(pickle.dumps(bengali.informals))
'''
def test_dictionary():
cache = {r_text: "দেখার পর তিনি worngly."}
assert_equal(solve(bengali.dictionary.revision.datasources.dict_words,
cache=cache),
['দেখার', 'পর', 'তিনি'])
assert_equal(solve(bengali.dictionary.revision.datasources.non_dict_words,
cache=cache),
["worngly"])
assert_equal(bengali.dictionary, pickle.loads(pickle.dumps(bengali.dictionary)))
'''
@mark.nottravis
def test_stopwords():
cache = {r_text: "আন চলচ্চিত্র."}
assert (solve(bengali.stopwords.revision.datasources.stopwords, cache=cache) ==
["আন"])
assert (solve(bengali.stopwords.revision.datasources.non_stopwords,
cache=cache) ==
['চলচ্চিত্র'])
assert bengali.stopwords == pickle.loads(pickle.dumps(bengali.stopwords))
| bn | 0.986141 | সত্যজিৎ রায় একজন ভারতীয় চলচ্চিত্র নির্মাতা ও বিংশ শতাব্দীর অন্যতম শ্রেষ্ঠ চলচ্চিত্র পরিচালক। কলকাতা শহরে সাহিত্য ও শিল্পের জগতে খ্যাতনামা এক বাঙালি পরিবারে তাঁর জন্ম হয়। তিনি কলকাতার প্রেসিডেন্সি কলেজ ও শান্তিনিকেতনে রবীন্দ্রনাথ ঠাকুরের প্রতিষ্ঠিত বিশ্বভারতী বিশ্ববিদ্যালয়ে পড়াশোনা করেন। সত্যজিতের কর্মজীবন একজন বাণিজ্যিক চিত্রকর হিসেবে শুরু হলেও প্রথমে কলকাতায় ফরাসী চলচ্চিত্র নির্মাতা জঁ রনোয়ারের সাথে সাক্ষাৎ ও পরে লন্ডন শহরে সফররত অবস্থায় ইতালীয় নব্য বাস্তবতাবাদী ছবি লাদ্রি দি বিচিক্লেত্তে. def test_dictionary(): cache = {r_text: "দেখার পর তিনি worngly."} assert_equal(solve(bengali.dictionary.revision.datasources.dict_words, cache=cache), ['দেখার', 'পর', 'তিনি']) assert_equal(solve(bengali.dictionary.revision.datasources.non_dict_words, cache=cache), ["worngly"]) assert_equal(bengali.dictionary, pickle.loads(pickle.dumps(bengali.dictionary))) | 2.066974 | 2 |
sympy/assumptions/handlers/calculus.py | nashalex/sympy | 8,323 | 6630318 | """
This module contains query handlers responsible for calculus queries:
infinitesimal, finite, etc.
"""
from sympy.assumptions import Q, ask
from sympy.core import Add, Mul, Pow, Symbol
from sympy.core.numbers import (ComplexInfinity, Exp1, GoldenRatio, ImaginaryUnit,
Infinity, NaN, NegativeInfinity, Number, Pi, TribonacciConstant, E)
from sympy.functions import cos, exp, log, sign, sin
from sympy.logic.boolalg import conjuncts
from ..predicates.calculus import (FinitePredicate, InfinitePredicate,
PositiveInfinitePredicate, NegativeInfinitePredicate)
# FinitePredicate
@FinitePredicate.register(Symbol) # type: ignore
def _(expr, assumptions):
"""
Handles Symbol.
"""
if expr.is_finite is not None:
return expr.is_finite
if Q.finite(expr) in conjuncts(assumptions):
return True
return None
@FinitePredicate.register(Add) # type: ignore
def _(expr, assumptions):
"""
Return True if expr is bounded, False if not and None if unknown.
Truth Table:
+-------+-----+-----------+-----------+
| | | | |
| | B | U | ? |
| | | | |
+-------+-----+---+---+---+---+---+---+
| | | | | | | | |
| | |'+'|'-'|'x'|'+'|'-'|'x'|
| | | | | | | | |
+-------+-----+---+---+---+---+---+---+
| | | | |
| B | B | U | ? |
| | | | |
+---+---+-----+---+---+---+---+---+---+
| | | | | | | | | |
| |'+'| | U | ? | ? | U | ? | ? |
| | | | | | | | | |
| +---+-----+---+---+---+---+---+---+
| | | | | | | | | |
| U |'-'| | ? | U | ? | ? | U | ? |
| | | | | | | | | |
| +---+-----+---+---+---+---+---+---+
| | | | | |
| |'x'| | ? | ? |
| | | | | |
+---+---+-----+---+---+---+---+---+---+
| | | | |
| ? | | | ? |
| | | | |
+-------+-----+-----------+---+---+---+
* 'B' = Bounded
* 'U' = Unbounded
* '?' = unknown boundedness
* '+' = positive sign
* '-' = negative sign
* 'x' = sign unknown
* All Bounded -> True
* 1 Unbounded and the rest Bounded -> False
* >1 Unbounded, all with same known sign -> False
* Any Unknown and unknown sign -> None
* Else -> None
When the signs are not the same you can have an undefined
result as in oo - oo, hence 'bounded' is also undefined.
"""
sign = -1 # sign of unknown or infinite
result = True
for arg in expr.args:
_bounded = ask(Q.finite(arg), assumptions)
if _bounded:
continue
s = ask(Q.extended_positive(arg), assumptions)
# if there has been more than one sign or if the sign of this arg
# is None and Bounded is None or there was already
# an unknown sign, return None
if sign != -1 and s != sign or \
s is None and None in (_bounded, sign):
return None
else:
sign = s
# once False, do not change
if result is not False:
result = _bounded
return result
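# Illustrative sketch (not part of the upstream module): how the Add truth table
# above is expected to surface through the public ask() API. The outcomes shown
# are the ones implied by the handler logic, stated here only as a reading aid.
#
#   from sympy import Q, Symbol, ask, oo
#   x, y = Symbol('x'), Symbol('y')
#   ask(Q.finite(x + y), Q.finite(x) & Q.finite(y))   # B + B -> True
#   ask(Q.finite(x + oo), Q.finite(x))                # B + U -> False
#   ask(Q.finite(oo + (-oo)))                         # U + U with opposite signs -> None (oo - oo is undefined)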
@FinitePredicate.register(Mul) # type: ignore
def _(expr, assumptions):
"""
Return True if expr is bounded, False if not and None if unknown.
Truth Table:
+---+---+---+--------+
| | | | |
| | B | U | ? |
| | | | |
+---+---+---+---+----+
| | | | | |
| | | | s | /s |
| | | | | |
+---+---+---+---+----+
| | | | |
| B | B | U | ? |
| | | | |
+---+---+---+---+----+
| | | | | |
| U | | U | U | ? |
| | | | | |
+---+---+---+---+----+
| | | | |
| ? | | | ? |
| | | | |
+---+---+---+---+----+
* B = Bounded
* U = Unbounded
* ? = unknown boundedness
* s = signed (hence nonzero)
* /s = not signed
"""
result = True
for arg in expr.args:
_bounded = ask(Q.finite(arg), assumptions)
if _bounded:
continue
elif _bounded is None:
if result is None:
return None
if ask(Q.extended_nonzero(arg), assumptions) is None:
return None
if result is not False:
result = None
else:
result = False
return result
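# Illustrative sketch (not part of the upstream module): the 's' vs '/s' columns
# of the Mul table above, phrased through ask(); results are those the handler
# logic implies.
#
#   from sympy import Q, Symbol, ask, oo
#   x = Symbol('x')
#   ask(Q.finite(oo*x), Q.nonzero(x))   # U * signed  -> False
#   ask(Q.finite(oo*x))                 # U * unknown -> None (x could be zero)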
@FinitePredicate.register(Pow) # type: ignore
def _(expr, assumptions):
"""
* Unbounded ** NonZero -> Unbounded
* Bounded ** Bounded -> Bounded
* Abs()<=1 ** Positive -> Bounded
* Abs()>=1 ** Negative -> Bounded
* Otherwise unknown
"""
if expr.base == E:
return ask(Q.finite(expr.exp), assumptions)
base_bounded = ask(Q.finite(expr.base), assumptions)
exp_bounded = ask(Q.finite(expr.exp), assumptions)
if base_bounded is None and exp_bounded is None: # Common Case
return None
if base_bounded is False and ask(Q.extended_nonzero(expr.exp), assumptions):
return False
if base_bounded and exp_bounded:
return True
if (abs(expr.base) <= 1) == True and ask(Q.extended_positive(expr.exp), assumptions):
return True
if (abs(expr.base) >= 1) == True and ask(Q.extended_negative(expr.exp), assumptions):
return True
if (abs(expr.base) >= 1) == True and exp_bounded is False:
return False
return None
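# Illustrative sketch (not part of the upstream module): two of the Pow rules
# listed in the docstring above, as the handler logic implies.
#
#   from sympy import Q, Symbol, ask, oo
#   x = Symbol('x')
#   ask(Q.finite(x**2), Q.finite(x))     # Bounded ** Bounded   -> True
#   ask(Q.finite(oo**x), Q.positive(x))  # Unbounded ** NonZero -> Unbounded, i.e. not finite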
@FinitePredicate.register(exp) # type: ignore
def _(expr, assumptions):
return ask(Q.finite(expr.exp), assumptions)
@FinitePredicate.register(log) # type: ignore
def _(expr, assumptions):
# After complex -> finite fact is registered to new assumption system,
# querying Q.infinite may be removed.
if ask(Q.infinite(expr.args[0]), assumptions):
return False
return ask(~Q.zero(expr.args[0]), assumptions)
@FinitePredicate.register_many(cos, sin, Number, Pi, Exp1, GoldenRatio, # type: ignore
TribonacciConstant, ImaginaryUnit, sign)
def _(expr, assumptions):
return True
@FinitePredicate.register_many(ComplexInfinity, Infinity, NegativeInfinity) # type: ignore
def _(expr, assumptions):
return False
@FinitePredicate.register(NaN) # type: ignore
def _(expr, assumptions):
return None
# InfinitePredicate
@InfinitePredicate.register_many(ComplexInfinity, Infinity, NegativeInfinity) # type: ignore
def _(expr, assumptions):
return True
# PositiveInfinitePredicate
@PositiveInfinitePredicate.register(Infinity) # type: ignore
def _(expr, assumptions):
return True
@PositiveInfinitePredicate.register_many(NegativeInfinity, ComplexInfinity) # type: ignore
def _(expr, assumptions):
return False
# NegativeInfinitePredicate
@NegativeInfinitePredicate.register(NegativeInfinity) # type: ignore
def _(expr, assumptions):
return True
@NegativeInfinitePredicate.register_many(Infinity, ComplexInfinity) # type: ignore
def _(expr, assumptions):
return False
| """
This module contains query handlers responsible for calculus queries:
infinitesimal, finite, etc.
"""
from sympy.assumptions import Q, ask
from sympy.core import Add, Mul, Pow, Symbol
from sympy.core.numbers import (ComplexInfinity, Exp1, GoldenRatio, ImaginaryUnit,
Infinity, NaN, NegativeInfinity, Number, Pi, TribonacciConstant, E)
from sympy.functions import cos, exp, log, sign, sin
from sympy.logic.boolalg import conjuncts
from ..predicates.calculus import (FinitePredicate, InfinitePredicate,
PositiveInfinitePredicate, NegativeInfinitePredicate)
# FinitePredicate
@FinitePredicate.register(Symbol) # type: ignore
def _(expr, assumptions):
"""
Handles Symbol.
"""
if expr.is_finite is not None:
return expr.is_finite
if Q.finite(expr) in conjuncts(assumptions):
return True
return None
@FinitePredicate.register(Add) # type: ignore
def _(expr, assumptions):
"""
Return True if expr is bounded, False if not and None if unknown.
Truth Table:
+-------+-----+-----------+-----------+
| | | | |
| | B | U | ? |
| | | | |
+-------+-----+---+---+---+---+---+---+
| | | | | | | | |
| | |'+'|'-'|'x'|'+'|'-'|'x'|
| | | | | | | | |
+-------+-----+---+---+---+---+---+---+
| | | | |
| B | B | U | ? |
| | | | |
+---+---+-----+---+---+---+---+---+---+
| | | | | | | | | |
| |'+'| | U | ? | ? | U | ? | ? |
| | | | | | | | | |
| +---+-----+---+---+---+---+---+---+
| | | | | | | | | |
| U |'-'| | ? | U | ? | ? | U | ? |
| | | | | | | | | |
| +---+-----+---+---+---+---+---+---+
| | | | | |
| |'x'| | ? | ? |
| | | | | |
+---+---+-----+---+---+---+---+---+---+
| | | | |
| ? | | | ? |
| | | | |
+-------+-----+-----------+---+---+---+
* 'B' = Bounded
* 'U' = Unbounded
* '?' = unknown boundedness
* '+' = positive sign
* '-' = negative sign
* 'x' = sign unknown
* All Bounded -> True
* 1 Unbounded and the rest Bounded -> False
* >1 Unbounded, all with same known sign -> False
* Any Unknown and unknown sign -> None
* Else -> None
When the signs are not the same you can have an undefined
result as in oo - oo, hence 'bounded' is also undefined.
"""
sign = -1 # sign of unknown or infinite
result = True
for arg in expr.args:
_bounded = ask(Q.finite(arg), assumptions)
if _bounded:
continue
s = ask(Q.extended_positive(arg), assumptions)
# if there has been more than one sign or if the sign of this arg
# is None and Bounded is None or there was already
# an unknown sign, return None
if sign != -1 and s != sign or \
s is None and None in (_bounded, sign):
return None
else:
sign = s
# once False, do not change
if result is not False:
result = _bounded
return result
@FinitePredicate.register(Mul) # type: ignore
def _(expr, assumptions):
"""
Return True if expr is bounded, False if not and None if unknown.
Truth Table:
+---+---+---+--------+
| | | | |
| | B | U | ? |
| | | | |
+---+---+---+---+----+
| | | | | |
| | | | s | /s |
| | | | | |
+---+---+---+---+----+
| | | | |
| B | B | U | ? |
| | | | |
+---+---+---+---+----+
| | | | | |
| U | | U | U | ? |
| | | | | |
+---+---+---+---+----+
| | | | |
| ? | | | ? |
| | | | |
+---+---+---+---+----+
* B = Bounded
* U = Unbounded
* ? = unknown boundedness
* s = signed (hence nonzero)
* /s = not signed
"""
result = True
for arg in expr.args:
_bounded = ask(Q.finite(arg), assumptions)
if _bounded:
continue
elif _bounded is None:
if result is None:
return None
if ask(Q.extended_nonzero(arg), assumptions) is None:
return None
if result is not False:
result = None
else:
result = False
return result
@FinitePredicate.register(Pow) # type: ignore
def _(expr, assumptions):
"""
* Unbounded ** NonZero -> Unbounded
* Bounded ** Bounded -> Bounded
* Abs()<=1 ** Positive -> Bounded
* Abs()>=1 ** Negative -> Bounded
* Otherwise unknown
"""
if expr.base == E:
return ask(Q.finite(expr.exp), assumptions)
base_bounded = ask(Q.finite(expr.base), assumptions)
exp_bounded = ask(Q.finite(expr.exp), assumptions)
if base_bounded is None and exp_bounded is None: # Common Case
return None
if base_bounded is False and ask(Q.extended_nonzero(expr.exp), assumptions):
return False
if base_bounded and exp_bounded:
return True
if (abs(expr.base) <= 1) == True and ask(Q.extended_positive(expr.exp), assumptions):
return True
if (abs(expr.base) >= 1) == True and ask(Q.extended_negative(expr.exp), assumptions):
return True
if (abs(expr.base) >= 1) == True and exp_bounded is False:
return False
return None
@FinitePredicate.register(exp) # type: ignore
def _(expr, assumptions):
return ask(Q.finite(expr.exp), assumptions)
@FinitePredicate.register(log) # type: ignore
def _(expr, assumptions):
# After complex -> finite fact is registered to new assumption system,
# querying Q.infinite may be removed.
if ask(Q.infinite(expr.args[0]), assumptions):
return False
return ask(~Q.zero(expr.args[0]), assumptions)
@FinitePredicate.register_many(cos, sin, Number, Pi, Exp1, GoldenRatio, # type: ignore
TribonacciConstant, ImaginaryUnit, sign)
def _(expr, assumptions):
return True
@FinitePredicate.register_many(ComplexInfinity, Infinity, NegativeInfinity) # type: ignore
def _(expr, assumptions):
return False
@FinitePredicate.register(NaN) # type: ignore
def _(expr, assumptions):
return None
# InfinitePredicate
@InfinitePredicate.register_many(ComplexInfinity, Infinity, NegativeInfinity) # type: ignore
def _(expr, assumptions):
return True
# PositiveInfinitePredicate
@PositiveInfinitePredicate.register(Infinity) # type: ignore
def _(expr, assumptions):
return True
@PositiveInfinitePredicate.register_many(NegativeInfinity, ComplexInfinity) # type: ignore
def _(expr, assumptions):
return False
# NegativeInfinitePredicate
@NegativeInfinitePredicate.register(NegativeInfinity) # type: ignore
def _(expr, assumptions):
return True
@NegativeInfinitePredicate.register_many(Infinity, ComplexInfinity) # type: ignore
def _(expr, assumptions):
return False
| en | 0.740584 | This module contains query handlers responsible for calculus queries: infinitesimal, finite, etc. # FinitePredicate # type: ignore Handles Symbol. # type: ignore Return True if expr is bounded, False if not and None if unknown. Truth Table: +-------+-----+-----------+-----------+ | | | | | | | B | U | ? | | | | | | +-------+-----+---+---+---+---+---+---+ | | | | | | | | | | | |'+'|'-'|'x'|'+'|'-'|'x'| | | | | | | | | | +-------+-----+---+---+---+---+---+---+ | | | | | | B | B | U | ? | | | | | | +---+---+-----+---+---+---+---+---+---+ | | | | | | | | | | | |'+'| | U | ? | ? | U | ? | ? | | | | | | | | | | | | +---+-----+---+---+---+---+---+---+ | | | | | | | | | | | U |'-'| | ? | U | ? | ? | U | ? | | | | | | | | | | | | +---+-----+---+---+---+---+---+---+ | | | | | | | |'x'| | ? | ? | | | | | | | +---+---+-----+---+---+---+---+---+---+ | | | | | | ? | | | ? | | | | | | +-------+-----+-----------+---+---+---+ * 'B' = Bounded * 'U' = Unbounded * '?' = unknown boundedness * '+' = positive sign * '-' = negative sign * 'x' = sign unknown * All Bounded -> True * 1 Unbounded and the rest Bounded -> False * >1 Unbounded, all with same known sign -> False * Any Unknown and unknown sign -> None * Else -> None When the signs are not the same you can have an undefined result as in oo - oo, hence 'bounded' is also undefined. # sign of unknown or infinite # if there has been more than one sign or if the sign of this arg # is None and Bounded is None or there was already # an unknown sign, return None # once False, do not change # type: ignore Return True if expr is bounded, False if not and None if unknown. Truth Table: +---+---+---+--------+ | | | | | | | B | U | ? | | | | | | +---+---+---+---+----+ | | | | | | | | | | s | /s | | | | | | | +---+---+---+---+----+ | | | | | | B | B | U | ? | | | | | | +---+---+---+---+----+ | | | | | | | U | | U | U | ? | | | | | | | +---+---+---+---+----+ | | | | | | ? | | | ? | | | | | | +---+---+---+---+----+ * B = Bounded * U = Unbounded * ? = unknown boundedness * s = signed (hence nonzero) * /s = not signed # type: ignore * Unbounded ** NonZero -> Unbounded * Bounded ** Bounded -> Bounded * Abs()<=1 ** Positive -> Bounded * Abs()>=1 ** Negative -> Bounded * Otherwise unknown # Common Case # type: ignore # type: ignore # After complex -> finite fact is registered to new assumption system, # querying Q.infinite may be removed. # type: ignore # type: ignore # type: ignore # InfinitePredicate # type: ignore # PositiveInfinitePredicate # type: ignore # type: ignore # NegativeInfinitePredicate # type: ignore # type: ignore | 2.396347 | 2 |
CircuitPython_SharpDisplay_Displayio/code.py | albinger/Adafruit_Learning_System_Guides | 0 | 6630319 | <reponame>albinger/Adafruit_Learning_System_Guides<filename>CircuitPython_SharpDisplay_Displayio/code.py<gh_stars>0
# SPDX-FileCopyrightText: 2020 <NAME> for Adafruit Industries
# SPDX-FileCopyrightText: 2020 <NAME> for Adafruit Industries
#
# SPDX-License-Identifier: MIT
import random
import time
import adafruit_display_text.label
from adafruit_bitmap_font import bitmap_font
import board
import displayio
import framebufferio
import sharpdisplay
## When making several changes, this ensures they aren't shown partially
## completed (except for the time to actually update the display)
class BatchDisplayUpdate:
def __init__(self, the_display):
self.the_display = the_display
self.auto_refresh = the_display.auto_refresh
def __enter__(self):
self.the_display.auto_refresh = False
def __exit__(self, unused1, unused2, unused3):
self.the_display.refresh()
self.the_display.auto_refresh = self.auto_refresh
# https://saytheirnames.com/
# real people, not just #hashtags
names = [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
]
# A function to choose "k" different items from the "population" list
# We'll use it to select the names to display
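# It works by copying the list, then repeatedly picking a random index and
# overwriting the picked slot with the last element before popping it off,
# so each name can be chosen at most once (selection without replacement).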
def sample(population, k):
population = population[:]
for _ in range(k):
j = random.randint(0, len(population)-1)
yield population[j]
population[j] = population[-1]
population.pop()
# Initialize the display, cleaning up after a display from the previous run
# if necessary
displayio.release_displays()
bus = board.SPI()
framebuffer = sharpdisplay.SharpMemoryFramebuffer(bus, board.D6, 400, 240)
display = framebufferio.FramebufferDisplay(framebuffer, auto_refresh = True)
# Load our font
font = bitmap_font.load_font("/GothamBlack-54.bdf")
# Create a Group for the BLM text
blm_group = displayio.Group()
display.show(blm_group)
# Create a 3 line set of text for BLM
blm_font = [None, None, None]
for line in range(3):
label = adafruit_display_text.label.Label(font, color=0xFFFFFF)
label.anchor_point = (0, 0)
label.anchored_position = (8, line*84+8)
blm_font[line] = label
blm_group.append(label)
# Get something on the display as soon as possible by loading
# specific glyphs.
font.load_glyphs(b"BLACK")
blm_font[0].text = "BLACK"
font.load_glyphs(b"ISEV")
blm_font[1].text = "LIVES"
font.load_glyphs(b"RMT")
blm_font[2].text = "MATTER"
font.load_glyphs(b"' DFGHJNOPQUWXYZabcdefghijklmnopqrstuvwxyz")
# Create a 2 line set of font text for names
names_font = [None, None]
for line in range(2):
label = adafruit_display_text.label.Label(font, color=0xFFFFFF)
# Center each line horizontally, position vertically
label.anchor_point = (0.5, 0)
label.anchored_position = (200, line*84+42)
names_font[line] = label
# Create a Group for the name text
name_group = displayio.Group()
for line in names_font:
name_group.append(line)
# Repeatedly show the BLM slogan and then 5 names.
while True:
display.show(blm_group)
# Show the BLM slogan
with BatchDisplayUpdate(display):
blm_font[1].color = blm_font[2].color = 0 # hide lines 2&3
time.sleep(1)
with BatchDisplayUpdate(display):
blm_font[1].color = 0xFFFFFF # show middle line
blm_font[0].color = blm_font[2].color = 0 # hide lines 1&3
time.sleep(1)
with BatchDisplayUpdate(display):
blm_font[2].color = 0xFFFFFF # show last line
blm_font[0].color = blm_font[1].color = 0 # hide lines 1&2
time.sleep(1)
with BatchDisplayUpdate(display):
for line in blm_font:
line.color = 0xFFFFFF
time.sleep(2)
# Show 5 names
display.show(name_group)
for name in sample(names, 5):
print(name)
lines = name.split(" ")
with BatchDisplayUpdate(display):
for i in range(2):
names_font[i].text = lines[i]
# Due to a bug in adafruit_display_text, we need to reestablish
# the position of the labels when updating them.
# Once https://github.com/adafruit/Adafruit_CircuitPython_Display_Text/issues/82
# has been resolved, this code will no longer be necessary (but
# will not be harmful either)
names_font[i].anchor_point = (0.5, 0)
names_font[i].anchored_position = (200, i*84+42)
time.sleep(5)
names_font[0].text = names_font[1].text = ""
| # SPDX-FileCopyrightText: 2020 <NAME> for Adafruit Industries
# SPDX-FileCopyrightText: 2020 <NAME> for Adafruit Industries
#
# SPDX-License-Identifier: MIT
import random
import time
import adafruit_display_text.label
from adafruit_bitmap_font import bitmap_font
import board
import displayio
import framebufferio
import sharpdisplay
## When making several changes, this ensures they aren't shown partially
## completed (except for the time to actually update the display)
class BatchDisplayUpdate:
def __init__(self, the_display):
self.the_display = the_display
self.auto_refresh = the_display.auto_refresh
def __enter__(self):
self.the_display.auto_refresh = False
def __exit__(self, unused1, unused2, unused3):
self.the_display.refresh()
self.the_display.auto_refresh = self.auto_refresh
# https://saytheirnames.com/
# real people, not just #hashtags
names = [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
]
# A function to choose "k" different items from the "population" list
# We'll use it to select the names to display
def sample(population, k):
population = population[:]
for _ in range(k):
j = random.randint(0, len(population)-1)
yield population[j]
population[j] = population[-1]
population.pop()
# Initialize the display, cleaning up after a display from the previous run
# if necessary
displayio.release_displays()
bus = board.SPI()
framebuffer = sharpdisplay.SharpMemoryFramebuffer(bus, board.D6, 400, 240)
display = framebufferio.FramebufferDisplay(framebuffer, auto_refresh = True)
# Load our font
font = bitmap_font.load_font("/GothamBlack-54.bdf")
# Create a Group for the BLM text
blm_group = displayio.Group()
display.show(blm_group)
# Create a 3 line set of text for BLM
blm_font = [None, None, None]
for line in range(3):
label = adafruit_display_text.label.Label(font, color=0xFFFFFF)
label.anchor_point = (0, 0)
label.anchored_position = (8, line*84+8)
blm_font[line] = label
blm_group.append(label)
# Get something on the display as soon as possible by loading
# specific glyphs.
font.load_glyphs(b"BLACK")
blm_font[0].text = "BLACK"
font.load_glyphs(b"ISEV")
blm_font[1].text = "LIVES"
font.load_glyphs(b"RMT")
blm_font[2].text = "MATTER"
font.load_glyphs(b"' DFGHJNOPQUWXYZabcdefghijklmnopqrstuvwxyz")
# Create a 2 line set of font text for names
names_font = [None, None]
for line in range(2):
label = adafruit_display_text.label.Label(font, color=0xFFFFFF)
# Center each line horizontally, position vertically
label.anchor_point = (0.5, 0)
label.anchored_position = (200, line*84+42)
names_font[line] = label
# Create a Group for the name text
name_group = displayio.Group()
for line in names_font:
name_group.append(line)
# Repeatedly show the BLM slogan and then 5 names.
while True:
display.show(blm_group)
# Show the BLM slogan
with BatchDisplayUpdate(display):
blm_font[1].color = blm_font[2].color = 0 # hide lines 2&3
time.sleep(1)
with BatchDisplayUpdate(display):
blm_font[1].color = 0xFFFFFF # show middle line
blm_font[0].color = blm_font[2].color = 0 # hide lines 1&3
time.sleep(1)
with BatchDisplayUpdate(display):
blm_font[2].color = 0xFFFFFF # show last line
blm_font[0].color = blm_font[1].color = 0 # hide lines 1&2
time.sleep(1)
with BatchDisplayUpdate(display):
for line in blm_font:
line.color = 0xFFFFFF
time.sleep(2)
# Show 5 names
display.show(name_group)
for name in sample(names, 5):
print(name)
lines = name.split(" ")
with BatchDisplayUpdate(display):
for i in range(2):
names_font[i].text = lines[i]
# Due to a bug in adafruit_display_text, we need to reestablish
# the position of the labels when updating them.
# Once https://github.com/adafruit/Adafruit_CircuitPython_Display_Text/issues/82
# has been resolved, this code will no longer be necessary (but
# will not be harmful either)
names_font[i].anchor_point = (0.5, 0)
names_font[i].anchored_position = (200, i*84+42)
time.sleep(5)
names_font[0].text = names_font[1].text = "" | en | 0.770719 | # SPDX-FileCopyrightText: 2020 <NAME> for Adafruit Industries # SPDX-FileCopyrightText: 2020 <NAME> for Adafruit Industries # # SPDX-License-Identifier: MIT ## When making several changes, this ensures they aren't shown partially ## completed (except for the time to actually update the display) # https://saytheirnames.com/ # real people, not just #hashtags # A function to choose "k" different items from the "population" list # We'll use it to select the names to display # Initialize the display, cleaning up after a display from the previous run # if necessary # Load our font # Create a Group for the BLM text # Create a 3 line set of text for BLM # Get something on the display as soon as possible by loading # specific glyphs. # Create a 2 line set of font text for names # Center each line horizontally, position vertically # Create a Group for the name text # Repeatedly show the BLM slogan and then 5 names. # Show the BLM slogan # hide lines 2&3 # show middle line # hide lines 1&3 # show last line # hide lines 1&2 # Show 5 names # Due to a bug in adafruit_display_text, we need to reestablish # the position of the labels when updating them. # Once https://github.com/adafruit/Adafruit_CircuitPython_Display_Text/issues/82 # has been resolved, this code will no longer be necessary (but # will not be harmful either) | 2.06433 | 2 |
ai_framework/ai_visualization/test_ai_demo.py | Scott-Morgan-Foundation/Highcliff-SDK | 0 | 6630320 | import time
import unittest
from ai_framework.ai_visualization import AIDemo
from ai_framework.ai_actions import ActionStatus
class TestAIDemo(unittest.TestCase):
@classmethod
def setUpClass(cls):
demo_markdown_file_folder = '/Users/jerry/OneDrive/Documents/Obsidian Vault/'
cls._ai_demo = AIDemo(demo_mode=True, markdown_folder=demo_markdown_file_folder)
try:
cls._ai_demo.reset_demo()
pass
except:
pass
def test_demo_goals(self):
test_goals = [
{"goal_state": True}
]
self._ai_demo.demo_goals(test_goals)
self.assertTrue(True)
def test_demo_diary_entry(self):
# make the first diary entry
goal = {"goal_state": True}
world_state_before = []
world_state_after = [
{"condition_one": True}
]
action_status = ActionStatus.SUCCESS
diary_entry = {
"my_goal": goal,
"the_world_state_before": world_state_before,
"my_plan": "plan",
"action_status": action_status,
"the_world_state_after": world_state_after
}
self._ai_demo.demo_diary_entry(diary_entry)
# make the second diary entry
world_state_before = [
{"condition_one": True}
]
world_state_after = [
{"condition_one": True},
{"condition_two": True}
]
diary_entry = {
"my_goal": goal,
"the_world_state_before": world_state_before,
"my_plan": "plan",
"action_status": action_status,
"the_world_state_after": world_state_after
}
self._ai_demo.demo_diary_entry(diary_entry)
# make the third diary entry
world_state_before = [
{"condition_one": True},
{"condition_two": True}
]
world_state_after = [
goal,
{"condition_one": True},
{"condition_two": True}
]
diary_entry = {
"my_goal": goal,
"the_world_state_before": world_state_before,
"my_plan": "plan",
"action_status": action_status,
"the_world_state_after": world_state_after
}
self._ai_demo.demo_diary_entry(diary_entry)
self.assertTrue(True)
def test_multiple_diary_entries(self):
for i in range(50):
self.test_demo_diary_entry()
self._ai_demo.retire_nodes()
time.sleep(5)
update = "[PlanStep(action=<__main__.AcmeTemperatureMonitor object at 0x7f76245a2a90>, services={})]"
self._ai_demo.update_demo_goals(update)
time.sleep(10)
if __name__ == '__main__':
unittest.main()
| import time
import unittest
from ai_framework.ai_visualization import AIDemo
from ai_framework.ai_actions import ActionStatus
class TestAIDemo(unittest.TestCase):
@classmethod
def setUpClass(cls):
demo_markdown_file_folder = '/Users/jerry/OneDrive/Documents/Obsidian Vault/'
cls._ai_demo = AIDemo(demo_mode=True, markdown_folder=demo_markdown_file_folder)
try:
cls._ai_demo.reset_demo()
pass
except:
pass
def test_demo_goals(self):
test_goals = [
{"goal_state": True}
]
self._ai_demo.demo_goals(test_goals)
self.assertTrue(True)
def test_demo_diary_entry(self):
# make the first diary entry
goal = {"goal_state": True}
world_state_before = []
world_state_after = [
{"condition_one": True}
]
action_status = ActionStatus.SUCCESS
diary_entry = {
"my_goal": goal,
"the_world_state_before": world_state_before,
"my_plan": "plan",
"action_status": action_status,
"the_world_state_after": world_state_after
}
self._ai_demo.demo_diary_entry(diary_entry)
# make the second diary entry
world_state_before = [
{"condition_one": True}
]
world_state_after = [
{"condition_one": True},
{"condition_two": True}
]
diary_entry = {
"my_goal": goal,
"the_world_state_before": world_state_before,
"my_plan": "plan",
"action_status": action_status,
"the_world_state_after": world_state_after
}
self._ai_demo.demo_diary_entry(diary_entry)
# make the third diary entry
world_state_before = [
{"condition_one": True},
{"condition_two": True}
]
world_state_after = [
goal,
{"condition_one": True},
{"condition_two": True}
]
diary_entry = {
"my_goal": goal,
"the_world_state_before": world_state_before,
"my_plan": "plan",
"action_status": action_status,
"the_world_state_after": world_state_after
}
self._ai_demo.demo_diary_entry(diary_entry)
self.assertTrue(True)
def test_multiple_diary_entries(self):
for i in range(50):
self.test_demo_diary_entry()
self._ai_demo.retire_nodes()
time.sleep(5)
update = "[PlanStep(action=<__main__.AcmeTemperatureMonitor object at 0x7f76245a2a90>, services={})]"
self._ai_demo.update_demo_goals(update)
time.sleep(10)
if __name__ == '__main__':
unittest.main()
| en | 0.860858 | # make the first diary entry # make the second diary entry # make the third diary entry | 2.576413 | 3 |
handsdown/processors/rst.py | vemel/handsdown | 47 | 6630321 | <filename>handsdown/processors/rst.py
"""
# reStructuredText Docstring Processor
Docstring processor for restructured text docstring format.
Supported features:
- `:param <name> <?type>: <?description>` directive is added to `Arguments` section
- `:type: <?description>` directive transformed to `Type: <type>`
- `:returns <?type>: <?description>` directive is added to `Returns` section
- `:rtype: <?description>` directive transformed to `Type: <type>`
- `:raises: <?description>` directive is added to `Raises` section
- `.. seealso::` directive is added to `See also` section
- `.. note::` directive is added to `Notes` section
- `.. warning:: <version>` directive is added to `Warnings` section
- `.. versionadded:: <version>` directive is formatted in Sphinx-style and added
to `Notes` section
- `.. versionchanged:: <version>` directive is formatted in Sphinx-style and added
to `Notes` section
- `.. deprecated::` directive is formatted in Sphinx-style and added to `Notes` section
- `.. code-block::` directive is formatted as Markdown Python codeblock
- `.. code-block:: <language>` directive is formatted as Markdown codeblock
- `.. math::` directive is formatted as Markdown Python codeblock
- `.. highlight::` directive is formatted as Markdown Python codeblock
- `.. highlight:: <language>` directive is formatted as Markdown codeblock
"""
import re
from handsdown.processors.base import BaseDocstringProcessor
class RSTDocstringProcessor(BaseDocstringProcessor):
"""
Docstring processor for restructured text docstring format.
"""
_section_re = re.compile(r"^\.\. (?P<section>\S+)::(?: (?P<body>.*))?")
line_re_map = (
# PEP 287 arg typed with description
(
re.compile(
r"^:(?P<section>param|parameter)\s+(?P<type>\w+)"
r"\s+(?P<param>\w+)\s*:\s*(?P<desc>.+)$"
),
"- `{param}` *{type}* - {desc}",
),
# PEP 287 arg with description
(
re.compile(r"^:(?P<section>param|parameter)\s+(?P<param>\w+)\s*:\s*(?P<desc>.+)$"),
"- `{param}` - {desc}",
),
# PEP 287 arg typed
(
re.compile(r"^:(?P<section>param|parameter)\s+(?P<type>\w+)\s+(?P<param>\w+)\s*:$"),
"- `{param}` *{type}*",
),
# PEP 287 arg
(
re.compile(r"^:(?P<section>param|parameter)\s+(?P<param>\w+)\s*:$"),
"- `{param}`",
),
# PEP 287 return
(re.compile(r":(?P<section>returns?)\s*:\s*(?P<desc>.*)?$"), "{desc}"),
# PEP 287 return typed
(re.compile(r":(?P<section>returns?)\s+(?P<type>[^:]+):$"), "Type: *{type}*"),
# PEP 287 return typed with description
(
re.compile(r":(?P<section>returns?)\s+(?P<type>[^:]+)\s*:\s*(?P<desc>.+)$"),
"Type: *{type}*\n{desc}",
),
# PEP 287 rtype
(re.compile(r":(?P<section>rtype)\s*:\s+(?P<type>[^:]+)$"), "Type: *{type}*"),
# PEP 287 raises typed
(re.compile(r":(?P<section>raises?)\s+(?P<type>\w+)\s*:$"), "- `{type}`"),
# PEP 287 raises typed with description
(
re.compile(r":(?P<section>raises?)\s+(?P<type>\w+)\s*:(?P<desc>.+)$"),
"- `{type}` - {desc}",
),
)
replace_map = {
":attr:`": "attribute `",
":data:`": "`",
":class:``~": "class ``",
":class:`~": "class `",
":class:`": "class `",
":exc:`": "exception `",
}
section_name_map = {
"raise": "Raises",
"raises": "Raises",
"rtype": "Returns",
"return": "Returns",
"returns": "Returns",
"param": "Arguments",
"parameter": "Arguments",
}
section_directive_map = {
"seealso": "See also",
"note": "Notes",
"warning": "Warnings",
}
version_directive_map = {
"versionadded": "Added",
"versionchanged": "Changed",
"deprecated": "Deprecated",
}
def _parse_regular_line(self, line: str) -> None:
section_match = self._section_re.match(line)
if section_match:
directive_name = section_match.groupdict()["section"]
body = section_match.groupdict()["body"]
if directive_name in self.section_directive_map:
self.current_section_name = self.section_directive_map[directive_name]
self._add_line("")
if directive_name in self.version_directive_map:
self.current_section_name = "Notes"
line = self.version_directive_map[directive_name]
if body:
line = "{} in version {}".format(line, body)
self._add_line("")
self._add_line(line)
return
if directive_name in ("code-block", "math", "highlight"):
self._in_codeblock = True
self._in_indent_codeblock = True
self._codeblock_indent = self._current_indent
self._codeblock_lines_count = 0
self._add_block()
self._add_line("")
self._add_line("```{}".format(body or "python"))
return
if body is None:
return
line = body
super()._parse_regular_line(line)
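# Illustrative sketch (not part of the original module): the second entry of
# ``line_re_map`` is the pattern for a plain ``:param name: description`` line,
# and its template renders the Markdown bullet used in the ``Arguments`` section.
#
#   pattern, template = RSTDocstringProcessor.line_re_map[1]
#   match = pattern.match(":param count: number of retries")
#   template.format(**match.groupdict())  # -> "- `count` - number of retries"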
| <filename>handsdown/processors/rst.py
"""
# reStructuredText Docstring Processor
Docstring processor for restructured text docstring format.
Supported features:
- `:param <name> <?type>: <?description>` directive is added to `Arguments` section
- `:type: <?description>` directive transformed to `Type: <type>`
- `:returns <?type>: <?description>` directive is added to `Returns` section
- `:rtype: <?description>` directive transformed to `Type: <type>`
- `:raises: <?description>` directive is added to `Raises` section
- `.. seealso::` directive is added to `See also` section
- `.. note::` directive is added to `Notes` section
- `.. warning:: <version>` directive is added to `Warnings` section
- `.. versionadded:: <version>` directive is formatted in Sphinx-style and added
to `Notes` section
- `.. versionchanged:: <version>` directive is formatted in Sphinx-style and added
to `Notes` section
- `.. deprecated::` directive is formatted in Sphinx-style and added to `Notes` section
- `.. code-block::` directive is formatted as Markdown Python codeblock
- `.. code-block:: <language>` directive is formatted as Markdown codeblock
- `.. math::` directive is formatted as Markdown Python codeblock
- `.. highlight::` directive is formatted as Markdown Python codeblock
- `.. highlight:: <language>` directive is formatted as Markdown codeblock
"""
import re
from handsdown.processors.base import BaseDocstringProcessor
class RSTDocstringProcessor(BaseDocstringProcessor):
"""
Docstring processor for restructured text docstring format.
"""
_section_re = re.compile(r"^\.\. (?P<section>\S+)::(?: (?P<body>.*))?")
line_re_map = (
# PEP 287 arg typed with description
(
re.compile(
r"^:(?P<section>param|parameter)\s+(?P<type>\w+)"
r"\s+(?P<param>\w+)\s*:\s*(?P<desc>.+)$"
),
"- `{param}` *{type}* - {desc}",
),
# PEP 287 arg with description
(
re.compile(r"^:(?P<section>param|parameter)\s+(?P<param>\w+)\s*:\s*(?P<desc>.+)$"),
"- `{param}` - {desc}",
),
# PEP 287 arg typed
(
re.compile(r"^:(?P<section>param|parameter)\s+(?P<type>\w+)\s+(?P<param>\w+)\s*:$"),
"- `{param}` *{type}*",
),
# PEP 287 arg
(
re.compile(r"^:(?P<section>param|parameter)\s+(?P<param>\w+)\s*:$"),
"- `{param}`",
),
# PEP 287 return
(re.compile(r":(?P<section>returns?)\s*:\s*(?P<desc>.*)?$"), "{desc}"),
# PEP 287 return typed
(re.compile(r":(?P<section>returns?)\s+(?P<type>[^:]+):$"), "Type: *{type}*"),
# PEP 287 return typed with description
(
re.compile(r":(?P<section>returns?)\s+(?P<type>[^:]+)\s*:\s*(?P<desc>.+)$"),
"Type: *{type}*\n{desc}",
),
# PEP 287 rtype
(re.compile(r":(?P<section>rtype)\s*:\s+(?P<type>[^:]+)$"), "Type: *{type}*"),
# PEP 287 raises typed
(re.compile(r":(?P<section>raises?)\s+(?P<type>\w+)\s*:$"), "- `{type}`"),
# PEP 287 raises typed with description
(
re.compile(r":(?P<section>raises?)\s+(?P<type>\w+)\s*:(?P<desc>.+)$"),
"- `{type}` - {desc}",
),
)
replace_map = {
":attr:`": "attribute `",
":data:`": "`",
":class:``~": "class ``",
":class:`~": "class `",
":class:`": "class `",
":exc:`": "exception `",
}
section_name_map = {
"raise": "Raises",
"raises": "Raises",
"rtype": "Returns",
"return": "Returns",
"returns": "Returns",
"param": "Arguments",
"parameter": "Arguments",
}
section_directive_map = {
"seealso": "See also",
"note": "Notes",
"warning": "Warnings",
}
version_directive_map = {
"versionadded": "Added",
"versionchanged": "Changed",
"deprecated": "Deprecated",
}
def _parse_regular_line(self, line: str) -> None:
section_match = self._section_re.match(line)
if section_match:
directive_name = section_match.groupdict()["section"]
body = section_match.groupdict()["body"]
if directive_name in self.section_directive_map:
self.current_section_name = self.section_directive_map[directive_name]
self._add_line("")
if directive_name in self.version_directive_map:
self.current_section_name = "Notes"
line = self.version_directive_map[directive_name]
if body:
line = "{} in version {}".format(line, body)
self._add_line("")
self._add_line(line)
return
if directive_name in ("code-block", "math", "highlight"):
self._in_codeblock = True
self._in_indent_codeblock = True
self._codeblock_indent = self._current_indent
self._codeblock_lines_count = 0
self._add_block()
self._add_line("")
self._add_line("```{}".format(body or "python"))
return
if body is None:
return
line = body
super()._parse_regular_line(line)
| en | 0.763137 | # reStructuredText Docstring Processor Docstring processor for restructured text docstring format. Supported features: - `:param <name> <?type>: <?description>` directive is added to `Arguments` section - `:type: <?description>` directive transformed to `Type: <type>` - `:returns <?type>: <?description>` directive is added to `Returns` section - `:rtype: <?description>` directive transformed to `Type: <type>` - `:raises: <?description>` directive is added to `Raises` section - `.. seealso::` directive is added to `See also` section - `.. note::` directive is added to `Notes` section - `.. warning:: <version>` directive is added to `Warnings` section - `.. versionadded:: <version>` directive is formatted in Sphinx-style and added to `Notes` section - `.. versionchanged:: <version>` directive is formatted in Sphinx-style and added to `Notes` section - `.. deprecated::` directive is formatted in Sphinx-style and added to `Notes` section - `.. code-block::` directive is formatted as Markdown Python codeblock - `.. code-block:: <language>` directive is formatted as Markdown codeblock - `.. math::` directive is formatted as Markdown Python codeblock - `.. highlight::` directive is formatted as Markdown Python codeblock - `.. highlight:: <language>` directive is formatted as Markdown codeblock Docstring processor for restructured text docstring format. # PEP 287 arg typed with description # PEP 287 arg with description # PEP 287 arg typed # PEP 287 arg # PEP 287 return # PEP 287 return typed # PEP 287 return typed with description # PEP 287 rtype # PEP 287 raises typed # PEP 287 raises typed with description | 2.28147 | 2 |
figures/pipeline/loaders.py | groovetch/edx-figures | 43 | 6630322 | <reponame>groovetch/edx-figures
"""
"""
from __future__ import absolute_import
from figures.models import LearnerCourseGradeMetrics
def save_learner_course_grades(site, date_for, course_enrollment, course_progress_details):
"""
``course_progress_details`` data are the ``course_progress_details`` from the
``LearnerCourseGrades.course_progress method``
"""
# details = course_progress['course_progress_details']
data = dict(
points_possible=course_progress_details['points_possible'],
points_earned=course_progress_details['points_earned'],
sections_worked=course_progress_details['sections_worked'],
sections_possible=course_progress_details['count']
)
obj, created = LearnerCourseGradeMetrics.objects.update_or_create(
site=site,
user=course_enrollment.user,
course_id=str(course_enrollment.course_id),
date_for=date_for,
defaults=data)
return obj, created
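# Hypothetical example payload (not from the original module), matching the keys
# the function reads above; site, date_for and course_enrollment would come from
# the surrounding Figures pipeline:
#
#   details = {
#       'points_possible': 100.0,
#       'points_earned': 62.5,
#       'sections_worked': 5,
#       'count': 8,
#   }
#   obj, created = save_learner_course_grades(site, date_for, enrollment, details)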
| """
"""
from __future__ import absolute_import
from figures.models import LearnerCourseGradeMetrics
def save_learner_course_grades(site, date_for, course_enrollment, course_progress_details):
"""
``course_progress_details`` data are the ``course_progress_details`` from the
``LearnerCourseGrades.course_progress method``
"""
# details = course_progress['course_progress_details']
data = dict(
points_possible=course_progress_details['points_possible'],
points_earned=course_progress_details['points_earned'],
sections_worked=course_progress_details['sections_worked'],
sections_possible=course_progress_details['count']
)
obj, created = LearnerCourseGradeMetrics.objects.update_or_create(
site=site,
user=course_enrollment.user,
course_id=str(course_enrollment.course_id),
date_for=date_for,
defaults=data)
return obj, created | en | 0.536191 | ``course_progress_details`` data are the ``course_progress_details`` from the ``LearnerCourseGrades.course_progress method`` # details = course_progress['course_progress_details'] | 2.486479 | 2 |
Classification K-NN/Developing a K-NN (Nearest Neighbors) Classification Model.py | csitedexperts/DSML_MadeEasy | 1 | 6630323 |
# Developing a K-NN (Nearest Neighbors) Classification Model
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('CompSurvey_Product1.csv')
X = dataset.iloc[:, [1, 7]].values
y = dataset.iloc[:, 8].values
# Splitting the dataset into the Training set and Test set
#from sklearn import cross_validation as cv
## cross_validation is deprecated since version 0.18. This module will be removed in 0.20. Use sklearn.model_selection.train_test_split instead.
## Source: https://stackoverflow.com/questions/53978901/importerror-cannot-import-name-cross-validation-from-sklearn
from sklearn.model_selection import train_test_split
# Splitting X and y in a single call keeps the feature rows and labels aligned
# Training => 80%, Test => 20%
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .2, random_state = 0)
## Just keeping backup copies
X_train0, X_test0 = X_train, X_test
y_train0, y_test0 = y_train, y_test
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
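# K-NN is distance-based: the Annual Salary feature spans a much larger numeric
# range than Education in Year, so without standardization it would dominate the
# Euclidean (Minkowski p=2) distance used by the classifier below.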
# Fitting K-NN to the Training set
# The Classifier codes go here
from sklearn.neighbors import KNeighborsClassifier
classifier = KNeighborsClassifier(n_neighbors = 3, metric = 'minkowski', p = 2)
#classifier = KNeighborsClassifier(n_neighbors = 6)
classifier.fit(X_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
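# In scikit-learn's convention the rows of cm are the true classes and the
# columns the predicted classes, so for a binary target (classes in sorted
# order) cm reads [[TN, FP], [FN, TP]].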
# Visualising the Training set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('K-NN Plot for the Training dataset')
plt.xlabel('Education in Year')
plt.ylabel('Annual Salary')
plt.legend()
plt.grid()
plt.show()
# Visualising the Test set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('K-NN Plot for the Test dataset')
plt.xlabel('Education in Year')
plt.ylabel('Annual Salary')
plt.legend()
plt.grid()
plt.show()
|
# Developing a K-NN (Nearest Neighbors) Classification Model
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('CompSurvey_Product1.csv')
X = dataset.iloc[:, [1, 7]].values
y = dataset.iloc[:, 8].values
# Splitting the dataset into the Training set and Test set
#from sklearn import cross_validation as cv
## cross_validation is deprecated since version 0.18. This module will be removed in 0.20. Use sklearn.model_selection.train_test_split instead.
## Source: https://stackoverflow.com/questions/53978901/importerror-cannot-import-name-cross-validation-from-sklearn
from sklearn.model_selection import train_test_split
# Splitting X and y in a single call keeps the feature rows and labels aligned
# Training => 80%, Test => 20%
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .2, random_state = 0)
## Just keeping backup copies
X_train0, X_test0 = X_train, X_test
y_train0, y_test0 = y_train, y_test
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Fitting K-NN to the Training set
# The Classifier codes go here
from sklearn.neighbors import KNeighborsClassifier
classifier = KNeighborsClassifier(n_neighbors = 3, metric = 'minkowski', p = 2)
#classifier = KNeighborsClassifier(n_neighbors = 6)
classifier.fit(X_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
# Visualising the Training set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('K-NN Plot for the Training dataset')
plt.xlabel('Education in Year')
plt.ylabel('Annual Salary')
plt.legend()
plt.grid()
plt.show()
# Visualising the Test set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('K-NN Plot for the Test dataset')
plt.xlabel('Education in Year')
plt.ylabel('Annual Salary')
plt.legend()
plt.grid()
plt.show()
| en | 0.737028 | # Developing a K-NN (Nearest Neighbors) Classification Model # Importing the libraries # Importing the dataset # Splitting the dataset into the Training set and Test set # Splitting the dataset4 into the Training set and Test set #from sklearn import cross_validation as cv ## cross_validation is deprecated since version 0.18. This module will be removed in 0.20. Use sklearn.model_selection.train_test_split instead. ## Source: https://stackoverflow.com/questions/53978901/importerror-cannot-import-name-cross-validation-from-sklearn # Splitting the data sets into Training and Test sets # Training => 80% ## Just keelping backup copies # Feature Scaling # Fitting K-NN to the Training set # The Classifier codes go here #classifier = KNeighborsClassifier(n_neighbors = 6) # Predicting the Test set results # Making the Confusion Matrix # Visualising the Training set results # Visualising the Test set results | 3.4564 | 3 |
huddlebot/settings/production.py | Hipo/huddlebot | 0 | 6630324 | <filename>huddlebot/settings/production.py<gh_stars>0
from huddlebot.settings.base import * # noqa
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
SECRET_KEY = secrets.SECRET_KEY
DEBUG = False
SERVER_URL = "http://huddlebot.hack.hipolabs.com"
ALLOWED_HOSTS = [
'localhost',
'127.0.0.1',
'huddlebot.hack.hipolabs.com',
]
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': "huddlebot",
'USER': "huddlebot",
'PASSWORD': secrets.POSTGRES_PASSWORD,
'HOST': "hackdb.cmq91upkqjfq.us-east-1.rds.amazonaws.com",
'PORT': '5432',
}
}
sentry_sdk.init(
dsn=secrets.SENTRY_DSN,
integrations=[DjangoIntegration()]
)
| <filename>huddlebot/settings/production.py<gh_stars>0
from huddlebot.settings.base import * # noqa
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
SECRET_KEY = secrets.SECRET_KEY
DEBUG = False
SERVER_URL = "http://huddlebot.hack.hipolabs.com"
ALLOWED_HOSTS = [
'localhost',
'127.0.0.1',
'huddlebot.hack.hipolabs.com',
]
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': "huddlebot",
'USER': "huddlebot",
'PASSWORD': secrets.POSTGRES_PASSWORD,
'HOST': "hackdb.cmq91upkqjfq.us-east-1.rds.amazonaws.com",
'PORT': '5432',
}
}
sentry_sdk.init(
dsn=secrets.SENTRY_DSN,
integrations=[DjangoIntegration()]
)
| none | 1 | 1.542613 | 2 |
|
load_inputs/cobalt_spin_resolved.py | DanielaZahn/TTM_inputs_from_DFT_results | 1 | 6630325 | import scipy.constants as constants
import numpy as np
import re
# required constants
HARTREE_TO_EV = constants.physical_constants['joule-electron volt relationship'][0]\
/constants.physical_constants['joule-hartree relationship'][0] # conversion factor from Hartree to eV
AVOGADROS_NUMBER = constants.Avogadro # particles/mol
# material-specific data
# read electronic DOS (preferably per unit cell, see unit cell volume)
# units here: Hartree, states per Hartree per unit cell (is converted to eV below)
e_dos_majo = np.loadtxt('inputs/Co_spinResolved_eDOS_majority.txt')
e_dos_mino = np.loadtxt('inputs/Co_spinResolved_eDOS_minority.txt')
# unit cell volume (or, if e_dos and v_dos are not given per unit cell, corresponding other volume)
# Here, the unit cell volume is calculated from the molar volume.
# Cobalt has a hcp structure and thus two atoms per (primitive) unit cell.
# Therefore, a factor of 2 is necessary here to get the correct unit cell volume.
molar_volume = 6.67e-6 # m^3/mol
unit_cell_volume = molar_volume/AVOGADROS_NUMBER*2 # m^3 per unit cell
# IMPORTANT: The volume of the variable "unit_cell_volume" has to match the units of the densities of states.
# Otherwise the heat capacities and G_ep will be WRONG!! (by a factor)
# For example, here e_dos and v_dos are in units of states per eV PER UNIT CELL and the corresponding volume
# is the unit cell volume.
# read Fermi energy (which is the same for both spin types of course)
file = open('inputs/Co_spinResolved_eDOS_majority.txt')
alltext = file.read()
file.close()
# find the line of the text file in which the Fermi energy is written
index1 = alltext.find('Fermi energy')
index2 = alltext[index1:].find('\n')
# find the number in this line (which is the Fermi energy)
fermi_energy = float(np.squeeze(re.findall('[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?',\
alltext[index1:index1+index2])))
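# Illustrative example (the exact header format is an assumption): if the DOS file
# contains a line such as "Fermi energy:   0.21754", the regex above extracts
# "0.21754"; fermi_energy therefore stays in Hartree until the conversion below.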
# convert e_dos and fermi_energy from Hartree to eV
e_dos_majo[:,0] = e_dos_majo[:,0]*HARTREE_TO_EV # energy needs to be in eV
e_dos_mino[:,0] = e_dos_mino[:,0]*HARTREE_TO_EV # energy needs to be in eV
fermi_energy=fermi_energy*HARTREE_TO_EV
e_dos_majo[:,1] = e_dos_majo[:,1]/HARTREE_TO_EV # DOS needs to be in states per eV
e_dos_mino[:,1] = e_dos_mino[:,1]/HARTREE_TO_EV # DOS needs to be in states per eV
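# Sanity check (added note): HARTREE_TO_EV evaluates to roughly 27.2114 eV per Hartree,
# so energies scale up and densities of states scale down by that same factor.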
# load Eliashberg function
eliashberg = np.loadtxt('inputs/Co_spinResolved_EliashbergFunction_majorityAndMinority.txt')
# convert energy from Hartree to eV
eliashberg[:,0] = eliashberg[:,0]*HARTREE_TO_EV # energy needs to be in eV
# the second column (the Eliashberg function) has no units and therefore doesn't need to be converted
# split into majority and minority Eliashberg function
eliashberg_majo = eliashberg[:int(np.shape(eliashberg)[0]/2),:]
eliashberg_mino = eliashberg[int(np.shape(eliashberg)[0]/2):,:]
del eliashberg
# load phonon density of states
v_dos=np.loadtxt('inputs/Co_spinResolved_vDOS.txt')
# convert energy from Hartree to eV
v_dos[:,0] = v_dos[:,0]*HARTREE_TO_EV # energy needs to be in eV
v_dos[:,1] = v_dos[:,1]/HARTREE_TO_EV # DOS needs to be in states per eV
v_dos = v_dos[:,0:2]
# optional double-check: integrating the phonon DOS has to yield 3 times the atoms per unit cell
# (here: 2 atoms per unit cell / integral has to be 6)
#print(np.trapz(v_dos[:,1],v_dos[:,0]))
print('Material-specific data for cobalt has been loaded.')
| import scipy.constants as constants
import numpy as np
import re
# required constants
HARTREE_TO_EV = constants.physical_constants['joule-electron volt relationship'][0]\
/constants.physical_constants['joule-hartree relationship'][0] # conversion factor from Hartree to eV
AVOGADROS_NUMBER = constants.Avogadro # particles/mol
# material-specific data
# read electronic DOS (preferably per unit cell, see unit cell volume)
# units here: Hartree, states per Hartree per unit cell (is converted to eV below)
e_dos_majo = np.loadtxt('inputs/Co_spinResolved_eDOS_majority.txt')
e_dos_mino = np.loadtxt('inputs/Co_spinResolved_eDOS_minority.txt')
# unit cell volume (or, if e_dos and v_dos are not given per unit cell, corresponding other volume)
# Here, the unit cell volume is calculated from the molar volume.
# Cobalt has a hcp structure and thus two atoms per (primitive) unit cell.
# Therefore, a factor of 2 is necessary here to get the correct unit cell volume.
molar_volume = 6.67e-6 # m^3/mol
unit_cell_volume = molar_volume/AVOGADROS_NUMBER*2 # m^3 per unit cell
# IMPORTANT: The volume of the variable "unit_cell_volume" has to match the units of the densities of states.
# Otherwise the heat capacities and G_ep will be WRONG!! (by a factor)
# For example, here e_dos and v_dos are in units of states per eV PER UNIT CELL and the corresponding volume
# is the unit cell volume.
# read Fermi energy (which is the same for both spin types of course)
file = open('inputs/Co_spinResolved_eDOS_majority.txt')
alltext = file.read()
file.close()
# find the line of the text file in which the Fermi energy is written
index1 = alltext.find('Fermi energy')
index2 = alltext[index1:].find('\n')
# find the number in this line (which is the Fermi energy)
fermi_energy = float(np.squeeze(re.findall('[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?',\
alltext[index1:index1+index2])))
# convert e_dos and fermi_energy from Hartree to eV
e_dos_majo[:,0] = e_dos_majo[:,0]*HARTREE_TO_EV # energy needs to be in eV
e_dos_mino[:,0] = e_dos_mino[:,0]*HARTREE_TO_EV # energy needs to be in eV
fermi_energy=fermi_energy*HARTREE_TO_EV
e_dos_majo[:,1] = e_dos_majo[:,1]/HARTREE_TO_EV # DOS needs to be in states per eV
e_dos_mino[:,1] = e_dos_mino[:,1]/HARTREE_TO_EV # DOS needs to be in states per eV
# load Eliashberg function
eliashberg = np.loadtxt('inputs/Co_spinResolved_EliashbergFunction_majorityAndMinority.txt')
# convert energy from Hartree to eV
eliashberg[:,0] = eliashberg[:,0]*HARTREE_TO_EV # energy needs to be in eV
# the second column (the Eliashberg function) has no units and therefore doesn't need to be converted
# split into majority and minority Eliashberg function
eliashberg_majo = eliashberg[:int(np.shape(eliashberg)[0]/2),:]
eliashberg_mino = eliashberg[int(np.shape(eliashberg)[0]/2):,:]
del eliashberg
# load phonon density of states
v_dos=np.loadtxt('inputs/Co_spinResolved_vDOS.txt')
# convert energy from Hartree to eV
v_dos[:,0] = v_dos[:,0]*HARTREE_TO_EV # energy needs to be in eV
v_dos[:,1] = v_dos[:,1]/HARTREE_TO_EV # DOS needs to be in states per eV
v_dos = v_dos[:,0:2]
# optional double-check: integrating the phonon DOS has to yield 3 times the atoms per unit cell
# (here: 2 atoms per unit cell / integral has to be 6)
#print(np.trapz(v_dos[:,1],v_dos[:,0]))
print('Material-specific data for cobalt has been loaded.')
| en | 0.823925 | # required constants # conversion factor from Hartree to eV # particles/mol # material-specific data # read electronic DOS (preferably per unit cell, see unit cell volume) # units here: Hartree, states per Hartree per unit cell (is converted to eV below) # unit cell volume (or, if e_dos and v_dos are not given per unit cell, corresponding other volume) # Here, the unit cell volume is calculated from the molar volume. # Cobalt has a hcp structure and thus two atoms per (primitive) unit cell. # Therefore, a factor of 2 is necessary here to get the correct unit cell volume. # m^3/mol # m^3 per unit cell # IMPORTANT: The volume of the variable "unit_cell_volume" has to match the units of the densities of states. # Otherwise the heat capacities and G_ep will be WRONG!! (by a factor) # For example, here e_dos and v_dos are in units of states per eV PER UNIT CELL and the corresponding volume # is the unit cell volume. # read Fermi energy (which is the same for both spin types of course) # find the line of the text file in which the Fermi energy is written # find the number in this line (which is the Fermi energy) # convert e_dos and fermi_energy from Hartree to eV # energy needs to be in eV # energy needs to be in eV # DOS needs to be in states per eV # DOS needs to be in states per eV # load Eliashberg function # convert energy from Hartree to eV # energy needs to be in eV # the second column (the Eliashberg function) has no units and therefore doesn't need to be converted # split into majority and minority Eliashberg function # load phonon density of states # convert energy from Hartree to eV # energy needs to be in eV # DOS needs to be in states per eV # optional double-check: integrating the phonon DOS has to yield 3 times the atoms per unit cell # (here: 2 atoms per unit cell / integral has to be 6) #print(np.trapz(v_dos[:,1],v_dos[:,0])) | 2.781754 | 3 |
scripts/tests/sample_module/sample3.py | pv/pydocweb | 2 | 6630326 | <filename>scripts/tests/sample_module/sample3.py
func0 = lambda x: x
func0.__name__ = "func0"
class Cls4(object):
func1 = lambda x: x
func1.__name__ = "func1"
func2 = lambda x: x
func2.__name__ = "func2"
| <filename>scripts/tests/sample_module/sample3.py
func0 = lambda x: x
func0.__name__ = "func0"
class Cls4(object):
func1 = lambda x: x
func1.__name__ = "func1"
func2 = lambda x: x
func2.__name__ = "func2"
| none | 1 | 2.026446 | 2 |
|
GenomicConsensus/__init__.py | PacificBiosciences/GenomicConsensus | 96 | 6630327 | <reponame>PacificBiosciences/GenomicConsensus
# Author: <NAME>, <NAME>
from __future__ import absolute_import, division, print_function
__VERSION__ = '2.3.3' # don't forget to update setup.py and doc/conf.py too
| # Author: <NAME>, <NAME>
from __future__ import absolute_import, division, print_function
__VERSION__ = '2.3.3' # don't forget to update setup.py and doc/conf.py too | en | 0.915611 | # Author: <NAME>, <NAME> # don't forget to update setup.py and doc/conf.py too | 0.934117 | 1 |
tools/dns-sync/tests/test_audit_log_loop.py | ruchirjain86/professional-services | 2,116 | 6630328 | <filename>tools/dns-sync/tests/test_audit_log_loop.py
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import datetime
import json
import logging
import unittest
from google.cloud.datastore import Entity
from google.cloud.datastore.client import Client
import mock
import webapp2
import common
from dns_sync import api
from dns_sync import audit_log
from dns_sync import auth
from dns_sync import main
class TestHandlers(unittest.TestCase):
def setUp(self):
logging.basicConfig(level=logging.DEBUG)
def test_audit_log_loop_start(self):
"""Test that we can start the audit loop."""
url = '/start_audit_log_loop'
request = webapp2.Request.blank(url)
request.method = 'POST'
request.headers['content-type'] = 'application/json'
data_files = common.read_data_files([
'tests/data/monitoring.v3.json',
'tests/data/audit-log-start-metric-list.json',
'tests/data/audit-log-start-metric-create.json',
'tests/data/compute.v1.json',
'tests/data/audit-log-resource-get.json',
'tests/data/audit-log-start-resource-insert.json'
])
success = {'status': '200'}
not_found = {'status': '404'}
metrics_mock_http = common.LoggingHttpMockSequence(
[(success, '{"access_token":"token","expires_in":3600}'),
(success, data_files['monitoring.v3.json']),
(success, data_files['audit-log-start-metric-list.json']),
(success, data_files['audit-log-start-metric-create.json'])])
api.Clients.metrics.http = metrics_mock_http
api.Clients.metrics.cache_discovery = False
compute_mock_http = common.LoggingHttpMockSequence(
[(success, '{"access_token":"token","expires_in":3600}'),
(success, data_files['compute.v1.json']),
(not_found, ''),
(success, data_files['audit-log-start-resource-insert.json'])])
api.Clients.compute.http = compute_mock_http
api.Clients.compute.cache_discovery = False
mock_datastore = mock.Mock(spec=Client)
mock_datastore.project = 'project-1'
entity = mock.MagicMock(spec=Entity)
mock_datastore.get.side_effect = [entity, common.config_entity()]
api.CLIENTS.datastore = mock_datastore
dns_sync_app = main.DnsSyncApplication()
response = request.get_response(dns_sync_app)
self.assertEquals(response.status_int, 200)
def test_audit_log_loop_stop(self):
"""Test we can stop the audit loop."""
url = '/stop_audit_log_loop'
request = webapp2.Request.blank(url)
request.method = 'POST'
request.headers['content-type'] = 'application/json'
data_files = common.read_data_files([
'tests/data/monitoring.v3.json',
'tests/data/audit-log-stop-metric-list.json',
'tests/data/compute.v1.json',
'tests/data/audit-log-resource-get.json',
'tests/data/audit-log-stop-resource-delete.json'
])
success = {'status': '200'}
metrics_mock_http = common.LoggingHttpMockSequence(
[(success, '{"access_token":"token","expires_in":3600}'),
(success, data_files['monitoring.v3.json']),
(success, data_files['audit-log-stop-metric-list.json'])])
api.Clients.metrics.http = metrics_mock_http
api.Clients.metrics.cache_discovery = False
compute_mock_http = common.LoggingHttpMockSequence(
[(success, '{"access_token":"token","expires_in":3600}'),
(success, data_files['compute.v1.json']),
(success, data_files['audit-log-resource-get.json']),
(success, data_files['audit-log-stop-resource-delete.json'])])
api.Clients.compute.http = compute_mock_http
api.Clients.compute.cache_discovery = False
mock_datastore = mock.Mock(spec=Client)
mock_datastore.project = 'project-1'
entity = dict()
mock_datastore.get.side_effect = [entity, common.config_entity()]
api.CLIENTS.datastore = mock_datastore
dns_sync_app = main.DnsSyncApplication()
response = request.get_response(dns_sync_app)
self.assertEquals(response.status_int, 200)
def test_audit_log_loop_event(self):
"""Test receiving an audit loop event."""
url = '/push_notification?secret={}'.format('my-test-secret-key')
request = webapp2.Request.blank(url)
request.method = 'POST'
request.headers['content-type'] = 'application/json'
data_files = common.read_data_files([
'tests/data/audit-log-loop-message.json',
'tests/data/compute.v1.json',
'tests/data/audit-log-loop-compute-operation.json',
'tests/data/audit-log-resource-get.json',
'tests/data/monitoring.v3.json',
'tests/data/dns.v1.json',
'tests/data/dns-zone-response.json',
'tests/data/instance-creation-dns-pending-operation.json',
'tests/data/instance-creation-dns-done-operation.json',
'tests/data/instance-creation-dns-record-set-response.json'
])
data = base64.encodestring(data_files[
'audit-log-loop-message.json'])
post = {
'message': {
'data': data,
'attributes': {
'compute.googleapis.com/resource_id':
'18082097775580039429',
'compute.googleapis.com/resource_name': 'dns-sync-test',
'compute.googleapis.com/resource_type': 'instance',
'compute.googleapis.com/resource_zone': 'us-central1-a',
'logging.googleapis.com/timestamp':
'2016-04-03T23: 06: 31.17867Z'
},
'message_id': '29119446125187'
},
'subscription': 'projects/project-1/subscriptions/gae-push'
}
request.body = json.dumps(post)
success = {'status': '200'}
compute_mock_http = common.LoggingHttpMockSequence(
[(success, '{"access_token":"token","expires_in":3600}'),
(success, data_files['compute.v1.json']),
(success, data_files['audit-log-loop-compute-operation.json']),
(success, data_files['audit-log-resource-get.json']),
# stop instance
(success, data_files['audit-log-loop-compute-operation.json'])])
api.Clients.compute.http = compute_mock_http
api.Clients.compute.cache_discovery = False
metrics_mock_http = common.LoggingHttpMockSequence(
[(success, '{"access_token":"token","expires_in":3600}'),
(success, data_files['monitoring.v3.json']),
# timeseries.write
(success, '{}')])
api.Clients.metrics.http = metrics_mock_http
api.Clients.metrics.cache_discovery = False
mock_dns = mock.MagicMock()
mock_dns.changes().get().execute.return_value = {'status': 'done'}
api.Clients.dns = mock_dns
mock_datastore = mock.Mock(spec=Client)
mock_datastore.project = 'project-1'
now = audit_log.utcnow()
last_call_time = now - datetime.timedelta(0, 30)
entity = Entity()
entity.update({'running': True,
'last_call': 'start',
'last_call_time': last_call_time,
'last_call_event_received': False})
mock_datastore.get.side_effect = [common.config_entity(), entity]
api.CLIENTS.datastore = mock_datastore
dns_sync_app = main.DnsSyncApplication()
auth.AdminRequestHandler.SKIP_AUTHENTICATION = True
# Get a response for that request.
response = request.get_response(dns_sync_app)
self.assertEquals(response.status_int, 200)
| <filename>tools/dns-sync/tests/test_audit_log_loop.py
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import datetime
import json
import logging
import unittest
from google.cloud.datastore import Entity
from google.cloud.datastore.client import Client
import mock
import webapp2
import common
from dns_sync import api
from dns_sync import audit_log
from dns_sync import auth
from dns_sync import main
class TestHandlers(unittest.TestCase):
def setUp(self):
logging.basicConfig(level=logging.DEBUG)
def test_audit_log_loop_start(self):
"""Test that we can start the audit loop."""
url = '/start_audit_log_loop'
request = webapp2.Request.blank(url)
request.method = 'POST'
request.headers['content-type'] = 'application/json'
data_files = common.read_data_files([
'tests/data/monitoring.v3.json',
'tests/data/audit-log-start-metric-list.json',
'tests/data/audit-log-start-metric-create.json',
'tests/data/compute.v1.json',
'tests/data/audit-log-resource-get.json',
'tests/data/audit-log-start-resource-insert.json'
])
success = {'status': '200'}
not_found = {'status': '404'}
metrics_mock_http = common.LoggingHttpMockSequence(
[(success, '{"access_token":"token","expires_in":3600}'),
(success, data_files['monitoring.v3.json']),
(success, data_files['audit-log-start-metric-list.json']),
(success, data_files['audit-log-start-metric-create.json'])])
api.Clients.metrics.http = metrics_mock_http
api.Clients.metrics.cache_discovery = False
compute_mock_http = common.LoggingHttpMockSequence(
[(success, '{"access_token":"token","expires_in":3600}'),
(success, data_files['compute.v1.json']),
(not_found, ''),
(success, data_files['audit-log-start-resource-insert.json'])])
api.Clients.compute.http = compute_mock_http
api.Clients.compute.cache_discovery = False
mock_datastore = mock.Mock(spec=Client)
mock_datastore.project = 'project-1'
entity = mock.MagicMock(spec=Entity)
mock_datastore.get.side_effect = [entity, common.config_entity()]
api.CLIENTS.datastore = mock_datastore
dns_sync_app = main.DnsSyncApplication()
response = request.get_response(dns_sync_app)
self.assertEquals(response.status_int, 200)
def test_audit_log_loop_stop(self):
"""Test we can stop the audit loop."""
url = '/stop_audit_log_loop'
request = webapp2.Request.blank(url)
request.method = 'POST'
request.headers['content-type'] = 'application/json'
data_files = common.read_data_files([
'tests/data/monitoring.v3.json',
'tests/data/audit-log-stop-metric-list.json',
'tests/data/compute.v1.json',
'tests/data/audit-log-resource-get.json',
'tests/data/audit-log-stop-resource-delete.json'
])
success = {'status': '200'}
metrics_mock_http = common.LoggingHttpMockSequence(
[(success, '{"access_token":"token","expires_in":3600}'),
(success, data_files['monitoring.v3.json']),
(success, data_files['audit-log-stop-metric-list.json'])])
api.Clients.metrics.http = metrics_mock_http
api.Clients.metrics.cache_discovery = False
compute_mock_http = common.LoggingHttpMockSequence(
[(success, '{"access_token":"token","expires_in":3600}'),
(success, data_files['compute.v1.json']),
(success, data_files['audit-log-resource-get.json']),
(success, data_files['audit-log-stop-resource-delete.json'])])
api.Clients.compute.http = compute_mock_http
api.Clients.compute.cache_discovery = False
mock_datastore = mock.Mock(spec=Client)
mock_datastore.project = 'project-1'
entity = dict()
mock_datastore.get.side_effect = [entity, common.config_entity()]
api.CLIENTS.datastore = mock_datastore
dns_sync_app = main.DnsSyncApplication()
response = request.get_response(dns_sync_app)
self.assertEquals(response.status_int, 200)
def test_audit_log_loop_event(self):
"""Test receiving an audit loop event."""
url = '/push_notification?secret={}'.format('my-test-secret-key')
request = webapp2.Request.blank(url)
request.method = 'POST'
request.headers['content-type'] = 'application/json'
data_files = common.read_data_files([
'tests/data/audit-log-loop-message.json',
'tests/data/compute.v1.json',
'tests/data/audit-log-loop-compute-operation.json',
'tests/data/audit-log-resource-get.json',
'tests/data/monitoring.v3.json',
'tests/data/dns.v1.json',
'tests/data/dns-zone-response.json',
'tests/data/instance-creation-dns-pending-operation.json',
'tests/data/instance-creation-dns-done-operation.json',
'tests/data/instance-creation-dns-record-set-response.json'
])
data = base64.encodestring(data_files[
'audit-log-loop-message.json'])
post = {
'message': {
'data': data,
'attributes': {
'compute.googleapis.com/resource_id':
'18082097775580039429',
'compute.googleapis.com/resource_name': 'dns-sync-test',
'compute.googleapis.com/resource_type': 'instance',
'compute.googleapis.com/resource_zone': 'us-central1-a',
'logging.googleapis.com/timestamp':
'2016-04-03T23: 06: 31.17867Z'
},
'message_id': '29119446125187'
},
'subscription': 'projects/project-1/subscriptions/gae-push'
}
request.body = json.dumps(post)
success = {'status': '200'}
compute_mock_http = common.LoggingHttpMockSequence(
[(success, '{"access_token":"token","expires_in":3600}'),
(success, data_files['compute.v1.json']),
(success, data_files['audit-log-loop-compute-operation.json']),
(success, data_files['audit-log-resource-get.json']),
# stop instance
(success, data_files['audit-log-loop-compute-operation.json'])])
api.Clients.compute.http = compute_mock_http
api.Clients.compute.cache_discovery = False
metrics_mock_http = common.LoggingHttpMockSequence(
[(success, '{"access_token":"token","expires_in":3600}'),
(success, data_files['monitoring.v3.json']),
# timeseries.write
(success, '{}')])
api.Clients.metrics.http = metrics_mock_http
api.Clients.metrics.cache_discovery = False
mock_dns = mock.MagicMock()
mock_dns.changes().get().execute.return_value = {'status': 'done'}
api.Clients.dns = mock_dns
mock_datastore = mock.Mock(spec=Client)
mock_datastore.project = 'project-1'
now = audit_log.utcnow()
last_call_time = now - datetime.timedelta(0, 30)
entity = Entity()
entity.update({'running': True,
'last_call': 'start',
'last_call_time': last_call_time,
'last_call_event_received': False})
mock_datastore.get.side_effect = [common.config_entity(), entity]
api.CLIENTS.datastore = mock_datastore
dns_sync_app = main.DnsSyncApplication()
auth.AdminRequestHandler.SKIP_AUTHENTICATION = True
# Get a response for that request.
response = request.get_response(dns_sync_app)
self.assertEquals(response.status_int, 200)
| en | 0.853432 | # Copyright 2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Test that we can start the audit loop. Test we can stop the audit loop. Test receiving an audit loop event. # stop instance # timeseries.write # Get a response for that request. | 2.205751 | 2 |
listings/chap10/listing_10_4_dummy_variables.py | unixime/fight-churn | 0 | 6630329 | <gh_stars>0
import pandas as pd
from listing_10_3_grouped_category_cohorts import group_category_column
def dummy_variables(data_set_path, groups={},current=False):
raw_data = pd.read_csv(data_set_path, index_col=[0, 1])
for cat in groups.keys():
group_category_column(raw_data,cat,groups[cat])
data_w_dummies = pd.get_dummies(raw_data,dummy_na=True)
data_w_dummies.to_csv(data_set_path.replace('.csv', '_xgbdummies.csv'))
new_cols = sorted(list(set(data_w_dummies.columns).difference(set(raw_data.columns))))
cat_cols = sorted(list(set(raw_data.columns).difference(set(data_w_dummies.columns))))
dummy_col_df = pd.DataFrame(new_cols,index=new_cols,columns=['metrics'])
dummy_col_df.to_csv(data_set_path.replace('.csv', '_dummies_groupmets.csv'))
if not current:
new_cols.append('is_churn')
dummies_only = data_w_dummies[new_cols]
save_path = data_set_path.replace('.csv', '_dummies_groupscore.csv')
print('Saved dummy variable (only) dataset ' + save_path)
dummies_only.to_csv(save_path)
raw_data.drop(cat_cols,axis=1,inplace=True)
save_path = data_set_path.replace('.csv', '_nocat.csv')
print('Saved no category dataset ' + save_path)
raw_data.to_csv(save_path)
| import pandas as pd
from listing_10_3_grouped_category_cohorts import group_category_column
def dummy_variables(data_set_path, groups={},current=False):
raw_data = pd.read_csv(data_set_path, index_col=[0, 1])
for cat in groups.keys():
group_category_column(raw_data,cat,groups[cat])
data_w_dummies = pd.get_dummies(raw_data,dummy_na=True)
data_w_dummies.to_csv(data_set_path.replace('.csv', '_xgbdummies.csv'))
new_cols = sorted(list(set(data_w_dummies.columns).difference(set(raw_data.columns))))
cat_cols = sorted(list(set(raw_data.columns).difference(set(data_w_dummies.columns))))
dummy_col_df = pd.DataFrame(new_cols,index=new_cols,columns=['metrics'])
dummy_col_df.to_csv(data_set_path.replace('.csv', '_dummies_groupmets.csv'))
if not current:
new_cols.append('is_churn')
dummies_only = data_w_dummies[new_cols]
save_path = data_set_path.replace('.csv', '_dummies_groupscore.csv')
print('Saved dummy variable (only) dataset ' + save_path)
dummies_only.to_csv(save_path)
raw_data.drop(cat_cols,axis=1,inplace=True)
save_path = data_set_path.replace('.csv', '_nocat.csv')
print('Saved no category dataset ' + save_path)
raw_data.to_csv(save_path) | none | 1 | 2.886543 | 3 |
|
tools/dump_bytecode.py | wenq1/duktape | 34 | 6630330 | #!/usr/bin/env python2
#
# Utility to dump bytecode into a human readable form.
#
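# Example invocation (illustrative; file names are assumptions):
#   python2 dump_bytecode.py duk_bytecode.bin
#   python2 dump_bytecode.py --hex-decode duk_bytecode.hex
#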
import os
import sys
import struct
import optparse
def decode_string(buf, off):
strlen, = struct.unpack('>L', buf[off:off+4])
off += 4
strdata = buf[off:off+strlen]
off += strlen
return off, strdata
def sanitize_string(val):
# Don't try to UTF-8 decode, just escape non-printable ASCII.
def f(c):
if ord(c) < 0x20 or ord(c) > 0x7e or c in '\'"':
return '\\x%02x' % ord(c)
else:
return c
return "'" + ''.join(map(f, val)) + "'"
def decode_sanitize_string(buf, off):
off, val = decode_string(buf, off)
return off, sanitize_string(val)
def dump_function(buf, off, ind):
count_inst, count_const, count_funcs = struct.unpack('>LLL', buf[off:off+12])
off += 12
print('%sInstructions: %d' % (ind, count_inst))
print('%sConstants: %d' % (ind, count_const))
print('%sInner functions: %d' % (ind, count_funcs))
# Line numbers present, assuming debugger support; otherwise 0.
nregs, nargs, start_line, end_line = struct.unpack('>HHLL', buf[off:off+12])
off += 12
print('%sNregs: %d' % (ind, nregs))
print('%sNargs: %d' % (ind, nargs))
print('%sStart line number: %d' % (ind, start_line))
print('%sEnd line number: %d' % (ind, end_line))
compfunc_flags, = struct.unpack('>L', buf[off:off+4])
off += 4
print('%sduk_hcompiledfunction flags: 0x%08x' % (ind, compfunc_flags))
for i in xrange(count_inst):
ins, = struct.unpack('>L', buf[off:off+4])
off += 4
print('%s %06d: %08lx' % (ind, i, ins))
print('%sConstants:' % ind)
for i in xrange(count_const):
const_type, = struct.unpack('B', buf[off:off+1])
off += 1
if const_type == 0x00:
off, strdata = decode_sanitize_string(buf, off)
print('%s %06d: %s' % (ind, i, strdata))
elif const_type == 0x01:
num, = struct.unpack('>d', buf[off:off+8])
off += 8
print('%s %06d: %f' % (ind, i, num))
else:
raise Exception('invalid constant type: %d' % const_type)
for i in xrange(count_funcs):
print('%sInner function %d:' % (ind, i))
off = dump_function(buf, off, ind + ' ')
val, = struct.unpack('>L', buf[off:off+4])
off += 4
print('%s.length: %d' % (ind, val))
off, val = decode_sanitize_string(buf, off)
print('%s.name: %s' % (ind, val))
off, val = decode_sanitize_string(buf, off)
print('%s.fileName: %s' % (ind, val))
off, val = decode_string(buf, off) # actually a buffer
print('%s._Pc2line: %s' % (ind, val.encode('hex')))
while True:
off, name = decode_string(buf, off)
if name == '':
break
name = sanitize_string(name)
val, = struct.unpack('>L', buf[off:off+4])
off += 4
print('%s_Varmap[%s] = %d' % (ind, name, val))
num_formals, = struct.unpack('>L', buf[off:off+4])
off += 4
if num_formals != 0xffffffff:
print('%s_Formals: %d formal arguments' % (ind, num_formals))
for idx in xrange(num_formals):
off, name = decode_string(buf, off)
name = sanitize_string(name)
print('%s_Formals[%d] = %s' % (ind, idx, name))
else:
print('%s_Formals: absent' % ind)
return off
def dump_bytecode(buf, off, ind):
sig, = struct.unpack('B', buf[off:off+1])
print('%sSignature byte: 0x%02x' % (ind, sig))
off += 1
if sig == 0xff:
raise Exception('pre-Duktape 2.2 0xFF signature byte (signature byte is 0xBF since Duktape 2.2)')
if sig != 0xbf:
raise Exception('invalid signature byte: %d' % sig)
off = dump_function(buf, off, ind + ' ')
return off
def main():
parser = optparse.OptionParser()
parser.add_option('--hex-decode', dest='hex_decode', default=False, action='store_true', help='Input file is ASCII hex encoded, decode before dump')
(opts, args) = parser.parse_args()
with open(args[0], 'rb') as f:
d = f.read()
if opts.hex_decode:
d = d.strip()
d = d.decode('hex')
dump_bytecode(d, 0, '')
if __name__ == '__main__':
main()
| #!/usr/bin/env python2
#
# Utility to dump bytecode into a human readable form.
#
import os
import sys
import struct
import optparse
def decode_string(buf, off):
strlen, = struct.unpack('>L', buf[off:off+4])
off += 4
strdata = buf[off:off+strlen]
off += strlen
return off, strdata
def sanitize_string(val):
# Don't try to UTF-8 decode, just escape non-printable ASCII.
def f(c):
if ord(c) < 0x20 or ord(c) > 0x7e or c in '\'"':
return '\\x%02x' % ord(c)
else:
return c
return "'" + ''.join(map(f, val)) + "'"
def decode_sanitize_string(buf, off):
off, val = decode_string(buf, off)
return off, sanitize_string(val)
def dump_function(buf, off, ind):
count_inst, count_const, count_funcs = struct.unpack('>LLL', buf[off:off+12])
off += 12
print('%sInstructions: %d' % (ind, count_inst))
print('%sConstants: %d' % (ind, count_const))
print('%sInner functions: %d' % (ind, count_funcs))
# Line numbers present, assuming debugger support; otherwise 0.
nregs, nargs, start_line, end_line = struct.unpack('>HHLL', buf[off:off+12])
off += 12
print('%sNregs: %d' % (ind, nregs))
print('%sNargs: %d' % (ind, nargs))
print('%sStart line number: %d' % (ind, start_line))
print('%sEnd line number: %d' % (ind, end_line))
compfunc_flags, = struct.unpack('>L', buf[off:off+4])
off += 4
print('%sduk_hcompiledfunction flags: 0x%08x' % (ind, compfunc_flags))
for i in xrange(count_inst):
ins, = struct.unpack('>L', buf[off:off+4])
off += 4
print('%s %06d: %08lx' % (ind, i, ins))
print('%sConstants:' % ind)
for i in xrange(count_const):
const_type, = struct.unpack('B', buf[off:off+1])
off += 1
if const_type == 0x00:
off, strdata = decode_sanitize_string(buf, off)
print('%s %06d: %s' % (ind, i, strdata))
elif const_type == 0x01:
num, = struct.unpack('>d', buf[off:off+8])
off += 8
print('%s %06d: %f' % (ind, i, num))
else:
raise Exception('invalid constant type: %d' % const_type)
for i in xrange(count_funcs):
print('%sInner function %d:' % (ind, i))
off = dump_function(buf, off, ind + ' ')
val, = struct.unpack('>L', buf[off:off+4])
off += 4
print('%s.length: %d' % (ind, val))
off, val = decode_sanitize_string(buf, off)
print('%s.name: %s' % (ind, val))
off, val = decode_sanitize_string(buf, off)
print('%s.fileName: %s' % (ind, val))
off, val = decode_string(buf, off) # actually a buffer
print('%s._Pc2line: %s' % (ind, val.encode('hex')))
while True:
off, name = decode_string(buf, off)
if name == '':
break
name = sanitize_string(name)
val, = struct.unpack('>L', buf[off:off+4])
off += 4
print('%s_Varmap[%s] = %d' % (ind, name, val))
num_formals, = struct.unpack('>L', buf[off:off+4])
off += 4
if num_formals != 0xffffffff:
print('%s_Formals: %d formal arguments' % (ind, num_formals))
for idx in xrange(num_formals):
off, name = decode_string(buf, off)
name = sanitize_string(name)
print('%s_Formals[%d] = %s' % (ind, idx, name))
else:
print('%s_Formals: absent' % ind)
return off
def dump_bytecode(buf, off, ind):
sig, = struct.unpack('B', buf[off:off+1])
print('%sSignature byte: 0x%02x' % (ind, sig))
off += 1
if sig == 0xff:
raise Exception('pre-Duktape 2.2 0xFF signature byte (signature byte is 0xBF since Duktape 2.2)')
if sig != 0xbf:
raise Exception('invalid signature byte: %d' % sig)
off = dump_function(buf, off, ind + ' ')
return off
def main():
parser = optparse.OptionParser()
parser.add_option('--hex-decode', dest='hex_decode', default=False, action='store_true', help='Input file is ASCII hex encoded, decode before dump')
(opts, args) = parser.parse_args()
with open(args[0], 'rb') as f:
d = f.read()
if opts.hex_decode:
d = d.strip()
d = d.decode('hex')
dump_bytecode(d, 0, '')
if __name__ == '__main__':
main()
| en | 0.722434 | #!/usr/bin/env python2 # # Utility to dump bytecode into a human readable form. # # Don't try to UTF-8 decode, just escape non-printable ASCII. # Line numbers present, assuming debugger support; otherwise 0. # actually a buffer | 3.089229 | 3 |
image_vision/plugins/mdi/area.py | IvanKosik/ImageVision | 0 | 6630331 | from core import Plugin
from plugins.window import MainWindowPlugin
from extensions.mdi import MdiArea
class MdiAreaPlugin(Plugin):
def __init__(self, main_window_plugin: MainWindowPlugin):
super().__init__()
self.main_window = main_window_plugin.main_window
self.mdi_area = MdiArea()
def _install(self):
self.main_window.setCentralWidget(self.mdi_area)
def _remove(self):
self.main_window.setCentralWidget(None)
| from core import Plugin
from plugins.window import MainWindowPlugin
from extensions.mdi import MdiArea
class MdiAreaPlugin(Plugin):
def __init__(self, main_window_plugin: MainWindowPlugin):
super().__init__()
self.main_window = main_window_plugin.main_window
self.mdi_area = MdiArea()
def _install(self):
self.main_window.setCentralWidget(self.mdi_area)
def _remove(self):
self.main_window.setCentralWidget(None)
| none | 1 | 1.783319 | 2 |
|
cmds/information.py | hacknorris-aka-penguin/discord-tux | 0 | 6630332 | import discord
from datetime import datetime  # needed by the time command below
from discord.ext import commands
class information(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def neofetch(self, ctx):
await ctx.send(
f"```fix\n /--\ OS NAME : {ctx.guild.name}\n -- \--/ -- \n / \ STORAGE : {ctx.guild.member_count} users \n | | \n /--\ /--\ INSTALLATION DATE : {ctx.guild.created_at}\n \--/ \--/ \n \ / DIRECTORIES : {len(ctx.guild.channels)} \n ---------- ```"
)
@commands.command()
async def echo(self, ctx, *, arg):
await ctx.send(f"{arg}")
@commands.command()
async def ping(self, ctx):
        replytime = self.bot.latency * 1000  # latency is in seconds; use the cog's bot instance
await ctx.send(f'Hi! I answered in {replytime} ms!')
@commands.command()
async def whoami(self, ctx):
await ctx.send(f'```{ctx.author}```')
return
@commands.command()
async def whois(self, ctx, member: discord.Member):
await ctx.send(f"```{member.name}#{member.discriminator}```")
@commands.command()
async def time(self, ctx):
now = datetime.now()
now2 = now.timestamp()
await ctx.send(f"`now? its `<t:{round(now2)}:T>")
@commands.command()
async def id(self, ctx, member: discord.Member):
id = member.id
await ctx.send(f"```{id}```")
@commands.command()
async def stat(self, ctx, member: discord.Member = None):
        if member is None:
await ctx.send(f"```{ctx.message.author.status}```")
else:
await ctx.send(f"```{member.status}```")
def setup(bot):
bot.add_cog(information(bot))
| import discord
from datetime import datetime  # needed by the time command below
from discord.ext import commands
class information(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def neofetch(self, ctx):
await ctx.send(
f"```fix\n /--\ OS NAME : {ctx.guild.name}\n -- \--/ -- \n / \ STORAGE : {ctx.guild.member_count} users \n | | \n /--\ /--\ INSTALLATION DATE : {ctx.guild.created_at}\n \--/ \--/ \n \ / DIRECTORIES : {len(ctx.guild.channels)} \n ---------- ```"
)
@commands.command()
async def echo(self, ctx, *, arg):
await ctx.send(f"{arg}")
@commands.command()
async def ping(self, ctx):
        replytime = self.bot.latency * 1000  # latency is in seconds; use the cog's bot instance
await ctx.send(f'Hi! I answered in {replytime} ms!')
@commands.command()
async def whoami(self, ctx):
await ctx.send(f'```{ctx.author}```')
return
@commands.command()
async def whois(self, ctx, member: discord.Member):
await ctx.send(f"```{member.name}#{member.discriminator}```")
@commands.command()
async def time(self, ctx):
now = datetime.now()
now2 = now.timestamp()
await ctx.send(f"`now? its `<t:{round(now2)}:T>")
@commands.command()
async def id(self, ctx, member: discord.Member):
id = member.id
await ctx.send(f"```{id}```")
@commands.command()
async def stat(self, ctx, member: discord.Member = None):
        if member is None:
await ctx.send(f"```{ctx.message.author.status}```")
else:
await ctx.send(f"```{member.status}```")
def setup(bot):
bot.add_cog(information(bot))
| en | 0.22457 | #{member.discriminator}```") | 2.712462 | 3 |
src/hello.py | JunyaKaneko/github-actions-hello-world | 0 | 6630333 | def hello(name):
return 'Hello, {}'.format(name)
| def hello(name):
return 'Hello, {}'.format(name)
| none | 1 | 2.071803 | 2 |
|
api/admin.py | razin92/payme | 1 | 6630334 | from django.contrib import admin
from .models import Transaction, BasicAuth
# Register your models here.
admin.site.register(Transaction)
admin.site.register(BasicAuth) | from django.contrib import admin
from .models import Transaction, BasicAuth
# Register your models here.
admin.site.register(Transaction)
admin.site.register(BasicAuth) | en | 0.968259 | # Register your models here. | 1.418075 | 1 |
smt/applications/ego.py | Laurentww/smt | 0 | 6630335 | <gh_stars>0
"""
Authors: <NAME>, <NAME>, <NAME>, <NAME> <<EMAIL>>
This package is distributed under New BSD license.
"""
import numpy as np
from types import FunctionType
from scipy.stats import norm
from scipy.optimize import minimize
from smt.utils.options_dictionary import OptionsDictionary
from smt.applications.application import SurrogateBasedApplication
from smt.applications.mixed_integer import (
MixedIntegerContext,
GOWER,
HOMO_GAUSSIAN,
FULL_GAUSSIAN,
)
from smt.utils.misc import compute_rms_error
from smt.surrogate_models import KPLS, KRG, KPLSK, MGP, GEKPLS
from smt.sampling_methods import LHS
class Evaluator(object):
"""
An interface for evaluation of a function at x points (nsamples of dimension nx).
User can derive this interface and override the run() method to implement custom multiprocessing.
"""
def run(self, fun, x):
"""
Evaluates fun at x.
Parameters
---------
fun : function to evaluate: (nsamples, nx) -> (nsample, 1)
x : np.ndarray[nsamples, nx]
nsamples points of nx dimensions.
Returns
-------
np.ndarray[nsample, 1]
fun evaluations at the nsamples points.
"""
return fun(x)
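# Illustrative sketch (not part of the original smt source): a user-defined
# Evaluator that splits the sample block across worker processes. The name
# ParallelEvaluator and the n_procs argument are assumptions for the example;
# fun must be picklable (i.e. a module-level function) for this to work.
class ParallelEvaluator(Evaluator):
    def __init__(self, n_procs=4):
        self.n_procs = n_procs
    def run(self, fun, x):
        from multiprocessing import Pool
        with Pool(self.n_procs) as pool:
            # split the (nsamples, nx) block into n_procs chunks and stack the results back
            chunks = np.array_split(x, self.n_procs)
            return np.vstack(pool.map(fun, chunks))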
class EGO(SurrogateBasedApplication):
def _initialize(self):
super(EGO, self)._initialize()
declare = self.options.declare
declare("fun", None, types=FunctionType, desc="Function to minimize")
declare(
"criterion",
"EI",
types=str,
values=["EI", "SBO", "LCB"],
desc="criterion for next evaluation point determination: Expected Improvement, \
Surrogate-Based Optimization or Lower Confidence Bound",
)
declare("n_iter", None, types=int, desc="Number of optimizer steps")
declare(
"n_max_optim",
20,
types=int,
desc="Maximum number of internal optimizations",
)
declare("n_start", 20, types=int, desc="Number of optimization start points")
declare(
"n_parallel",
1,
types=int,
desc="Number of parallel samples to compute using qEI criterion",
)
declare(
"qEI",
"KBLB",
types=str,
values=["KB", "KBLB", "KBUB", "KBRand", "CLmin"],
desc="Approximated q-EI maximization strategy",
)
declare(
"evaluator",
default=Evaluator(),
types=Evaluator,
desc="Object used to run function fun to optimize at x points (nsamples, nxdim)",
)
declare(
"n_doe",
None,
types=int,
desc="Number of points of the initial LHS doe, only used if xdoe is not given",
)
declare("xdoe", None, types=np.ndarray, desc="Initial doe inputs")
declare("ydoe", None, types=np.ndarray, desc="Initial doe outputs")
declare("xlimits", None, types=np.ndarray, desc="Bounds of function fun inputs")
declare("verbose", False, types=bool, desc="Print computation information")
declare(
"enable_tunneling",
False,
types=bool,
desc="Enable the penalization of points that have been already evaluated in EI criterion",
)
declare(
"categorical_kernel",
None,
types=str,
values=[GOWER, HOMO_GAUSSIAN, FULL_GAUSSIAN],
desc="The kernel to use for categorical inputs. Only for non continuous Kriging.",
)
declare(
"surrogate",
KRG(print_global=False),
types=(KRG, KPLS, KPLSK, GEKPLS, MGP),
desc="SMT kriging-based surrogate model used internaly",
)
declare(
"xtypes",
None,
types=list,
desc="x type specifications: either FLOAT for continuous, INT for integer "
"or (ENUM n) for categorical doimension with n levels",
)
self.options.declare(
"random_state",
types=(type(None), int, np.random.RandomState),
desc="Numpy RandomState object or seed number which controls random draws",
)
def optimize(self, fun):
"""
Optimizes fun
Parameters
----------
fun: function to optimize: ndarray[n, nx] or ndarray[n] -> ndarray[n, 1]
Returns
-------
[nx, 1]: x optimum
[1, 1]: y optimum
int: index of optimum in data arrays
[ndoe + n_iter, nx]: coord-x data
[ndoe + n_iter, 1]: coord-y data
"""
x_data, y_data = self._setup_optimizer(fun)
n_iter = self.options["n_iter"]
n_parallel = self.options["n_parallel"]
for k in range(n_iter):
            # Virtual enrichment loop
for p in range(n_parallel):
# find next best x-coord point to evaluate
x_et_k, success = self._find_best_point(
x_data, y_data, self.options["enable_tunneling"]
)
if not success:
self.log(
"Internal optimization failed at EGO iter = {}.{}".format(k, p)
)
break
elif success:
self.log(
"Internal optimization succeeded at EGO iter = {}.{}".format(
k, p
)
)
                # Temporarily set the y-coord point based on the kriging prediction
y_et_k = self._get_virtual_point(np.atleast_2d(x_et_k), y_data)
# Update y_data with predicted value
y_data = y_data.reshape(y_data.shape[0], self.gpr.ny)
y_data = np.vstack((y_data, y_et_k))
x_data = np.atleast_2d(np.append(x_data, x_et_k, axis=0))
# Compute the real values of y_data
x_to_compute = np.atleast_2d(x_data[-n_parallel:])
if self.mixint and self.options["categorical_kernel"] is None:
x_to_compute = self.mixint.fold_with_enum_index(x_to_compute)
y = self._evaluator.run(fun, x_to_compute)
y_data[-n_parallel:] = y
# Find the optimal point
ind_best = np.argmin(y_data if y_data.ndim == 1 else y_data[:, 0])
x_opt = x_data[ind_best]
y_opt = y_data[ind_best]
if self.mixint and self.options["categorical_kernel"] is None:
x_opt = self.mixint.fold_with_enum_index(x_opt)[0]
return x_opt, y_opt, ind_best, x_data, y_data
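    # Typical call pattern (illustrative only; argument values are assumptions):
    #   ego = EGO(n_iter=20, criterion="EI", xdoe=xdoe, xlimits=xlimits, random_state=42)
    #   x_opt, y_opt, ind_best, x_data, y_data = ego.optimize(fun=my_function)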
def log(self, msg):
if self.options["verbose"]:
print(msg)
def EI(self, points, y_data, enable_tunneling=False, x_data=None):
"""Expected improvement"""
f_min = np.min(y_data)
pred = self.gpr.predict_values(points)
sig = np.sqrt(self.gpr.predict_variances(points))
args0 = (f_min - pred) / sig
args1 = (f_min - pred) * norm.cdf(args0)
args2 = sig * norm.pdf(args0)
        if sig.size == 1 and sig == 0.0:  # can be used only if one point is computed
return 0.0
ei = args1 + args2
# penalize the points already evaluated with tunneling
if enable_tunneling:
for i in range(len(points)):
p = np.atleast_2d(points[i])
EIp = self.EI(p, y_data, enable_tunneling=False)
for x in x_data:
x = np.atleast_2d(x)
# if np.abs(p-x)<1:
# ei[i]=ei[i]*np.reciprocal(1+100*np.exp(-np.reciprocal(1-np.square(p-x))))
pena = (
EIp - self.EI(x, y_data, enable_tunneling=False)
) / np.power(np.linalg.norm(p - x), 4)
if pena > 0:
ei[i] = ei[i] - pena
ei[i] = max(ei[i], 0)
return ei
def SBO(self, point):
"""Surrogate based optimization: min the surrogate model by suing the mean mu"""
res = self.gpr.predict_values(point)
return res
def LCB(self, point):
"""Lower confidence bound optimization: minimize by using mu - 3*sigma"""
pred = self.gpr.predict_values(point)
var = self.gpr.predict_variances(point)
res = pred - 3.0 * np.sqrt(var)
return res
def _setup_optimizer(self, fun):
"""
        Instantiate internal surrogate used for optimization
and setup function evaluator wrt options
Parameters
----------
fun: function to optimize: ndarray[n, nx] or ndarray[n] -> ndarray[n, 1]
Returns
-------
ndarray: initial coord-x doe
ndarray: initial coord-y doe = fun(xdoe)
"""
# Set the model
self.gpr = self.options["surrogate"]
self.xlimits = self.options["xlimits"]
# Handle mixed integer optimization
xtypes = self.options["xtypes"]
if self.options["categorical_kernel"] is not None:
work_in_folded_space = True
else:
work_in_folded_space = False
if xtypes:
self.categorical_kernel = self.options["categorical_kernel"]
self.mixint = MixedIntegerContext(
xtypes,
self.xlimits,
work_in_folded_space=work_in_folded_space,
categorical_kernel=self.options["categorical_kernel"],
)
self.gpr = self.mixint.build_surrogate_model(self.gpr)
self._sampling = self.mixint.build_sampling_method(
LHS,
criterion="ese",
random_state=self.options["random_state"],
output_in_folded_space=work_in_folded_space,
)
else:
self.mixint = None
self._sampling = LHS(
xlimits=self.xlimits,
criterion="ese",
random_state=self.options["random_state"],
)
# Build DOE
self._evaluator = self.options["evaluator"]
xdoe = self.options["xdoe"]
if xdoe is None:
self.log("Build initial DOE with LHS")
n_doe = self.options["n_doe"]
x_doe = self._sampling(n_doe)
else:
self.log("Initial DOE given")
x_doe = np.atleast_2d(xdoe)
if self.mixint and self.options["categorical_kernel"] is None:
x_doe = self.mixint.unfold_with_enum_mask(x_doe)
ydoe = self.options["ydoe"]
if ydoe is None:
y_doe = self._evaluator.run(fun, x_doe)
else: # to save time if y_doe is already given to EGO
y_doe = ydoe
return x_doe, y_doe
def _find_best_point(self, x_data=None, y_data=None, enable_tunneling=False):
"""
        Function that analyses a set of x_data and y_data and gives back the
        most interesting point to evaluate according to the selected criterion
Parameters
----------
x_data: ndarray(n_points, nx)
y_data: ndarray(n_points, 1)
Returns
-------
ndarray(nx, 1): the next best point to evaluate
boolean: success flag
"""
self.gpr.set_training_values(x_data, y_data)
if self.gpr.supports["training_derivatives"]:
for kx in range(self.gpr.nx):
self.gpr.set_training_derivatives(
x_data,
y_data[:, 1 + kx].reshape((y_data.shape[0], 1)),
kx
)
self.gpr.train()
criterion = self.options["criterion"]
n_start = self.options["n_start"]
n_max_optim = self.options["n_max_optim"]
if self.mixint:
bounds = self.mixint.get_unfolded_xlimits()
else:
bounds = self.xlimits
if criterion == "EI":
self.obj_k = lambda x: -self.EI(
np.atleast_2d(x), y_data, enable_tunneling, x_data
)
elif criterion == "SBO":
self.obj_k = lambda x: self.SBO(np.atleast_2d(x))
elif criterion == "LCB":
self.obj_k = lambda x: self.LCB(np.atleast_2d(x))
success = False
n_optim = 1 # in order to have some success optimizations with SLSQP
while not success and n_optim <= n_max_optim:
opt_all = []
x_start = self._sampling(n_start)
for ii in range(n_start):
try:
opt_all.append(
minimize(
lambda x: float(np.array(self.obj_k(x)).flat[0]),
x_start[ii, :],
method="SLSQP",
bounds=bounds,
options={"maxiter": 200},
)
)
except ValueError: # in case "x0 violates bound constraints" error
print("warning: `x0` violates bound constraints")
print("x0={}".format(x_start[ii, :]))
print("bounds={}".format(bounds))
opt_all.append({"success": False})
opt_all = np.asarray(opt_all)
opt_success = opt_all[[opt_i["success"] for opt_i in opt_all]]
obj_success = np.array([opt_i["fun"] for opt_i in opt_success])
success = obj_success.size != 0
if not success:
self.log("New start point for the internal optimization")
n_optim += 1
if n_optim >= n_max_optim:
# self.log("Internal optimization failed at EGO iter = {}".format(k))
return np.atleast_2d(0), False
ind_min = np.argmin(obj_success)
opt = opt_success[ind_min]
x_et_k = np.atleast_2d(opt["x"])
return x_et_k, True
def _get_virtual_point(self, x, y_data):
"""
Depending on the qEI attribute return a predicted value at given point x
Parameters
----------
x: ndarray(1, 1) the x-coord point where to forecast the y-coord virtual point
y_data: current y evaluation list only used when qEI is CLmin
Returns
-------
ndarray(1, 1): the so-called virtual y-coord point
"""
qEI = self.options["qEI"]
if qEI == "CLmin":
return np.min(y_data)
if qEI == "KB":
return self.gpr.predict_values(x)
if qEI == "KBUB":
conf = 3.0
if qEI == "KBLB":
conf = -3.0
if qEI == "KBRand":
conf = np.random.randn()
pred = self.gpr.predict_values(x)
var = self.gpr.predict_variances(x)
return pred + conf * np.sqrt(var)
| """
Authors: <NAME>, <NAME>, <NAME>, <NAME> <<EMAIL>>
This package is distributed under New BSD license.
"""
import numpy as np
from types import FunctionType
from scipy.stats import norm
from scipy.optimize import minimize
from smt.utils.options_dictionary import OptionsDictionary
from smt.applications.application import SurrogateBasedApplication
from smt.applications.mixed_integer import (
MixedIntegerContext,
GOWER,
HOMO_GAUSSIAN,
FULL_GAUSSIAN,
)
from smt.utils.misc import compute_rms_error
from smt.surrogate_models import KPLS, KRG, KPLSK, MGP, GEKPLS
from smt.sampling_methods import LHS
class Evaluator(object):
"""
An interface for evaluation of a function at x points (nsamples of dimension nx).
User can derive this interface and override the run() method to implement custom multiprocessing.
"""
def run(self, fun, x):
"""
Evaluates fun at x.
Parameters
---------
fun : function to evaluate: (nsamples, nx) -> (nsample, 1)
x : np.ndarray[nsamples, nx]
nsamples points of nx dimensions.
Returns
-------
np.ndarray[nsample, 1]
fun evaluations at the nsamples points.
"""
return fun(x)
class EGO(SurrogateBasedApplication):
def _initialize(self):
super(EGO, self)._initialize()
declare = self.options.declare
declare("fun", None, types=FunctionType, desc="Function to minimize")
declare(
"criterion",
"EI",
types=str,
values=["EI", "SBO", "LCB"],
desc="criterion for next evaluation point determination: Expected Improvement, \
Surrogate-Based Optimization or Lower Confidence Bound",
)
declare("n_iter", None, types=int, desc="Number of optimizer steps")
declare(
"n_max_optim",
20,
types=int,
desc="Maximum number of internal optimizations",
)
declare("n_start", 20, types=int, desc="Number of optimization start points")
declare(
"n_parallel",
1,
types=int,
desc="Number of parallel samples to compute using qEI criterion",
)
declare(
"qEI",
"KBLB",
types=str,
values=["KB", "KBLB", "KBUB", "KBRand", "CLmin"],
desc="Approximated q-EI maximization strategy",
)
declare(
"evaluator",
default=Evaluator(),
types=Evaluator,
desc="Object used to run function fun to optimize at x points (nsamples, nxdim)",
)
declare(
"n_doe",
None,
types=int,
desc="Number of points of the initial LHS doe, only used if xdoe is not given",
)
declare("xdoe", None, types=np.ndarray, desc="Initial doe inputs")
declare("ydoe", None, types=np.ndarray, desc="Initial doe outputs")
declare("xlimits", None, types=np.ndarray, desc="Bounds of function fun inputs")
declare("verbose", False, types=bool, desc="Print computation information")
declare(
"enable_tunneling",
False,
types=bool,
desc="Enable the penalization of points that have been already evaluated in EI criterion",
)
declare(
"categorical_kernel",
None,
types=str,
values=[GOWER, HOMO_GAUSSIAN, FULL_GAUSSIAN],
desc="The kernel to use for categorical inputs. Only for non continuous Kriging.",
)
declare(
"surrogate",
KRG(print_global=False),
types=(KRG, KPLS, KPLSK, GEKPLS, MGP),
desc="SMT kriging-based surrogate model used internaly",
)
declare(
"xtypes",
None,
types=list,
desc="x type specifications: either FLOAT for continuous, INT for integer "
"or (ENUM n) for categorical doimension with n levels",
)
self.options.declare(
"random_state",
types=(type(None), int, np.random.RandomState),
desc="Numpy RandomState object or seed number which controls random draws",
)
def optimize(self, fun):
"""
Optimizes fun
Parameters
----------
fun: function to optimize: ndarray[n, nx] or ndarray[n] -> ndarray[n, 1]
Returns
-------
[nx, 1]: x optimum
[1, 1]: y optimum
int: index of optimum in data arrays
[ndoe + n_iter, nx]: coord-x data
[ndoe + n_iter, 1]: coord-y data
"""
x_data, y_data = self._setup_optimizer(fun)
n_iter = self.options["n_iter"]
n_parallel = self.options["n_parallel"]
for k in range(n_iter):
            # Virtual enrichment loop
for p in range(n_parallel):
# find next best x-coord point to evaluate
x_et_k, success = self._find_best_point(
x_data, y_data, self.options["enable_tunneling"]
)
if not success:
self.log(
"Internal optimization failed at EGO iter = {}.{}".format(k, p)
)
break
elif success:
self.log(
"Internal optimization succeeded at EGO iter = {}.{}".format(
k, p
)
)
                # Temporarily set the y-coord point based on the kriging prediction
y_et_k = self._get_virtual_point(np.atleast_2d(x_et_k), y_data)
# Update y_data with predicted value
y_data = y_data.reshape(y_data.shape[0], self.gpr.ny)
y_data = np.vstack((y_data, y_et_k))
x_data = np.atleast_2d(np.append(x_data, x_et_k, axis=0))
# Compute the real values of y_data
x_to_compute = np.atleast_2d(x_data[-n_parallel:])
if self.mixint and self.options["categorical_kernel"] is None:
x_to_compute = self.mixint.fold_with_enum_index(x_to_compute)
y = self._evaluator.run(fun, x_to_compute)
y_data[-n_parallel:] = y
# Find the optimal point
ind_best = np.argmin(y_data if y_data.ndim == 1 else y_data[:, 0])
x_opt = x_data[ind_best]
y_opt = y_data[ind_best]
if self.mixint and self.options["categorical_kernel"] is None:
x_opt = self.mixint.fold_with_enum_index(x_opt)[0]
return x_opt, y_opt, ind_best, x_data, y_data
def log(self, msg):
if self.options["verbose"]:
print(msg)
def EI(self, points, y_data, enable_tunneling=False, x_data=None):
"""Expected improvement"""
f_min = np.min(y_data)
pred = self.gpr.predict_values(points)
sig = np.sqrt(self.gpr.predict_variances(points))
args0 = (f_min - pred) / sig
args1 = (f_min - pred) * norm.cdf(args0)
args2 = sig * norm.pdf(args0)
        if sig.size == 1 and sig == 0.0:  # can be used only if one point is computed
return 0.0
ei = args1 + args2
# penalize the points already evaluated with tunneling
if enable_tunneling:
for i in range(len(points)):
p = np.atleast_2d(points[i])
EIp = self.EI(p, y_data, enable_tunneling=False)
for x in x_data:
x = np.atleast_2d(x)
# if np.abs(p-x)<1:
# ei[i]=ei[i]*np.reciprocal(1+100*np.exp(-np.reciprocal(1-np.square(p-x))))
pena = (
EIp - self.EI(x, y_data, enable_tunneling=False)
) / np.power(np.linalg.norm(p - x), 4)
if pena > 0:
ei[i] = ei[i] - pena
ei[i] = max(ei[i], 0)
return ei
def SBO(self, point):
"""Surrogate based optimization: min the surrogate model by suing the mean mu"""
res = self.gpr.predict_values(point)
return res
def LCB(self, point):
"""Lower confidence bound optimization: minimize by using mu - 3*sigma"""
pred = self.gpr.predict_values(point)
var = self.gpr.predict_variances(point)
res = pred - 3.0 * np.sqrt(var)
return res
def _setup_optimizer(self, fun):
"""
        Instantiate internal surrogate used for optimization
and setup function evaluator wrt options
Parameters
----------
fun: function to optimize: ndarray[n, nx] or ndarray[n] -> ndarray[n, 1]
Returns
-------
ndarray: initial coord-x doe
ndarray: initial coord-y doe = fun(xdoe)
"""
# Set the model
self.gpr = self.options["surrogate"]
self.xlimits = self.options["xlimits"]
# Handle mixed integer optimization
xtypes = self.options["xtypes"]
if self.options["categorical_kernel"] is not None:
work_in_folded_space = True
else:
work_in_folded_space = False
if xtypes:
self.categorical_kernel = self.options["categorical_kernel"]
self.mixint = MixedIntegerContext(
xtypes,
self.xlimits,
work_in_folded_space=work_in_folded_space,
categorical_kernel=self.options["categorical_kernel"],
)
self.gpr = self.mixint.build_surrogate_model(self.gpr)
self._sampling = self.mixint.build_sampling_method(
LHS,
criterion="ese",
random_state=self.options["random_state"],
output_in_folded_space=work_in_folded_space,
)
else:
self.mixint = None
self._sampling = LHS(
xlimits=self.xlimits,
criterion="ese",
random_state=self.options["random_state"],
)
# Build DOE
self._evaluator = self.options["evaluator"]
xdoe = self.options["xdoe"]
if xdoe is None:
self.log("Build initial DOE with LHS")
n_doe = self.options["n_doe"]
x_doe = self._sampling(n_doe)
else:
self.log("Initial DOE given")
x_doe = np.atleast_2d(xdoe)
if self.mixint and self.options["categorical_kernel"] is None:
x_doe = self.mixint.unfold_with_enum_mask(x_doe)
ydoe = self.options["ydoe"]
if ydoe is None:
y_doe = self._evaluator.run(fun, x_doe)
else: # to save time if y_doe is already given to EGO
y_doe = ydoe
return x_doe, y_doe
def _find_best_point(self, x_data=None, y_data=None, enable_tunneling=False):
"""
        Function that analyses a set of x_data and y_data and gives back the
        most interesting point to evaluate according to the selected criterion
Parameters
----------
x_data: ndarray(n_points, nx)
y_data: ndarray(n_points, 1)
Returns
-------
ndarray(nx, 1): the next best point to evaluate
boolean: success flag
"""
self.gpr.set_training_values(x_data, y_data)
if self.gpr.supports["training_derivatives"]:
for kx in range(self.gpr.nx):
self.gpr.set_training_derivatives(
x_data,
y_data[:, 1 + kx].reshape((y_data.shape[0], 1)),
kx
)
self.gpr.train()
criterion = self.options["criterion"]
n_start = self.options["n_start"]
n_max_optim = self.options["n_max_optim"]
if self.mixint:
bounds = self.mixint.get_unfolded_xlimits()
else:
bounds = self.xlimits
if criterion == "EI":
self.obj_k = lambda x: -self.EI(
np.atleast_2d(x), y_data, enable_tunneling, x_data
)
elif criterion == "SBO":
self.obj_k = lambda x: self.SBO(np.atleast_2d(x))
elif criterion == "LCB":
self.obj_k = lambda x: self.LCB(np.atleast_2d(x))
success = False
        n_optim = 1 # in order to have some successful optimizations with SLSQP
while not success and n_optim <= n_max_optim:
opt_all = []
x_start = self._sampling(n_start)
for ii in range(n_start):
try:
opt_all.append(
minimize(
lambda x: float(np.array(self.obj_k(x)).flat[0]),
x_start[ii, :],
method="SLSQP",
bounds=bounds,
options={"maxiter": 200},
)
)
except ValueError: # in case "x0 violates bound constraints" error
print("warning: `x0` violates bound constraints")
print("x0={}".format(x_start[ii, :]))
print("bounds={}".format(bounds))
opt_all.append({"success": False})
opt_all = np.asarray(opt_all)
opt_success = opt_all[[opt_i["success"] for opt_i in opt_all]]
obj_success = np.array([opt_i["fun"] for opt_i in opt_success])
success = obj_success.size != 0
if not success:
self.log("New start point for the internal optimization")
n_optim += 1
if n_optim >= n_max_optim:
# self.log("Internal optimization failed at EGO iter = {}".format(k))
return np.atleast_2d(0), False
ind_min = np.argmin(obj_success)
opt = opt_success[ind_min]
x_et_k = np.atleast_2d(opt["x"])
return x_et_k, True
def _get_virtual_point(self, x, y_data):
"""
Depending on the qEI attribute return a predicted value at given point x
Parameters
----------
        x: ndarray(1, 1) the x-coord point at which to forecast the y-coord virtual point
y_data: current y evaluation list only used when qEI is CLmin
Returns
-------
ndarray(1, 1): the so-called virtual y-coord point
"""
qEI = self.options["qEI"]
if qEI == "CLmin":
return np.min(y_data)
if qEI == "KB":
return self.gpr.predict_values(x)
if qEI == "KBUB":
conf = 3.0
if qEI == "KBLB":
conf = -3.0
if qEI == "KBRand":
conf = np.random.randn()
pred = self.gpr.predict_values(x)
var = self.gpr.predict_variances(x)
return pred + conf * np.sqrt(var) | en | 0.590099 | Authors: <NAME>, <NAME>, <NAME>, <NAME> <<EMAIL>> This package is distributed under New BSD license. An interface for evaluation of a function at x points (nsamples of dimension nx). User can derive this interface and override the run() method to implement custom multiprocessing. Evaluates fun at x. Parameters --------- fun : function to evaluate: (nsamples, nx) -> (nsample, 1) x : np.ndarray[nsamples, nx] nsamples points of nx dimensions. Returns ------- np.ndarray[nsample, 1] fun evaluations at the nsamples points. Optimizes fun Parameters ---------- fun: function to optimize: ndarray[n, nx] or ndarray[n] -> ndarray[n, 1] Returns ------- [nx, 1]: x optimum [1, 1]: y optimum int: index of optimum in data arrays [ndoe + n_iter, nx]: coord-x data [ndoe + n_iter, 1]: coord-y data # Virtual enrichement loop # find next best x-coord point to evaluate # Set temporaly the y-coord point based on the kriging prediction # Update y_data with predicted value # Compute the real values of y_data # Find the optimal point Expected improvement # can be use only if one point is computed # penalize the points already evaluated with tunneling # if np.abs(p-x)<1: # ei[i]=ei[i]*np.reciprocal(1+100*np.exp(-np.reciprocal(1-np.square(p-x)))) Surrogate based optimization: min the surrogate model by suing the mean mu Lower confidence bound optimization: minimize by using mu - 3*sigma Instanciate internal surrogate used for optimization and setup function evaluator wrt options Parameters ---------- fun: function to optimize: ndarray[n, nx] or ndarray[n] -> ndarray[n, 1] Returns ------- ndarray: initial coord-x doe ndarray: initial coord-y doe = fun(xdoe) # Set the model # Handle mixed integer optimization # Build DOE # to save time if y_doe is already given to EGO Function that analyse a set of x_data and y_data and give back the more interesting point to evaluates according to the selected criterion Parameters ---------- x_data: ndarray(n_points, nx) y_data: ndarray(n_points, 1) Returns ------- ndarray(nx, 1): the next best point to evaluate boolean: success flag # in order to have some success optimizations with SLSQP # in case "x0 violates bound constraints" error # self.log("Internal optimization failed at EGO iter = {}".format(k)) Depending on the qEI attribute return a predicted value at given point x Parameters ---------- x: ndarray(1, 1) the x-coord point where to forecast the y-coord virtual point y_data: current y evaluation list only used when qEI is CLmin Returns ------- ndarray(1, 1): the so-called virtual y-coord point | 2.015591 | 2 |
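# A minimal, self-contained sketch of the acquisition formulas used by the EI, SBO and LCB
# criteria above, written with plain NumPy/SciPy instead of a trained Kriging surrogate.
# The toy mean / standard-deviation values below are assumptions chosen only for illustration.
import numpy as np
from scipy.stats import norm

def expected_improvement(f_min, mean, sigma):
    # EI = (f_min - mu) * Phi((f_min - mu) / sigma) + sigma * phi((f_min - mu) / sigma)
    args0 = (f_min - mean) / sigma
    return (f_min - mean) * norm.cdf(args0) + sigma * norm.pdf(args0)

def lower_confidence_bound(mean, sigma):
    # LCB criterion: minimize mu - 3 * sigma
    return mean - 3.0 * sigma

f_min = 1.2                                # best objective value observed so far (assumed)
mean = np.array([1.0, 1.5, 2.0])           # toy surrogate means at three candidate points
sigma = np.array([0.3, 0.6, 0.1])          # toy surrogate standard deviations
print(expected_improvement(f_min, mean, sigma))   # larger is better for EI
print(lower_confidence_bound(mean, sigma))        # smaller is better for LCB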
P3-Capstone/ros/src/twist_controller/twist_controller.py | lucasosouza/udacity-carnd-term3 | 0 | 6630336 | <reponame>lucasosouza/udacity-carnd-term3<filename>P3-Capstone/ros/src/twist_controller/twist_controller.py
import rospy
from yaw_controller import YawController
from pid import PID
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
class Controller(object):
def __init__(self, *args, **kwargs):
# TODO: Implement
vehicle_mass = kwargs['vehicle_mass']
fuel_capacity = kwargs['fuel_capacity']
decel_limit = kwargs['decel_limit']
accel_limit = kwargs['accel_limit']
wheel_radius = kwargs['wheel_radius']
wheel_base = kwargs['wheel_base']
steer_ratio = kwargs['steer_ratio']
max_lat_accel = kwargs['max_lat_accel']
max_steer_angle = kwargs['max_steer_angle']
min_speed = kwargs['min_speed']
linear_p_term = kwargs['linear_p_term']
linear_i_term = kwargs['linear_i_term']
linear_d_term = kwargs['linear_d_term']
# Calculate required braking torque according to vehicle dynamics?
_total_vehicle_mass = vehicle_mass + fuel_capacity * GAS_DENSITY
        # Use F = m * a to calculate the maximum braking force, and hence the torque:
# F_max = m * a_max
# T_max = F_max * r = m * r * a_max
# Assume all CoFs (Coefficient of Frictions) are 1
self._brake_torque_base = _total_vehicle_mass * wheel_radius
self.yaw_controller = YawController(wheel_base, steer_ratio,
min_speed, max_lat_accel, max_steer_angle)
# Tune the parameters in dbw_node
self.linear_pid = PID(linear_p_term, linear_i_term, linear_d_term,
decel_limit, accel_limit)
self._now = None
def reset(self):
"""
Reset PID when dbw_enable event is disabled
:return:
"""
self.linear_pid.reset()
self._now = None
def control(self, *args, **kwargs):
# TODO: Change the arg, kwarg list to suit your needs
# Return throttle, brake, steer
linear_velocity_setpoint = kwargs['linear_velocity_setpoint']
angular_velocity_setpoint = kwargs['angular_velocity_setpoint']
current_linear_velocity = kwargs['current_linear_velocity']
# Sample time interval:
timestamp = rospy.get_time()
if not self._now:
_sample_time = 0.02 # 50 Hz
else:
_sample_time = timestamp - self._now
self._now = timestamp
_error = linear_velocity_setpoint - current_linear_velocity
_control_correction = self.linear_pid.step(_error, _sample_time)
throttle = 0
brake = 0
if _control_correction > 0:
throttle = _control_correction
else:
brake = -1.0 * self._brake_torque_base * _control_correction
# Steer and steer ratio
steering = self.yaw_controller.get_steering(linear_velocity_setpoint,
angular_velocity_setpoint, current_linear_velocity)
return throttle, brake, steering
| import rospy
from yaw_controller import YawController
from pid import PID
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
class Controller(object):
def __init__(self, *args, **kwargs):
# TODO: Implement
vehicle_mass = kwargs['vehicle_mass']
fuel_capacity = kwargs['fuel_capacity']
decel_limit = kwargs['decel_limit']
accel_limit = kwargs['accel_limit']
wheel_radius = kwargs['wheel_radius']
wheel_base = kwargs['wheel_base']
steer_ratio = kwargs['steer_ratio']
max_lat_accel = kwargs['max_lat_accel']
max_steer_angle = kwargs['max_steer_angle']
min_speed = kwargs['min_speed']
linear_p_term = kwargs['linear_p_term']
linear_i_term = kwargs['linear_i_term']
linear_d_term = kwargs['linear_d_term']
# Calculate required braking torque according to vehicle dynamics?
_total_vehicle_mass = vehicle_mass + fuel_capacity * GAS_DENSITY
        # Use F = m * a to calculate the maximum braking force, and hence the torque:
# F_max = m * a_max
# T_max = F_max * r = m * r * a_max
# Assume all CoFs (Coefficient of Frictions) are 1
self._brake_torque_base = _total_vehicle_mass * wheel_radius
self.yaw_controller = YawController(wheel_base, steer_ratio,
min_speed, max_lat_accel, max_steer_angle)
# Tune the parameters in dbw_node
self.linear_pid = PID(linear_p_term, linear_i_term, linear_d_term,
decel_limit, accel_limit)
self._now = None
def reset(self):
"""
Reset PID when dbw_enable event is disabled
:return:
"""
self.linear_pid.reset()
self._now = None
def control(self, *args, **kwargs):
# TODO: Change the arg, kwarg list to suit your needs
# Return throttle, brake, steer
linear_velocity_setpoint = kwargs['linear_velocity_setpoint']
angular_velocity_setpoint = kwargs['angular_velocity_setpoint']
current_linear_velocity = kwargs['current_linear_velocity']
# Sample time interval:
timestamp = rospy.get_time()
if not self._now:
_sample_time = 0.02 # 50 Hz
else:
_sample_time = timestamp - self._now
self._now = timestamp
_error = linear_velocity_setpoint - current_linear_velocity
_control_correction = self.linear_pid.step(_error, _sample_time)
throttle = 0
brake = 0
if _control_correction > 0:
throttle = _control_correction
else:
brake = -1.0 * self._brake_torque_base * _control_correction
# Steer and steer ratio
steering = self.yaw_controller.get_steering(linear_velocity_setpoint,
angular_velocity_setpoint, current_linear_velocity)
return throttle, brake, steering | en | 0.740757 | # TODO: Implement # Calculate required braking torque according to vehicle dynamics? # Use F = ma to calculate the # F_max = m * a_max # T_max = F_max * r = m * r * a_max # Assume all CoFs (Coefficient of Frictions) are 1 # Tune the parameters in dbw_node Reset PID when dbw_enable event is disabled :return: # TODO: Change the arg, kwarg list to suit your needs # Return throttle, brake, steer # Sample time interval: # 50 Hz # Steer and steer ratio | 2.775981 | 3 |
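# A small, self-contained check of the braking-torque base computed in the Controller above:
# from F = m * a and T = F * r, the maximum torque per unit deceleration is
# (vehicle_mass + fuel_capacity * GAS_DENSITY) * wheel_radius. The numeric values below are
# assumptions picked only for illustration, not parameters taken from the project.
GAS_DENSITY = 2.858      # kg per gallon of fuel, as defined above
vehicle_mass = 1736.35   # kg (assumed)
fuel_capacity = 13.5     # gallons (assumed)
wheel_radius = 0.2413    # m (assumed)
deceleration = 1.0       # m/s^2 of requested deceleration (assumed)

total_mass = vehicle_mass + fuel_capacity * GAS_DENSITY
brake_torque_base = total_mass * wheel_radius
print(brake_torque_base * deceleration)  # braking torque in N*m for a 1 m/s^2 deceleration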
datahub/search/test/search_support/simplemodel/signals.py | Staberinde/data-hub-api | 6 | 6630337 | <gh_stars>1-10
from django.db.models.signals import post_delete, pre_delete
from datahub.search.signals import SignalReceiver
from datahub.search.test.search_support.models import SimpleModel as DBSimpleModel
def dummy_on_delete_callback(instance):
"""
Function called on_delete and deliberately empty.
It can be used to check if/when it's called.
"""
receivers = (
SignalReceiver(post_delete, DBSimpleModel, dummy_on_delete_callback),
SignalReceiver(pre_delete, DBSimpleModel, dummy_on_delete_callback),
)
| from django.db.models.signals import post_delete, pre_delete
from datahub.search.signals import SignalReceiver
from datahub.search.test.search_support.models import SimpleModel as DBSimpleModel
def dummy_on_delete_callback(instance):
"""
Function called on_delete and deliberately empty.
It can be used to check if/when it's called.
"""
receivers = (
SignalReceiver(post_delete, DBSimpleModel, dummy_on_delete_callback),
SignalReceiver(pre_delete, DBSimpleModel, dummy_on_delete_callback),
) | en | 0.97116 | Function called on_delete and deliberately empty. It can be used to check if/when it's called. | 2.07657 | 2 |
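# Illustrative sketch only: with plain Django (without datahub's SignalReceiver wrapper),
# a delete callback like the one above would be wired up with Signal.connect. The handler
# name and the commented-out sender are assumptions, not part of the data-hub-api code base.
from django.db.models.signals import post_delete

def on_simple_model_deleted(sender, instance, **kwargs):
    # Called once per deleted row; a test could assert on this side effect.
    print(f"deleted {sender.__name__} pk={instance.pk}")

# post_delete.connect(on_simple_model_deleted, sender=SimpleModel)  # assuming SimpleModel is importable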
checkov/terraform/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py | people-ai/checkov | 1 | 6630338 | from checkov.common.models.enums import CheckResult, CheckCategories
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
import json
import re
class IAMRoleAllowAssumeFromAccount(BaseResourceCheck):
def __init__(self):
name = "Ensure IAM role allows only specific principals in account to assume it"
id = "CKV_AWS_61"
supported_resources = ['aws_iam_role']
categories = [CheckCategories.IAM]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def scan_resource_conf(self, conf):
if isinstance(conf['assume_role_policy'][0], str):
try:
assume_role_block = json.loads(conf['assume_role_policy'][0])
if 'Statement' in assume_role_block.keys():
if 'Principal' in assume_role_block['Statement'][0]:
if 'AWS' in assume_role_block['Statement'][0]['Principal']:
account_access = re.compile('\d{12}|arn:aws:iam::\d{12}:root')
if re.match(account_access, assume_role_block['Statement'][0]['Principal']['AWS']):
return CheckResult.FAILED
except:
pass
return CheckResult.PASSED
check = IAMRoleAllowAssumeFromAccount()
| from checkov.common.models.enums import CheckResult, CheckCategories
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
import json
import re
class IAMRoleAllowAssumeFromAccount(BaseResourceCheck):
def __init__(self):
name = "Ensure IAM role allows only specific principals in account to assume it"
id = "CKV_AWS_61"
supported_resources = ['aws_iam_role']
categories = [CheckCategories.IAM]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def scan_resource_conf(self, conf):
if isinstance(conf['assume_role_policy'][0], str):
try:
assume_role_block = json.loads(conf['assume_role_policy'][0])
if 'Statement' in assume_role_block.keys():
if 'Principal' in assume_role_block['Statement'][0]:
if 'AWS' in assume_role_block['Statement'][0]['Principal']:
account_access = re.compile('\d{12}|arn:aws:iam::\d{12}:root')
if re.match(account_access, assume_role_block['Statement'][0]['Principal']['AWS']):
return CheckResult.FAILED
except:
pass
return CheckResult.PASSED
check = IAMRoleAllowAssumeFromAccount()
| none | 1 | 2.163262 | 2 |
|
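# A standalone sketch of the principal check implemented above: the regex flags an
# assume-role policy whose AWS principal is a bare 12-digit account ID or an account-root
# ARN. Both sample policies are assumptions written only for illustration.
import json
import re

ACCOUNT_ACCESS = re.compile(r'\d{12}|arn:aws:iam::\d{12}:root')

def allows_whole_account(assume_role_policy: str) -> bool:
    statement = json.loads(assume_role_policy)['Statement'][0]
    principal = statement.get('Principal', {}).get('AWS', '')
    return bool(ACCOUNT_ACCESS.match(principal))

broad = json.dumps({"Statement": [{"Effect": "Allow",
                                   "Principal": {"AWS": "arn:aws:iam::123456789012:root"},
                                   "Action": "sts:AssumeRole"}]})
narrow = json.dumps({"Statement": [{"Effect": "Allow",
                                    "Principal": {"Service": "ec2.amazonaws.com"},
                                    "Action": "sts:AssumeRole"}]})
print(allows_whole_account(broad))    # True  -> the check above would report FAILED
print(allows_whole_account(narrow))   # False -> the check above would report PASSED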
deepscreening/chemvae.py | iwasakishuto/DeepScreening | 8 | 6630339 | <gh_stars>1-10
# coding: utf-8
import os
import re
import argparse
import warnings
import numpy as np
from keras.layers import (Layer, Input, Lambda, Dense, Flatten, RepeatVector,
Dropout, Concatenate, Convolution1D, GRU,
BatchNormalization)
from keras.models import load_model, Model
from keras import losses
from keras import backend as K
from .utils import load_params
from .utils import update_params
class ChemVAE(Model):
def __init__(self, params=None, x_train_data={}, y_train_data={}, **kwargs):
if params is None or isinstance(params, str):
params = load_params(path=params, name="chemvae")
params.update(kwargs)
params = self._update_params(params, x_train_data, y_train_data)
# Build the respective models.
encoder = load_encoder(params=params)
decoder = load_decoder(params=params)
property_predictor = load_property_predictor(params=params)
# Integrates everything.
x_in = encoder.input
z_mean, z_log_var, z = encoder(x_in)
reconstructed = decoder(z)
predictions = property_predictor(z)
if isinstance(predictions, list):
outputs = [Lambda(identity, name=re.sub(r"^.*\/(.+_property_)output\/.*$", r"\1pred", pred.name))(pred) for pred in predictions]
outputs.append(reconstructed)
else:
predictions = Lambda(identity, name=re.sub(r"^.*\/(.+_property_)output\/.*$", r"\1pred", predictions[0].name))(predictions)
outputs = [predictions, reconstructed]
super().__init__(inputs=x_in, outputs=outputs, name="ChemVAE")
# Memorize.
self.encoder = encoder
self.decoder = decoder
self.property_predictor = property_predictor
# Add losses.
self._add_losses(z_mean=z_mean, z_log_var=z_log_var, params=params)
self.params = params
def _update_params(self, params, x_train_data={}, y_train_data={}):
if "input_mol_SMILES" in x_train_data:
x_train_input = x_train_data.get("input_mol_SMILES")
num_tranin, max_chem_len, num_chars = x_train_input.shape
params = update_params(params, max_chem_len=max_chem_len, num_chars=num_chars)
if "reg_property_pred" in y_train_data:
y_train_reg = y_train_data.get("reg_property_pred")
num_train, num_reg_prop_tasks = y_train_reg.shape
params = update_params(params, num_reg_prop_tasks=num_reg_prop_tasks)
if "logit_property_pred" in y_train_data:
y_train_logit = y_train_data.get("logit_property_pred")
num_train, num_logit_prop_tasks = y_train_logit.shape
params = update_params(params, num_logit_prop_tasks=num_logit_prop_tasks)
return params
def _add_losses(self, z_mean, z_log_var, params={}):
kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
kl_loss = K.sum(kl_loss, axis=-1)
kl_loss *= -0.5
kl_loss /= params.get("max_chem_len", 1)*params.get("num_chars", 1)
self.add_loss(K.mean(kl_loss))
def fit(self, x_train_data={}, y_train_data={}, batch_size=None, epochs=1,
verbose=1, callbacks=None, validation_split=0.0, validation_data=None,
shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0,
steps_per_epoch=None, validation_steps=None, validation_freq=1,
max_queue_size=10, workers=1, use_multiprocessing=False, **kwargs):
y_train_data["decoder"] = x_train_data.get("input_mol_SMILES")
if validation_data is not None:
x_val_data, y_val_data = validation_data
y_val_data["decoder"] = x_val_data.get("input_mol_SMILES")
validation_data = (x_val_data, y_val_data)
return super().fit(x=x_train_data, y=y_train_data, batch_size=batch_size, epochs=epochs, verbose=verbose,
callbacks=callbacks, validation_split=validation_split, validation_data=validation_data,
shuffle=shuffle, class_weight=class_weight, sample_weight=sample_weight, initial_epoch=initial_epoch,
steps_per_epoch=steps_per_epoch, validation_steps=validation_steps, validation_freq=validation_freq,
max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing, **kwargs)
# =============================
# Lambda layer
# =============================
def identity(x):
return K.identity(x)
def sampling(args):
"""
reparameterization trick
instead of sampling from Q(z|X), sample epsilon = N(0,I)
z = z_mean + sqrt(var) * epsilon
~~~
@params args (tensor): mean and log of variance of Q(z|X)
@return z (tensor): sampled latent vector
"""
z_mean, z_log_var = args
batch = K.shape(z_mean)[0]
dim = K.int_shape(z_mean)[1]
# by default, random_normal has mean = 0 and std = 1.0
epsilon = K.random_normal(shape=(batch, dim))
z_rand = z_mean + K.exp(0.5 * z_log_var)*epsilon
return K.in_train_phase(z_rand, z_mean)
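# A small numeric sketch of the reparameterization trick implemented in `sampling` above,
# using plain NumPy arrays instead of Keras tensors. The latent dimension and the example
# mean / log-variance values are assumptions chosen only for illustration.
def _sampling_numpy_sketch(seed=0):
    rng = np.random.RandomState(seed)
    z_mean = np.array([[0.0, 1.0, -0.5]])        # toy (batch=1, latent_dim=3) mean
    z_log_var = np.array([[0.1, -0.2, 0.0]])     # toy log-variance
    epsilon = rng.standard_normal(z_mean.shape)  # epsilon ~ N(0, I)
    return z_mean + np.exp(0.5 * z_log_var) * epsilon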
# =============================
# Encoder
# =============================
def encoder_model(params={}, **kwargs):
params.update(kwargs)
max_chem_len = params.get("max_chem_len")
num_chars = params.get("num_chars") # zinc.yml
if (max_chem_len is None) or (num_chars is None):
raise ValueError("You should define `max_chem_len` and `num_chars` in parameter file.")
x_in = Input(shape=(max_chem_len, num_chars), name="input_mol_SMILES")
# Convolutional
num_conv_layers = params.get("num_conv_layers", 4)
conv_dim_depth = params.get("conv_dim_depth", 8)
conv_dim_width = params.get("conv_dim_width", 8)
conv_depth_gf = params.get("conv_depth_gf", 1.15875438383)
conv_width_gf = params.get("conv_width_gf", 1.1758149644)
conv_activation = params.get("conv_activation", "tanh")
conv_dropout_rate = params.get("conv_dropout_rate", 0.0)
is_batchnorm_conv = params.get("is_batchnorm_conv", True)
x = x_in
for j in range(num_conv_layers):
x = Convolution1D(filters=int(conv_dim_depth * conv_depth_gf**j),
kernel_size=int(conv_dim_width * conv_width_gf**j),
activation=conv_activation,
name=f"encoder_conv{j}")(x)
if conv_dropout_rate > 0:
x = Dropout(rate=conv_dropout_rate, name=f"encoder_conv_dropout{j}")(x)
if is_batchnorm_conv:
x = BatchNormalization(axis=-1, name=f"encoder_conv_norm{j}")(x)
x = Flatten()(x)
# Middle layers
num_dense_layers = params.get("num_dense_layers", 1)
latent_space_dim = params.get("latent space_dim", 128)
latent_space_dim_gf = params.get("latent_space_dim_gf", 1.4928245388)
dense_activation = params.get("dense_activation", "tanh")
dense_dropout_rate = params.get("dense_dropout_rate", 0.0)
is_batchnorm_dense = params.get("is_batchnorm_dense", True)
for j in range(num_dense_layers):
x = Dense(units=int(latent_space_dim * latent_space_dim_gf**(num_dense_layers-j-1)),
activation=dense_activation,
name=f'encoder_dense{j}')(x)
if dense_dropout_rate > 0:
x = Dropout(rate=dense_dropout_rate, name=f"encoder_dense_dropout{j}")(x)
if is_batchnorm_dense:
x = BatchNormalization(axis=-1, name=f"encoder_dense_norm{j}")(x)
z_mean = Dense(latent_space_dim, name="latent_mean")(x)
z_log_var = Dense(latent_space_dim, name="latent_log_var")(x)
z = Lambda(function=sampling, output_shape=(latent_space_dim,), name="encoder_output")([z_mean, z_log_var])
return Model(x_in, [z_mean, z_log_var, z], name="encoder")
def load_encoder(params={}, **kwargs):
if "encoder_weights_path" in params:
path = params.get("encoder_weights_path")
return load_model(path)
else:
return encoder_model(params, **kwargs)
# =============================
# Decoder
# =============================
def decoder_model(params={}, add_loss=False, **kwargs):
params.update(kwargs)
max_chem_len = params.get("max_chem_len")
num_chars = params.get("num_chars", 35) # zinc.yml
latent_space_dim = params.get("latent space_dim", 128)
z_in = Input(shape=(latent_space_dim,), name="decoder_input")
# Middle layers
num_dense_layers = params.get("num_dense_layers", 1)
latent_space_dim = params.get("latent space_dim", 128)
latent_space_dim_gf = params.get("latent_space_dim_gf", 1.4928245388)
dense_activation = params.get("dense_activation", "tanh")
is_batchnorm_dense = params.get("is_batchnorm_dense", True)
dense_dropout_rate = params.get("dense_dropout_rate", 0.0)
z = z_in
for j in range(num_dense_layers):
z = Dense(units=int(latent_space_dim*latent_space_dim_gf**j),
activation=dense_activation,
name=f"decoder_dense{j}")(z)
if dense_dropout_rate > 0:
z = Dropout(rate=dense_dropout_rate, name=f"decoder_dense_dropout{j}")(z)
if is_batchnorm_dense:
z = BatchNormalization(axis=-1, name=f"decoder_dense_norm{j}")(z)
# Necessary for using GRU vectors
z_reps = RepeatVector(max_chem_len)(z)
num_gru_layers = params.get("num_gru_layers", 3)
gru_dim = params.get("gru_dim", 36)
gru_activation = params.get("gru_activation", "tanh")
gru_dropout_rate = params.get("gru_dropout_rate", 0.0)
is_batchnorm_gru = params.get("is_batchnorm_gru", True)
# Encoder parts using GRUs
x = z_reps
if num_gru_layers > 1:
for j in range(num_gru_layers-1):
x_dec = GRU(units=gru_dim,
return_sequences=True,
activation=gru_activation,
name=f"decoder_gru{j}")(x)
if gru_dropout_rate > 0:
x = Dropout(rate=gru_dropout_rate, name=f"decoder_gru_dropout{j}")(x)
if is_batchnorm_gru:
x = BatchNormalization(axis=-1, name=f"decoder_gru_norm{j}")(x)
x_out = GRU(units=num_chars,
return_sequences=True,
activation='softmax',
name='decoder_gru_final')(x)
return Model(z_in, x_out, name="decoder")
def load_decoder(params={}, **kwargs):
if "decoder_weights_path" in params:
path = params.get("decoder_weights_path")
return load_model(path)
else:
return decoder_model(params, **kwargs)
# ====================
# Property Prediction
# ====================
def property_predictor_model(params={}, **kwargs):
params.update(kwargs)
num_prov_layers = params.get("num_prov_layers", 3)
latent_space_dim = params.get("latent space_dim", 128)
prop_hidden_dim = params.get("prop_hidden_dim", 36)
prop_hidden_dim_gf = params.get("prop_hidden_dim_gf", 0.8)
prop_pred_activation = params.get("prop_pred_activation", "tanh")
prop_pred_dropout_rate = params.get("prop_pred_dropout_rate", 0.0)
is_batchnorm_prop = params.get("is_batchnorm_prop", True)
x_in = Input(shape=(latent_space_dim,), name='prop_pred_input')
x = x_in
for j in range(num_prov_layers):
x = Dense(units=int(prop_hidden_dim * prop_hidden_dim_gf**j),
activation=prop_pred_activation,
name=f"property_predictor_dense{j}")(x)
if prop_pred_dropout_rate > 0:
x = Dropout(rate=prop_pred_dropout_rate, name=f"property_predictor_dropout{j}")(x)
if is_batchnorm_prop:
x = BatchNormalization(axis=-1, name=f"property_predictor_norm{j}")(x)
num_reg_prop_tasks = params.get("num_reg_prop_tasks", 0)
num_logit_prop_tasks = params.get("num_logit_prop_tasks", 0)
if num_reg_prop_tasks+num_logit_prop_tasks==0:
raise ValueError("You must specify either 'regression tasks' and/or " + \
"'logistic tasks' for property prediction.")
# for regression tasks
outputs = []
if num_reg_prop_tasks > 0:
reg_prop_pred = Dense(units=num_reg_prop_tasks,
activation='linear',
name='reg_property_output')(x)
outputs.append(reg_prop_pred)
# for logistic tasks
if num_logit_prop_tasks > 0:
logit_prop_pred = Dense(units=num_logit_prop_tasks,
activation='sigmoid',
name='logit_property_output')(x)
outputs.append(logit_prop_pred)
return Model(inputs=x_in, outputs=outputs, name="property_predictor")
def load_property_predictor(params={}, **kwargs):
if "property_pred_weights_path" in params:
path = params.get("property_pred_weights_path")
return load_model(path)
else:
return property_predictor_model(params, **kwargs)
| # coding: utf-8
import os
import re
import argparse
import warnings
import numpy as np
from keras.layers import (Layer, Input, Lambda, Dense, Flatten, RepeatVector,
Dropout, Concatenate, Convolution1D, GRU,
BatchNormalization)
from keras.models import load_model, Model
from keras import losses
from keras import backend as K
from .utils import load_params
from .utils import update_params
class ChemVAE(Model):
def __init__(self, params=None, x_train_data={}, y_train_data={}, **kwargs):
if params is None or isinstance(params, str):
params = load_params(path=params, name="chemvae")
params.update(kwargs)
params = self._update_params(params, x_train_data, y_train_data)
# Build the respective models.
encoder = load_encoder(params=params)
decoder = load_decoder(params=params)
property_predictor = load_property_predictor(params=params)
# Integrates everything.
x_in = encoder.input
z_mean, z_log_var, z = encoder(x_in)
reconstructed = decoder(z)
predictions = property_predictor(z)
if isinstance(predictions, list):
outputs = [Lambda(identity, name=re.sub(r"^.*\/(.+_property_)output\/.*$", r"\1pred", pred.name))(pred) for pred in predictions]
outputs.append(reconstructed)
else:
predictions = Lambda(identity, name=re.sub(r"^.*\/(.+_property_)output\/.*$", r"\1pred", predictions[0].name))(predictions)
outputs = [predictions, reconstructed]
super().__init__(inputs=x_in, outputs=outputs, name="ChemVAE")
# Memorize.
self.encoder = encoder
self.decoder = decoder
self.property_predictor = property_predictor
# Add losses.
self._add_losses(z_mean=z_mean, z_log_var=z_log_var, params=params)
self.params = params
def _update_params(self, params, x_train_data={}, y_train_data={}):
if "input_mol_SMILES" in x_train_data:
x_train_input = x_train_data.get("input_mol_SMILES")
num_tranin, max_chem_len, num_chars = x_train_input.shape
params = update_params(params, max_chem_len=max_chem_len, num_chars=num_chars)
if "reg_property_pred" in y_train_data:
y_train_reg = y_train_data.get("reg_property_pred")
num_train, num_reg_prop_tasks = y_train_reg.shape
params = update_params(params, num_reg_prop_tasks=num_reg_prop_tasks)
if "logit_property_pred" in y_train_data:
y_train_logit = y_train_data.get("logit_property_pred")
num_train, num_logit_prop_tasks = y_train_logit.shape
params = update_params(params, num_logit_prop_tasks=num_logit_prop_tasks)
return params
def _add_losses(self, z_mean, z_log_var, params={}):
kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
kl_loss = K.sum(kl_loss, axis=-1)
kl_loss *= -0.5
kl_loss /= params.get("max_chem_len", 1)*params.get("num_chars", 1)
self.add_loss(K.mean(kl_loss))
def fit(self, x_train_data={}, y_train_data={}, batch_size=None, epochs=1,
verbose=1, callbacks=None, validation_split=0.0, validation_data=None,
shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0,
steps_per_epoch=None, validation_steps=None, validation_freq=1,
max_queue_size=10, workers=1, use_multiprocessing=False, **kwargs):
y_train_data["decoder"] = x_train_data.get("input_mol_SMILES")
if validation_data is not None:
x_val_data, y_val_data = validation_data
y_val_data["decoder"] = x_val_data.get("input_mol_SMILES")
validation_data = (x_val_data, y_val_data)
return super().fit(x=x_train_data, y=y_train_data, batch_size=batch_size, epochs=epochs, verbose=verbose,
callbacks=callbacks, validation_split=validation_split, validation_data=validation_data,
shuffle=shuffle, class_weight=class_weight, sample_weight=sample_weight, initial_epoch=initial_epoch,
steps_per_epoch=steps_per_epoch, validation_steps=validation_steps, validation_freq=validation_freq,
max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing, **kwargs)
# =============================
# Lambda layer
# =============================
def identity(x):
return K.identity(x)
def sampling(args):
"""
reparameterization trick
instead of sampling from Q(z|X), sample epsilon = N(0,I)
z = z_mean + sqrt(var) * epsilon
~~~
@params args (tensor): mean and log of variance of Q(z|X)
@return z (tensor): sampled latent vector
"""
z_mean, z_log_var = args
batch = K.shape(z_mean)[0]
dim = K.int_shape(z_mean)[1]
# by default, random_normal has mean = 0 and std = 1.0
epsilon = K.random_normal(shape=(batch, dim))
z_rand = z_mean + K.exp(0.5 * z_log_var)*epsilon
return K.in_train_phase(z_rand, z_mean)
# =============================
# Encoder
# =============================
def encoder_model(params={}, **kwargs):
params.update(kwargs)
max_chem_len = params.get("max_chem_len")
num_chars = params.get("num_chars") # zinc.yml
if (max_chem_len is None) or (num_chars is None):
raise ValueError("You should define `max_chem_len` and `num_chars` in parameter file.")
x_in = Input(shape=(max_chem_len, num_chars), name="input_mol_SMILES")
# Convolutional
num_conv_layers = params.get("num_conv_layers", 4)
conv_dim_depth = params.get("conv_dim_depth", 8)
conv_dim_width = params.get("conv_dim_width", 8)
conv_depth_gf = params.get("conv_depth_gf", 1.15875438383)
conv_width_gf = params.get("conv_width_gf", 1.1758149644)
conv_activation = params.get("conv_activation", "tanh")
conv_dropout_rate = params.get("conv_dropout_rate", 0.0)
is_batchnorm_conv = params.get("is_batchnorm_conv", True)
x = x_in
for j in range(num_conv_layers):
x = Convolution1D(filters=int(conv_dim_depth * conv_depth_gf**j),
kernel_size=int(conv_dim_width * conv_width_gf**j),
activation=conv_activation,
name=f"encoder_conv{j}")(x)
if conv_dropout_rate > 0:
x = Dropout(rate=conv_dropout_rate, name=f"encoder_conv_dropout{j}")(x)
if is_batchnorm_conv:
x = BatchNormalization(axis=-1, name=f"encoder_conv_norm{j}")(x)
x = Flatten()(x)
# Middle layers
num_dense_layers = params.get("num_dense_layers", 1)
latent_space_dim = params.get("latent space_dim", 128)
latent_space_dim_gf = params.get("latent_space_dim_gf", 1.4928245388)
dense_activation = params.get("dense_activation", "tanh")
dense_dropout_rate = params.get("dense_dropout_rate", 0.0)
is_batchnorm_dense = params.get("is_batchnorm_dense", True)
for j in range(num_dense_layers):
x = Dense(units=int(latent_space_dim * latent_space_dim_gf**(num_dense_layers-j-1)),
activation=dense_activation,
name=f'encoder_dense{j}')(x)
if dense_dropout_rate > 0:
x = Dropout(rate=dense_dropout_rate, name=f"encoder_dense_dropout{j}")(x)
if is_batchnorm_dense:
x = BatchNormalization(axis=-1, name=f"encoder_dense_norm{j}")(x)
z_mean = Dense(latent_space_dim, name="latent_mean")(x)
z_log_var = Dense(latent_space_dim, name="latent_log_var")(x)
z = Lambda(function=sampling, output_shape=(latent_space_dim,), name="encoder_output")([z_mean, z_log_var])
return Model(x_in, [z_mean, z_log_var, z], name="encoder")
def load_encoder(params={}, **kwargs):
if "encoder_weights_path" in params:
path = params.get("encoder_weights_path")
return load_model(path)
else:
return encoder_model(params, **kwargs)
# =============================
# Decoder
# =============================
def decoder_model(params={}, add_loss=False, **kwargs):
params.update(kwargs)
max_chem_len = params.get("max_chem_len")
num_chars = params.get("num_chars", 35) # zinc.yml
latent_space_dim = params.get("latent space_dim", 128)
z_in = Input(shape=(latent_space_dim,), name="decoder_input")
# Middle layers
num_dense_layers = params.get("num_dense_layers", 1)
latent_space_dim = params.get("latent space_dim", 128)
latent_space_dim_gf = params.get("latent_space_dim_gf", 1.4928245388)
dense_activation = params.get("dense_activation", "tanh")
is_batchnorm_dense = params.get("is_batchnorm_dense", True)
dense_dropout_rate = params.get("dense_dropout_rate", 0.0)
z = z_in
for j in range(num_dense_layers):
z = Dense(units=int(latent_space_dim*latent_space_dim_gf**j),
activation=dense_activation,
name=f"decoder_dense{j}")(z)
if dense_dropout_rate > 0:
z = Dropout(rate=dense_dropout_rate, name=f"decoder_dense_dropout{j}")(z)
if is_batchnorm_dense:
z = BatchNormalization(axis=-1, name=f"decoder_dense_norm{j}")(z)
# Necessary for using GRU vectors
z_reps = RepeatVector(max_chem_len)(z)
num_gru_layers = params.get("num_gru_layers", 3)
gru_dim = params.get("gru_dim", 36)
gru_activation = params.get("gru_activation", "tanh")
gru_dropout_rate = params.get("gru_dropout_rate", 0.0)
is_batchnorm_gru = params.get("is_batchnorm_gru", True)
# Encoder parts using GRUs
x = z_reps
if num_gru_layers > 1:
for j in range(num_gru_layers-1):
x_dec = GRU(units=gru_dim,
return_sequences=True,
activation=gru_activation,
name=f"decoder_gru{j}")(x)
if gru_dropout_rate > 0:
x = Dropout(rate=gru_dropout_rate, name=f"decoder_gru_dropout{j}")(x)
if is_batchnorm_gru:
x = BatchNormalization(axis=-1, name=f"decoder_gru_norm{j}")(x)
x_out = GRU(units=num_chars,
return_sequences=True,
activation='softmax',
name='decoder_gru_final')(x)
return Model(z_in, x_out, name="decoder")
def load_decoder(params={}, **kwargs):
if "decoder_weights_path" in params:
path = params.get("decoder_weights_path")
return load_model(path)
else:
return decoder_model(params, **kwargs)
# ====================
# Property Prediction
# ====================
def property_predictor_model(params={}, **kwargs):
params.update(kwargs)
num_prov_layers = params.get("num_prov_layers", 3)
latent_space_dim = params.get("latent space_dim", 128)
prop_hidden_dim = params.get("prop_hidden_dim", 36)
prop_hidden_dim_gf = params.get("prop_hidden_dim_gf", 0.8)
prop_pred_activation = params.get("prop_pred_activation", "tanh")
prop_pred_dropout_rate = params.get("prop_pred_dropout_rate", 0.0)
is_batchnorm_prop = params.get("is_batchnorm_prop", True)
x_in = Input(shape=(latent_space_dim,), name='prop_pred_input')
x = x_in
for j in range(num_prov_layers):
x = Dense(units=int(prop_hidden_dim * prop_hidden_dim_gf**j),
activation=prop_pred_activation,
name=f"property_predictor_dense{j}")(x)
if prop_pred_dropout_rate > 0:
x = Dropout(rate=prop_pred_dropout_rate, name=f"property_predictor_dropout{j}")(x)
if is_batchnorm_prop:
x = BatchNormalization(axis=-1, name=f"property_predictor_norm{j}")(x)
num_reg_prop_tasks = params.get("num_reg_prop_tasks", 0)
num_logit_prop_tasks = params.get("num_logit_prop_tasks", 0)
if num_reg_prop_tasks+num_logit_prop_tasks==0:
raise ValueError("You must specify either 'regression tasks' and/or " + \
"'logistic tasks' for property prediction.")
# for regression tasks
outputs = []
if num_reg_prop_tasks > 0:
reg_prop_pred = Dense(units=num_reg_prop_tasks,
activation='linear',
name='reg_property_output')(x)
outputs.append(reg_prop_pred)
# for logistic tasks
if num_logit_prop_tasks > 0:
logit_prop_pred = Dense(units=num_logit_prop_tasks,
activation='sigmoid',
name='logit_property_output')(x)
outputs.append(logit_prop_pred)
return Model(inputs=x_in, outputs=outputs, name="property_predictor")
def load_property_predictor(params={}, **kwargs):
if "property_pred_weights_path" in params:
path = params.get("property_pred_weights_path")
return load_model(path)
else:
return property_predictor_model(params, **kwargs) | en | 0.633429 | # coding: utf-8 # Build the respective models. # Integrates everything. # Memorize. # Add losses. # ============================= # Lambda layer # ============================= reparameterization trick instead of sampling from Q(z|X), sample epsilon = N(0,I) z = z_mean + sqrt(var) * epsilon ~~~ @params args (tensor): mean and log of variance of Q(z|X) @return z (tensor): sampled latent vector # by default, random_normal has mean = 0 and std = 1.0 # ============================= # Encoder # ============================= # zinc.yml # Convolutional # Middle layers # ============================= # Decoder # ============================= # zinc.yml # Middle layers # Necessary for using GRU vectors # Encoder parts using GRUs # ==================== # Property Prediction # ==================== # for regression tasks # for logistic tasks | 2.246013 | 2 |
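# A self-contained sketch of the KL term added in ChemVAE._add_losses above:
# KL = -0.5 * sum(1 + log_var - mu^2 - exp(log_var)), averaged over the batch and scaled
# by 1 / (max_chem_len * num_chars). All numeric values here are assumptions for illustration.
import numpy as np

z_mean = np.array([[0.0, 0.5], [-0.3, 0.1]])      # toy (batch=2, latent_dim=2) means
z_log_var = np.array([[0.0, -0.1], [0.2, 0.0]])   # toy log-variances
max_chem_len, num_chars = 120, 35                 # assumed SMILES length and alphabet size

kl = -0.5 * np.sum(1 + z_log_var - np.square(z_mean) - np.exp(z_log_var), axis=-1)
print(np.mean(kl) / (max_chem_len * num_chars))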
tests/test_basic.py | cunni/pyspherepack | 1 | 6630340 | import pytest
from pyspherepack import Box
import numpy as np
# set up a module scoped box so that the test box (and really the pack()) is only instantiated once.
@pytest.fixture(scope="module")
def box_11packed():
b = Box(11,n_iters=50000)
b.pack()
return b
def test_box_instance():
# create Box
b = Box(41) # 41 balls
assert True
def test_pack_two():
# create Box
b = Box(2,n_iters=10000)
b.pack()
assert np.isclose(b.ball_radius(),np.sqrt(2)/2,.01)
def test_density(box_11packed):
assert box_11packed.density() > 60 # permissive, just to make sure
def test_radius(box_11packed):
assert box_11packed.ball_radius() > 0.15 # permissive, just to make sure
| import pytest
from pyspherepack import Box
import numpy as np
# set up a module scoped box so that the test box (and really the pack()) is only instantiated once.
@pytest.fixture(scope="module")
def box_11packed():
b = Box(11,n_iters=50000)
b.pack()
return b
def test_box_instance():
# create Box
b = Box(41) # 41 balls
assert True
def test_pack_two():
# create Box
b = Box(2,n_iters=10000)
b.pack()
assert np.isclose(b.ball_radius(),np.sqrt(2)/2,.01)
def test_density(box_11packed):
assert box_11packed.density() > 60 # permissive, just to make sure
def test_radius(box_11packed):
assert box_11packed.ball_radius() > 0.15 # permissive, just to make sure
| en | 0.90452 | # set up a module scoped box so that the test box (and really the pack()) is only instantiated once. # create Box # 41 balls # create Box # permissive, just to make sure # permissive, just to make sure | 2.353528 | 2 |
Packs/SentinelOne/Integrations/SentinelOne-V2/SentinelOne-V2.py | cbrake1/content | 1 | 6630341 | <gh_stars>1-10
from typing import Callable
import demistomock as demisto
from CommonServerPython import *
''' IMPORTS '''
import json
import requests
import traceback
from dateutil.parser import parse
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' GLOBALS '''
IS_VERSION_2_1: bool
''' HELPER FUNCTIONS '''
def get_threats_outputs(threats, rank: int = 0):
for threat in threats:
threat_rank = int(threat.get('rank') or 0)
if IS_VERSION_2_1 or threat_rank >= rank:
threat_info = threat.get('threatInfo', {}) if IS_VERSION_2_1 else threat
agent_realtime_info = threat.get('agentRealtimeInfo', {}) if IS_VERSION_2_1 else threat
entry = {
'ID': threat.get('id'),
'AgentComputerName': agent_realtime_info.get('agentComputerName'),
'CreatedDate': threat_info.get('createdAt'),
'SiteID': agent_realtime_info.get('siteId'),
'SiteName': agent_realtime_info.get('siteName'),
'Classification': threat_info.get('classification'),
'ClassificationSource': threat_info.get('classificationSource'),
'MitigationStatus': threat_info.get('mitigationStatus'),
'AgentID': agent_realtime_info.get('agentId'),
'ConfidenceLevel': threat_info.get('confidenceLevel'),
'FileContentHash': threat_info.get('sha1') if IS_VERSION_2_1 else threat_info.get('fileContentHash'),
'ThreatName': threat_info.get('threatName'),
'FileSha256': threat_info.get('fileSha256'),
'AgentOsType': agent_realtime_info.get('agentOsType'),
'FilePath': threat_info.get('filePath'),
'Username': threat_info.get('processUser') if IS_VERSION_2_1 else threat_info.get('username'),
'Description': threat_info.get('description'), # Only available in 2.0
'FileDisplayName': threat.get('fileDisplayName'), # Only available in 2.0
'Rank': threat_info.get('rank'), # Only available in 2.0
'MarkedAsBenign': threat_info.get('markedAsBenign'), # Only available in 2.0
'InQuarantine': threat_info.get('inQuarantine'), # Only available in 2.0
'FileMaliciousContent': threat_info.get('fileMaliciousContent'), # Only available in 2.0
}
remove_nulls_from_dictionary(entry)
yield entry
def get_agents_outputs(agents):
for agent in agents:
entry = {
'ID': agent.get('id'),
'NetworkStatus': agent.get('networkStatus'),
'AgentVersion': agent.get('agentVersion'),
'IsDecommissioned': agent.get('isDecommissioned'),
'IsActive': agent.get('isActive'),
'LastActiveDate': agent.get('lastActiveDate'),
'RegisteredAt': agent.get('registeredAt'),
'ExternalIP': agent.get('externalIp'),
'ThreatCount': agent.get('activeThreats'),
'EncryptedApplications': agent.get('encryptedApplications'),
'OSName': agent.get('osName'),
'ComputerName': agent.get('computerName'),
'Domain': agent.get('domain'),
'CreatedAt': agent.get('createdAt'),
'SiteName': agent.get('siteName'),
}
remove_nulls_from_dictionary(entry)
yield entry
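# Illustrative use of the generator above with a single fake agent record; the field values
# are assumptions and cover only a subset of what the real SentinelOne API returns.
def _agents_outputs_sketch():
    fake_agent = {'id': '1000000000000000000', 'networkStatus': 'connected',
                  'agentVersion': '4.1.4', 'isActive': True, 'computerName': 'EC2AMAZ-TEST',
                  'osName': 'Windows Server 2016', 'activeThreats': 0}
    return list(get_agents_outputs([fake_agent]))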
class Client(BaseClient):
"""
Client will implement the service API, and should not contain any Demisto logic.
Should only do requests and return data.
"""
def get_activities_request(self, created_after: str = None, user_emails: str = None, group_ids=None,
created_until: str = None,
activities_ids=None, include_hidden: str = None, created_before: str = None,
threats_ids=None,
activity_types=None, user_ids=None, created_from: str = None,
created_between: str = None, agent_ids=None,
limit: str = '50'):
params = assign_params(
created_at__gt=created_after,
userEmails=user_emails,
groupIds=argToList(group_ids),
created_at__lte=created_until,
ids=argToList(activities_ids),
includeHidden=include_hidden,
created_at__lt=created_before,
threatIds=argToList(threats_ids),
activityTypes=argToList(activity_types),
userIds=argToList(user_ids),
created_at__gte=created_from,
createdAt_between=created_between,
agentIds=argToList(agent_ids),
limit=int(limit), )
response = self._http_request(method='GET', url_suffix='activities', params=params)
return response.get('data', {})
def get_threats_request(self, content_hash=None, mitigation_status=None, created_before=None, created_after=None,
created_until=None, created_from=None, resolved='false', display_name=None, query=None,
threat_ids=None, limit=20, classifications=None):
keys_to_ignore = ['displayName__like' if IS_VERSION_2_1 else 'displayName']
params = assign_params(
contentHashes=argToList(content_hash),
mitigationStatuses=argToList(mitigation_status),
createdAt__lt=created_before,
createdAt__gt=created_after,
createdAt__lte=created_until,
createdAt__gte=created_from,
resolved=argToBoolean(resolved),
displayName__like=display_name,
displayName=display_name,
query=query,
ids=argToList(threat_ids),
limit=int(limit),
classifications=argToList(classifications),
keys_to_ignore=keys_to_ignore,
)
response = self._http_request(method='GET', url_suffix='threats', params=params)
return response.get('data', {})
def mark_as_threat_request(self, threat_ids, target_scope):
endpoint_url = 'threats/mark-as-threat'
payload = {
"filter": {
"ids": threat_ids
},
"data": {
"targetScope": target_scope
}
}
response = self._http_request(method='POST', url_suffix=endpoint_url, json_data=payload)
return response.get('data', {})
def mitigate_threat_request(self, threat_ids, action):
endpoint_url = f'threats/mitigate/{action}'
payload = {
"filter": {
"ids": threat_ids
}
}
response = self._http_request(method='POST', url_suffix=endpoint_url, json_data=payload)
return response.get('data', {})
def resolve_threat_request(self, threat_ids):
endpoint_url = 'threats/mark-as-resolved'
payload = {
"filter": {
"ids": threat_ids
}
}
response = self._http_request(method='POST', url_suffix=endpoint_url, json_data=payload)
return response.get('data', {})
def get_groups_request(self, params: dict):
response = self._http_request(method='GET', url_suffix='groups', params=params)
return response.get('data', {})
def delete_group_request(self, group_id=None):
endpoint_url = f'groups/{group_id}'
response = self._http_request(method='DELETE', url_suffix=endpoint_url)
return response.get('data', {})
def get_sites_request(self, params):
response = self._http_request(method='GET', url_suffix='sites', params=params)
return response.get('data', {})
def move_agent_request(self, group_id, agents_id):
endpoint_url = f'groups/{group_id}/move-agents'
payload = {
"filter": {
"ids": agents_id
}
}
response = self._http_request(method='PUT', url_suffix=endpoint_url, json_data=payload)
return response.get('data', {})
def get_agent_processes_request(self, agents_ids=None):
"""
[DEPRECATED BY SentinelOne] Returns empty array. To get processes of an Agent, see Applications.
"""
endpoint_url = 'agents/processes'
params = {
'ids': agents_ids
}
response = self._http_request(method='GET', url_suffix=endpoint_url, params=params)
return response.get('data', {})
def get_site_request(self, site_id):
endpoint_url = f'sites/{site_id}'
response = self._http_request(method='GET', url_suffix=endpoint_url)
return response.get('data', {})
def reactivate_site_request(self, site_id):
endpoint_url = f'sites/{site_id}/reactivate'
response = self._http_request(method='PUT', url_suffix=endpoint_url)
return response.get('data', {})
def get_threat_summary_request(self, site_ids=None, group_ids=None):
endpoint_url = 'private/threats/summary'
params = {
"siteIds": site_ids,
"groupIds": group_ids
}
response = self._http_request(method='GET', url_suffix=endpoint_url, params=params)
return response.get('data', {})
def list_agents_request(self, params: dict):
response = self._http_request(method='GET', url_suffix='agents', params=params)
return response.get('data', {})
def get_agent_request(self, agent_ids):
params = {
"ids": agent_ids
}
response = self._http_request(method='GET', url_suffix='agents', params=params)
return response.get('data', {})
def connect_to_network_request(self, agent_ids):
endpoint_url = 'agents/actions/connect'
payload = {
'filter': {
'ids': agent_ids
}
}
response = self._http_request(method='POST', url_suffix=endpoint_url, json_data=payload)
return response.get('data', {})
def disconnect_from_network_request(self, agents_id):
endpoint_url = 'agents/actions/disconnect'
payload = {
'filter': {
'ids': agents_id
}
}
response = self._http_request(method='POST', url_suffix=endpoint_url, json_data=payload)
return response.get('data', {})
def broadcast_message_request(self, message, filters):
endpoint_url = 'agents/actions/broadcast'
payload = {
'data': {
'message': message
},
'filter': filters
}
response = self._http_request(method='POST', url_suffix=endpoint_url, json_data=payload)
return response.get('data', {})
def uninstall_agent_request(self, query, agent_id=None, group_id=None):
endpoint_url = 'agents/actions/uninstall'
payload = {
'filter': assign_params(
query=query,
ids=agent_id,
groupIds=group_id,
)
}
response = self._http_request(method='POST', url_suffix=endpoint_url, json_data=payload)
return response.get('data', {})
def shutdown_agents_request(self, query, agent_id=None, group_id=None):
endpoint_url = 'agents/actions/shutdown'
payload = {
'filter': assign_params(
query=query,
ids=agent_id,
groupIds=group_id
)
}
response = self._http_request(method='POST', url_suffix=endpoint_url, json_data=payload)
return response.get('data', {})
def create_query_request(self, query, from_date, to_date):
endpoint_url = 'dv/init-query'
payload = {
'query': query,
'fromDate': from_date,
'toDate': to_date
}
response = self._http_request(method='POST', url_suffix=endpoint_url, json_data=payload)
return response.get('data', {}).get('queryId')
def get_events_request(self, query_id=None, limit=None):
endpoint_url = 'dv/events'
params = {
'query_id': query_id,
'limit': limit
}
response = self._http_request(method='GET', url_suffix=endpoint_url, params=params)
return response.get('data', {})
def get_processes_request(self, query_id=None, limit=None):
endpoint_url = 'dv/events/process'
params = {
'query_id': query_id,
'limit': limit
}
response = self._http_request(method='GET', url_suffix=endpoint_url, params=params)
return response.get('data', {})
def get_hash_reputation_request(self, hash_):
endpoint_url = f'hashes/{hash_}/reputation'
response = self._http_request(method='GET', url_suffix=endpoint_url)
return response
def get_hash_classification_request(self, hash_):
"""
[DEPRECATED by S1] IN BOTH 2.0 and 2.1
"""
endpoint_url = f'hashes/{hash_}/classification'
response = self._http_request(method='GET', url_suffix=endpoint_url)
return response
def get_exclusions_request(self, item_ids=None, os_types=None, exclusion_type: str = None, limit: int = 10):
endpoint_url = 'exclusions'
params = {
"ids": item_ids,
"osTypes": os_types,
"type": exclusion_type,
"limit": limit
}
response = self._http_request(method='GET', url_suffix=endpoint_url, params=params)
return response.get('data', {})
def create_exclusion_item_request(self, exclusion_type, exclusion_value, os_type, description=None,
exclusion_mode=None, path_exclusion_type=None, group_ids=None, site_ids=None):
payload = {
"filter": {
"groupIds": group_ids,
"siteIds": site_ids
},
"data": assign_params(
type=exclusion_type,
value=exclusion_value,
osType=os_type,
description=description,
mode=exclusion_mode,
pathExclusionType=path_exclusion_type
)
}
response = self._http_request(method='POST', url_suffix='exclusions', json_data=payload)
if 'data' in response:
return response.get('data')[0]
return {}
''' COMMANDS + REQUESTS FUNCTIONS '''
def test_module(client: Client, is_fetch: bool, first_fetch: str = None):
"""
Performs basic get request to verify connection and creds.
"""
if is_fetch:
last_fetch = date_to_timestamp(dateparser.parse(first_fetch, settings={'TIMEZONE': 'UTC'}))
last_fetch_date_string = timestamp_to_datestring(last_fetch, '%Y-%m-%dT%H:%M:%S.%fZ')
client.get_threats_request(limit=1, created_after=last_fetch_date_string)
else:
client._http_request(method='GET', url_suffix='activities/types')
return 'ok'
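# Sketch of the first-fetch conversion performed in test_module above: a relative time such
# as "3 days" becomes the ISO-8601 string passed to created_after. Wrapped in a helper so it
# has no side effects; the "3 days" default is an assumption, not an integration default.
def _first_fetch_to_iso_sketch(first_fetch='3 days'):
    last_fetch = date_to_timestamp(dateparser.parse(first_fetch, settings={'TIMEZONE': 'UTC'}))
    return timestamp_to_datestring(last_fetch, '%Y-%m-%dT%H:%M:%S.%fZ')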
def get_activities_command(client: Client, args: dict) -> CommandResults:
"""
Get a list of activities.
"""
context_entries = []
headers = ['ID', 'PrimaryDescription', 'Data', 'UserID', 'CreatedAt', 'ThreatID', 'UpdatedAt']
activities = client.get_activities_request(**args)
for activity in activities:
context_entries.append({
'Hash': activity.get('hash'),
'ActivityType': activity.get('activityType'),
'OsFamily': activity.get('osFamily'),
'PrimaryDescription': activity.get('primaryDescription'),
'Comments': activity.get('comments'),
'AgentUpdatedVersion': activity.get('agentUpdatedVersion'),
'UserID': activity.get('userId'),
'ID': activity.get('id'),
'Data': activity.get('data'),
'CreatedAt': activity.get('createdAt'),
'SecondaryDescription': activity.get('secondaryDescription'),
'ThreatID': activity.get('threatId'),
'GroupID': activity.get('groupId'),
'UpdatedAt': activity.get('updatedAt'),
'Description': activity.get('description'),
'AgentID': activity.get('agentId'),
'SiteID': activity.get('siteId'),
})
return CommandResults(
readable_output=tableToMarkdown('Sentinel One Activities', context_entries, headers=headers, removeNull=True,
headerTransform=pascalToSpace),
outputs_prefix='SentinelOne.Activity',
outputs_key_field='ID',
outputs=context_entries,
raw_response=activities)
def get_groups_command(client: Client, args: dict) -> CommandResults:
"""
Gets the group data.
"""
headers = ['id', 'name', 'type', 'creator', 'creatorId', 'createdAt', 'rank']
query_params = assign_params(
type=args.get('group_type'),
id=args.get('id'),
groupIds=argToList(args.get('group_ids')),
isDefault=args.get('is_default'),
name=args.get('name'),
query=args.get('query'),
rank=args.get('rank'),
limit=int(args.get('limit', 50)),
)
groups = client.get_groups_request(query_params)
return CommandResults(
readable_output=tableToMarkdown('Sentinel One Groups', groups, headers, headerTransform=pascalToSpace,
removeNull=True),
outputs_prefix='SentinelOne.Group',
outputs_key_field='ID',
outputs=groups,
raw_response=groups)
def delete_group(client: Client, args: dict) -> str:
"""
Deletes a group by ID.
"""
group_id = args.get('group_id')
response = client.delete_group_request(group_id)
if response.get('success'):
return f'Group: {group_id} was deleted successfully'
return f'The deletion of group: {group_id} has failed'
def move_agent_to_group_command(client: Client, args: dict) -> CommandResults:
"""
Move agents to a new group.
"""
group_id = args.get('group_id')
agents_id = argToList(args.get('agents_ids', []))
agents_groups = client.move_agent_request(group_id, agents_id)
# Parse response into context & content entries
if agents_groups.get('agentsMoved') and int(agents_groups.get('agentsMoved')) > 0:
agents_moved = True
else:
agents_moved = False
date_time_utc = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
context_entries = {
'Date': date_time_utc,
'AgentsMoved': agents_groups.get('agentsMoved'),
'AffectedAgents': agents_moved,
}
return CommandResults(
        readable_output=tableToMarkdown(f'Sentinel One - Moved Agents\nTotal of: {agents_groups.get("agentsMoved", 0)} '
                                        f'agents were moved successfully', context_entries, removeNull=True),
outputs_prefix='SentinelOne.Agent',
outputs_key_field='Date',
outputs=context_entries,
raw_response=agents_groups)
def get_agent_processes(client: Client, args: dict):
"""
Retrieve running processes for a specific agent.
Note: This feature is obsolete and an empty array will always be returned
"""
headers = ['ProcessName', 'StartTime', 'Pid', 'MemoryUsage', 'CpuUsage', 'ExecutablePath']
contents = []
context = {}
agents_ids = args.get('agents_ids')
processes = client.get_agent_processes_request(agents_ids)
if processes:
for process in processes:
contents.append({
'ProcessName': process.get('processName'),
'CpuUsage': process.get('cpuUsage'),
'MemoryUsage': process.get('memoryUsage'),
'StartTime': process.get('startTime'),
'ExecutablePath': process.get('executablePath'),
'Pid': process.get('pid'),
})
context['SentinelOne.Agent(val.Pid && val.Pid === obj.Pid)'] = processes
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': contents,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Sentinel One Agent Processes', contents, headers, removeNull=True),
'EntryContext': context
})
def get_threats_command(client: Client, args: dict) -> CommandResults:
"""
Gets a list of threats.
Rank only relevant for API version 2.0
"""
headers = ['ID', 'AgentComputerName', 'CreatedDate', 'SiteID', 'SiteName', 'Classification', 'MitigationStatus',
'ConfidenceLevel' if IS_VERSION_2_1 else 'Rank', 'AgentID', 'FileContentHash', 'MarkedAsBenign']
threats = client.get_threats_request(**args)
outputs = list(get_threats_outputs(threats, int(args.get('rank', 0)))) if threats else None
return CommandResults(
readable_output=tableToMarkdown(
'Sentinel One - Getting Threat List', outputs,
metadata='Provides summary information and details for all the threats that matched your search criteria.',
headers=headers, headerTransform=pascalToSpace, removeNull=True),
outputs_prefix='SentinelOne.Threat',
outputs_key_field='ID',
outputs=outputs,
raw_response=threats)
def get_hash_command(client: Client, args: dict) -> CommandResults:
"""
Get hash reputation.
Removed hash classification since SentinelOne has deprecated it - Breaking BC.
"""
hash_ = args.get('hash')
type_ = get_hash_type(hash_)
if type_ == 'Unknown':
raise DemistoException('Enter a valid hash format.')
hash_reputation = client.get_hash_reputation_request(hash_)
reputation = hash_reputation.get('data', {})
contents = {
'Rank': reputation.get('rank'),
'Hash': hash_,
}
return CommandResults(
readable_output=tableToMarkdown('Sentinel One - Hash Reputation\nProvides hash reputation (rank from 0 to 10):',
contents, removeNull=True),
outputs_prefix='SentinelOne.Hash',
outputs_key_field='Hash',
outputs=contents,
raw_response=hash_reputation)
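# Standalone sketch of the validation done in get_hash_command above: the hash type is
# inferred from the hex digest length (32 -> md5, 40 -> sha1, 64 -> sha256) and anything
# else is rejected before the reputation endpoint is called. This only loosely mirrors
# get_hash_type from CommonServerPython and is meant purely as an illustration.
def _hash_type_sketch(hash_: str) -> str:
    import re as _re
    if not hash_ or not _re.fullmatch(r'[0-9a-fA-F]+', hash_):
        return 'Unknown'
    return {32: 'md5', 40: 'sha1', 64: 'sha256'}.get(len(hash_), 'Unknown')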
def mark_as_threat_command(client: Client, args: dict) -> CommandResults:
"""
Mark suspicious threats as threats. Relevant for API version 2.0
"""
context_entries = []
threat_ids = argToList(args.get('threat_ids'))
target_scope = args.get('target_scope')
# Make request and get raw response
affected_threats = client.mark_as_threat_request(threat_ids, target_scope)
# Parse response into context & content entries
if affected_threats.get('affected') and int(affected_threats.get('affected')) > 0:
title = f'Total of {affected_threats.get("affected")} provided threats were marked successfully'
affected = True
else:
affected = False
title = 'No threats were marked'
for threat_id in threat_ids:
context_entries.append({
'MarkedAsThreat': affected,
'ID': threat_id,
})
return CommandResults(
readable_output=tableToMarkdown('Sentinel One - Marking suspicious threats as threats \n' + title,
context_entries, headerTransform=pascalToSpace, removeNull=True),
outputs_prefix='SentinelOne.Threat',
outputs_key_field='ID',
outputs=context_entries,
raw_response=affected_threats)
def mitigate_threat_command(client: Client, args: dict) -> CommandResults:
"""
Apply a mitigation action to a group of threats. Relevant for API version 2.0
"""
contents = []
context_entries = []
# Get arguments
threat_ids = argToList(args.get('threat_ids'))
action = args.get('action')
# Make request and get raw response
mitigated_threats = client.mitigate_threat_request(threat_ids, action)
# Parse response into context & content entries
if mitigated_threats.get('affected') and int(mitigated_threats.get('affected')) > 0:
mitigated = True
meta = f'Total of {mitigated_threats.get("affected")} provided threats were mitigated successfully'
else:
mitigated = False
meta = 'No threats were mitigated'
for threat_id in threat_ids:
contents.append({
'Mitigated': mitigated,
'ID': threat_id,
'Mitigation Action': action,
})
context_entries.append({
'Mitigated': mitigated,
'ID': threat_id,
'Mitigation': {
'Action': action
},
})
return CommandResults(
readable_output=tableToMarkdown('Sentinel One - Mitigating threats', contents, metadata=meta, removeNull=True),
outputs_prefix='SentinelOne.Threat',
outputs_key_field='ID',
outputs=context_entries,
raw_response=mitigated_threats)
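### Hypothetical usage sketch (not part of the integration flow):
# The action becomes part of the request path (threats/mitigate/<action>); the values below are
# illustrative placeholders and the set of accepted actions is defined by the SentinelOne API.
# args = {'threat_ids': 'threat-id-1,threat-id-2', 'action': 'kill'}
# results = mitigate_threat_command(client, args)
# Each ID is reported with a Mitigated flag and Mitigation.Action under SentinelOne.Threat.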
def resolve_threat_command(client: Client, args: dict) -> CommandResults:
"""
Mark threats as resolved
"""
context_entries = []
threat_ids = argToList(args.get('threat_ids'))
# Make request and get raw response
resolved_threats = client.resolve_threat_request(threat_ids)
# Parse response into context & content entries
if resolved_threats.get('affected') and int(resolved_threats.get('affected')) > 0:
resolved = True
title = f'Total of {resolved_threats.get("affected")} provided threats were resolved successfully'
else:
resolved = False
title = 'No threats were resolved'
for threat_id in threat_ids:
context_entries.append({
'Resolved': resolved,
'ID': threat_id,
})
return CommandResults(
readable_output=tableToMarkdown('Sentinel One - Resolving threats\n' + title, context_entries, removeNull=True),
outputs_prefix='SentinelOne.Threat',
outputs_key_field='ID',
outputs=context_entries,
raw_response=resolved_threats)
def get_white_list_command(client: Client, args: dict) -> CommandResults:
"""
List all white items matching the input filter
"""
context_entries = []
# Get arguments
item_ids = argToList(args.get('item_ids', []))
os_types = argToList(args.get('os_types', []))
exclusion_type = args.get('exclusion_type')
limit = int(args.get('limit', 10))
# Make request and get raw response
exclusion_items = client.get_exclusions_request(item_ids, os_types, exclusion_type, limit)
# Parse response into context & content entries
for exclusion_item in exclusion_items:
context_entries.append({
'ID': exclusion_item.get('id'),
'Type': exclusion_item.get('type'),
'CreatedAt': exclusion_item.get('createdAt'),
'Value': exclusion_item.get('value'),
'Source': exclusion_item.get('source'),
'UserID': exclusion_item.get('userId'),
'UpdatedAt': exclusion_item.get('updatedAt'),
'OsType': exclusion_item.get('osType'),
'UserName': exclusion_item.get('userName'),
'Mode': exclusion_item.get('mode'),
})
return CommandResults(
readable_output=tableToMarkdown('Sentinel One - Listing exclusion items', context_entries, removeNull=True,
metadata='Provides summary information and details for all the exclusion items'
' that matched your search criteria.'),
outputs_prefix='SentinelOne.Exclusions',
outputs_key_field='ID',
outputs=context_entries,
raw_response=exclusion_items)
def create_white_item_command(client: Client, args: dict):
"""
Create white item.
"""
context_entries = []
title = ''
group_ids = argToList(args.get('group_ids', []))
site_ids = argToList(args.get('site_ids', []))
exclusion_type = args.get('exclusion_type')
exclusion_value = args.get('exclusion_value')
os_type = args.get('os_type')
description = args.get('description')
exclusion_mode = args.get('exclusion_mode')
path_exclusion_type = args.get('path_exclusion_type')
if not (group_ids or site_ids):
raise DemistoException("You must provide either group_ids or site_ids.")
# Make request and get raw response
new_item = client.create_exclusion_item_request(exclusion_type, exclusion_value, os_type, description,
exclusion_mode, path_exclusion_type, group_ids, site_ids)
# Parse response into context & content entries
if new_item:
title = 'Sentinel One - Adding an exclusion item \n' + \
'The provided item was successfully added to the exclusion list'
context_entries.append({
'ID': new_item.get('id'),
'Type': new_item.get('type'),
'CreatedAt': new_item.get('createdAt'),
})
return CommandResults(
readable_output=tableToMarkdown(title, context_entries, removeNull=True, headerTransform=pascalToSpace),
outputs_prefix='SentinelOne.Exclusion',
outputs_key_field='ID',
outputs=context_entries,
raw_response=new_item)
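### Hypothetical usage sketch (not part of the integration flow):
# At least one of group_ids / site_ids must be supplied or a DemistoException is raised.
# All values below are illustrative placeholders.
# args = {
#     'exclusion_type': 'path',
#     'exclusion_value': 'C:\\Temp\\',
#     'os_type': 'windows',
#     'exclusion_mode': 'suppress',
#     'group_ids': 'group-id-1',
# }
# results = create_white_item_command(client, args)  # new item appears under SentinelOne.Exclusion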
def get_sites_command(client: Client, args: dict) -> CommandResults:
"""
List all sites with filtering options
"""
context_entries = []
query_params = assign_params(
updatedAt=args.get('updated_at'),
query=args.get('query'),
siteType=args.get('site_type'),
features=args.get('features'),
state=args.get('state'),
suite=args.get('suite'),
# HTTP 500 - server internal error when passing admin_only.
adminOnly=argToBoolean(args.get('admin_only')) if args.get('admin_only') else None,
accountId=args.get('account_id'),
name=args.get('site_name'),
createdAt=args.get('created_at'),
limit=int(args.get('limit', 50)),
siteIds=argToList(args.get('site_ids')),
)
# Make request and get raw response
raw_response = client.get_sites_request(query_params)
sites, all_sites = raw_response.get('sites'), raw_response.get('allSites')
# Parse response into context & content entries
for site in sites:
context_entries.append({
'ID': site.get('id'),
'Creator': site.get('creator'),
'Name': site.get('name'),
'Type': site.get('siteType'),
'AccountName': site.get('accountName'),
'State': site.get('state'),
'HealthStatus': site.get('healthStatus'),
'Suite': site.get('suite'),
'CreatedAt': site.get('createdAt'),
'Expiration': site.get('expiration'),
'UnlimitedLicenses': site.get('unlimitedLicenses'),
'TotalLicenses': all_sites.get('totalLicenses'),
'ActiveLicenses': all_sites.get('activeLicenses'),
})
return CommandResults(
readable_output=tableToMarkdown('Sentinel One - Getting List of Sites', context_entries, removeNull=True,
metadata='Provides summary information and details for all sites that matched '
'your search criteria.', headerTransform=pascalToSpace),
outputs_prefix='SentinelOne.Site',
outputs_key_field='ID',
outputs=context_entries,
raw_response=raw_response)
def get_site_command(client: Client, args: dict) -> CommandResults:
"""
Get a specific site by ID
"""
# Init main vars
context_entries = []
# Get arguments
site_id = args.get('site_id')
# Make request and get raw response
site = client.get_site_request(site_id)
# Parse response into context & content entries
if site:
context_entries.append({
'ID': site.get('id'),
'Creator': site.get('creator'),
'Name': site.get('name'),
'Type': site.get('siteType'),
'AccountName': site.get('accountName'),
'State': site.get('state'),
'HealthStatus': site.get('healthStatus'),
'Suite': site.get('suite'),
'CreatedAt': site.get('createdAt'),
'Expiration': site.get('expiration'),
'UnlimitedLicenses': site.get('unlimitedLicenses'),
'TotalLicenses': site.get('totalLicenses'),
'ActiveLicenses': site.get('activeLicenses'),
'AccountID': site.get('accountId'),
'IsDefault': site.get('isDefault'),
})
return CommandResults(
readable_output=tableToMarkdown(f'Sentinel One - Summary About Site: {site_id}', context_entries,
removeNull=True,
metadata='Provides summary information and details for specific site ID',
headerTransform=pascalToSpace),
outputs_prefix='SentinelOne.Site',
outputs_key_field='ID',
outputs=context_entries,
raw_response=site)
def reactivate_site_command(client: Client, args: dict) -> CommandResults:
"""
Reactivate specific site by ID
"""
# Init main vars
context = {}
# Get arguments
site_id = args.get('site_id')
# Make request and get raw response
site = client.reactivate_site_request(site_id)
# Parse response into context & content entries
if site:
context = {
'ID': site.get('id'),
'Reactivated': site.get('success'),
}
return CommandResults(
readable_output=tableToMarkdown(f'Sentinel One - Reactivated Site: {site_id}', context, removeNull=True),
outputs_prefix='SentinelOne.Site',
outputs_key_field='ID',
outputs=context,
raw_response=site)
def get_threat_summary_command(client: Client, args: dict) -> CommandResults:
"""
Get dashboard threat summary
"""
# Init main vars
context_entries = {}
site_ids = argToList(args.get('site_ids'))
group_ids = argToList(args.get('group_ids'))
# Make request and get raw response
threat_summary = client.get_threat_summary_request(site_ids, group_ids)
# Parse response into context & content entries
if threat_summary:
context_entries = {
'InProgress': threat_summary.get('inProgress'),
'MaliciousNotResolved': threat_summary.get('maliciousNotResolved'),
'NotMitigated': threat_summary.get('notMitigated'),
'NotMitigatedNotResolved': threat_summary.get('notMitigatedNotResolved'),
'NotResolved': threat_summary.get('notResolved'),
'Resolved': threat_summary.get('resolved'),
'SuspiciousNotMitigatedNotResolved': threat_summary.get('suspiciousNotMitigatedNotResolved'),
'SuspiciousNotResolved': threat_summary.get('suspiciousNotResolved'),
'Total': threat_summary.get('total'),
}
return CommandResults(
readable_output=tableToMarkdown('Sentinel One - Dashboard Threat Summary', context_entries, removeNull=True,
headerTransform=pascalToSpace),
outputs_prefix='SentinelOne.Threat',
outputs_key_field='ID',
outputs=context_entries,
raw_response=threat_summary)
# Agents Commands
def list_agents_command(client: Client, args: dict) -> CommandResults:
"""
List all agents matching the input filter
"""
# Get arguments
query_params = assign_params(
active_threats=args.get('min_active_threats'),
computer_name=args.get('computer_name'),
scan_status=args.get('scan_status'),
os_type=args.get('os_type'),
created_at=args.get('created_at'),
limit=int(args.get('limit', 10)),
)
# Make request and get raw response
agents = client.list_agents_request(query_params)
# Parse response into context & content entries
context_entries = list(get_agents_outputs(agents)) if agents else None
return CommandResults(
readable_output=tableToMarkdown('Sentinel One - List of Agents', context_entries, headerTransform=pascalToSpace,
removeNull=True, metadata='Provides summary information and details for all'
' the agents that matched your search criteria'),
outputs_prefix='SentinelOne.Agents',
outputs_key_field='ID',
outputs=context_entries,
raw_response=agents)
def get_agent_command(client: Client, args: dict) -> CommandResults:
"""
Get single agent via ID
"""
# Get arguments
agent_ids = argToList(args.get('agent_id'))
# Make request and get raw response
agents = client.get_agent_request(agent_ids)
# Parse response into context & content entries
context_entries = list(get_agents_outputs(agents)) if agents else None
return CommandResults(
readable_output=tableToMarkdown('Sentinel One - Get Agent Details', context_entries,
headerTransform=pascalToSpace, removeNull=True),
outputs_prefix='SentinelOne.Agent',
outputs_key_field='ID',
outputs=context_entries,
raw_response=agents)
def connect_agent_to_network(client: Client, args: dict) -> Union[CommandResults, str]:
"""
Sends a "connect to network" command to all agents matching the input filter.
"""
agent_ids = argToList(args.get('agent_id'))
# Make request and get raw response
raw_response = client.connect_to_network_request(agent_ids)
agents_affected = raw_response.get('affected', 0)
# Parse response into context & content entries
if agents_affected > 0:
agents = client.list_agents_request({'ids': agent_ids})
contents = [{
'NetworkStatus': agent.get('networkStatus'),
'ID': agent.get('id')
} for agent in agents]
return CommandResults(
readable_output=f'{agents_affected} agent(s) successfully connected to the network.',
outputs_prefix='SentinelOne.Agent',
outputs_key_field='ID',
outputs=contents,
raw_response=raw_response)
return 'No agents were connected to the network.'
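### Hypothetical usage sketch (not part of the integration flow):
# args = {'agent_id': 'agent-id-1,agent-id-2'}  # IDs are illustrative placeholders
# result = connect_agent_to_network(client, args)
# When at least one agent is affected, result is a CommandResults listing each agent's
# NetworkStatus; otherwise it is the plain string 'No agents were connected to the network.'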
def disconnect_agent_from_network(client: Client, args: dict) -> Union[CommandResults, str]:
"""
Sends a "disconnect from network" command to all agents matching the input filter.
"""
agent_ids = argToList(args.get('agent_id'))
# Make request and get raw response
raw_response = client.disconnect_from_network_request(agent_ids)
agents_affected = raw_response.get('affected', 0)
if agents_affected > 0:
agents = client.list_agents_request({'ids': agent_ids})
contents = [{
'NetworkStatus': agent.get('networkStatus'),
'ID': agent.get('id')
} for agent in agents]
return CommandResults(
readable_output=f'{agents_affected} agent(s) successfully disconnected from the network.',
outputs_prefix='SentinelOne.Agent',
outputs_key_field='ID',
outputs=contents,
raw_response=raw_response)
return 'No agents were disconnected from the network.'
def broadcast_message(client: Client, args: dict) -> str:
"""
Broadcasts a message to all agents matching the input filter.
"""
message = args.get('message')
filters = assign_params(
isActive=argToBoolean(args.get('active_agent', 'false')),
groupIds=argToList(args.get('group_id')),
ids=argToList(args.get('agent_id')),
domains=argToList(args.get('domain')),
)
response = client.broadcast_message_request(message, filters)
agents_affected = response.get('affected', 0)
if agents_affected > 0:
return 'The message was successfully delivered to the agent(s)'
return 'No messages were sent. Verify that the inputs are correct.'
def shutdown_agents(client: Client, args: dict) -> str:
"""
Sends a shutdown command to all agents matching the input filter
"""
query = args.get('query', '')
agent_id = argToList(args.get('agent_id'))
group_id = argToList(args.get('group_id'))
if not (agent_id or group_id):
raise DemistoException('Expecting at least one of the following arguments to filter by: agent_id, group_id.')
response = client.shutdown_agents_request(query, agent_id, group_id)
affected_agents = response.get('affected', 0)
if affected_agents > 0:
return f'Shutting down {affected_agents} agent(s).'
    return 'No agents were shut down.'
def uninstall_agent(client: Client, args: dict) -> str:
"""
Sends an uninstall command to all agents matching the input filter.
"""
query = args.get('query', '')
agent_id = argToList(args.get('agent_id'))
group_id = argToList(args.get('group_id'))
if not (agent_id or group_id):
raise DemistoException('Expecting at least one of the following arguments to filter by: agent_id, group_id.')
response = client.uninstall_agent_request(query, agent_id, group_id)
affected_agents = response.get('affected', 0)
if affected_agents > 0:
return f'Uninstall was sent to {affected_agents} agent(s).'
return 'No agents were affected.'
# Event Commands
def create_query(client: Client, args: dict) -> CommandResults:
query = args.get('query')
from_date = args.get('from_date')
to_date = args.get('to_date')
query_id = client.create_query_request(query, from_date, to_date)
context_entries = {
'Query': query,
'FromDate': from_date,
'ToDate': to_date,
'QueryID': query_id,
}
return CommandResults(
readable_output=f'The query ID is {query_id}',
outputs_prefix='SentinelOne.Query',
outputs_key_field='QueryID',
outputs=context_entries,
raw_response=query_id)
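### Hypothetical sketch of the Deep Visibility flow (query syntax and values are illustrative):
# q_args = {'query': 'AgentName Contains "host-01"',
#           'from_date': '2021-01-01T00:00:00.000000Z', 'to_date': '2021-01-02T00:00:00.000000Z'}
# query_results = create_query(client, q_args)
# query_id = query_results.outputs['QueryID']
# events = get_events(client, {'query_id': query_id, 'limit': '20'})
# The query may need time to finish on the SentinelOne side before events become available.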
def get_events(client: Client, args: dict) -> Union[CommandResults, str]:
"""
Get all Deep Visibility events from query
"""
contents = []
event_standards = []
query_id = args.get('query_id')
limit = int(args.get('limit', 50))
events = client.get_events_request(query_id, limit)
for event in events:
contents.append({
'EventType': event.get('eventType'),
'Endpoint': event.get('agentName'),
'SiteName': event.get('siteName'),
'User': event.get('user'),
'Time': event.get('processStartTime'),
'AgentOS': event.get('agentOs'),
'ProcessID': event.get('pid'),
'ProcessUID': event.get('srcProcUid') if IS_VERSION_2_1 else event.get('processUniqueKey'),
'ProcessName': event.get('processName'),
'MD5': event.get('md5'),
'SHA256': event.get('sha256'),
})
event_standards.append({
'Type': event.get('eventType'),
'Name': event.get('processName'),
'ID': event.get('pid'),
})
context = {
'SentinelOne.Event(val.ProcessID && val.ProcessID === obj.ProcessID)': contents,
'Event(val.ID && val.ID === obj.ID)': event_standards
}
return CommandResults(
readable_output=tableToMarkdown('SentinelOne Events', contents, removeNull=True),
outputs=context,
raw_response=events)
def get_processes(client: Client, args: dict) -> CommandResults:
"""
Get Deep Visibility events from query by event type - process
"""
contents = []
query_id = args.get('query_id')
limit = int(args.get('limit', 50))
processes = client.get_processes_request(query_id, limit)
for process in processes:
contents.append({
'EventType': process.get('eventType'),
'Endpoint': process.get('agentName'),
'SiteName': process.get('siteName'),
'User': process.get('user'),
'Time': process.get('processStartTime'),
'ParentProcessID': process.get('parentPid'),
'ParentProcessUID': process.get('parentProcessUniqueKey'),
'ParentProcessName': process.get('parentProcessName'),
'ProcessID': process.get('pid'),
'ProcessUID': process.get('srcProcUid') if IS_VERSION_2_1 else process.get('processUniqueKey'),
'ProcessName': process.get('processName'),
'ProcessDisplayName': process.get('processDisplayName'),
'SHA1': process.get('processImageSha1Hash'),
            'CMD': process.get('processCmd'),
'SubsystemType': process.get('processSubSystem'),
'IntegrityLevel': process.get('processIntegrityLevel'),
'ParentProcessStartTime': process.get('parentProcessStartTime'),
})
return CommandResults(
readable_output=tableToMarkdown('SentinelOne Processes', contents, removeNull=True),
outputs_prefix='SentinelOne.Event',
outputs_key_field='ProcessID',
outputs=contents,
raw_response=processes)
def fetch_incidents(client: Client, fetch_limit: int, first_fetch: str, fetch_threat_rank: int):
last_run = demisto.getLastRun()
last_fetch = last_run.get('time')
# handle first time fetch
if last_fetch is None:
last_fetch = date_to_timestamp(dateparser.parse(first_fetch, settings={'TIMEZONE': 'UTC'}))
current_fetch = last_fetch
incidents = []
last_fetch_date_string = timestamp_to_datestring(last_fetch, '%Y-%m-%dT%H:%M:%S.%fZ')
threats = client.get_threats_request(limit=fetch_limit, created_after=last_fetch_date_string)
for threat in threats:
rank = threat.get('rank')
try:
rank = int(rank)
        except (TypeError, ValueError):
rank = 0
# If no fetch threat rank is provided, bring everything, else only fetch above the threshold
if IS_VERSION_2_1 or rank >= fetch_threat_rank:
incident = threat_to_incident(threat)
date_occurred_dt = parse(incident['occurred'])
incident_date = date_to_timestamp(date_occurred_dt, '%Y-%m-%dT%H:%M:%S.%fZ')
if incident_date > last_fetch:
incidents.append(incident)
if incident_date > current_fetch:
current_fetch = incident_date
demisto.setLastRun({'time': current_fetch})
demisto.incidents(incidents)
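### Note on fetch bookkeeping (hypothetical values, for illustration only):
# Timestamps are epoch milliseconds; 'time' only advances when a newer threat is ingested,
# so re-running the fetch with no new threats leaves the stored value unchanged.
# Internally the function ends with a call such as:
# demisto.setLastRun({'time': 1609459200000})  # 2021-01-01T00:00:00Z in ms
# fetch_incidents(client, fetch_limit=10, first_fetch='3 days', fetch_threat_rank=0)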
def threat_to_incident(threat) -> dict:
threat_info = threat.get('threatInfo', {}) if IS_VERSION_2_1 else threat
incident = {
'name': f'Sentinel One Threat: {threat_info.get("classification", "Not classified")}',
'occurred': threat_info.get('createdAt'),
'rawJSON': json.dumps(threat)}
return incident
def main():
""" PARSE INTEGRATION PARAMETERS """
global IS_VERSION_2_1
params = demisto.params()
token = params.get('token')
api_version = params.get('api_version', '2.1')
server = params.get('url').rstrip('/')
base_url = urljoin(server, f'/web/api/v{api_version}/')
use_ssl = not params.get('insecure', False)
proxy = params.get('proxy', False)
IS_VERSION_2_1 = api_version == '2.1'
first_fetch_time = params.get('fetch_time', '3 days')
fetch_threat_rank = int(params.get('fetch_threat_rank', 0))
fetch_limit = int(params.get('fetch_limit', 10))
headers = {
'Authorization': 'ApiToken ' + token if token else 'ApiToken',
'Content-Type': 'application/json',
'Accept': 'application/json'
}
commands: Dict[str, Dict[str, Callable]] = {
'common': {
'sentinelone-get-activities': get_activities_command,
'sentinelone-get-threats': get_threats_command,
'sentinelone-mitigate-threat': mitigate_threat_command,
'sentinelone-get-hash': get_hash_command,
'sentinelone-get-white-list': get_white_list_command,
'sentinelone-create-white-list-item': create_white_item_command,
'sentinelone-get-sites': get_sites_command,
'sentinelone-get-site': get_site_command,
'sentinelone-reactivate-site': reactivate_site_command,
'sentinelone-list-agents': list_agents_command,
'sentinelone-get-agent': get_agent_command,
'sentinelone-get-groups': get_groups_command,
'sentinelone-move-agent': move_agent_to_group_command,
'sentinelone-delete-group': delete_group,
'sentinelone-connect-agent': connect_agent_to_network,
'sentinelone-disconnect-agent': disconnect_agent_from_network,
'sentinelone-broadcast-message': broadcast_message,
'sentinelone-get-events': get_events,
'sentinelone-create-query': create_query,
'sentinelone-get-processes': get_processes,
'sentinelone-shutdown-agent': shutdown_agents,
'sentinelone-uninstall-agent': uninstall_agent,
},
'2.0': {
'sentinelone-mark-as-threat': mark_as_threat_command,
'sentinelone-resolve-threat': resolve_threat_command,
'sentinelone-agent-processes': get_agent_processes,
},
'2.1': {
'sentinelone-threat-summary': get_threat_summary_command,
},
}
''' COMMANDS MANAGER / SWITCH PANEL '''
demisto.info(f'Command being called is {demisto.command()}')
command = demisto.command()
try:
client = Client(
base_url=base_url,
verify=use_ssl,
headers=headers,
proxy=proxy,
)
if command == 'test-module':
return_results(test_module(client, params.get('isFetch'), first_fetch_time))
        elif command == 'fetch-incidents':
fetch_incidents(client, fetch_limit, first_fetch_time, fetch_threat_rank)
else:
if command in commands['common']:
return_results(commands['common'][command](client, demisto.args()))
elif command in commands[api_version]:
return_results(commands[api_version][command](client, demisto.args()))
else:
raise NotImplementedError(f'The {command} command is not supported for API version {api_version}')
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {command} command.\nError:\n{str(e)}')
if __name__ in ['__main__', 'builtin', 'builtins']:
main()
| from typing import Callable
import demistomock as demisto
from CommonServerPython import *
''' IMPORTS '''
import json
import requests
import traceback
from dateutil.parser import parse
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' GLOBALS '''
IS_VERSION_2_1: bool
''' HELPER FUNCTIONS '''
def get_threats_outputs(threats, rank: int = 0):
for threat in threats:
threat_rank = int(threat.get('rank') or 0)
if IS_VERSION_2_1 or threat_rank >= rank:
threat_info = threat.get('threatInfo', {}) if IS_VERSION_2_1 else threat
agent_realtime_info = threat.get('agentRealtimeInfo', {}) if IS_VERSION_2_1 else threat
entry = {
'ID': threat.get('id'),
'AgentComputerName': agent_realtime_info.get('agentComputerName'),
'CreatedDate': threat_info.get('createdAt'),
'SiteID': agent_realtime_info.get('siteId'),
'SiteName': agent_realtime_info.get('siteName'),
'Classification': threat_info.get('classification'),
'ClassificationSource': threat_info.get('classificationSource'),
'MitigationStatus': threat_info.get('mitigationStatus'),
'AgentID': agent_realtime_info.get('agentId'),
'ConfidenceLevel': threat_info.get('confidenceLevel'),
'FileContentHash': threat_info.get('sha1') if IS_VERSION_2_1 else threat_info.get('fileContentHash'),
'ThreatName': threat_info.get('threatName'),
'FileSha256': threat_info.get('fileSha256'),
'AgentOsType': agent_realtime_info.get('agentOsType'),
'FilePath': threat_info.get('filePath'),
'Username': threat_info.get('processUser') if IS_VERSION_2_1 else threat_info.get('username'),
'Description': threat_info.get('description'), # Only available in 2.0
'FileDisplayName': threat.get('fileDisplayName'), # Only available in 2.0
'Rank': threat_info.get('rank'), # Only available in 2.0
'MarkedAsBenign': threat_info.get('markedAsBenign'), # Only available in 2.0
'InQuarantine': threat_info.get('inQuarantine'), # Only available in 2.0
'FileMaliciousContent': threat_info.get('fileMaliciousContent'), # Only available in 2.0
}
remove_nulls_from_dictionary(entry)
yield entry
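### Minimal sketch of how this generator is consumed (hypothetical 2.0-style data):
# threats = [{'id': 't-1', 'rank': 3, 'createdAt': '2021-01-01T00:00:00Z'}]
# entries = list(get_threats_outputs(threats, rank=2))  # yielded because 3 >= 2
# Under API 2.1 the rank filter is bypassed and the 2.1 field mapping
# (threatInfo, agentRealtimeInfo) is used instead.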
def get_agents_outputs(agents):
for agent in agents:
entry = {
'ID': agent.get('id'),
'NetworkStatus': agent.get('networkStatus'),
'AgentVersion': agent.get('agentVersion'),
'IsDecommissioned': agent.get('isDecommissioned'),
'IsActive': agent.get('isActive'),
'LastActiveDate': agent.get('lastActiveDate'),
'RegisteredAt': agent.get('registeredAt'),
'ExternalIP': agent.get('externalIp'),
'ThreatCount': agent.get('activeThreats'),
'EncryptedApplications': agent.get('encryptedApplications'),
'OSName': agent.get('osName'),
'ComputerName': agent.get('computerName'),
'Domain': agent.get('domain'),
'CreatedAt': agent.get('createdAt'),
'SiteName': agent.get('siteName'),
}
remove_nulls_from_dictionary(entry)
yield entry
class Client(BaseClient):
"""
Client will implement the service API, and should not contain any Demisto logic.
Should only do requests and return data.
"""
def get_activities_request(self, created_after: str = None, user_emails: str = None, group_ids=None,
created_until: str = None,
activities_ids=None, include_hidden: str = None, created_before: str = None,
threats_ids=None,
activity_types=None, user_ids=None, created_from: str = None,
created_between: str = None, agent_ids=None,
limit: str = '50'):
params = assign_params(
created_at__gt=created_after,
userEmails=user_emails,
groupIds=argToList(group_ids),
created_at__lte=created_until,
ids=argToList(activities_ids),
includeHidden=include_hidden,
created_at__lt=created_before,
threatIds=argToList(threats_ids),
activityTypes=argToList(activity_types),
userIds=argToList(user_ids),
created_at__gte=created_from,
createdAt_between=created_between,
agentIds=argToList(agent_ids),
limit=int(limit), )
response = self._http_request(method='GET', url_suffix='activities', params=params)
return response.get('data', {})
def get_threats_request(self, content_hash=None, mitigation_status=None, created_before=None, created_after=None,
created_until=None, created_from=None, resolved='false', display_name=None, query=None,
threat_ids=None, limit=20, classifications=None):
keys_to_ignore = ['displayName__like' if IS_VERSION_2_1 else 'displayName']
params = assign_params(
contentHashes=argToList(content_hash),
mitigationStatuses=argToList(mitigation_status),
createdAt__lt=created_before,
createdAt__gt=created_after,
createdAt__lte=created_until,
createdAt__gte=created_from,
resolved=argToBoolean(resolved),
displayName__like=display_name,
displayName=display_name,
query=query,
ids=argToList(threat_ids),
limit=int(limit),
classifications=argToList(classifications),
keys_to_ignore=keys_to_ignore,
)
response = self._http_request(method='GET', url_suffix='threats', params=params)
return response.get('data', {})
def mark_as_threat_request(self, threat_ids, target_scope):
endpoint_url = 'threats/mark-as-threat'
payload = {
"filter": {
"ids": threat_ids
},
"data": {
"targetScope": target_scope
}
}
response = self._http_request(method='POST', url_suffix=endpoint_url, json_data=payload)
return response.get('data', {})
def mitigate_threat_request(self, threat_ids, action):
endpoint_url = f'threats/mitigate/{action}'
payload = {
"filter": {
"ids": threat_ids
}
}
response = self._http_request(method='POST', url_suffix=endpoint_url, json_data=payload)
return response.get('data', {})
def resolve_threat_request(self, threat_ids):
endpoint_url = 'threats/mark-as-resolved'
payload = {
"filter": {
"ids": threat_ids
}
}
response = self._http_request(method='POST', url_suffix=endpoint_url, json_data=payload)
return response.get('data', {})
def get_groups_request(self, params: dict):
response = self._http_request(method='GET', url_suffix='groups', params=params)
return response.get('data', {})
def delete_group_request(self, group_id=None):
endpoint_url = f'groups/{group_id}'
response = self._http_request(method='DELETE', url_suffix=endpoint_url)
return response.get('data', {})
def get_sites_request(self, params):
response = self._http_request(method='GET', url_suffix='sites', params=params)
return response.get('data', {})
def move_agent_request(self, group_id, agents_id):
endpoint_url = f'groups/{group_id}/move-agents'
payload = {
"filter": {
"ids": agents_id
}
}
response = self._http_request(method='PUT', url_suffix=endpoint_url, json_data=payload)
return response.get('data', {})
def get_agent_processes_request(self, agents_ids=None):
"""
[DEPRECATED BY SentinelOne] Returns empty array. To get processes of an Agent, see Applications.
"""
endpoint_url = 'agents/processes'
params = {
'ids': agents_ids
}
response = self._http_request(method='GET', url_suffix=endpoint_url, params=params)
return response.get('data', {})
def get_site_request(self, site_id):
endpoint_url = f'sites/{site_id}'
response = self._http_request(method='GET', url_suffix=endpoint_url)
return response.get('data', {})
def reactivate_site_request(self, site_id):
endpoint_url = f'sites/{site_id}/reactivate'
response = self._http_request(method='PUT', url_suffix=endpoint_url)
return response.get('data', {})
def get_threat_summary_request(self, site_ids=None, group_ids=None):
endpoint_url = 'private/threats/summary'
params = {
"siteIds": site_ids,
"groupIds": group_ids
}
response = self._http_request(method='GET', url_suffix=endpoint_url, params=params)
return response.get('data', {})
def list_agents_request(self, params: dict):
response = self._http_request(method='GET', url_suffix='agents', params=params)
return response.get('data', {})
def get_agent_request(self, agent_ids):
params = {
"ids": agent_ids
}
response = self._http_request(method='GET', url_suffix='agents', params=params)
return response.get('data', {})
def connect_to_network_request(self, agent_ids):
endpoint_url = 'agents/actions/connect'
payload = {
'filter': {
'ids': agent_ids
}
}
response = self._http_request(method='POST', url_suffix=endpoint_url, json_data=payload)
return response.get('data', {})
def disconnect_from_network_request(self, agents_id):
endpoint_url = 'agents/actions/disconnect'
payload = {
'filter': {
'ids': agents_id
}
}
response = self._http_request(method='POST', url_suffix=endpoint_url, json_data=payload)
return response.get('data', {})
def broadcast_message_request(self, message, filters):
endpoint_url = 'agents/actions/broadcast'
payload = {
'data': {
'message': message
},
'filter': filters
}
response = self._http_request(method='POST', url_suffix=endpoint_url, json_data=payload)
return response.get('data', {})
def uninstall_agent_request(self, query, agent_id=None, group_id=None):
endpoint_url = 'agents/actions/uninstall'
payload = {
'filter': assign_params(
query=query,
ids=agent_id,
groupIds=group_id,
)
}
response = self._http_request(method='POST', url_suffix=endpoint_url, json_data=payload)
return response.get('data', {})
def shutdown_agents_request(self, query, agent_id=None, group_id=None):
endpoint_url = 'agents/actions/shutdown'
payload = {
'filter': assign_params(
query=query,
ids=agent_id,
groupIds=group_id
)
}
response = self._http_request(method='POST', url_suffix=endpoint_url, json_data=payload)
return response.get('data', {})
def create_query_request(self, query, from_date, to_date):
endpoint_url = 'dv/init-query'
payload = {
'query': query,
'fromDate': from_date,
'toDate': to_date
}
response = self._http_request(method='POST', url_suffix=endpoint_url, json_data=payload)
return response.get('data', {}).get('queryId')
def get_events_request(self, query_id=None, limit=None):
endpoint_url = 'dv/events'
params = {
'query_id': query_id,
'limit': limit
}
response = self._http_request(method='GET', url_suffix=endpoint_url, params=params)
return response.get('data', {})
def get_processes_request(self, query_id=None, limit=None):
endpoint_url = 'dv/events/process'
params = {
'query_id': query_id,
'limit': limit
}
response = self._http_request(method='GET', url_suffix=endpoint_url, params=params)
return response.get('data', {})
def get_hash_reputation_request(self, hash_):
endpoint_url = f'hashes/{hash_}/reputation'
response = self._http_request(method='GET', url_suffix=endpoint_url)
return response
def get_hash_classification_request(self, hash_):
"""
[DEPRECATED by S1] IN BOTH 2.0 and 2.1
"""
endpoint_url = f'hashes/{hash_}/classification'
response = self._http_request(method='GET', url_suffix=endpoint_url)
return response
def get_exclusions_request(self, item_ids=None, os_types=None, exclusion_type: str = None, limit: int = 10):
endpoint_url = 'exclusions'
params = {
"ids": item_ids,
"osTypes": os_types,
"type": exclusion_type,
"limit": limit
}
response = self._http_request(method='GET', url_suffix=endpoint_url, params=params)
return response.get('data', {})
def create_exclusion_item_request(self, exclusion_type, exclusion_value, os_type, description=None,
exclusion_mode=None, path_exclusion_type=None, group_ids=None, site_ids=None):
payload = {
"filter": {
"groupIds": group_ids,
"siteIds": site_ids
},
"data": assign_params(
type=exclusion_type,
value=exclusion_value,
osType=os_type,
description=description,
mode=exclusion_mode,
pathExclusionType=path_exclusion_type
)
}
response = self._http_request(method='POST', url_suffix='exclusions', json_data=payload)
if 'data' in response:
return response.get('data')[0]
return {}
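### Minimal sketch of standalone client construction (all values are placeholders):
# client = Client(
#     base_url='https://<management-console>/web/api/v2.1/',
#     verify=True,
#     proxy=False,
#     headers={'Authorization': 'ApiToken <API_TOKEN>', 'Content-Type': 'application/json'},
# )
# threats = client.get_threats_request(limit=1)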
''' COMMANDS + REQUESTS FUNCTIONS '''
def test_module(client: Client, is_fetch: bool, first_fetch: str = None):
"""
Performs basic get request to verify connection and creds.
"""
if is_fetch:
last_fetch = date_to_timestamp(dateparser.parse(first_fetch, settings={'TIMEZONE': 'UTC'}))
last_fetch_date_string = timestamp_to_datestring(last_fetch, '%Y-%m-%dT%H:%M:%S.%fZ')
client.get_threats_request(limit=1, created_after=last_fetch_date_string)
else:
client._http_request(method='GET', url_suffix='activities/types')
return 'ok'
def get_activities_command(client: Client, args: dict) -> CommandResults:
"""
Get a list of activities.
"""
context_entries = []
headers = ['ID', 'PrimaryDescription', 'Data', 'UserID', 'CreatedAt', 'ThreatID', 'UpdatedAt']
activities = client.get_activities_request(**args)
for activity in activities:
context_entries.append({
'Hash': activity.get('hash'),
'ActivityType': activity.get('activityType'),
'OsFamily': activity.get('osFamily'),
'PrimaryDescription': activity.get('primaryDescription'),
'Comments': activity.get('comments'),
'AgentUpdatedVersion': activity.get('agentUpdatedVersion'),
'UserID': activity.get('userId'),
'ID': activity.get('id'),
'Data': activity.get('data'),
'CreatedAt': activity.get('createdAt'),
'SecondaryDescription': activity.get('secondaryDescription'),
'ThreatID': activity.get('threatId'),
'GroupID': activity.get('groupId'),
'UpdatedAt': activity.get('updatedAt'),
'Description': activity.get('description'),
'AgentID': activity.get('agentId'),
'SiteID': activity.get('siteId'),
})
return CommandResults(
readable_output=tableToMarkdown('Sentinel One Activities', context_entries, headers=headers, removeNull=True,
headerTransform=pascalToSpace),
outputs_prefix='SentinelOne.Activity',
outputs_key_field='ID',
outputs=context_entries,
raw_response=activities)
def get_groups_command(client: Client, args: dict) -> CommandResults:
"""
Gets the group data.
"""
headers = ['id', 'name', 'type', 'creator', 'creatorId', 'createdAt', 'rank']
query_params = assign_params(
type=args.get('group_type'),
id=args.get('id'),
groupIds=argToList(args.get('group_ids')),
isDefault=args.get('is_default'),
name=args.get('name'),
query=args.get('query'),
rank=args.get('rank'),
limit=int(args.get('limit', 50)),
)
groups = client.get_groups_request(query_params)
return CommandResults(
readable_output=tableToMarkdown('Sentinel One Groups', groups, headers, headerTransform=pascalToSpace,
removeNull=True),
outputs_prefix='SentinelOne.Group',
outputs_key_field='ID',
outputs=groups,
raw_response=groups)
def delete_group(client: Client, args: dict) -> str:
"""
Deletes a group by ID.
"""
group_id = args.get('group_id')
response = client.delete_group_request(group_id)
if response.get('success'):
return f'Group: {group_id} was deleted successfully'
return f'The deletion of group: {group_id} has failed'
def move_agent_to_group_command(client: Client, args: dict) -> CommandResults:
"""
Move agents to a new group.
"""
group_id = args.get('group_id')
agents_id = argToList(args.get('agents_ids', []))
agents_groups = client.move_agent_request(group_id, agents_id)
# Parse response into context & content entries
if agents_groups.get('agentsMoved') and int(agents_groups.get('agentsMoved')) > 0:
agents_moved = True
else:
agents_moved = False
date_time_utc = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
context_entries = {
'Date': date_time_utc,
'AgentsMoved': agents_groups.get('agentsMoved'),
'AffectedAgents': agents_moved,
}
return CommandResults(
        readable_output=tableToMarkdown(f'Sentinel One - Moved Agents\nTotal of: {agents_groups.get("agentsMoved", 0)} '
                                        f'agents were moved successfully', context_entries, removeNull=True),
outputs_prefix='SentinelOne.Agent',
outputs_key_field='Date',
outputs=context_entries,
raw_response=agents_groups)
def get_agent_processes(client: Client, args: dict):
"""
Retrieve running processes for a specific agent.
Note: This feature is obsolete and an empty array will always be returned
"""
headers = ['ProcessName', 'StartTime', 'Pid', 'MemoryUsage', 'CpuUsage', 'ExecutablePath']
contents = []
context = {}
agents_ids = args.get('agents_ids')
processes = client.get_agent_processes_request(agents_ids)
if processes:
for process in processes:
contents.append({
'ProcessName': process.get('processName'),
'CpuUsage': process.get('cpuUsage'),
'MemoryUsage': process.get('memoryUsage'),
'StartTime': process.get('startTime'),
'ExecutablePath': process.get('executablePath'),
'Pid': process.get('pid'),
})
context['SentinelOne.Agent(val.Pid && val.Pid === obj.Pid)'] = processes
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': contents,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Sentinel One Agent Processes', contents, headers, removeNull=True),
'EntryContext': context
})
def get_threats_command(client: Client, args: dict) -> CommandResults:
"""
Gets a list of threats.
Rank only relevant for API version 2.0
"""
headers = ['ID', 'AgentComputerName', 'CreatedDate', 'SiteID', 'SiteName', 'Classification', 'MitigationStatus',
'ConfidenceLevel' if IS_VERSION_2_1 else 'Rank', 'AgentID', 'FileContentHash', 'MarkedAsBenign']
threats = client.get_threats_request(**args)
outputs = list(get_threats_outputs(threats, int(args.get('rank', 0)))) if threats else None
return CommandResults(
readable_output=tableToMarkdown(
'Sentinel One - Getting Threat List', outputs,
metadata='Provides summary information and details for all the threats that matched your search criteria.',
headers=headers, headerTransform=pascalToSpace, removeNull=True),
outputs_prefix='SentinelOne.Threat',
outputs_key_field='ID',
outputs=outputs,
raw_response=threats)
def get_hash_command(client: Client, args: dict) -> CommandResults:
"""
Get hash reputation.
Removed hash classification since SentinelOne has deprecated it - Breaking BC.
"""
hash_ = args.get('hash')
type_ = get_hash_type(hash_)
if type_ == 'Unknown':
raise DemistoException('Enter a valid hash format.')
hash_reputation = client.get_hash_reputation_request(hash_)
reputation = hash_reputation.get('data', {})
contents = {
'Rank': reputation.get('rank'),
'Hash': hash_,
}
return CommandResults(
readable_output=tableToMarkdown('Sentinel One - Hash Reputation\nProvides hash reputation (rank from 0 to 10):',
contents, removeNull=True),
outputs_prefix='SentinelOne.Hash',
outputs_key_field='Hash',
outputs=contents,
raw_response=hash_reputation)
def mark_as_threat_command(client: Client, args: dict) -> CommandResults:
"""
Mark suspicious threats as threats. Relevant for API version 2.0
"""
context_entries = []
threat_ids = argToList(args.get('threat_ids'))
target_scope = args.get('target_scope')
# Make request and get raw response
affected_threats = client.mark_as_threat_request(threat_ids, target_scope)
# Parse response into context & content entries
if affected_threats.get('affected') and int(affected_threats.get('affected')) > 0:
title = f'Total of {affected_threats.get("affected")} provided threats were marked successfully'
affected = True
else:
affected = False
title = 'No threats were marked'
for threat_id in threat_ids:
context_entries.append({
'MarkedAsThreat': affected,
'ID': threat_id,
})
return CommandResults(
readable_output=tableToMarkdown('Sentinel One - Marking suspicious threats as threats \n' + title,
context_entries, headerTransform=pascalToSpace, removeNull=True),
outputs_prefix='SentinelOne.Threat',
outputs_key_field='ID',
outputs=context_entries,
raw_response=affected_threats)
def mitigate_threat_command(client: Client, args: dict) -> CommandResults:
"""
Apply a mitigation action to a group of threats. Relevant for API version 2.0
"""
contents = []
context_entries = []
# Get arguments
threat_ids = argToList(args.get('threat_ids'))
action = args.get('action')
# Make request and get raw response
mitigated_threats = client.mitigate_threat_request(threat_ids, action)
# Parse response into context & content entries
if mitigated_threats.get('affected') and int(mitigated_threats.get('affected')) > 0:
mitigated = True
meta = f'Total of {mitigated_threats.get("affected")} provided threats were mitigated successfully'
else:
mitigated = False
meta = 'No threats were mitigated'
for threat_id in threat_ids:
contents.append({
'Mitigated': mitigated,
'ID': threat_id,
'Mitigation Action': action,
})
context_entries.append({
'Mitigated': mitigated,
'ID': threat_id,
'Mitigation': {
'Action': action
},
})
return CommandResults(
readable_output=tableToMarkdown('Sentinel One - Mitigating threats', contents, metadata=meta, removeNull=True),
outputs_prefix='SentinelOne.Threat',
outputs_key_field='ID',
outputs=context_entries,
raw_response=mitigated_threats)
def resolve_threat_command(client: Client, args: dict) -> CommandResults:
"""
Mark threats as resolved
"""
context_entries = []
threat_ids = argToList(args.get('threat_ids'))
# Make request and get raw response
resolved_threats = client.resolve_threat_request(threat_ids)
# Parse response into context & content entries
if resolved_threats.get('affected') and int(resolved_threats.get('affected')) > 0:
resolved = True
title = f'Total of {resolved_threats.get("affected")} provided threats were resolved successfully'
else:
resolved = False
title = 'No threats were resolved'
for threat_id in threat_ids:
context_entries.append({
'Resolved': resolved,
'ID': threat_id,
})
return CommandResults(
readable_output=tableToMarkdown('Sentinel One - Resolving threats\n' + title, context_entries, removeNull=True),
outputs_prefix='SentinelOne.Threat',
outputs_key_field='ID',
outputs=context_entries,
raw_response=resolved_threats)
def get_white_list_command(client: Client, args: dict) -> CommandResults:
"""
List all white items matching the input filter
"""
context_entries = []
# Get arguments
item_ids = argToList(args.get('item_ids', []))
os_types = argToList(args.get('os_types', []))
exclusion_type = args.get('exclusion_type')
limit = int(args.get('limit', 10))
# Make request and get raw response
exclusion_items = client.get_exclusions_request(item_ids, os_types, exclusion_type, limit)
# Parse response into context & content entries
for exclusion_item in exclusion_items:
context_entries.append({
'ID': exclusion_item.get('id'),
'Type': exclusion_item.get('type'),
'CreatedAt': exclusion_item.get('createdAt'),
'Value': exclusion_item.get('value'),
'Source': exclusion_item.get('source'),
'UserID': exclusion_item.get('userId'),
'UpdatedAt': exclusion_item.get('updatedAt'),
'OsType': exclusion_item.get('osType'),
'UserName': exclusion_item.get('userName'),
'Mode': exclusion_item.get('mode'),
})
return CommandResults(
readable_output=tableToMarkdown('Sentinel One - Listing exclusion items', context_entries, removeNull=True,
metadata='Provides summary information and details for all the exclusion items'
' that matched your search criteria.'),
outputs_prefix='SentinelOne.Exclusions',
outputs_key_field='ID',
outputs=context_entries,
raw_response=exclusion_items)
def create_white_item_command(client: Client, args: dict):
"""
Create white item.
"""
context_entries = []
title = ''
group_ids = argToList(args.get('group_ids', []))
site_ids = argToList(args.get('site_ids', []))
exclusion_type = args.get('exclusion_type')
exclusion_value = args.get('exclusion_value')
os_type = args.get('os_type')
description = args.get('description')
exclusion_mode = args.get('exclusion_mode')
path_exclusion_type = args.get('path_exclusion_type')
if not (group_ids or site_ids):
raise DemistoException("You must provide either group_ids or site_ids.")
# Make request and get raw response
new_item = client.create_exclusion_item_request(exclusion_type, exclusion_value, os_type, description,
exclusion_mode, path_exclusion_type, group_ids, site_ids)
# Parse response into context & content entries
if new_item:
title = 'Sentinel One - Adding an exclusion item \n' + \
'The provided item was successfully added to the exclusion list'
context_entries.append({
'ID': new_item.get('id'),
'Type': new_item.get('type'),
'CreatedAt': new_item.get('createdAt'),
})
return CommandResults(
readable_output=tableToMarkdown(title, context_entries, removeNull=True, headerTransform=pascalToSpace),
outputs_prefix='SentinelOne.Exclusion',
outputs_key_field='ID',
outputs=context_entries,
raw_response=new_item)
def get_sites_command(client: Client, args: dict) -> CommandResults:
"""
List all sites with filtering options
"""
context_entries = []
query_params = assign_params(
updatedAt=args.get('updated_at'),
query=args.get('query'),
siteType=args.get('site_type'),
features=args.get('features'),
state=args.get('state'),
suite=args.get('suite'),
# HTTP 500 - server internal error when passing admin_only.
adminOnly=argToBoolean(args.get('admin_only')) if args.get('admin_only') else None,
accountId=args.get('account_id'),
name=args.get('site_name'),
createdAt=args.get('created_at'),
limit=int(args.get('limit', 50)),
siteIds=argToList(args.get('site_ids')),
)
# Make request and get raw response
raw_response = client.get_sites_request(query_params)
sites, all_sites = raw_response.get('sites'), raw_response.get('allSites')
# Parse response into context & content entries
for site in sites:
context_entries.append({
'ID': site.get('id'),
'Creator': site.get('creator'),
'Name': site.get('name'),
'Type': site.get('siteType'),
'AccountName': site.get('accountName'),
'State': site.get('state'),
'HealthStatus': site.get('healthStatus'),
'Suite': site.get('suite'),
'CreatedAt': site.get('createdAt'),
'Expiration': site.get('expiration'),
'UnlimitedLicenses': site.get('unlimitedLicenses'),
'TotalLicenses': all_sites.get('totalLicenses'),
'ActiveLicenses': all_sites.get('activeLicenses'),
})
return CommandResults(
readable_output=tableToMarkdown('Sentinel One - Getting List of Sites', context_entries, removeNull=True,
metadata='Provides summary information and details for all sites that matched '
'your search criteria.', headerTransform=pascalToSpace),
outputs_prefix='SentinelOne.Site',
outputs_key_field='ID',
outputs=context_entries,
raw_response=raw_response)
def get_site_command(client: Client, args: dict) -> CommandResults:
"""
Get a specific site by ID
"""
# Init main vars
context_entries = []
# Get arguments
site_id = args.get('site_id')
# Make request and get raw response
site = client.get_site_request(site_id)
# Parse response into context & content entries
if site:
context_entries.append({
'ID': site.get('id'),
'Creator': site.get('creator'),
'Name': site.get('name'),
'Type': site.get('siteType'),
'AccountName': site.get('accountName'),
'State': site.get('state'),
'HealthStatus': site.get('healthStatus'),
'Suite': site.get('suite'),
'CreatedAt': site.get('createdAt'),
'Expiration': site.get('expiration'),
'UnlimitedLicenses': site.get('unlimitedLicenses'),
'TotalLicenses': site.get('totalLicenses'),
'ActiveLicenses': site.get('activeLicenses'),
'AccountID': site.get('accountId'),
'IsDefault': site.get('isDefault'),
})
return CommandResults(
readable_output=tableToMarkdown(f'Sentinel One - Summary About Site: {site_id}', context_entries,
removeNull=True,
metadata='Provides summary information and details for specific site ID',
headerTransform=pascalToSpace),
outputs_prefix='SentinelOne.Site',
outputs_key_field='ID',
outputs=context_entries,
raw_response=site)
def reactivate_site_command(client: Client, args: dict) -> CommandResults:
"""
Reactivate specific site by ID
"""
# Init main vars
context = {}
# Get arguments
site_id = args.get('site_id')
# Make request and get raw response
site = client.reactivate_site_request(site_id)
# Parse response into context & content entries
if site:
context = {
'ID': site.get('id'),
'Reactivated': site.get('success'),
}
return CommandResults(
readable_output=tableToMarkdown(f'Sentinel One - Reactivated Site: {site_id}', context, removeNull=True),
outputs_prefix='SentinelOne.Site',
outputs_key_field='ID',
outputs=context,
raw_response=site)
def get_threat_summary_command(client: Client, args: dict) -> CommandResults:
"""
Get dashboard threat summary
"""
# Init main vars
context_entries = {}
site_ids = argToList(args.get('site_ids'))
group_ids = argToList(args.get('group_ids'))
# Make request and get raw response
threat_summary = client.get_threat_summary_request(site_ids, group_ids)
# Parse response into context & content entries
if threat_summary:
context_entries = {
'InProgress': threat_summary.get('inProgress'),
'MaliciousNotResolved': threat_summary.get('maliciousNotResolved'),
'NotMitigated': threat_summary.get('notMitigated'),
'NotMitigatedNotResolved': threat_summary.get('notMitigatedNotResolved'),
'NotResolved': threat_summary.get('notResolved'),
'Resolved': threat_summary.get('resolved'),
'SuspiciousNotMitigatedNotResolved': threat_summary.get('suspiciousNotMitigatedNotResolved'),
'SuspiciousNotResolved': threat_summary.get('suspiciousNotResolved'),
'Total': threat_summary.get('total'),
}
return CommandResults(
readable_output=tableToMarkdown('Sentinel One - Dashboard Threat Summary', context_entries, removeNull=True,
headerTransform=pascalToSpace),
outputs_prefix='SentinelOne.Threat',
outputs_key_field='ID',
outputs=context_entries,
raw_response=threat_summary)
# Agents Commands
def list_agents_command(client: Client, args: dict) -> CommandResults:
"""
List all agents matching the input filter
"""
# Get arguments
query_params = assign_params(
active_threats=args.get('min_active_threats'),
computer_name=args.get('computer_name'),
scan_status=args.get('scan_status'),
os_type=args.get('os_type'),
created_at=args.get('created_at'),
limit=int(args.get('limit', 10)),
)
# Make request and get raw response
agents = client.list_agents_request(query_params)
# Parse response into context & content entries
context_entries = list(get_agents_outputs(agents)) if agents else None
return CommandResults(
readable_output=tableToMarkdown('Sentinel One - List of Agents', context_entries, headerTransform=pascalToSpace,
removeNull=True, metadata='Provides summary information and details for all'
' the agents that matched your search criteria'),
outputs_prefix='SentinelOne.Agents',
outputs_key_field='ID',
outputs=context_entries,
raw_response=agents)
def get_agent_command(client: Client, args: dict) -> CommandResults:
"""
Get single agent via ID
"""
# Get arguments
agent_ids = argToList(args.get('agent_id'))
# Make request and get raw response
agents = client.get_agent_request(agent_ids)
# Parse response into context & content entries
context_entries = list(get_agents_outputs(agents)) if agents else None
return CommandResults(
readable_output=tableToMarkdown('Sentinel One - Get Agent Details', context_entries,
headerTransform=pascalToSpace, removeNull=True),
outputs_prefix='SentinelOne.Agent',
outputs_key_field='ID',
outputs=context_entries,
raw_response=agents)
def connect_agent_to_network(client: Client, args: dict) -> Union[CommandResults, str]:
"""
Sends a "connect to network" command to all agents matching the input filter.
"""
agent_ids = argToList(args.get('agent_id'))
# Make request and get raw response
raw_response = client.connect_to_network_request(agent_ids)
agents_affected = raw_response.get('affected', 0)
# Parse response into context & content entries
if agents_affected > 0:
agents = client.list_agents_request({'ids': agent_ids})
contents = [{
'NetworkStatus': agent.get('networkStatus'),
'ID': agent.get('id')
} for agent in agents]
return CommandResults(
readable_output=f'{agents_affected} agent(s) successfully connected to the network.',
outputs_prefix='SentinelOne.Agent',
outputs_key_field='ID',
outputs=contents,
raw_response=raw_response)
return 'No agents were connected to the network.'
def disconnect_agent_from_network(client: Client, args: dict) -> Union[CommandResults, str]:
"""
Sends a "disconnect from network" command to all agents matching the input filter.
"""
agent_ids = argToList(args.get('agent_id'))
# Make request and get raw response
raw_response = client.disconnect_from_network_request(agent_ids)
agents_affected = raw_response.get('affected', 0)
if agents_affected > 0:
agents = client.list_agents_request({'ids': agent_ids})
contents = [{
'NetworkStatus': agent.get('networkStatus'),
'ID': agent.get('id')
} for agent in agents]
return CommandResults(
readable_output=f'{agents_affected} agent(s) successfully disconnected from the network.',
outputs_prefix='SentinelOne.Agent',
outputs_key_field='ID',
outputs=contents,
raw_response=raw_response)
return 'No agents were disconnected from the network.'
def broadcast_message(client: Client, args: dict) -> str:
"""
Broadcasts a message to all agents matching the input filter.
"""
message = args.get('message')
filters = assign_params(
isActive=argToBoolean(args.get('active_agent', 'false')),
groupIds=argToList(args.get('group_id')),
ids=argToList(args.get('agent_id')),
domains=argToList(args.get('domain')),
)
response = client.broadcast_message_request(message, filters)
agents_affected = response.get('affected', 0)
if agents_affected > 0:
return 'The message was successfully delivered to the agent(s)'
return 'No messages were sent. Verify that the inputs are correct.'
def shutdown_agents(client: Client, args: dict) -> str:
"""
Sends a shutdown command to all agents matching the input filter
"""
query = args.get('query', '')
agent_id = argToList(args.get('agent_id'))
group_id = argToList(args.get('group_id'))
if not (agent_id or group_id):
raise DemistoException('Expecting at least one of the following arguments to filter by: agent_id, group_id.')
response = client.shutdown_agents_request(query, agent_id, group_id)
affected_agents = response.get('affected', 0)
if affected_agents > 0:
return f'Shutting down {affected_agents} agent(s).'
    return 'No agents were shut down.'
def uninstall_agent(client: Client, args: dict) -> str:
"""
Sends an uninstall command to all agents matching the input filter.
"""
query = args.get('query', '')
agent_id = argToList(args.get('agent_id'))
group_id = argToList(args.get('group_id'))
if not (agent_id or group_id):
raise DemistoException('Expecting at least one of the following arguments to filter by: agent_id, group_id.')
response = client.uninstall_agent_request(query, agent_id, group_id)
affected_agents = response.get('affected', 0)
if affected_agents > 0:
return f'Uninstall was sent to {affected_agents} agent(s).'
return 'No agents were affected.'
# Event Commands
def create_query(client: Client, args: dict) -> CommandResults:
query = args.get('query')
from_date = args.get('from_date')
to_date = args.get('to_date')
query_id = client.create_query_request(query, from_date, to_date)
context_entries = {
'Query': query,
'FromDate': from_date,
'ToDate': to_date,
'QueryID': query_id,
}
return CommandResults(
readable_output=f'The query ID is {query_id}',
outputs_prefix='SentinelOne.Query',
outputs_key_field='QueryID',
outputs=context_entries,
raw_response=query_id)
def get_events(client: Client, args: dict) -> Union[CommandResults, str]:
"""
Get all Deep Visibility events from query
"""
contents = []
event_standards = []
query_id = args.get('query_id')
limit = int(args.get('limit', 50))
events = client.get_events_request(query_id, limit)
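    # Build readable rows and standard Event context entries from the Deep Visibility events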
for event in events:
contents.append({
'EventType': event.get('eventType'),
'Endpoint': event.get('agentName'),
'SiteName': event.get('siteName'),
'User': event.get('user'),
'Time': event.get('processStartTime'),
'AgentOS': event.get('agentOs'),
'ProcessID': event.get('pid'),
'ProcessUID': event.get('srcProcUid') if IS_VERSION_2_1 else event.get('processUniqueKey'),
'ProcessName': event.get('processName'),
'MD5': event.get('md5'),
'SHA256': event.get('sha256'),
})
event_standards.append({
'Type': event.get('eventType'),
'Name': event.get('processName'),
'ID': event.get('pid'),
})
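    # Output both the SentinelOne-specific and the generic Event context paths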
context = {
'SentinelOne.Event(val.ProcessID && val.ProcessID === obj.ProcessID)': contents,
'Event(val.ID && val.ID === obj.ID)': event_standards
}
return CommandResults(
readable_output=tableToMarkdown('SentinelOne Events', contents, removeNull=True),
outputs=context,
raw_response=events)
def get_processes(client: Client, args: dict) -> CommandResults:
"""
Get Deep Visibility events from query by event type - process
"""
contents = []
query_id = args.get('query_id')
limit = int(args.get('limit', 50))
processes = client.get_processes_request(query_id, limit)
for process in processes:
contents.append({
'EventType': process.get('eventType'),
'Endpoint': process.get('agentName'),
'SiteName': process.get('siteName'),
'User': process.get('user'),
'Time': process.get('processStartTime'),
'ParentProcessID': process.get('parentPid'),
'ParentProcessUID': process.get('parentProcessUniqueKey'),
'ParentProcessName': process.get('parentProcessName'),
'ProcessID': process.get('pid'),
'ProcessUID': process.get('srcProcUid') if IS_VERSION_2_1 else process.get('processUniqueKey'),
'ProcessName': process.get('processName'),
'ProcessDisplayName': process.get('processDisplayName'),
'SHA1': process.get('processImageSha1Hash'),
'CMD': process.get('"processCmd'),
'SubsystemType': process.get('processSubSystem'),
'IntegrityLevel': process.get('processIntegrityLevel'),
'ParentProcessStartTime': process.get('parentProcessStartTime'),
})
return CommandResults(
readable_output=tableToMarkdown('SentinelOne Processes', contents, removeNull=True),
outputs_prefix='SentinelOne.Event',
outputs_key_field='ProcessID',
outputs=contents,
raw_response=processes)
def fetch_incidents(client: Client, fetch_limit: int, first_fetch: str, fetch_threat_rank: int):
last_run = demisto.getLastRun()
last_fetch = last_run.get('time')
# handle first time fetch
if last_fetch is None:
last_fetch = date_to_timestamp(dateparser.parse(first_fetch, settings={'TIMEZONE': 'UTC'}))
current_fetch = last_fetch
incidents = []
last_fetch_date_string = timestamp_to_datestring(last_fetch, '%Y-%m-%dT%H:%M:%S.%fZ')
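    # Query only threats created after the last fetch timestamp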
threats = client.get_threats_request(limit=fetch_limit, created_after=last_fetch_date_string)
for threat in threats:
rank = threat.get('rank')
try:
rank = int(rank)
        except (TypeError, ValueError):
rank = 0
# If no fetch threat rank is provided, bring everything, else only fetch above the threshold
if IS_VERSION_2_1 or rank >= fetch_threat_rank:
incident = threat_to_incident(threat)
date_occurred_dt = parse(incident['occurred'])
incident_date = date_to_timestamp(date_occurred_dt, '%Y-%m-%dT%H:%M:%S.%fZ')
if incident_date > last_fetch:
incidents.append(incident)
if incident_date > current_fetch:
current_fetch = incident_date
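    # Persist the newest incident timestamp so the next fetch continues from there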
demisto.setLastRun({'time': current_fetch})
demisto.incidents(incidents)
def threat_to_incident(threat) -> dict:
threat_info = threat.get('threatInfo', {}) if IS_VERSION_2_1 else threat
incident = {
'name': f'Sentinel One Threat: {threat_info.get("classification", "Not classified")}',
'occurred': threat_info.get('createdAt'),
'rawJSON': json.dumps(threat)}
return incident
def main():
""" PARSE INTEGRATION PARAMETERS """
global IS_VERSION_2_1
params = demisto.params()
token = params.get('token')
api_version = params.get('api_version', '2.1')
server = params.get('url').rstrip('/')
base_url = urljoin(server, f'/web/api/v{api_version}/')
use_ssl = not params.get('insecure', False)
proxy = params.get('proxy', False)
IS_VERSION_2_1 = api_version == '2.1'
first_fetch_time = params.get('fetch_time', '3 days')
fetch_threat_rank = int(params.get('fetch_threat_rank', 0))
fetch_limit = int(params.get('fetch_limit', 10))
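    # Requests authenticate via the 'ApiToken' Authorization header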
headers = {
'Authorization': 'ApiToken ' + token if token else 'ApiToken',
'Content-Type': 'application/json',
'Accept': 'application/json'
}
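    # Map command names to handlers; 'common' commands are available in both API versions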
commands: Dict[str, Dict[str, Callable]] = {
'common': {
'sentinelone-get-activities': get_activities_command,
'sentinelone-get-threats': get_threats_command,
'sentinelone-mitigate-threat': mitigate_threat_command,
'sentinelone-get-hash': get_hash_command,
'sentinelone-get-white-list': get_white_list_command,
'sentinelone-create-white-list-item': create_white_item_command,
'sentinelone-get-sites': get_sites_command,
'sentinelone-get-site': get_site_command,
'sentinelone-reactivate-site': reactivate_site_command,
'sentinelone-list-agents': list_agents_command,
'sentinelone-get-agent': get_agent_command,
'sentinelone-get-groups': get_groups_command,
'sentinelone-move-agent': move_agent_to_group_command,
'sentinelone-delete-group': delete_group,
'sentinelone-connect-agent': connect_agent_to_network,
'sentinelone-disconnect-agent': disconnect_agent_from_network,
'sentinelone-broadcast-message': broadcast_message,
'sentinelone-get-events': get_events,
'sentinelone-create-query': create_query,
'sentinelone-get-processes': get_processes,
'sentinelone-shutdown-agent': shutdown_agents,
'sentinelone-uninstall-agent': uninstall_agent,
},
'2.0': {
'sentinelone-mark-as-threat': mark_as_threat_command,
'sentinelone-resolve-threat': resolve_threat_command,
'sentinelone-agent-processes': get_agent_processes,
},
'2.1': {
'sentinelone-threat-summary': get_threat_summary_command,
},
}
''' COMMANDS MANAGER / SWITCH PANEL '''
demisto.info(f'Command being called is {demisto.command()}')
command = demisto.command()
try:
client = Client(
base_url=base_url,
verify=use_ssl,
headers=headers,
proxy=proxy,
)
if command == 'test-module':
return_results(test_module(client, params.get('isFetch'), first_fetch_time))
        elif command == 'fetch-incidents':
fetch_incidents(client, fetch_limit, first_fetch_time, fetch_threat_rank)
else:
if command in commands['common']:
return_results(commands['common'][command](client, demisto.args()))
elif command in commands[api_version]:
return_results(commands[api_version][command](client, demisto.args()))
else:
raise NotImplementedError(f'The {command} command is not supported for API version {api_version}')
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {command} command.\nError:\n{str(e)}')
if __name__ in ['__main__', 'builtin', 'builtins']:
main() | en | 0.766639 | IMPORTS # Disable insecure warnings GLOBALS HELPER FUNCTIONS # Only available in 2.0 # Only available in 2.0 # Only available in 2.0 # Only available in 2.0 # Only available in 2.0 # Only available in 2.0 Client will implement the service API, and should not contain any Demisto logic. Should only do requests and return data. [DEPRECATED BY SentinelOne] Returns empty array. To get processes of an Agent, see Applications. [DEPRECATED by S1] IN BOTH 2.0 and 2.1 COMMANDS + REQUESTS FUNCTIONS Performs basic get request to verify connection and creds. Get a list of activities. Gets the group data. Deletes a group by ID. Move agents to a new group. # Parse response into context & content entries Retrieve running processes for a specific agent. Note: This feature is obsolete and an empty array will always be returned Gets a list of threats. Rank only relevant for API version 2.0 Get hash reputation. Removed hash classification since SentinelOne has deprecated it - Breaking BC. Mark suspicious threats as threats. Relevant for API version 2.0 # Make request and get raw response # Parse response into context & content entries Apply a mitigation action to a group of threats. Relevant for API version 2.0 # Get arguments # Make request and get raw response # Parse response into context & content entries Mark threats as resolved # Make request and get raw response # Parse response into context & content entries List all white items matching the input filter # Get arguments # Make request and get raw response # Parse response into context & content entries Create white item. # Make request and get raw response # Parse response into context & content entries List all sites with filtering options # HTTP 500 - server internal error when passing admin_only. # Make request and get raw response # Parse response into context & content entries Get a specific site by ID # Init main vars # Get arguments # Make request and get raw response # Parse response into context & content entries Reactivate specific site by ID # Init main vars # Get arguments # Make request and get raw response # Parse response into context & content entries Get dashboard threat summary # Init main vars # Make request and get raw response # Parse response into context & content entries # Agents Commands List all agents matching the input filter # Get arguments # Make request and get raw response # Parse response into context & content entries Get single agent via ID # Get arguments # Make request and get raw response # Parse response into context & content entries Sends a "connect to network" command to all agents matching the input filter. # Make request and get raw response # Parse response into context & content entries Sends a "disconnect from network" command to all agents matching the input filter. # Make request and get raw response Broadcasts a message to all agents matching the input filter. Sends a shutdown command to all agents matching the input filter Sends an uninstall command to all agents matching the input filter. # Event Commands Get all Deep Visibility events from query Get Deep Visibility events from query by event type - process # handle first time fetch # If no fetch threat rank is provided, bring everything, else only fetch above the threshold PARSE INTEGRATION PARAMETERS COMMANDS MANAGER / SWITCH PANEL # print the traceback | 2.16795 | 2 |
src/sentry/incidents/endpoints/organization_alert_rule_trigger_action_details.py | pombredanne/django-sentry | 0 | 6630342 | from __future__ import absolute_import
from rest_framework import status
from rest_framework.response import Response
from sentry.api.serializers import serialize
from sentry.incidents.endpoints.bases import OrganizationAlertRuleTriggerActionEndpoint
from sentry.incidents.endpoints.serializers import AlertRuleTriggerActionSerializer
from sentry.incidents.logic import delete_alert_rule_trigger_action, InvalidTriggerActionError
class OrganizationAlertRuleTriggerActionDetailsEndpoint(OrganizationAlertRuleTriggerActionEndpoint):
def get(self, request, organization, alert_rule, alert_rule_trigger, alert_rule_trigger_action):
"""
Fetch an alert rule trigger action.
```````````````````````````````````
:auth: required
"""
data = serialize(alert_rule_trigger_action, request.user)
return Response(data)
def put(self, request, organization, alert_rule, alert_rule_trigger, alert_rule_trigger_action):
serializer = AlertRuleTriggerActionSerializer(
context={
"organization": organization,
"alert_rule": alert_rule,
"alert_rule_trigger": alert_rule_trigger,
"access": request.access,
},
instance=alert_rule_trigger_action,
data=request.data,
)
if serializer.is_valid():
try:
alert_rule_trigger_action = serializer.save()
except InvalidTriggerActionError as e:
return Response(e.message, status=status.HTTP_400_BAD_REQUEST)
return Response(
serialize(alert_rule_trigger_action, request.user), status=status.HTTP_200_OK
)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(
self, request, organization, alert_rule, alert_rule_trigger, alert_rule_trigger_action
):
delete_alert_rule_trigger_action(alert_rule_trigger_action)
return Response(status=status.HTTP_204_NO_CONTENT)
| from __future__ import absolute_import
from rest_framework import status
from rest_framework.response import Response
from sentry.api.serializers import serialize
from sentry.incidents.endpoints.bases import OrganizationAlertRuleTriggerActionEndpoint
from sentry.incidents.endpoints.serializers import AlertRuleTriggerActionSerializer
from sentry.incidents.logic import delete_alert_rule_trigger_action, InvalidTriggerActionError
class OrganizationAlertRuleTriggerActionDetailsEndpoint(OrganizationAlertRuleTriggerActionEndpoint):
def get(self, request, organization, alert_rule, alert_rule_trigger, alert_rule_trigger_action):
"""
Fetch an alert rule trigger action.
```````````````````````````````````
:auth: required
"""
data = serialize(alert_rule_trigger_action, request.user)
return Response(data)
def put(self, request, organization, alert_rule, alert_rule_trigger, alert_rule_trigger_action):
serializer = AlertRuleTriggerActionSerializer(
context={
"organization": organization,
"alert_rule": alert_rule,
"alert_rule_trigger": alert_rule_trigger,
"access": request.access,
},
instance=alert_rule_trigger_action,
data=request.data,
)
if serializer.is_valid():
try:
alert_rule_trigger_action = serializer.save()
except InvalidTriggerActionError as e:
return Response(e.message, status=status.HTTP_400_BAD_REQUEST)
return Response(
serialize(alert_rule_trigger_action, request.user), status=status.HTTP_200_OK
)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(
self, request, organization, alert_rule, alert_rule_trigger, alert_rule_trigger_action
):
delete_alert_rule_trigger_action(alert_rule_trigger_action)
return Response(status=status.HTTP_204_NO_CONTENT)
| en | 0.946024 | Fetch an alert rule trigger action. ``````````````````````````````````` :auth: required | 2.097556 | 2 |
Authentication.py | Snowbell92/funlearn | 0 | 6630343 | from PyQt5.QtWidgets import QApplication, QWidget, QLabel, QPushButton, QLineEdit, QMessageBox
from PyQt5 import QtCore, QtGui, QtWidgets
import sys
import tkinter as tk
from StartingPage import StartingPage
import mysql.connector
class App(QWidget):
trial = 0
UID = None
username = None
def __init__(self):
super().__init__()
'''
root = tk.Tk()
self.width = root.winfo_screenwidth()
self.height = root.winfo_screenheight()
'''
self.width = 700
self.height = 720
self.left = 500
self.top = 50
# print(self.width, self.height)
self.title = 'Sign Up/Sign In'
self.initUI()
def initUI(self):
horUnit = int(self.width / 12)
verUnit = int(self.height / 12)
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
self.setStyleSheet("background-color: rgb(54, 75, 109);");
self.lbl_heading = QLabel("USER AUTHENTICATION", self)
self.lbl_heading.setStyleSheet("font-size: 22px; font-weight: bold; color: white;")
self.lbl_heading.setGeometry(3.5 * horUnit, 1 * verUnit, 4.5 * horUnit, 0.6 * verUnit)
self.lbl_username = QLabel("Username", self)
self.lbl_username.setStyleSheet("font-size: 16px; font-weight: bold; color: white;")
self.lbl_username.setGeometry(1 * horUnit, 3 * verUnit, 1.5 * horUnit, 0.6 * verUnit)
self.txt_username = QLineEdit(self)
self.txt_username.setPlaceholderText("username")
self.txt_username.setStyleSheet("background-color: white")
self.txt_username.setGeometry(3.5 * horUnit, 3 * verUnit, 7 * horUnit, 0.6 * verUnit)
self.lbl_pwd = QLabel("Password", self)
self.lbl_pwd.setStyleSheet("font-size: 16px; font-weight: bold; color: white;")
self.lbl_pwd.setGeometry(1 * horUnit, 4.5 * verUnit, 1.5 * horUnit, 0.6 * verUnit)
self.txt_pwd = QLineEdit(self)
self.txt_pwd.setPlaceholderText("Password")
self.txt_pwd.setEchoMode(QLineEdit.Password)
self.txt_pwd.setStyleSheet("background-color: white")
self.txt_pwd.setGeometry(3.5 * horUnit, 4.5 * verUnit, 7 * horUnit, 0.6 * verUnit)
self.btn_signUp = QPushButton('Sign Up', self)
self.btn_signUp.setStyleSheet("background-color: lightgray; font-size: 16px; font-weight: bold;")
self.btn_signUp.setGeometry(5 * horUnit, 8 * verUnit, 1.5 * horUnit, 0.6 * verUnit)
self.btn_signUp.clicked.connect(self.on_click_signUp)
self.btn_signIn = QPushButton('Sign In', self)
self.btn_signIn.setStyleSheet("background-color: lightgray; font-size: 16px; font-weight: bold;")
self.btn_signIn.setGeometry(8 * horUnit, 8 * verUnit, 1.5 * horUnit, 0.6 * verUnit)
self.btn_signIn.clicked.connect(self.on_click_signIn)
self.btn_reset = QPushButton('Reset', self)
self.btn_reset.setStyleSheet("background-color: lightgray; font-size: 16px; font-weight: bold;")
self.btn_reset.setGeometry(2 * horUnit, 8 * verUnit, 1.5 * horUnit, 0.6 * verUnit)
self.btn_reset.hide()
self.btn_reset.clicked.connect(self.on_click_reset)
self.lbl_reset = QLabel("Want to reset password?", self)
self.lbl_reset.setGeometry(2 * horUnit, 6.5 * verUnit, 5 * horUnit, 0.5 * verUnit)
self.lbl_reset.hide()
self.lbl_reset.setStyleSheet("font-size: 18px; font-weight: bold; color: lightgray; color: blue;")
self.lbl_error = QLabel("Wrong Password. Try again!", self)
self.lbl_error.setGeometry(4 * horUnit, 5.5 * verUnit, 5 * horUnit, 0.5 * verUnit)
self.lbl_error.hide()
self.lbl_error.setStyleSheet("font-size: 18px; font-weight: bold; color: lightgray; color: red;")
self.show()
def on_click_signUp(self):
self.storeIntoDatabase()
messageBox = QtWidgets.QMessageBox()
messageBox.setIcon(QtWidgets.QMessageBox.Information)
messageBox.setWindowTitle("Sign Up")
messageBox.setText("Sign Up Successful!")
messageBox.setStandardButtons(QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Close)
messageBox.exec_()
self.loadStartingPage()
def on_click_signIn(self):
App.trial += 1
self.lbl_error.hide()
mydb = mysql.connector.connect(
host="localhost",
user="root",
# passwd="<PASSWORD>",
database="spl"
)
mycursor = mydb.cursor()
mycursor.execute("SELECT username, password FROM User")
myresult = mycursor.fetchall()
mycursor.close()
mydb.close()
name = self.txt_username.text()
pwd = self.txt_pwd.text()
print(name, " ", pwd, " ", App.trial)
flag = False
for row in myresult:
if name == row[0] and pwd == row[1]:
flag = True
self.loadStartingPage()
                break
elif name == row[0] and App.trial >= 5:
App.username = name
self.txt_pwd.setText("");
self.lbl_reset.show()
self.btn_reset.show()
# elif flag == False:
# self.lbl_error.show()
if flag == False:
self.lbl_error.show()
def on_click_reset(self):
App.trial = 0
self.lbl_error.hide()
mydb = mysql.connector.connect(
host='localhost',
user="root",
# passwd = "<PASSWORD>",
database="spl"
)
myCursor = mydb.cursor(buffered=True)
# name = self.txt_username.text()
pwd = self.txt_pwd.text()
# print(name, " ", pwd)
sql = "UPDATE User SET password = %s WHERE username = %s"
val = (pwd, App.username)
myCursor.execute(sql, val)
mydb.commit()
# print(myCursor.rowcount, "record inserted.")
myCursor.close()
mydb.close()
if self.txt_pwd.text() != "":
messageBox = QtWidgets.QMessageBox()
messageBox.setIcon(QtWidgets.QMessageBox.Information)
messageBox.setWindowTitle("Password Reset")
messageBox.setText("Password Reset Successful!")
messageBox.setStandardButtons(QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Close)
messageBox.exec_()
self.loadStartingPage()
else:
messageBox = QtWidgets.QMessageBox()
messageBox.setIcon(QtWidgets.QMessageBox.Information)
messageBox.setWindowTitle("Input Not Found")
messageBox.setText("Please fill up the password field.")
messageBox.setStandardButtons(QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Close)
messageBox.exec_()
def storeIntoDatabase(self):
mydb = mysql.connector.connect(
host='localhost',
user="root",
# passwd = "<PASSWORD>",
database="spl"
)
myCursor = mydb.cursor(buffered=True)
name = self.txt_username.text()
pwd = self.txt_pwd.text()
print(name, " ", pwd)
sql = "INSERT INTO User (username, password) VALUES (%s, %s)"
val = (name, pwd)
myCursor.execute(sql, val)
mydb.commit()
# print(myCursor.rowcount, "record inserted.")
myCursor.close()
mydb.close()
def loadStartingPage(self):
name = self.txt_username.text()
pwd = self.txt_pwd.text()
print(name, " ", pwd)
self.start = StartingPage()
self.start.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
obj = App()
sys.exit(app.exec_())
| from PyQt5.QtWidgets import QApplication, QWidget, QLabel, QPushButton, QLineEdit, QMessageBox
from PyQt5 import QtCore, QtGui, QtWidgets
import sys
import tkinter as tk
from StartingPage import StartingPage
import mysql.connector
class App(QWidget):
trial = 0
UID = None
username = None
def __init__(self):
super().__init__()
'''
root = tk.Tk()
self.width = root.winfo_screenwidth()
self.height = root.winfo_screenheight()
'''
self.width = 700
self.height = 720
self.left = 500
self.top = 50
# print(self.width, self.height)
self.title = 'Sign Up/Sign In'
self.initUI()
def initUI(self):
horUnit = int(self.width / 12)
verUnit = int(self.height / 12)
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
self.setStyleSheet("background-color: rgb(54, 75, 109);");
self.lbl_heading = QLabel("USER AUTHENTICATION", self)
self.lbl_heading.setStyleSheet("font-size: 22px; font-weight: bold; color: white;")
self.lbl_heading.setGeometry(3.5 * horUnit, 1 * verUnit, 4.5 * horUnit, 0.6 * verUnit)
self.lbl_username = QLabel("Username", self)
self.lbl_username.setStyleSheet("font-size: 16px; font-weight: bold; color: white;")
self.lbl_username.setGeometry(1 * horUnit, 3 * verUnit, 1.5 * horUnit, 0.6 * verUnit)
self.txt_username = QLineEdit(self)
self.txt_username.setPlaceholderText("username")
self.txt_username.setStyleSheet("background-color: white")
self.txt_username.setGeometry(3.5 * horUnit, 3 * verUnit, 7 * horUnit, 0.6 * verUnit)
self.lbl_pwd = QLabel("Password", self)
self.lbl_pwd.setStyleSheet("font-size: 16px; font-weight: bold; color: white;")
self.lbl_pwd.setGeometry(1 * horUnit, 4.5 * verUnit, 1.5 * horUnit, 0.6 * verUnit)
self.txt_pwd = QLineEdit(self)
self.txt_pwd.setPlaceholderText("Password")
self.txt_pwd.setEchoMode(QLineEdit.Password)
self.txt_pwd.setStyleSheet("background-color: white")
self.txt_pwd.setGeometry(3.5 * horUnit, 4.5 * verUnit, 7 * horUnit, 0.6 * verUnit)
self.btn_signUp = QPushButton('Sign Up', self)
self.btn_signUp.setStyleSheet("background-color: lightgray; font-size: 16px; font-weight: bold;")
self.btn_signUp.setGeometry(5 * horUnit, 8 * verUnit, 1.5 * horUnit, 0.6 * verUnit)
self.btn_signUp.clicked.connect(self.on_click_signUp)
self.btn_signIn = QPushButton('Sign In', self)
self.btn_signIn.setStyleSheet("background-color: lightgray; font-size: 16px; font-weight: bold;")
self.btn_signIn.setGeometry(8 * horUnit, 8 * verUnit, 1.5 * horUnit, 0.6 * verUnit)
self.btn_signIn.clicked.connect(self.on_click_signIn)
self.btn_reset = QPushButton('Reset', self)
self.btn_reset.setStyleSheet("background-color: lightgray; font-size: 16px; font-weight: bold;")
self.btn_reset.setGeometry(2 * horUnit, 8 * verUnit, 1.5 * horUnit, 0.6 * verUnit)
self.btn_reset.hide()
self.btn_reset.clicked.connect(self.on_click_reset)
self.lbl_reset = QLabel("Want to reset password?", self)
self.lbl_reset.setGeometry(2 * horUnit, 6.5 * verUnit, 5 * horUnit, 0.5 * verUnit)
self.lbl_reset.hide()
self.lbl_reset.setStyleSheet("font-size: 18px; font-weight: bold; color: lightgray; color: blue;")
self.lbl_error = QLabel("Wrong Password. Try again!", self)
self.lbl_error.setGeometry(4 * horUnit, 5.5 * verUnit, 5 * horUnit, 0.5 * verUnit)
self.lbl_error.hide()
self.lbl_error.setStyleSheet("font-size: 18px; font-weight: bold; color: lightgray; color: red;")
self.show()
def on_click_signUp(self):
self.storeIntoDatabase()
messageBox = QtWidgets.QMessageBox()
messageBox.setIcon(QtWidgets.QMessageBox.Information)
messageBox.setWindowTitle("Sign Up")
messageBox.setText("Sign Up Successful!")
messageBox.setStandardButtons(QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Close)
messageBox.exec_()
self.loadStartingPage()
def on_click_signIn(self):
App.trial += 1
self.lbl_error.hide()
mydb = mysql.connector.connect(
host="localhost",
user="root",
# passwd="<PASSWORD>",
database="spl"
)
mycursor = mydb.cursor()
mycursor.execute("SELECT username, password FROM User")
myresult = mycursor.fetchall()
mycursor.close()
mydb.close()
name = self.txt_username.text()
pwd = self.txt_pwd.text()
print(name, " ", pwd, " ", App.trial)
flag = False
for row in myresult:
if name == row[0] and pwd == row[1]:
flag = True
self.loadStartingPage()
                break
elif name == row[0] and App.trial >= 5:
App.username = name
self.txt_pwd.setText("");
self.lbl_reset.show()
self.btn_reset.show()
# elif flag == False:
# self.lbl_error.show()
if flag == False:
self.lbl_error.show()
def on_click_reset(self):
App.trial = 0
self.lbl_error.hide()
mydb = mysql.connector.connect(
host='localhost',
user="root",
# passwd = "<PASSWORD>",
database="spl"
)
myCursor = mydb.cursor(buffered=True)
# name = self.txt_username.text()
pwd = self.txt_pwd.text()
# print(name, " ", pwd)
sql = "UPDATE User SET password = %s WHERE username = %s"
val = (pwd, App.username)
myCursor.execute(sql, val)
mydb.commit()
# print(myCursor.rowcount, "record inserted.")
myCursor.close()
mydb.close()
if self.txt_pwd.text() != "":
messageBox = QtWidgets.QMessageBox()
messageBox.setIcon(QtWidgets.QMessageBox.Information)
messageBox.setWindowTitle("Password Reset")
messageBox.setText("Password Reset Successful!")
messageBox.setStandardButtons(QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Close)
messageBox.exec_()
self.loadStartingPage()
else:
messageBox = QtWidgets.QMessageBox()
messageBox.setIcon(QtWidgets.QMessageBox.Information)
messageBox.setWindowTitle("Input Not Found")
messageBox.setText("Please fill up the password field.")
messageBox.setStandardButtons(QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Close)
messageBox.exec_()
def storeIntoDatabase(self):
mydb = mysql.connector.connect(
host='localhost',
user="root",
# passwd = "<PASSWORD>",
database="spl"
)
myCursor = mydb.cursor(buffered=True)
name = self.txt_username.text()
pwd = self.txt_pwd.text()
print(name, " ", pwd)
sql = "INSERT INTO User (username, password) VALUES (%s, %s)"
val = (name, pwd)
myCursor.execute(sql, val)
mydb.commit()
# print(myCursor.rowcount, "record inserted.")
myCursor.close()
mydb.close()
def loadStartingPage(self):
name = self.txt_username.text()
pwd = self.txt_pwd.text()
print(name, " ", pwd)
self.start = StartingPage()
self.start.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
obj = App()
sys.exit(app.exec_())
| en | 0.340878 | root = tk.Tk() self.width = root.winfo_screenwidth() self.height = root.winfo_screenheight() # print(self.width, self.height) # passwd="<PASSWORD>", # elif flag == False: # self.lbl_error.show() # passwd = "<PASSWORD>", # name = self.txt_username.text() # print(name, " ", pwd) # print(myCursor.rowcount, "record inserted.") # passwd = "<PASSWORD>", # print(myCursor.rowcount, "record inserted.") | 2.732009 | 3 |
example/benchmark/ijson_pubsub/client.py | kokizzu/ijson | 117 | 6630344 | <filename>example/benchmark/ijson_pubsub/client.py<gh_stars>100-1000
import sys
from requests import Session
queue = sys.argv[1] if len(sys.argv) > 1 else '/sum'
s = Session()
while True:
s.post(f'http://localhost:8001' + queue, json={'a': 5, 'b': 8}, headers={'Type': 'pub'})
| <filename>example/benchmark/ijson_pubsub/client.py<gh_stars>100-1000
import sys
from requests import Session
queue = sys.argv[1] if len(sys.argv) > 1 else '/sum'
s = Session()
while True:
s.post(f'http://localhost:8001' + queue, json={'a': 5, 'b': 8}, headers={'Type': 'pub'})
| none | 1 | 2.271245 | 2 |
|
bukiyipmodtest.py | NtateLephadi/csc1015f_assignment_4 | 0 | 6630345 | # test program for Bukiyip calculations
import bukiyip
print('**** Bukiyip test program ****')
print('Available commands:')
print('d <number> : convert given decimal number to base-3.')
print('b <number> : convert given base-3 number to decimal.')
print('a <number> <number> : add the given base-3 numbers.')
print('m <number> <number> : multiply the given base-3 numbers.')
print('q : quit')
print()
while True:
choice = input ("Enter a command:\n")
action = choice[:1]
if action == 'q':
break
elif action == 'b' or action == 'd':
num = int(choice[2:])
if action == 'b':
print(bukiyip.bukiyip_to_decimal (num))
else:
print(bukiyip.decimal_to_bukiyip(num))
elif action == 'a' or action == 'm':
num1, num2 = map (int, choice[2:].split(" "))
if action == 'a':
print(bukiyip.bukiyip_add (num1, num2))
else:
print(bukiyip.bukiyip_multiply (num1, num2))
| # test program for Bukiyip calculations
import bukiyip
print('**** Bukiyip test program ****')
print('Available commands:')
print('d <number> : convert given decimal number to base-3.')
print('b <number> : convert given base-3 number to decimal.')
print('a <number> <number> : add the given base-3 numbers.')
print('m <number> <number> : multiply the given base-3 numbers.')
print('q : quit')
print()
while True:
choice = input ("Enter a command:\n")
action = choice[:1]
if action == 'q':
break
elif action == 'b' or action == 'd':
num = int(choice[2:])
if action == 'b':
print(bukiyip.bukiyip_to_decimal (num))
else:
print(bukiyip.decimal_to_bukiyip(num))
elif action == 'a' or action == 'm':
num1, num2 = map (int, choice[2:].split(" "))
if action == 'a':
print(bukiyip.bukiyip_add (num1, num2))
else:
print(bukiyip.bukiyip_multiply (num1, num2))
| en | 0.788313 | # test program for Bukiyip calculations | 4.126408 | 4 |
reduceccd/__init__.py | jselsing/reduceccd | 6 | 6630346 | from .reduceccd import *
| from .reduceccd import *
| none | 1 | 1.028683 | 1 |
|
predico/sample/tour/current_request.py | pauleveritt/predico | 0 | 6630347 | from dataclasses import dataclass
from predico import registry
from predico.sample import servicemanager, setup, Article
from predico.services.request.base_request import Request
@registry.view(
resource=Article,
template_string='<h1>{v.name}: {v.request.resource.title}</h1>'
)
@dataclass
class ArticleView:
request: Request
name: str = 'Article View'
if __name__ == '__main__':
setup()
request_service = servicemanager.services['request']
request = request_service.make_request('more/index')
output = request.render()
print(output)
| from dataclasses import dataclass
from predico import registry
from predico.sample import servicemanager, setup, Article
from predico.services.request.base_request import Request
@registry.view(
resource=Article,
template_string='<h1>{v.name}: {v.request.resource.title}</h1>'
)
@dataclass
class ArticleView:
request: Request
name: str = 'Article View'
if __name__ == '__main__':
setup()
request_service = servicemanager.services['request']
request = request_service.make_request('more/index')
output = request.render()
print(output)
| none | 1 | 2.158318 | 2 |
|
pyDMPC/ControlFramework/Inits/Init_Geo.py | RWTH-EBC/pyDMPC | 15 | 6630348 | # Global paths
glob_lib_paths = [r'C:\Git\pyDMPC\pyDMPC\ModelicaModels\ModelicaModels',
r'C:\Git\modelica-buildings\Buildings',
r'C:\Git\AixLib\AixLib']
glob_res_path = r'C:\TEMP\Dymola'
glob_dym_path = r'C:\Program Files\Dymola 2018 FD01\Modelica\Library\python_interface\dymola.egg'
# Working directory
import time
timestr = time.strftime("%Y%m%d_%H%M%S")
name_wkdir = r'pyDMPC_' + 'wkdir' + timestr
# Controlled system
contr_sys_typ = "Modelica"
ads_id = '5.59.199.202.1.1'
ads_port = 851
name_fmu = 'pyDMPCFMU_Geo.fmu'
orig_fmu_path = glob_res_path + '\\' + name_fmu
dest_fmu_path = glob_res_path + '\\' + name_wkdir + '\\' + name_fmu
time_incr = 120
# States
inputs = []
input_names = []
traj_points = []
input_variables = []
commands = []
command_variables = []
output_names = []
set_points = []
state_var_names = []
model_state_var_names = []
traj_var = []
# Times
start = []
stop = []
incr = []
opt_time = []
samp_time = []
# Paths
lib_paths = []
res_path = []
dym_path = []
mod_path = []
command_names = []
# Modifiers
cost_fac = []
# Variation
min_var = []
max_var = []
inc_var = []
# Subsystem Config
model_type = []
name = []
sys_id = []
ups_neigh = []
downs_neigh = []
par_neigh = []
# Subsystems
sys_id.append(0)
name.append("Field")
model_type.append("Modelica")
ups_neigh.append(1)
downs_neigh.append(None)
par_neigh.append(None)
input_names.append(["returnTemperature.T"])
input_variables.append(["external"])
inputs.append([])
output_names.append(["returnTemperature.T"])
set_points.append([287])
state_var_names.append(["supplyTemperatureMeas"])
model_state_var_names.append(["vol.T_start"])
start.append(0.)
stop.append(3600.0*24*365.25*3)
incr.append(3600.)
opt_time.append(10800)
samp_time.append(10)
lib_paths.append(glob_lib_paths)
res_path.append(glob_res_path + "\\" + name_wkdir)
dym_path.append(glob_dym_path)
mod_path.append(r'ModelicaModels.SubsystemModels.DetailedModels.Geo.Field')
command_names.append(["heatShare"])
command_variables.append(["decisionVariables.table[1,2]"])
commands.append(range(0,105,5))
traj_points.append(range(278,310,1))
traj_var.append(["supplyTemperature.T"])
cost_fac.append([0.0, 0.0, 1.0])
sys_id.append(1)
name.append("Building")
model_type.append("Modelica")
ups_neigh.append(None)
downs_neigh.append([0])
par_neigh.append(None)
input_names.append(["supplyTemperature.T"])
input_variables.append([r"variation.table[1,2]"])
inputs.append(range(280,310,5))
output_names.append(["returnTemperature"])
set_points.append([287])
state_var_names.append(["sine.y"])
model_state_var_names.append(["const.k"])
start.append(0.)
stop.append(7200.)
incr.append(10.)
opt_time.append(600)
samp_time.append(10)
lib_paths.append(glob_lib_paths)
res_path.append(glob_res_path + "\\" + name_wkdir)
dym_path.append(glob_dym_path)
mod_path.append(r'ModelicaModels.SubsystemModels.DetailedModels.Geo.Building')
command_names.append([])
command_variables.append(["decisionVariables.table[1,2]"])
commands.append(range(0,105,5))
traj_points.append([])
traj_var.append([])
cost_fac.append([-0.01, 1.0, 0.0])
| # Global paths
glob_lib_paths = [r'C:\Git\pyDMPC\pyDMPC\ModelicaModels\ModelicaModels',
r'C:\Git\modelica-buildings\Buildings',
r'C:\Git\AixLib\AixLib']
glob_res_path = r'C:\TEMP\Dymola'
glob_dym_path = r'C:\Program Files\Dymola 2018 FD01\Modelica\Library\python_interface\dymola.egg'
# Working directory
import time
timestr = time.strftime("%Y%m%d_%H%M%S")
name_wkdir = r'pyDMPC_' + 'wkdir' + timestr
# Controlled system
contr_sys_typ = "Modelica"
ads_id = '5.59.199.202.1.1'
ads_port = 851
name_fmu = 'pyDMPCFMU_Geo.fmu'
orig_fmu_path = glob_res_path + '\\' + name_fmu
dest_fmu_path = glob_res_path + '\\' + name_wkdir + '\\' + name_fmu
time_incr = 120
# States
inputs = []
input_names = []
traj_points = []
input_variables = []
commands = []
command_variables = []
output_names = []
set_points = []
state_var_names = []
model_state_var_names = []
traj_var = []
# Times
start = []
stop = []
incr = []
opt_time = []
samp_time = []
# Paths
lib_paths = []
res_path = []
dym_path = []
mod_path = []
command_names = []
# Modifiers
cost_fac = []
# Variation
min_var = []
max_var = []
inc_var = []
# Subsystem Config
model_type = []
name = []
sys_id = []
ups_neigh = []
downs_neigh = []
par_neigh = []
# Subsystems
sys_id.append(0)
name.append("Field")
model_type.append("Modelica")
ups_neigh.append(1)
downs_neigh.append(None)
par_neigh.append(None)
input_names.append(["returnTemperature.T"])
input_variables.append(["external"])
inputs.append([])
output_names.append(["returnTemperature.T"])
set_points.append([287])
state_var_names.append(["supplyTemperatureMeas"])
model_state_var_names.append(["vol.T_start"])
start.append(0.)
stop.append(3600.0*24*365.25*3)
incr.append(3600.)
opt_time.append(10800)
samp_time.append(10)
lib_paths.append(glob_lib_paths)
res_path.append(glob_res_path + "\\" + name_wkdir)
dym_path.append(glob_dym_path)
mod_path.append(r'ModelicaModels.SubsystemModels.DetailedModels.Geo.Field')
command_names.append(["heatShare"])
command_variables.append(["decisionVariables.table[1,2]"])
commands.append(range(0,105,5))
traj_points.append(range(278,310,1))
traj_var.append(["supplyTemperature.T"])
cost_fac.append([0.0, 0.0, 1.0])
sys_id.append(1)
name.append("Building")
model_type.append("Modelica")
ups_neigh.append(None)
downs_neigh.append([0])
par_neigh.append(None)
input_names.append(["supplyTemperature.T"])
input_variables.append([r"variation.table[1,2]"])
inputs.append(range(280,310,5))
output_names.append(["returnTemperature"])
set_points.append([287])
state_var_names.append(["sine.y"])
model_state_var_names.append(["const.k"])
start.append(0.)
stop.append(7200.)
incr.append(10.)
opt_time.append(600)
samp_time.append(10)
lib_paths.append(glob_lib_paths)
res_path.append(glob_res_path + "\\" + name_wkdir)
dym_path.append(glob_dym_path)
mod_path.append(r'ModelicaModels.SubsystemModels.DetailedModels.Geo.Building')
command_names.append([])
command_variables.append(["decisionVariables.table[1,2]"])
commands.append(range(0,105,5))
traj_points.append([])
traj_var.append([])
cost_fac.append([-0.01, 1.0, 0.0])
| en | 0.549123 | # Global paths # Working directory # Controlled system # States # Times # Paths # Modifiers # Variation # Subsystem Config # Subsystems | 1.57012 | 2 |
app/mxcache.py | spacedogXYZ/email-validator | 3 | 6630349 | import flanker.addresslib
from flanker.addresslib.drivers.redis_driver import RedisCache
import redis
import collections
import dnsq
class MxCache(object):
def __init__(self, app=None):
self.app = app
if app is not None:
self.init_app(app)
def init_app(self, app):
if 'REDIS_URL' in app.config:
cache = RedisCache()
cache.r = redis.StrictRedis.from_url(app.config.get('REDIS_URL'))
else:
cache = collections.defaultdict(str)
self._cache = cache
# cache mail server responses
flanker.addresslib.set_mx_cache(self._cache)
# set custom DNS timeout
dnsq.DNS_LIFETIME_TIMEOUT_SECONDS = app.config.get('DNS_TIMEOUT')
app.mxcache = self
def redis_conn(self):
if hasattr(self._cache, 'r'):
return self._cache.r
else:
return self._cache
| import flanker.addresslib
from flanker.addresslib.drivers.redis_driver import RedisCache
import redis
import collections
import dnsq
class MxCache(object):
def __init__(self, app=None):
self.app = app
if app is not None:
self.init_app(app)
def init_app(self, app):
if 'REDIS_URL' in app.config:
cache = RedisCache()
cache.r = redis.StrictRedis.from_url(app.config.get('REDIS_URL'))
else:
cache = collections.defaultdict(str)
self._cache = cache
# cache mail server responses
flanker.addresslib.set_mx_cache(self._cache)
# set custom DNS timeout
dnsq.DNS_LIFETIME_TIMEOUT_SECONDS = app.config.get('DNS_TIMEOUT')
app.mxcache = self
def redis_conn(self):
if hasattr(self._cache, 'r'):
return self._cache.r
else:
return self._cache
| en | 0.472869 | # cache mail server responses # set custom DNS timeout | 2.471929 | 2 |
tvof/text_search/migrations/0002_auto_20181118_2039.py | kingsdigitallab/tvof-django | 0 | 6630350 | <filename>tvof/text_search/migrations/0002_auto_20181118_2039.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-11-18 20:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('text_search', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='annotatedtoken',
name='is_rubric',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='annotatedtoken',
name='manuscript',
field=models.CharField(default='unspecified', max_length=30),
),
migrations.AddField(
model_name='annotatedtoken',
name='section_name',
field=models.CharField(default='unspecified', max_length=100),
),
migrations.AlterField(
model_name='annotatedtoken',
name='location',
field=models.CharField(help_text='location id for the seg comprising this token', max_length=20),
),
migrations.AlterField(
model_name='annotatedtoken',
name='pos',
field=models.CharField(help_text='part of speech', max_length=30),
),
]
| <filename>tvof/text_search/migrations/0002_auto_20181118_2039.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-11-18 20:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('text_search', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='annotatedtoken',
name='is_rubric',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='annotatedtoken',
name='manuscript',
field=models.CharField(default='unspecified', max_length=30),
),
migrations.AddField(
model_name='annotatedtoken',
name='section_name',
field=models.CharField(default='unspecified', max_length=100),
),
migrations.AlterField(
model_name='annotatedtoken',
name='location',
field=models.CharField(help_text='location id for the seg comprising this token', max_length=20),
),
migrations.AlterField(
model_name='annotatedtoken',
name='pos',
field=models.CharField(help_text='part of speech', max_length=30),
),
]
| en | 0.67158 | # -*- coding: utf-8 -*- # Generated by Django 1.11.16 on 2018-11-18 20:39 | 1.634945 | 2 |