ext | sha | content
---|---|---|
py | 1a54bd03da8a5769a3a31a502e39f3f02470d338 | import mxnet as mx
import numpy as np
import cv2
from tools.rand_sampler import RandSampler
class DetIter(mx.io.DataIter):
"""
Detection Iterator, which will feed data and label to network
Optional data augmentation is performed when providing batch
Parameters:
----------
imdb : Imdb
image database
batch_size : int
batch size
data_shape : int or (int, int)
image shape to be resized
mean_pixels : float or float list
[R, G, B], mean pixel values
rand_samplers : list
random cropping sampler list, if not specified, will
use original image only
rand_mirror : bool
whether to randomly mirror input images, default False
shuffle : bool
whether to shuffle initial image list, default False
rand_seed : int or None
whether to use fixed random seed, default None
max_crop_trial : bool
if random crop is enabled, defines the maximum trial time
if trial exceed this number, will give up cropping
is_train : bool
whether in training phase, default True, if False, labels might
be ignored
"""
def __init__(self, imdb, batch_size, data_shape, \
mean_pixels=[128, 128, 128], rand_samplers=[], \
rand_mirror=False, shuffle=False, rand_seed=None, \
is_train=True, max_crop_trial=50):
super(DetIter, self).__init__()
self._imdb = imdb
self.batch_size = batch_size
if isinstance(data_shape, int):
data_shape = (data_shape, data_shape)
self._data_shape = data_shape
self._mean_pixels = mx.nd.array(mean_pixels).reshape((3,1,1))
if not rand_samplers:
self._rand_samplers = []
else:
if not isinstance(rand_samplers, list):
rand_samplers = [rand_samplers]
assert isinstance(rand_samplers[0], RandSampler), "Invalid rand sampler"
self._rand_samplers = rand_samplers
self.is_train = is_train
self._rand_mirror = rand_mirror
self._shuffle = shuffle
if rand_seed:
np.random.seed(rand_seed) # fix random seed
self._max_crop_trial = max_crop_trial
self._current = 0
self._size = imdb.num_images
self._index = np.arange(self._size)
self._data = None
self._label = None
self._get_batch()
@property
def provide_data(self):
return [(k, v.shape) for k, v in self._data.items()]
@property
def provide_label(self):
if self.is_train:
return [(k, v.shape) for k, v in self._label.items()]
else:
return []
def reset(self):
self._current = 0
if self._shuffle:
np.random.shuffle(self._index)
def iter_next(self):
return self._current < self._size
def next(self):
if self.iter_next():
self._get_batch()
data_batch = mx.io.DataBatch(data=list(self._data.values()),
label=list(self._label.values()),
pad=self.getpad(), index=self.getindex())
self._current += self.batch_size
return data_batch
else:
raise StopIteration
def getindex(self):
return self._current // self.batch_size
def getpad(self):
pad = self._current + self.batch_size - self._size
return 0 if pad < 0 else pad
def _get_batch(self):
"""
Load data/label from dataset
"""
batch_data = mx.nd.zeros((self.batch_size, 3, self._data_shape[0], self._data_shape[1]))
batch_label = []
for i in range(self.batch_size):
if (self._current + i) >= self._size:
if not self.is_train:
continue
# use padding from middle in each epoch
idx = (self._current + i + self._size // 2) % self._size
index = self._index[idx]
else:
index = self._index[self._current + i]
# index = self.debug_index
im_path = self._imdb.image_path_from_index(index)
with open(im_path, 'rb') as fp:
img_content = fp.read()
img = mx.img.imdecode(img_content)
gt = self._imdb.label_from_index(index).copy() if self.is_train else None
data, label = self._data_augmentation(img, gt)
batch_data[i] = data
if self.is_train:
batch_label.append(label)
self._data = {'data': batch_data}
if self.is_train:
self._label = {'label': mx.nd.array(np.array(batch_label))}
else:
self._label = {'label': None}
def _data_augmentation(self, data, label):
"""
perform data augmentations: crop, mirror, resize, sub mean, swap channels...
"""
if self.is_train and self._rand_samplers:
rand_crops = []
for rs in self._rand_samplers:
rand_crops += rs.sample(label)
num_rand_crops = len(rand_crops)
# randomly pick up one as input data
if num_rand_crops > 0:
index = int(np.random.uniform(0, 1) * num_rand_crops)
width = data.shape[1]
height = data.shape[0]
crop = rand_crops[index][0]
xmin = int(crop[0] * width)
ymin = int(crop[1] * height)
xmax = int(crop[2] * width)
ymax = int(crop[3] * height)
if xmin >= 0 and ymin >= 0 and xmax <= width and ymax <= height:
data = mx.img.fixed_crop(data, xmin, ymin, xmax-xmin, ymax-ymin)
else:
# padding mode
new_width = xmax - xmin
new_height = ymax - ymin
offset_x = 0 - xmin
offset_y = 0 - ymin
data_bak = data
data = mx.nd.full((new_height, new_width, 3), 128, dtype='uint8')
data[offset_y:offset_y+height, offset_x:offset_x + width, :] = data_bak
label = rand_crops[index][1]
if self.is_train:
interp_methods = [cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, \
cv2.INTER_NEAREST, cv2.INTER_LANCZOS4]
else:
interp_methods = [cv2.INTER_LINEAR]
interp_method = interp_methods[int(np.random.uniform(0, 1) * len(interp_methods))]
data = mx.img.imresize(data, self._data_shape[0], self._data_shape[1], interp_method)
if self.is_train and self._rand_mirror:
if np.random.uniform(0, 1) > 0.5:
data = mx.nd.flip(data, axis=1)
valid_mask = np.where(label[:, 0] > -1)[0]
tmp = 1.0 - label[valid_mask, 1]
label[valid_mask, 1] = 1.0 - label[valid_mask, 3]
label[valid_mask, 3] = tmp
data = mx.nd.transpose(data, (2,0,1))
data = data.astype('float32')
data = data - self._mean_pixels
return data, label
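# Minimal usage sketch (not part of the original file; assumes an Imdb-like object
# `imdb` exposing num_images, image_path_from_index and label_from_index):
#
#   train_iter = DetIter(imdb, batch_size=32, data_shape=300,
#                        mean_pixels=[123, 117, 104], rand_mirror=True,
#                        shuffle=True, is_train=True)
#   batch = train_iter.next()   # an mx.io.DataBatch with 'data' and 'label'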
|
py | 1a54bd833cda9c0601bc7647082b20f1dcbb408c | from ._population import OrbitalDFTU, dmatpawu2params, params2dmatpawu, get_final_abinit_out
|
py | 1a54be0e9176614c159c725ea5cd384b1d06dbc2 | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from resource_management import *
from resource_management.libraries.functions.mounted_dirs_helper import handle_mounted_dirs
from utils import service
from ambari_commons.os_family_impl import OsFamilyImpl, OsFamilyFuncImpl
from ambari_commons import OSConst
def create_dirs(data_dir):
"""
:param data_dir: The directory to create
:param params: parameters
"""
import params
Directory(data_dir,
create_parents = True,
cd_access="a",
mode=0o755,
owner=params.hdfs_user,
group=params.user_group,
ignore_failures=True
)
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def datanode(action=None):
if action == "configure":
import params
Directory(params.dfs_domain_socket_dir,
create_parents = True,
mode=0o751,
owner=params.hdfs_user,
group=params.user_group)
# handle_mounted_dirs ensures that we don't create dfs data dirs which are temporary unavailable (unmounted), and intended to reside on a different mount.
data_dir_to_mount_file_content = handle_mounted_dirs(create_dirs, params.dfs_data_dirs, params.data_dir_mount_file, params)
# create a history file used by handle_mounted_dirs
File(params.data_dir_mount_file,
owner=params.hdfs_user,
group=params.user_group,
mode=0o644,
content=data_dir_to_mount_file_content
)
elif action == "start" or action == "stop":
import params
service(
action=action, name="datanode",
user=params.hdfs_user,
create_pid_dir=True,
create_log_dir=True
)
elif action == "status":
import status_params
check_process_status(status_params.datanode_pid_file)
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def datanode(action=None):
if action == "configure":
pass
elif(action == "start" or action == "stop"):
import params
Service(params.datanode_win_service_name, action=action)
elif action == "status":
import status_params
check_windows_service_status(status_params.datanode_win_service_name) |
py | 1a54be484c40c67d8610eac4c8dfd148e49a5ca1 | #!/usr/bin/env python
# encoding: utf-8
import numpy as np
import rospy
import time
# Import the message types from the different packages used to store data below.
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
from geometry_msgs.msg import PoseWithCovarianceStamped, PointStamped
from tf.transformations import euler_from_quaternion, quaternion_from_euler
from std_msgs.msg import Int16,Float64,Int32,Bool
follower_lin_vel=0.08 # Linear velocity of the follower robot.
map_size_x = 250.0 # Map size in "x" in cm.
map_size_y = 250.0 # Map size in "y" in cm.
resolution = 1.0 # Grid resolution in cm.
class FollowerRobot: # Class for the follower robot.
def __init__(self): # Class constructor.
# Declare constants that define the follower's behavior.
self.map_size_x = float(map_size_x) # Map size in "x" in cm.
self.map_size_y = float(map_size_y) # Map size in "y" in cm.
self.resolution = float(resolution) # Grid resolution in cm.
self.follower_lin_vel = float(follower_lin_vel) # Linear velocity of the follower robot.
self.lim_angular = 1.2 # Angular velocity limit.
# Declare variables used to follow the leader and move the follower.
self.turn_on = False # Flag so the robot starts without moving.
self.vel_msg = Twist() # Create the object used to publish velocities.
self.vel_msg.linear.x, self.vel_msg.linear.y, self.vel_msg.linear.z = [0.0,0.0,0.0] # Initialize the linear velocity to zero.
self.vel_msg.angular.x,self.vel_msg.angular.y,self.vel_msg.angular.z = [0.0,0.0,0.0] # Initialize the angular velocity to zero.
self.wp = list() # Create a list that stores the waypoints to reach.
# Create subscribers
self.sub_p_0 = rospy.Subscriber("/tb3_0/amcl_pose", PoseWithCovarianceStamped, self.LiderCallback, queue_size=1) # Callback that receives the leader robot's position.
self.sub_p_1 = rospy.Subscriber("/tb3_1/amcl_pose", PoseWithCovarianceStamped, self.FollowerCallback, queue_size=1) # Callback that receives the follower robot's position.
self.turn_on_subscriber=rospy.Subscriber("/turn_on",Bool,self.turn_onCallback,queue_size=1) # Create the go/stop subscriber.
# Create publishers
self.pub = rospy.Publisher('/tb3_1/cmd_vel', Twist, queue_size=10) # Create the publisher for the robot's velocity command.
self.pub.publish(self.vel_msg) # Publish the initial velocity.
def LiderCallback(self,data): # Method that receives the leader's positions and stores them in a list.
# Build the list of points the leader robot passes through
self.wp.insert(0,data) # Queue the point for the follower to reach.
#print('Leader position received, wp=', len(self.wp)) # Print how many points are left to follow.
def FollowerCallback(self, data): # Method executed each time a new follower position is received.
#print('Follower position received') # Report that a new position message arrived.
if self.turn_on==True: # Only run the routine if the robot was told to move.
wplen=len(self.wp) # Number of positions left to reach.
if wplen> 60: # If more than 60 positions are queued, run the path-following algorithm (keeps a safe distance from the leader).
# Apply the waypoint algorithm
# Leader data
xL = self.wp[-1].pose.pose.position.x # Oldest x position of the leader.
yL = self.wp[-1].pose.pose.position.y # Oldest y position of the leader.
orientation_q = self.wp[-1].pose.pose.orientation # Oldest orientation of the leader.
orientation_list = [orientation_q.x, orientation_q.y, orientation_q.z, orientation_q.w] # Store the orientation components in a list.
(rollL, pitchL, yawL) = euler_from_quaternion (orientation_list) # Convert the leader's orientation from a quaternion to Euler angles.
# Follower data
xF = data.pose.pose.position.x # Follower's x position.
yF = data.pose.pose.position.y # Follower's y position.
orientation_q = data.pose.pose.orientation # Follower's orientation as a quaternion.
orientation_list = [orientation_q.x, orientation_q.y, orientation_q.z, orientation_q.w] # Store the follower's orientation in a list.
(rollF, pitchF, yawF) = euler_from_quaternion (orientation_list) # Convert the follower's orientation from a quaternion to Euler angles.
#print('xF=', xF, 'yF=', yF) # Print the follower's position.
ka = 1.0 # Gain that scales the turning speed.
a = self.convert2pi(np.arctan2(yL-yF,xL-xF))-self.convert2pi(yawF) # Angular error between the bearing to the waypoint and the follower's heading.
avel = (a+np.pi)%(2*np.pi)-np.pi # Wrap the angle so the robot turns in the shorter direction.
if abs(avel)>45*np.pi/180: # If the angle to turn is larger than 45 degrees.
lvel = 0.0 # Rotate in place with no linear velocity.
else: # Otherwise.
lvel = self.follower_lin_vel # Move at the constant linear velocity while turning.
#print('angular velocity=', avel) # Print the angle to rotate.
avel = self.limitar(ka*avel,-self.lim_angular,self.lim_angular) # Clamp the angular velocity to its limit.
self.vel_msg.linear.x = lvel # Store the linear velocity.
self.vel_msg.angular.z = avel # Store the angular velocity.
self.pub.publish(self.vel_msg) # Publish the command so the follower moves at the computed velocity.
self.wp.pop() # Remove the waypoint that was just used.
else: # If not enough positions have been queued yet.
#print('Fixed robot velocity') # Report that the follower is not following yet.
self.vel_msg.linear.x,self.vel_msg.angular.z = [0.0,0.05] # Store the command that makes the robot spin slowly in place.
self.pub.publish(self.vel_msg) # Publish the command so the follower spins in place.
def turn_onCallback(self,data): # Method that starts or stops the follower robot.
self.turn_on=data.data # Read whether the follower robot should move.
if self.turn_on==False: # If the follower robot should not move.
#print('Stop the robot') # Report that the follower robot was stopped.
self.vel_msg.linear.x,self.vel_msg.angular.z = [0.0,0.05] # Store the command used to halt the follower's forward motion.
self.pub.publish(self.vel_msg) # Publish the velocity the follower will use.
else: # If the follower robot should move.
#print('Fixed robot velocity') # Report that the follower robot is following the leader.
self.vel_msg.linear.x,self.vel_msg.angular.z = [0.0,0.05] # Store the command that makes the robot spin in place.
self.pub.publish(self.vel_msg) # Publish the velocity the follower will use.
def limitar(self,x,lim_inf,lim_sup): # Method to clamp a value between limits.
if (x<=lim_inf): # If it is below the lower limit.
x = lim_inf # Use the lower limit.
elif (x>=lim_sup): # If it is above the upper limit.
x = lim_sup # Use the upper limit.
return x
def convert2pi(self,theta): # Method that converts an angle to the range 0 to 2*pi.
if theta<0: # If the angle is negative, shift it up by 2*pi.
theta=2*np.pi+theta
else: # Otherwise leave the angle unchanged.
theta=theta
return theta # Return the converted angle.
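# Example of the conversion performed above (illustrative values only):
# convert2pi(-np.pi/2) returns 3*np.pi/2, i.e. the same heading expressed in [0, 2*pi).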
def setStop(self): # Method to stop the robot.
self.vel_msg.linear.x,self.vel_msg.angular.z=[0,0] # Set the velocities to 0.
self.pub.publish(self.vel_msg) # Publish the velocity (the class publisher is self.pub).
def main():
rospy.init_node('FollowerRobot') # Start the follower robot node.
fr=FollowerRobot() # Construct the follower robot object.
print('Follower node initialized') # Report that the follower node started.
rospy.spin() # Keep processing callbacks while ROS is running.
if __name__ == '__main__':
try:
main() # Ejecuta el programa principal.
except rospy.ROSInterruptException:
pass |
py | 1a54bf67e57a54071dbb48a25f315aaeb3073bc7 | from parse_tree import parse_sentence
from statement import *
import statement
def reit(logic_val, reference):
if len(reference) > 1:
return False
logic_tree = parse_sentence(logic_val)
ref_tree = parse_sentence(reference[0])
return compareTree(logic_tree, ref_tree)
def orIntro(logic_val, reference):
if len(reference) > 1:
return False
logic_tree = parse_sentence(logic_val)
ref_tree = parse_sentence(reference[0])
temp_tree = logic_tree
# TODO: Make sure all top level are ors
#while isinstance(temp_tree, BinaryStatement):
# if not temp_tree.value == '|':
# return False
# temp_tree = temp_tree.left
ors = getAllOrOperands(logic_tree, [])
for i in ors:
print_tree(i)
if compareTree(i, ref_tree) == True:
return True
return False
def orElim(logic_val, reference):
if len(reference) == 0:
return False
logic_tree = parse_sentence(logic_val)
ref_tree = parse_sentence(reference[0])
ors = getAllOrOperands(ref_tree, [])
#TODO: Make sure all top level are ors
for ref in reference[1:]:
ref_tree2 = parse_sentence(ref)
for i in ors:
if compareTree(ref_tree2.left, i) == True:
break
else: return False
for ref in reference[1:]:
ref_tree2 = parse_sentence(ref)
if compareTree(ref_tree2.right, logic_tree) == False:
return False
return True
def andIntro(logic_val, reference):
if len(reference) == 0:
return False
logic_tree = parse_sentence(logic_val)
refs = []
for i in reference:
refs.append(parse_sentence(i))
# TODO: Make sure all top level are ands
ands = getAllAndOperands(logic_tree, [])
for i in ands:
for j in refs:
if compareTree(i, j) == True:
break
else:
return False
return True
def andElim(logic_val, reference):
if len(reference) > 1:
return False
logic_tree = parse_sentence(logic_val)
ref_tree = parse_sentence(reference[0])
temp_tree = ref_tree
# TODO: Make sure all top level are ands
#while isinstance(temp_tree, BinaryStatement):
# if not temp_tree.value == '|':
# return False
# temp_tree = temp_tree.left
ands = getAllAndOperands(ref_tree, [])
for i in ands:
if compareTree(i, logic_tree) == True:
return True
return False
def notIntro(logic_val, reference):
if len(reference) > 1:
return False
logic_tree = parse_sentence(logic_val)
ref_tree = parse_sentence(reference[0]) # this is a subproof
if not ref_tree.value == "-": # subproof condition
return False
if not isinstance(ref_tree.right, ContradictionStatement):
return False
neg = UnaryStatement("~", ref_tree.left)
return compareTree(neg, logic_tree)
def notElim(logic_val, reference):
if len(reference) > 1:
return False
logic_tree = parse_sentence(logic_val)
ref_tree = parse_sentence(reference[0])
if not ref_tree.value == "~":
return False
if not ref_tree.child.value == "~":
return False
return compareTree(ref_tree.child.child, logic_tree)
def contraIntro(logic_val, reference):
if len(reference) > 2:
return False
if not logic_val == "!":
return False
# Statements could have been selected out of order
ref_tree1 = parse_sentence(reference[0])
ref_tree2 = parse_sentence(reference[1])
neg = UnaryStatement("~", ref_tree1)
if compareTree(ref_tree2, neg) == True:
return True
else:
ref_tree1 = parse_sentence(reference[1])
ref_tree2 = parse_sentence(reference[0])
neg = UnaryStatement("~", ref_tree1)
if compareTree(ref_tree2, neg) == True:
return True
return False
def contraElim(logic_val, reference):
if len(reference) > 1:
return False
ref_tree = parse_sentence(reference[0])
return compareTree(ref_tree, ContradictionStatement())
def impIntro(logic_val, reference):
if len(reference) > 1:
return False
logic_tree = parse_sentence(logic_val)
ref_tree = parse_sentence(reference[0])
return compareTree(logic_tree, ref_tree)
def impElim(logic_val, reference):
if len(reference) > 2:
return False
ref_tree = parse_sentence(reference[0])
logic_tree = parse_sentence("(" + reference[1] + ") - (" + logic_val + ")")
return compareTree(ref_tree, logic_tree)
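# Example (assuming '-' denotes implication, as the line above builds):
#   impElim("Q", ["P - Q", "P"]) rebuilds "(P) - (Q)" from the second premise and the
#   conclusion and checks that it matches the cited implication "P - Q".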
def biIntro(logic_val, reference):
if len(reference) > 2:
return False
logic_tree = parse_sentence(logic_val)
if not logic_tree.value == "=":
return False
ref_tree1 = parse_sentence(reference[0])
ref_tree2 = parse_sentence(reference[1])
if compareTree(ref_tree1.left, ref_tree2.right) == False:
return False
if compareTree(ref_tree2.left, ref_tree1.right) == False:
return False
if compareTree(logic_tree.left, ref_tree1.left) == True and \
compareTree(logic_tree.right, ref_tree1.right) == True:
return True
elif compareTree(logic_tree.left, ref_tree2.left) == True and \
compareTree(logic_tree.right, ref_tree2.right) == True:
return True
else: return False
def biElim(logic_val, reference):
if len(reference) > 2:
return False
ref_tree = parse_sentence(reference[0])
logic_tree = parse_sentence("(" + reference[1] + ") = (" + logic_val + ")")
if compareTree(ref_tree, logic_tree) == True:
return True
logic_tree = parse_sentence("(" + logic_val + ") = (" + reference[1] + ")")
if compareTree(ref_tree, logic_tree) == True:
return True
ref_tree = parse_sentence(reference[1])
logic_tree = parse_sentence("(" + reference[0] + ") = (" + logic_val + ")")
if compareTree(ref_tree, logic_tree) == True:
return True
logic_tree = parse_sentence("(" + logic_val + ") = (" + reference[0] + ")")
if compareTree(ref_tree, logic_tree) == True:
return True
return False
|
py | 1a54bf73b73bab88df0bb97ba5fc320d0b51c25c | import logging
import pandas as pd
from scipy.sparse import csr_matrix
from sklearn.feature_extraction.text import TfidfVectorizer
from datasets.nlp import NLP
from representations.representation import Representation
logger = logging.getLogger()
class TfIdf(Representation):
def __init__(self, args):
self.args = args
self.vectorizer: TfidfVectorizer = TfidfVectorizer(lowercase=not self.args.no_lower_case, )
self.lower_case = not self.args.no_lower_case
self.nlp = NLP()
def fit_vectorizer(self, text, stem_first=True):
if stem_first:
stemmed = self.stem(text)
self.vectorizer.fit(stemmed.apply(lambda x: ' '.join(x)))
else:
self.vectorizer.fit(text)
def __call__(self, raw: pd.Series, fit_vectorizer: bool = True, stem_first=True) -> csr_matrix:
if stem_first:
logger.info('Stemming...')
data = self.stem(raw).apply(lambda x: ' '.join(x))
else:
data = raw
if fit_vectorizer:
logger.info('Fitting TF-IDF...')
self.vectorizer.fit(data)
result = self.vectorizer.transform(data)
return result
def stem(self, raw: pd.Series):
return raw.apply(
lambda x: [
self.nlp.stem(word) for word in self.nlp.tokenize(x) if self.nlp.stem(word) is not None
]
)
def get_vectorizer(self):
return self.vectorizer
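# Minimal usage sketch (not part of the original file; assumes an argparse-style
# `args` object with a boolean `no_lower_case` attribute, as the constructor expects):
#
#   rep = TfIdf(args)
#   matrix = rep(pd.Series(["first document", "second document text"]))
#   # `matrix` is a scipy.sparse csr_matrix of TF-IDF weights over stemmed tokens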
|
py | 1a54c08112220710efc2b742428ab1bd9133bb2e | #
# Copyright (c) 2019-2021 Triad National Security, LLC
# All rights reserved.
#
# This file is part of the bueno project. See the LICENSE file at the
# top-level directory of this distribution for more information.
#
'''
Bueno run script for the unstructured mesh physics mini-app,
PENNANT
'''
import re
import io
import csv
import sys
import typing
from bueno.public import container
from bueno.public import experiment
from bueno.public import logger
from bueno.public import metadata
from bueno.public import utils
# pylint: disable=too-few-public-methods
class AddArgsAction(experiment.CLIAddArgsAction):
'''
Handle custom argument processing
'''
def __call__(self, cliconfig: experiment.CLIConfiguration) -> None:
'''
New argument definitions
'''
cliconfig.argparser.add_argument(
'--pinfile',
help="pennant input file",
default='./experiments/nohsmall/nohsmall.pnt'
)
class Experiment:
'''
PENNANT benchmark definition
'''
def __init__(self, config: experiment.CLIConfiguration) -> None:
'''
Experiment configuration.
'''
experiment.name(config.args.name)
self.config = config
# PENNANT input file
self.pinfile = config.args.pinfile
self.data: typing.Dict[str, list] = {
'commands': list(),
'results': list()
}
# Emit program config to terminal & collected assets.
self.emit_conf()
self.add_assets()
def emit_conf(self) -> None:
'''
Emit configuration to terminal
'''
pcd = dict()
pcd['Program'] = vars(self.config.args)
utils.yamlp(pcd, 'Program')
def add_assets(self) -> None:
'''
Select additional assets to copy
'''
metadata.add_asset(metadata.FileAsset(self.config.args.input))
metadata.add_asset(metadata.FileAsset(self.pinfile))
def post_action(self, **kwargs: typing.Dict[str, str]) -> None:
'''
Post experiment iteration action
'''
logger.log('# Starting Post Action...')
cmd = kwargs.pop('command')
# Record command used in iteration.
self.data['commands'].append(cmd)
# Record timing data from PENNANT terminal output.
self.parse_output(kwargs.pop('output'))
def parse_output(self, out1: typing.List[str]) -> None:
'''
Parse timing results information from PENNANT terminal output.
'''
# Search for end of run data.
pos = -1
for pos, line in enumerate(out1):
if line == 'Run complete\n':
print('Found runtime table!')
break
# No data found, stop test.
if pos == -1:
logger.log('ERROR: No post-run data found')
sys.exit()
# Isolate terminal lines containing timing details.
timing = out1[pos + 1: pos + 6]
# Format end of run data.
results = []
for row in timing:
items = row.split(',')
for item in items:
if '*' in item or item == '\n':
continue # Skip empty or decorative lines.
# Trim whitespace.
item = re.sub(r'[ ]*\=[ ]+', ':', item)
item = item.strip()
# Remove unnecessary characters.
item = re.sub(r'[()]', '', item)
results.append(item.split(':')[1]) # Discard label
# Append iteration results to Experiment data
self.data['results'].append(results)
def run(self, genspec: str) -> None:
'''
Experiment iterations definition
'''
logger.log('# Starting Runs...')
# Generate the iterative run commands.
rcmd = self.config.args.runcmds
pruns = experiment.runcmds(rcmd[0], rcmd[1], rcmd[2], rcmd[3])
executable = self.config.args.executable
appargs = genspec.format(executable)
# Execute generated run commands.
for prun in pruns:
logger.log('')
container.prun(prun, appargs, postaction=self.post_action)
def report(self) -> None:
'''
Generate csv report from run iterations.
'''
logger.emlog(F'# {experiment.name()} Report')
# Setup table.
table = utils.Table()
sio = io.StringIO(newline=None)
dataraw = csv.writer(sio)
header = ['Cycle', 'Cstop', 'Time', 'Tstop', 'Hydro Cycle', 'Command']
dataraw.writerow(header)
table.addrow(header)
# Populate table.
for index, entry in enumerate(self.data['results']):
entry.append(self.data['commands'][index])
dataraw.writerow(entry)
table.addrow(entry)
# Write table to csv & display to terminal.
csvname = self.config.args.csv_output
metadata.add_asset(metadata.StringIOAsset(sio, csvname))
table.emit()
logger.log('')
def main(argv: typing.List[str]) -> None:
'''
Setup and start experiment.
'''
# Program description.
desc = 'bueno run script for PENNANT experiments.'
# Default Configuration.
defaults = experiment.CannedCLIConfiguration.Defaults
defaults.name = 'pennant'
defaults.description = desc
defaults.input = './experiments/config.txt'
defaults.executable = '~/PENNANT/build/pennant'
defaults.runcmds = (2, 2, 'mpirun -n %n', 'nidx + 1')
defaults.csv_output = 'data.csv'
# Compile and parse configuration.
config = experiment.CannedCLIConfiguration(desc, argv, defaults)
config.addargs(AddArgsAction)
config.parseargs()
for genspec in experiment.readgs(config.args.input, config):
# Update config after each iteration
exp = Experiment(config)
exp.run(genspec)
exp.report()
# vim: ft=python ts=4 sts=4 sw=4 expandtab
|
py | 1a54c0a2bcfd4fcbb05f2a4abc8db9fab290cc5c | n = int(input())
S = input()
T = input()
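# Count the positions at which S and T differ (the Hamming distance of the two length-n strings).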
result = 0
for i in range(n):
if S[i] != T[i]:
result += 1
print(result)
|
py | 1a54c13d50a882b242b9a0934c2256aae783b353 | #!/usr/bin/python3
import socket
import threading
import time
import numpy as np
from picamera2 import Picamera2
from picamera2.encoders import H264Encoder
from picamera2.outputs import CircularOutput, FileOutput
lsize = (320, 240)
picam2 = Picamera2()
video_config = picam2.video_configuration(main={"size": (1280, 720), "format": "RGB888"},
lores={"size": lsize, "format": "YUV420"})
picam2.configure(video_config)
picam2.start_preview()
encoder = H264Encoder(1000000, repeat=True)
circ = CircularOutput()
encoder.output = [circ]
picam2.encoder = encoder
picam2.start()
picam2.start_encoder()
w, h = lsize
prev = None
encoding = False
ltime = 0
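# Serve the encoded H.264 stream over TCP: each client that connects on port 10001 gets a
# FileOutput wired into the encoder alongside the circular buffer, and the handler blocks
# until that client's connection dies.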
def server():
global circ, picam2
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(("0.0.0.0", 10001))
sock.listen()
while tup := sock.accept():
event = threading.Event()
conn, addr = tup
stream = conn.makefile("wb")
filestream = FileOutput(stream)
filestream.start()
picam2.encoder.output = [circ, filestream]
filestream.connectiondead = lambda ex: event.set()
event.wait()
t = threading.Thread(target=server)
t.daemon = True
t.start()
while True:
cur = picam2.capture_buffer("lores")
cur = cur[:w * h].reshape(h, w)
if prev is not None:
# Measure pixels differences between current and
# previous frame
mse = np.square(np.subtract(cur, prev)).mean()
if mse > 7:
if not encoding:
epoch = int(time.time())
circ.fileoutput = "{}.h264".format(epoch)
circ.start()
encoding = True
print("New Motion", mse)
ltime = time.time()
else:
if encoding and time.time() - ltime > 5.0:
circ.stop()
encoding = False
prev = cur
picam2.stop_encoder()
|
py | 1a54c1cee8089fdf4d4a2588c536ec2933c37aca | #!/usr/bin/env python
from __future__ import print_function, division
import itertools, time, copy
import collections, random
import os, pickle
import numba
import numpy as np
board_size = 15
estimate_level = 4
t_random = 0 # e.g. 0.01; controls how random the bonus is for level=0
show_q = False
def strategy(state):
""" AI's strategy """
""" Information provided to you:
state = (board, last_move, playing, board_size)
board = (x_stones, o_stones)
stones is a set contains positions of one player's stones. e.g.
x_stones = {(8,8), (8,9), (8,10), (8,11)}
playing = 0|1, the current player's index
Your strategy will return a position code for the next stone, e.g. (8,7)
"""
global board_size
board, last_move, playing, board_size = state
initialize()
#print('estimate_level', estimate_level)
other_player = int(not playing)
my_stones = board[playing]
opponent_stones = board[other_player]
# put the first stone in the center if it's the start of the game
center = int((board_size-1)/2)
if last_move is None: # if it's the first move of the game
r = np.random.randint(board_size)
c = np.random.randint(board_size)
best_move = (r, c)
#best_move = (center, center)
strategy.zobrist_code = strategy.zobrist_me[best_move]
return (best_move[0]+1, best_move[1]+1)
else:
last_move = (last_move[0]-1, last_move[1]-1)
# update zobrist_code with opponent last move
strategy.zobrist_code ^= strategy.zobrist_opponent[last_move]
# build new state representation
state = np.zeros(board_size**2, dtype=np.int32).reshape(board_size, board_size)
for i,j in my_stones:
state[i-1,j-1] = 1
for i,j in opponent_stones:
state[i-1,j-1] = -1
#if strategy.zobrist_code in U_stone.cache:
# print("Calculated Move: %.3f" %U_stone.cache[strategy.zobrist_code])
#else:
# print("Didn't know this move!")
if len(my_stones) == 0:
level = 7
else:
level = 0
# clear the U cache
U_stone.cache = dict()
alpha = -1.0
beta = 2.0
empty_spots_left = np.sum(state==0)
best_move, best_q = best_action_q(state, strategy.zobrist_code, empty_spots_left, last_move, alpha, beta, 1, level)
if show_q:
print("best_q = %f" % best_q)
# update zobrist_code with my move
strategy.zobrist_code ^= strategy.zobrist_me[best_move]
# return the best move
return (best_move[0]+1, best_move[1]+1)
level_max_n = [20, 20, 12, 12, 8, 8, 6, 6, 4, 4, 4, 4, 4, 4, 4]
def best_action_q(state, zobrist_code, empty_spots_left, last_move, alpha, beta, player, level):
"Return the optimal action for a state"
if empty_spots_left == 0: # Board filled up, it's a tie
return None, 0.5
#move_interest_values = np.zeros(board_size**2, dtype=np.float32).reshape(board_size,board_size)
move_interest_values = best_action_q.move_interest_values
move_interest_values.fill(0) # reuse the same array
# boost the interests of closer moves by a little bit
# note that it might boost a taken spot, but an available spot will at least get 10 interest in find_interesting_moves()
boost_dist = 3
r, c = last_move
xmin = max(0, r-boost_dist)
xmax = min(board_size, r+boost_dist+1)
ymin = max(0, c-boost_dist)
ymax = min(board_size, c+boost_dist+1)
move_interest_values[xmin:xmax, ymin:ymax] = 1.5
verbose = False
#if level == 0:
# verbose = True
n_moves = level_max_n[level]
interested_moves = find_interesting_moves(state, empty_spots_left, move_interest_values, player, n_moves, verbose)
if len(interested_moves) == 1:
current_move = interested_moves[0]
current_move = (current_move[0], current_move[1])
q = Q_stone(state, zobrist_code, empty_spots_left, current_move, alpha, beta, player, level)
if verbose: print(current_move, q)
return current_move, q
#best_move = (-1,-1) # admit defeat if all moves have 0 win rate
best_move = (interested_moves[0,0], interested_moves[0,1]) # continue to play even I'm losing
if player == 1:
max_q = 0.0
max_bonused_q = 0.0
for current_move in interested_moves:
current_move = (current_move[0], current_move[1])
q = Q_stone(state, zobrist_code, empty_spots_left, current_move, alpha, beta, player, level+1)
if level == 0 and q > 0:
bonus_q = abs(np.random.normal(0, t_random)) / (226-empty_spots_left)**2
if q + bonus_q > max_q:
max_q = q + bonus_q
best_move = current_move
max_bonused_q = bonus_q
else:
if q > alpha: alpha = q
if q > max_q:
max_q = q
best_move = current_move
if verbose:
print(current_move, q)
if q == 1.0 or beta <= alpha:
break
best_q = max_q - max_bonused_q
elif player == -1:
min_q = 1.0
for current_move in interested_moves:
current_move = (current_move[0], current_move[1])
q = Q_stone(state, zobrist_code, empty_spots_left, current_move, alpha, beta, player, level+1)
if q < beta: beta = q
if q < min_q:
min_q = q
best_move = current_move
if q == 0.0 or beta <= alpha:
break
best_q = min_q
return best_move, best_q
@numba.jit(nopython=True, nogil=True)
def find_interesting_moves(state, empty_spots_left, move_interest_values, player, n_moves, verbose=False):
""" Look at state and find the interesing n_move moves.
input:
-------
state: numpy.array board_size x board_size
empty_spots_left: number of empty spots on the board
player: 1 or -1, the current player
n_moves: int, desired number of interesing moves
output:
-------
interested_moves: numpy.array final_n_moves x 2
*note : final_n_moves = 1 if limited
* else final_n_moves = n_moves + number of length-4 moves
*note2: final_n_moves will not exceed empty_spots_left
#suggested_n_moves: suggested number of moves to
"""
force_to_block = False
exist_will_win_move = False
directions = ((1,1), (1,0), (0,1), (1,-1))
final_single_move = np.zeros(2, dtype=np.int64).reshape(1,2) # for returning the single move
for r in range(board_size):
for c in range(board_size):
if state[r,c] != 0: continue
interest_value = 10 # as long as it's a valid point, this is for avoiding the taken spaces
my_hard_4 = 0
for dr, dc in directions:
my_line_length = 1 # last_move
opponent_line_length = 1
# try to extend in the positive direction (max 4 times)
ext_r = r
ext_c = c
skipped_1 = 0
my_blocked = False
opponent_blocked = False
for i in range(4):
ext_r += dr
ext_c += dc
if ext_r < 0 or ext_r >= board_size or ext_c < 0 or ext_c >= board_size:
break
elif state[ext_r, ext_c] == player:
if my_blocked is True:
break
else:
my_line_length += 1
opponent_blocked = True
elif state[ext_r, ext_c] == -player:
if opponent_blocked is True:
break
else:
opponent_line_length += 1
my_blocked = True
elif skipped_1 == 0:
skipped_1 = i + 1 # allow one skip and record the position of the skip
else:
break
# the backward counting starts at the furthest "unskipped" stone
forward_my_open = False
forward_opponent_open = False
if skipped_1 == 0:
my_line_length_back = my_line_length
opponent_line_length_back = opponent_line_length
elif skipped_1 == 1:
my_line_length_back = 1
opponent_line_length_back = 1
forward_my_open = True
forward_opponent_open = True
else:
if my_blocked is False:
my_line_length_back = skipped_1
opponent_line_length_back = 1
forward_my_open = True
else:
my_line_length_back = 1
opponent_line_length_back = skipped_1
forward_opponent_open = True
my_line_length_no_skip = my_line_length_back
opponent_line_length_no_skip = opponent_line_length_back
# backward is a little complicated, will try to extend my stones first
ext_r = r
ext_c = c
skipped_2 = 0
opponent_blocked = False
for i in range(5-my_line_length_no_skip):
ext_r -= dr
ext_c -= dc
if ext_r < 0 or ext_r >= board_size or ext_c < 0 or ext_c >= board_size:
break
elif state[ext_r, ext_c] == player:
my_line_length_back += 1
opponent_blocked = True
elif skipped_2 == 0 and state[ext_r, ext_c] == 0:
skipped_2 = i + 1
else:
break
# see if i'm winning
if my_line_length_back == 5:
final_single_move[0,0] = r
final_single_move[0,1] = c
return final_single_move
#interested_n_moves[0] = move
#return interested_n_moves, True
# extend my forward line length to check if there is hard 4
if skipped_2 == 0:
my_line_length += my_line_length_back - my_line_length_no_skip
else:
my_line_length += skipped_2 - 1
# notice that here the forward length can exceed 5 after extension, but it should be at max 4
if my_line_length > 4:
my_line_length = 4
backward_my_open = True if skipped_2 > 0 else False
backward_opponent_open = False
# then try to extend the opponent
if opponent_blocked is True:
if skipped_2 == 1:
backward_opponent_open = True
else:
ext_r = r
ext_c = c
skipped_2 = 0
for i in range(5-opponent_line_length_no_skip):
ext_r -= dr
ext_c -= dc
if ext_r < 0 or ext_r >= board_size or ext_c < 0 or ext_c >= board_size:
break
elif state[ext_r, ext_c] == -player:
opponent_line_length_back += 1
elif skipped_2 == 0 and state[ext_r, ext_c] == 0:
skipped_2 = i + 1
else:
break
# extend my forward line length to check if there is hard 4
if skipped_2 == 0:
opponent_line_length += opponent_line_length_back - opponent_line_length_no_skip
else:
opponent_line_length += skipped_2 - 1
backward_opponent_open = True
# here if opponent_line_length_back == 5, skipped_2 will be 0 and this flag won't be True
# but it do not affect our final result, because we have to block this no matter if it's open
# notice that here the forward length can exceed 5 after extension, but it should be at max 4
if opponent_line_length > 4:
opponent_line_length = 4
# check if we have to block this
if opponent_line_length_back == 5:
final_single_move[0,0] = r
final_single_move[0,1] = c
force_to_block = True
elif force_to_block is False:
# if I will win after this move, I won't consider other moves
if forward_my_open is True and my_line_length == 4:
my_hard_4 += 1
if backward_my_open is True and my_line_length_back == 4:
my_hard_4 += 1
if my_hard_4 >= 2:
final_single_move[0,0] = r
final_single_move[0,1] = c
exist_will_win_move = True
if force_to_block is False and exist_will_win_move is False:
# compute the interest_value for other moves
if forward_my_open is True:
interest_value += my_line_length ** 4
if backward_my_open is True:
interest_value += my_line_length_back ** 4
if forward_opponent_open is True:
interest_value += opponent_line_length ** 4
if backward_opponent_open is True:
interest_value += opponent_line_length_back ** 4
# after looking at all directions, record the total interest_value of this move
move_interest_values[r, c] += interest_value
if interest_value > 256: # one (length_4) ** 4, highly interesting move
n_moves += 1
# all moves have been investigated now see if we have to block first
if force_to_block is True or exist_will_win_move is True:
if verbose is True:
print(final_single_move[0,0], final_single_move[0,1], "Only One")
return final_single_move
else:
flattened_interest = move_interest_values.ravel()
# The interest value > 250 means at least one length_4 or three length_3 which make it highly interesting
#n_high_interest_moves = np.sum(flattened_interest > 266) # did it in the loop
if n_moves > empty_spots_left:
n_moves = empty_spots_left
high_interest_idx = np.argsort(flattened_interest)[-n_moves:][::-1]
interested_moves = np.empty(n_moves*2, dtype=np.int64).reshape(n_moves, 2)
interested_moves[:,0] = high_interest_idx // board_size
interested_moves[:,1] = high_interest_idx % board_size
if verbose is True:
print("There are", n_moves, "interested_moves")
for i in range(n_moves):
print(interested_moves[i,0],interested_moves[i,1],' : ', flattened_interest[high_interest_idx[i]])
return interested_moves
def Q_stone(state, zobrist_code, empty_spots_left, current_move, alpha, beta, player, level):
# update the state
state[current_move] = player
# update the zobrist code for the new state
if player == 1:
move_code = strategy.zobrist_me[current_move]
else:
move_code = strategy.zobrist_opponent[current_move]
new_zobrist_code = zobrist_code ^ move_code
result = U_stone(state, new_zobrist_code, empty_spots_left-1, current_move, alpha, beta, player, level)
# revert the changes for the state
state[current_move] = 0
return result
def U_stone(state, zobrist_code, empty_spots_left, last_move, alpha, beta, player, level):
try:
return U_stone.cache[zobrist_code]
except:
pass
if i_will_win(state, last_move, player):
return 1.0 if player == 1 else 0.0
elif level >= estimate_level:
result = estimate_U(state, player)
else:
best_move, best_q = best_action_q(state, zobrist_code, empty_spots_left, last_move, alpha, beta, -player, level)
result = best_q
U_stone.cache[zobrist_code] = result
return result
@numba.jit(nopython=True, nogil=True)
def estimate_U(state, player):
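"""Heuristic evaluation used once the search depth limit is reached: for every 5-cell
window (rows, columns, both diagonals) that contains no opponent stone, add
3**(number of player-1 stones in it); subtract the mirror-image term for player -1;
bias the total toward whoever moves next; then squash the score into (0, 1)."""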
u = 0.0
my_max_n = 0
opponent_max_n = 0
for i in range(board_size):
for j in range(board_size):
# horizontal wins --
if j <= board_size - 5:
my_blocked, opponent_blocked = False, False
my_n, opponent_n = 0, 0
for k in range(5):
if state[i, j+k] == -1:
my_blocked = True
opponent_n += 1
elif state[i, j+k] == 1:
opponent_blocked = True
my_n += 1
if my_blocked is True and opponent_blocked is True:
break
if my_blocked is False:
u += 3 ** my_n
if my_n > my_max_n:
my_max_n = my_n
if opponent_blocked is False:
u -= 3 ** opponent_n
if opponent_n > opponent_max_n:
opponent_max_n = opponent_n
# vertical wins |
if i <= board_size - 5:
my_blocked, opponent_blocked = False, False
my_n, opponent_n = 0, 0
for k in range(5):
if state[i+k, j] == -1:
my_blocked = True
opponent_n += 1
elif state[i+k, j] == 1:
opponent_blocked = True
my_n += 1
if my_blocked is True and opponent_blocked is True:
break
if my_blocked is False:
u += 3 ** my_n
if my_n > my_max_n:
my_max_n = my_n
if opponent_blocked is False:
u -= 3 ** opponent_n
if opponent_n > opponent_max_n:
opponent_max_n = opponent_n
# left oblique wins /
if i <= board_size - 5 and j >= 4:
my_blocked, opponent_blocked = False, False
my_n, opponent_n = 0, 0
for k in range(5):
if state[i+k, j-k] == -1:
my_blocked = True
opponent_n += 1
elif state[i+k, j-k] == 1:
opponent_blocked = True
my_n += 1
if my_blocked is True and opponent_blocked is True:
break
if my_blocked is False:
u += 3 ** my_n
if my_n > my_max_n:
my_max_n = my_n
if opponent_blocked is False:
u -= 3 ** opponent_n
if opponent_n > opponent_max_n:
opponent_max_n = opponent_n
# right oblique wins \
if i <= board_size - 5 and j <= board_size - 5:
my_blocked, opponent_blocked = False, False
my_n, opponent_n = 0, 0
for k in range(5):
if state[i+k, j+k] == -1:
my_blocked = True
opponent_n += 1
elif state[i+k, j+k] == 1:
opponent_blocked = True
my_n += 1
if my_blocked is True and opponent_blocked is True:
break
if my_blocked is False:
u += 3 ** my_n
if my_n > my_max_n:
my_max_n = my_n
if opponent_blocked is False:
u -= 3 ** opponent_n
if opponent_n > opponent_max_n:
opponent_max_n = opponent_n
if player == 1: # next move is opponent
longer = 2 * (3 **opponent_max_n) # one of the longest can get 1 longer
block = 3 ** my_max_n
u -= max(longer, block)
else: # next move is me
longer = 2 * (3 ** my_max_n)
block = 3 ** opponent_max_n
u += max(longer, block)
if u > 0:
result = 1.0 - 0.5 * np.exp(-u**2 * 0.0001)
else:
result = 0.5 * np.exp(-u**2 * 0.0001)
return result
@numba.jit(nopython=True,nogil=True)
def i_win(state, last_move, player):
""" Return true if I just got 5-in-a-row with last_move """
r, c = last_move
# try all 4 directions, the other 4 is included
directions = [(1,1), (1,0), (0,1), (1,-1)]
for dr, dc in directions:
line_length = 1 # last_move
# try to extend in the positive direction (max 4 times)
ext_r = r
ext_c = c
for _ in range(4):
ext_r += dr
ext_c += dc
if ext_r < 0 or ext_r >= board_size or ext_c < 0 or ext_c >= board_size:
break
elif state[ext_r, ext_c] == player:
line_length += 1
else:
break
if line_length == 5:
return True # 5 in a row
# try to extend in the opposite direction
ext_r = r
ext_c = c
for _ in range(5-line_length):
ext_r -= dr
ext_c -= dc
if ext_r < 0 or ext_r >= board_size or ext_c < 0 or ext_c >= board_size:
break
elif state[ext_r, ext_c] == player:
line_length += 1
else:
break
if line_length == 5:
return True # 5 in a row
return False
@numba.jit(nopython=True,nogil=True)
def i_will_win(state, last_move, player):
""" Return true if I will win next step if the opponent don't have 4-in-a-row.
Winning Conditions:
1. 5 in a row.
2. 4 in a row with both end open. (free 4)
3. 4 in a row with one missing stone x 2 (hard 4 x 2)
"""
r, c = last_move
# try all 4 directions, the other 4 is equivalent
directions = [(1,1), (1,0), (0,1), (1,-1)]
n_hard_4 = 0 # number of hard 4s found
for dr, dc in directions:
#print(dr, dc)
line_length = 1 # last_move
# try to extend in the positive direction (max 4 times)
ext_r = r
ext_c = c
skipped_1 = 0
for i in range(4):
ext_r += dr
ext_c += dc
if ext_r < 0 or ext_r >= board_size or ext_c < 0 or ext_c >= board_size:
break
elif state[ext_r, ext_c] == player:
line_length += 1
elif skipped_1 == 0 and state[ext_r, ext_c] == 0:
skipped_1 = i+1 # allow one skip and record the position of the skip
else:
break
if line_length == 5:
return True # 5 in a row
#print("Forward line_length",line_length)
# try to extend in the opposite direction
ext_r = r
ext_c = c
skipped_2 = 0
# the backward counting starts at the furthest "unskipped" stone
if skipped_1 != 0:
line_length_back = skipped_1
else:
line_length_back = line_length
line_length_no_skip = line_length_back
for i in range(5-line_length_back):
ext_r -= dr
ext_c -= dc
if ext_r < 0 or ext_r >= board_size or ext_c < 0 or ext_c >= board_size:
break
elif state[ext_r, ext_c] == player:
line_length_back += 1
elif skipped_2 == 0 and state[ext_r, ext_c] == 0:
skipped_2 = i + 1
else:
break
#print("Backward line_length",line_length_back)
if line_length_back == 5:
return True # 5 in a row
if line_length_back == 4 and skipped_2 != 0:
n_hard_4 += 1 # backward hard 4
if n_hard_4 == 2:
return True # two hard 4
#print("back n_hard_4 = ", n_hard_4)
# extend the forward line to the furthest "unskipped" stone
#print("line_length_back", line_length_back)
if skipped_2 == 0:
line_length += line_length_back - line_length_no_skip
else:
line_length += skipped_2 - 1
if line_length >= 4 and skipped_1 != 0:
n_hard_4 += 1 # forward hard 4
if n_hard_4 == 2:
return True # two hard 4 or free 4
#print('total n_hard_4', n_hard_4)
return False
def initialize():
# initialize zobrist for u caching
if not hasattr(strategy, 'zobrist_me'):
strategy.zobrist_me = np.random.randint(np.iinfo(np.int64).max, size=board_size**2).reshape(board_size,board_size)
#if not hasattr(strategy, 'zobrist_opponent'):
strategy.zobrist_opponent = np.random.randint(np.iinfo(np.int64).max, size=board_size**2).reshape(board_size,board_size)
#if not hasattr(strategy, 'zobrist_code'):
strategy.zobrist_code = 0
if not hasattr(U_stone, 'cache'):
U_stone.cache = dict()
if not hasattr(best_action_q, 'move_interest_values'):
best_action_q.move_interest_values = np.zeros(board_size**2, dtype=np.float32).reshape(board_size,board_size)
def finish():
del strategy.zobrist_me
del strategy.zobrist_opponent
del strategy.zobrist_code
del U_stone.cache
del best_action_q.move_interest_values
return
def board_show(stones):
if isinstance(stones, np.ndarray):
stones = {(s1,s2) for s1, s2 in stones}
print(' '*4 + ' '.join([chr(97+i) for i in range(board_size)]))
print (' '*3 + '='*(2*board_size))
for x in range(1, board_size+1):
row = ['%2s|'%x]
for y in range(1, board_size+1):
if (x-1,y-1) in stones:
c = 'x'
else:
c = '-'
row.append(c)
print (' '.join(row))
def print_state(state):
assert isinstance(state, np.ndarray)
print(' '*4 + ' '.join([chr(97+i) for i in range(board_size)]))
print (' '*3 + '='*(2*board_size))
for x in range(1, board_size+1):
row = ['%2s|'%x]
for y in range(1, board_size+1):
if state[x-1,y-1] == 1:
c = 'o'
elif state[x-1,y-1] == -1:
c = 'x'
else:
c = '-'
row.append(c)
print (' '.join(row))
def check():
global board_size
board_size = 15
state = np.zeros(board_size**2, dtype=np.int32).reshape(board_size, board_size)
# check if i_win() is working properly
state[tuple(zip(*[(8,9), (8,11), (8,8), (8,10), (8,12)]))] = 1
assert i_win(state, (8,10), 1) == True
state.fill(0)
state[tuple(zip(*[(8,10), (9,11), (8,8), (9,12), (7,9), (10,9), (11,12), (11,13)]))] = 1
assert i_win(state, (10,12), 1) == True
state.fill(0)
state[tuple(zip(*[(8,10), (8,12), (8,8), (9,12), (7,9), (10,9), (11,12), (11,13)]))] = 1
assert i_win(state, (10,12), 1) == False
# check if i_will_win() is working properly
# o - x x X x - o
state.fill(0)
state[tuple(zip(*[(8,9), (8,11), (8,8)]))] = 1
state[tuple(zip(*[(8,6), (8,13)]))] = -1
assert i_will_win(state, (8, 10), 1) == True
#
state.fill(0)
state[tuple(zip(*[(7,7), (7,8), (9,11)]))] = 1
state[tuple(zip(*[(6,8), (7,9)]))] = -1
print(state)
assert i_will_win(state, (8,10), -1) == False
## o - x x X x o
#assert i_will_win({(8,9), (8,11), (8,8)}, {(8,6), (8,12)}, (8,10)) == False
## o - x x X o
## x
##
## x
## x
#assert i_will_win({(8,9), (8,8), (9,10), (11,10), (12,10)}, {(8,6), (8,11)}, (8,10)) == False
## o - x x X x o
## x
##
## x
## x
#assert i_will_win({(8,9), (8,8), (9,10), (11,10), (12,10)}, {(8,6), (8,11)}, (8,10)) == False
## o - x x X x o
## x
##
## x
## x
#assert i_will_win({ (8,8), (8,9), (8,11), (9,9), (11,7), (12,6)}, {(8,6), (8,12)}, (8,10)) == True
## | x x x X - x x x - - o
#assert i_will_win({(8,1), (8,2), (8,0), (8,9), (8,7), (8,8)}, {(8,10)}, (8,3)) == False
## | x x - x X x x o
#assert i_will_win({(8,1), (8,2), (8,4), (8,6), (8,7)}, {(8,8)}, (8,5)) == False
## | x x - x X - x x o
#assert i_will_win({(8,1), (8,2), (8,4), (8,7), (8,8)}, {(8,9)}, (8,5)) == True
## | x x x - X - x x x o
#assert i_will_win({(8,1), (8,2), (8,3), (8,7), (8,8), (8,9)}, {(8,10)}, (8,5)) == True
## | x - x X x - x o
#assert i_will_win({(8,1), (8,3), (8,5), (8,7)}, {(8,8)}, (8,4)) == True
#assert i_will_win({(8,8), (8,10), (9,9), (11,7), (11,9)}, {(7,7), (7,9), (8,7), (10,8), (11,8)}, (8,9)) == False
print("All check passed!")
if __name__ == '__main__':
import pickle
state = pickle.load(open('debug.state','rb'))
board, last_move, playing, board_size = state
player_stones = board[playing]
other = int(not playing)
ai_stones = board[other]
player_move = (8,9)
player_stones.add(player_move)
state = (player_stones, ai_stones), player_move, other, board_size
strategy(state)
#import time
#check()
#test3()
#benchmark()
#benchmark2()
|
py | 1a54c20b3fbfe05a9d9288af88f696f6ef2f8219 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.ads.googleads.v9.resources.types import extension_feed_item
from google.ads.googleads.v9.services.types import extension_feed_item_service
from google.rpc import status_pb2 # type: ignore
from .transports.base import (
ExtensionFeedItemServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import ExtensionFeedItemServiceGrpcTransport
class ExtensionFeedItemServiceClientMeta(type):
"""Metaclass for the ExtensionFeedItemService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[ExtensionFeedItemServiceTransport]]
_transport_registry["grpc"] = ExtensionFeedItemServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[ExtensionFeedItemServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class ExtensionFeedItemServiceClient(
metaclass=ExtensionFeedItemServiceClientMeta
):
"""Service to manage extension feed items."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ExtensionFeedItemServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ExtensionFeedItemServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
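# Example (hypothetical key file path):
#   client = ExtensionFeedItemServiceClient.from_service_account_file("service_account.json")
# `from_service_account_json` above is simply an alias for the same constructor.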
@property
def transport(self) -> ExtensionFeedItemServiceTransport:
"""Return the transport used by the client instance.
Returns:
ExtensionFeedItemServiceTransport: The transport used by the client instance.
"""
return self._transport
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
@staticmethod
def ad_group_path(customer_id: str, ad_group_id: str,) -> str:
"""Return a fully-qualified ad_group string."""
return "customers/{customer_id}/adGroups/{ad_group_id}".format(
customer_id=customer_id, ad_group_id=ad_group_id,
)
@staticmethod
def parse_ad_group_path(path: str) -> Dict[str, str]:
"""Parse a ad_group path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/adGroups/(?P<ad_group_id>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def asset_path(customer_id: str, asset_id: str,) -> str:
"""Return a fully-qualified asset string."""
return "customers/{customer_id}/assets/{asset_id}".format(
customer_id=customer_id, asset_id=asset_id,
)
@staticmethod
def parse_asset_path(path: str) -> Dict[str, str]:
"""Parse a asset path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/assets/(?P<asset_id>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def campaign_path(customer_id: str, campaign_id: str,) -> str:
"""Return a fully-qualified campaign string."""
return "customers/{customer_id}/campaigns/{campaign_id}".format(
customer_id=customer_id, campaign_id=campaign_id,
)
@staticmethod
def parse_campaign_path(path: str) -> Dict[str, str]:
"""Parse a campaign path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/campaigns/(?P<campaign_id>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def extension_feed_item_path(customer_id: str, feed_item_id: str,) -> str:
"""Return a fully-qualified extension_feed_item string."""
return "customers/{customer_id}/extensionFeedItems/{feed_item_id}".format(
customer_id=customer_id, feed_item_id=feed_item_id,
)
@staticmethod
def parse_extension_feed_item_path(path: str) -> Dict[str, str]:
"""Parse a extension_feed_item path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/extensionFeedItems/(?P<feed_item_id>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def geo_target_constant_path(criterion_id: str,) -> str:
"""Return a fully-qualified geo_target_constant string."""
return "geoTargetConstants/{criterion_id}".format(
criterion_id=criterion_id,
)
@staticmethod
def parse_geo_target_constant_path(path: str) -> Dict[str, str]:
"""Parse a geo_target_constant path into its component segments."""
m = re.match(r"^geoTargetConstants/(?P<criterion_id>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, ExtensionFeedItemServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the extension feed item service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.ExtensionFeedItemServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
"true",
"false",
):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
use_client_cert = (
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, ExtensionFeedItemServiceTransport):
# transport is an ExtensionFeedItemServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = ExtensionFeedItemServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
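# Construction sketch (assumed usage, not part of the generated code): with no
# arguments, credentials are resolved from the environment and a gRPC transport
# is built against DEFAULT_ENDPOINT; client_options may override the endpoint.
#   client = ExtensionFeedItemServiceClient(
#       client_options={"api_endpoint": "googleads.googleapis.com"})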
def get_extension_feed_item(
self,
request: Union[
extension_feed_item_service.GetExtensionFeedItemRequest, dict
] = None,
*,
resource_name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> extension_feed_item.ExtensionFeedItem:
r"""Returns the requested extension feed item in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (Union[google.ads.googleads.v9.services.types.GetExtensionFeedItemRequest, dict]):
The request object. Request message for
[ExtensionFeedItemService.GetExtensionFeedItem][google.ads.googleads.v9.services.ExtensionFeedItemService.GetExtensionFeedItem].
resource_name (:class:`str`):
Required. The resource name of the
extension feed item to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v9.resources.types.ExtensionFeedItem:
An extension feed item.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in an extension_feed_item_service.GetExtensionFeedItemRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, extension_feed_item_service.GetExtensionFeedItemRequest
):
request = extension_feed_item_service.GetExtensionFeedItemRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_extension_feed_item
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
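# Usage sketch for the call above (hypothetical customer and feed item IDs):
#   name = client.extension_feed_item_path("1234567890", "987654321")
#   item = client.get_extension_feed_item(resource_name=name)
#   print(item.resource_name)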
def mutate_extension_feed_items(
self,
request: Union[
extension_feed_item_service.MutateExtensionFeedItemsRequest, dict
] = None,
*,
customer_id: str = None,
operations: Sequence[
extension_feed_item_service.ExtensionFeedItemOperation
] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> extension_feed_item_service.MutateExtensionFeedItemsResponse:
r"""Creates, updates, or removes extension feed items. Operation
statuses are returned.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `CollectionSizeError <>`__
`CountryCodeError <>`__ `DatabaseError <>`__ `DateError <>`__
`DistinctError <>`__ `ExtensionFeedItemError <>`__
`FieldError <>`__ `FieldMaskError <>`__ `HeaderError <>`__
`ImageError <>`__ `InternalError <>`__ `LanguageCodeError <>`__
`MutateError <>`__ `NewResourceCreationError <>`__
`OperationAccessDeniedError <>`__ `QuotaError <>`__
`RangeError <>`__ `RequestError <>`__ `SizeLimitError <>`__
`StringLengthError <>`__ `UrlFieldError <>`__
Args:
request (Union[google.ads.googleads.v9.services.types.MutateExtensionFeedItemsRequest, dict]):
The request object. Request message for
[ExtensionFeedItemService.MutateExtensionFeedItems][google.ads.googleads.v9.services.ExtensionFeedItemService.MutateExtensionFeedItems].
customer_id (:class:`str`):
Required. The ID of the customer
whose extension feed items are being
modified.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operations (:class:`Sequence[google.ads.googleads.v9.services.types.ExtensionFeedItemOperation]`):
Required. The list of operations to
perform on individual extension feed
items.
This corresponds to the ``operations`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v9.services.types.MutateExtensionFeedItemsResponse:
Response message for an extension
feed item mutate.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([customer_id, operations]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in an extension_feed_item_service.MutateExtensionFeedItemsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, extension_feed_item_service.MutateExtensionFeedItemsRequest
):
request = extension_feed_item_service.MutateExtensionFeedItemsRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if operations is not None:
request.operations = operations
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.mutate_extension_feed_items
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("customer_id", request.customer_id),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
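# Usage sketch for the mutate call above (hypothetical customer ID; the
# operation objects are assumed to be built from the generated types module):
#   response = client.mutate_extension_feed_items(
#       customer_id="1234567890", operations=[operation])
#   for result in response.results:
#       print(result.resource_name)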
__all__ = ("ExtensionFeedItemServiceClient",)
|
py | 1a54c268fd6b4d7cf4914385b5cb497af7e79ecc | import json
import os
table_objects = ["bowl", "bottle", "can", "computer keyboard", "keypad", "display", "phone", "jar", "knife", "lamp", "laptop", "microphone"
"mug", "remote", "wine bottle"]
data = json.load(open('taxonomy.json'))
for datapoint in data:
for obj in table_objects:
if obj in datapoint['name']:
os.system('unzip {}.zip'.format(datapoint['synsetId']))
break |
py | 1a54c2c910d951a52ae550c4acecf98b771930b3 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-03-16 17:29
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('dashboard_builder', '0011_auto_20190301_1529'),
# ('dashboard_builder', '0007_merge_20181120_1538'),
]
operations = [
]
|
py | 1a54c398098f3076281b4967ecf44e3395b6c754 | from scripts.logger import Logger
from wows.wowsapi import WowsApi
from wows.wowsdb import Wows_database
import math
class WorldofWarships:
def __init__(self, key, db_path):
self.logger = Logger(self.__class__.__name__)
self.logger.debug('Initializing wows class.')
self.wowsapi = WowsApi(key)
self.wowsdb = Wows_database(db_path)
def update(self):
# check version
version_db = self.wowsdb.get_db_version()
version_api = self.wowsapi.get_api_version()
# return if version is up to date.
if version_db == version_api:
self.logger.debug(f'Returning as database has latest version {version_db}.')
return
self.update_warships()
self.update_shipparams()
# finally update version
self.wowsdb.update_version(version_api)
def update_warships(self):
"""
Update warships table in database.
"""
self.logger.debug('Updating warships in database.')
warships_count = self.wowsapi.get_warships_count()
pages = math.ceil(warships_count / 100)
warships_api = self.wowsapi.get_warships(pages)
warships_db = self.wowsdb.get_warships()
warships_db_ids = [warship.ship_id for warship in warships_db]
for warship in warships_api:
# if warship not found in db, register
if warship.ship_id not in warships_db_ids:
self.wowsdb.register_ship(warship)
else:
index = warships_db_ids.index(warship.ship_id)
warship_db = warships_db[index]
assert warship.ship_id == warship_db.ship_id
# if warship from api differs from warship in db, update
if warship != warship_db:
self.wowsdb.update_warship(warship)
self.logger.debug('Warships updated.')
def update_shipparams(self):
"""
Update shipparameters table in database.
"""
self.logger.debug('Updating shipparams in database.')
ship_ids = self.wowsdb.get_ship_ids()
for ship_id in ship_ids:
param = self.wowsapi.get_ship_profile(ship_id[0])
self.wowsdb.update_shipparam(param)
self.logger.debug('Ship parameters updated.')
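# Usage sketch (hypothetical API key and database path):
#   wows = WorldofWarships(key="YOUR_WG_API_KEY", db_path="wows.db")
#   wows.update()  # no-op when the stored version already matches the API version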
|
py | 1a54c4be4ad01af9b9b43246d87e15e3f305aa7b | import graphene
from graphene_django.types import DjangoObjectType
from .models import Category, Ingredient
class CategoryType(DjangoObjectType):
class Meta:
model = Category
class IngredientType(DjangoObjectType):
class Meta:
model = Ingredient
class Query(object):
category = graphene.Field(CategoryType, id=graphene.Int(), name=graphene.String())
all_categories = graphene.List(CategoryType)
ingredient = graphene.Field(
IngredientType, id=graphene.Int(), name=graphene.String()
)
all_ingredients = graphene.List(IngredientType)
def resolve_all_categories(self, context):
return Category.objects.all()
def resolve_all_ingredients(self, context):
# We can easily optimize query count in the resolve method
return Ingredient.objects.select_related("category").all()
def resolve_category(self, context, id=None, name=None):
if id is not None:
return Category.objects.get(pk=id)
if name is not None:
return Category.objects.get(name=name)
return None
def resolve_ingredient(self, context, id=None, name=None):
if id is not None:
return Ingredient.objects.get(pk=id)
if name is not None:
return Ingredient.objects.get(name=name)
return None
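# Example query these resolvers can serve (illustrative; assumes a schema is
# built elsewhere, e.g. schema = graphene.Schema(query=Query)):
#   query {
#     allCategories { name }
#     ingredient(name: "Milk") { name category { name } }
#   }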
|
py | 1a54c4e229f240d3771c56695e8ec412dab9c50f | """Helpers."""
import inspect
from collections.abc import Iterable, Mapping
from typing import Optional, Tuple, List, Iterable as IterableType
from aiohttp import web
from mimeparse import parse_media_range, _filter_blank
from .abc.field import FieldABC
from .fields.decorators import Tag
from .typings import Callee, MimeTypeComponents, QFParsed
from .common import JSONAPI
def is_generator(obj):
"""Return True if ``obj`` is a generator."""
return inspect.isgeneratorfunction(obj) or inspect.isgenerator(obj)
def is_iterable_but_not_string(obj):
"""Return True if ``obj`` is an iterable object that isn't a string."""
return (
(isinstance(obj, Iterable) and not hasattr(obj, "strip")) or
is_generator(obj)
)
def is_indexable_but_not_string(obj):
"""Return True if ``obj`` is indexable but isn't a string."""
return not hasattr(obj, "strip") and hasattr(obj, "__getitem__")
def is_collection(obj, exclude=()):
"""Return True if ``obj`` is a collection type."""
return (not isinstance(obj, (Mapping,) + exclude) and
is_iterable_but_not_string(obj))
def ensure_collection(value, exclude=()):
"""Ensure value is collection."""
return value if is_collection(value, exclude=exclude) else (value,)
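# Illustrative behaviour of the collection helpers above (hypothetical values):
#   is_collection([1, 2])     -> True
#   is_collection("abc")      -> False  (strings are iterable but excluded)
#   ensure_collection(42)     -> (42,)
#   ensure_collection([1, 2]) -> [1, 2]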
def first(iterable, default=None, key=None):
"""
Return first element of *iterable*.
Return first element of *iterable* that evaluates to ``True``, else
return ``None`` or optional *default*.
>>> first([0, False, None, [], (), 42])
42
>>> first([0, False, None, [], ()]) is None
True
>>> first([0, False, None, [], ()], default='ohai')
'ohai'
>>> import re
>>> m = first(re.match(regex, 'abc') for regex in ['b.*', 'a(.*)'])
>>> m.group(1)
'bc'
The optional *key* argument specifies a one-argument predicate function
like that used for *filter()*. The *key* argument, if supplied, should be
in keyword form. For example, finding the first even number in an iterable:
>>> first([1, 1, 3, 4, 5], key=lambda x: x % 2 == 0)
4
Contributed by Hynek Schlawack, author of `the original standalone module`_
.. _the original standalone module: https://github.com/hynek/first
"""
return next(filter(key, iterable), default)
def make_sentinel(name='_MISSING', var_name=None):
"""
Create sentinel instance.
Creates and returns a new **instance** of a new class, suitable for
usage as a "sentinel", a kind of singleton often used to indicate
a value is missing when ``None`` is a valid input.
>>> make_sentinel(var_name='_MISSING')
_MISSING
The most common use cases in this project are as default values
for optional function arguments, partly because of its
less-confusing appearance in automatically generated
documentation. Sentinels also function well as placeholders in queues
and linked lists.
.. note::
By design, additional calls to ``make_sentinel`` with the same
values will not produce equivalent objects.
>>> make_sentinel('TEST') == make_sentinel('TEST')
False
>>> type(make_sentinel('TEST')) == type(make_sentinel('TEST'))
False
:arg str name:
Name of the Sentinel
:arg str var_name:
Set this name to the name of the variable in its respective
module enable pickleability.
"""
class Sentinel(object):
def __init__(self):
self.name = name
self.var_name = var_name
def __repr__(self):
if self.var_name:
return self.var_name
return '%s(%r)' % (self.__class__.__name__, self.name)
if var_name:
def __reduce__(self):
return self.var_name
def __nonzero__(self):
return False
__bool__ = __nonzero__
return Sentinel()
def get_router_resource(app: web.Application, resource: str):
"""Return route of JSON API application for resource."""
return app.router[f"{app[JSONAPI]['routes_namespace']}.{resource}"]
def get_processors(obj, tag: Tag, field: FieldABC,
default: Optional[Callee] = None):
has_processors = getattr(obj, '_has_processors', False)
if has_processors:
processor_tag = tag, field.key
processors = obj.__processors__.get(processor_tag)
if processors:
for processor_name in processors:
processor = getattr(obj, processor_name)
processor_kwargs = \
processor.__processing_kwargs__.get(processor_tag)
yield processor, processor_kwargs
return
if not callable(default):
return
yield default, {}
def quality_and_fitness_parsed(mime_type: str,
parsed_ranges: List[MimeTypeComponents]
) -> QFParsed:
"""Find the best match for a mime-type amongst parsed media-ranges.
Find the best match for a given mime-type against a list of media_ranges
that have already been parsed by parse_media_range(). Returns a tuple of
the fitness value and the value of the 'q' quality parameter of the best
match, or (-1, 0) if no match was found. Just as for quality_parsed(),
'parsed_ranges' must be a list of parsed media ranges.
Cherry-picked from python-mimeparse and improved.
"""
best_fitness = -1
best_fit_q = 0
(target_type, target_subtype, target_params) = parse_media_range(mime_type)
best_matched = None
for (type, subtype, params) in parsed_ranges:
# check if the type and the subtype match
type_match = (
type in (target_type, '*') or
target_type == '*'
)
subtype_match = (
subtype in (target_subtype, '*') or
target_subtype == '*'
)
# if they do, assess the "fitness" of this mime_type
if type_match and subtype_match:
# 100 points if the type matches w/o a wildcard
fitness = type == target_type and 100 or 0
# 10 points if the subtype matches w/o a wildcard
fitness += subtype == target_subtype and 10 or 0
# 1 bonus point for each matching param besides "q"
param_matches = sum([
1 for (key, value) in target_params.items()
if key != 'q' and key in params and value == params[key]
])
fitness += param_matches
# finally, add the target's "q" param (between 0 and 1)
fitness += float(target_params.get('q', 1))
if fitness > best_fitness:
best_fitness = fitness
best_fit_q = params['q']
best_matched = (type, subtype, params)
return (float(best_fit_q), best_fitness), best_matched
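# Worked example of the scoring above (hypothetical header; the target's q
# defaults to 1):
#   quality_and_fitness_parsed("text/html", [parse_media_range("text/*;q=0.5")])
#   -> ((0.5, 101.0), ('text', '*', {'q': '0.5'}))
#   fitness = 100 (type matches without wildcard) + 0 (subtype is a wildcard)
#             + 0 (no extra matching params) + 1.0 (target's q); the quality
#             value comes from the matched range's q=0.5.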
def best_match(supported: IterableType[str],
header: str) -> Tuple[str, Optional[MimeTypeComponents]]:
"""Return mime-type with the highest quality ('q') from list of candidates.
Takes a list of supported mime-types and finds the best match for all the
media-ranges listed in header. The value of header must be a string that
conforms to the format of the HTTP Accept: header. The value of 'supported'
is a list of mime-types. The list of supported mime-types should be sorted
in order of increasing desirability, in case of a situation where there is
a tie.
Cherry-picked from python-mimeparse and improved.
>>> best_match(['application/xbel+xml', 'text/xml'],
'text/*;q=0.5,*/*; q=0.1')
('text/xml', ('text', '*', {'q': '0.5'}))
"""
split_header = _filter_blank(header.split(','))
parsed_header = [parse_media_range(r) for r in split_header]
weighted_matches = {}
for i, mime_type in enumerate(supported):
weight, match = quality_and_fitness_parsed(mime_type, parsed_header)
weighted_matches[(weight, i)] = (mime_type, match)
best = max(weighted_matches.keys())
return best[0][0] and weighted_matches[best] or ('', None)
def get_mime_type_params(mime_type: MimeTypeComponents):
return {k: v for k, v in mime_type[2].items() if k != 'q'}
MISSING = make_sentinel()
|
py | 1a54c60fdfd8858d9a2ae94a2daa9e37f4db22cf | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type   : Database system
Case Name   : Concurrency test case for setObject with the LocalDateTime type
Description :
    1. Write the configuration file
    2. Compile the Java tool
    3. Create the table
    4. Run the Java script concurrently
    5. Query the results
    6. Repeat steps 4-5 50 times
Expect :
History :
"""
import unittest
import os
import datetime
import time
from yat.test import Node
from yat.test import macro
from testcase.utils.Logger import Logger
from testcase.utils.Common import Common
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.ComThread import ComThread
class Jdbcisreadonly(unittest.TestCase):
def setUp(self):
self.log = Logger()
self.db_primary_user_node = Node(node='PrimaryDbUser')
self.db_primary_root_node = Node(node='PrimaryRoot')
self.log.info("-----------this is setup-----------")
self.log.info("Opengauss_Function_JDBC_Set_Get_Object_Case0018 start")
self.targetpath = "/home/jdbc_test"
self.properties = os.path.join(self.targetpath,
"jdbc_case0001.properties")
self.sql_path = os.path.join(self.targetpath, "jdbc_set_get_object")
self.java_name = "jdbc_set_get_object_case0018"
self.tb_name = "jdbc_set_get_object_case0018"
self.common = Common()
self.constant = Constant()
self.commonshpri = CommonSH('PrimaryDbUser')
def test_index(self):
self.log.info('--------1. Write the configuration file-------')
self.common.scp_file(self.db_primary_root_node,
f"{self.java_name}.java", self.targetpath)
result = self.db_primary_root_node.sh(
f"touch {self.properties}").result()
self.log.info(result)
config = f'echo "password=' \
f'{self.db_primary_user_node.db_password}"> {self.properties}'
self.db_primary_root_node.sh(config)
config = f'echo "port={self.db_primary_user_node.db_port}">> ' \
f'{self.properties}'
self.db_primary_root_node.sh(config)
config = f'echo "hostname={self.db_primary_user_node.db_host}">> ' \
f'{self.properties}'
self.db_primary_root_node.sh(config)
config = f'echo "user={self.db_primary_user_node.db_user}">> ' \
f'{self.properties}'
self.db_primary_root_node.sh(config)
config = f'echo "dbname={self.db_primary_user_node.db_name}">> ' \
f'{self.properties}'
self.db_primary_root_node.sh(config)
config = f'echo "stringtype=unspecified">> {self.properties}'
self.db_primary_root_node.sh(config)
config = f'cat {self.properties}'
result = self.db_primary_root_node.sh(config).result()
self.assertTrue("password=" in result and "port=" in result
and "hostname=" in result and "user=" in result
and "dbname=" in result)
self.log.info('--------------2. Compile the Java tool------------------')
self.db_primary_root_node.scp_put(macro.JDBC_PATH,
f"{self.targetpath}/postgresql.jar")
cmd = f"javac -encoding utf-8 -cp " \
f"{os.path.join(self.targetpath, 'postgresql.jar')} " \
f"{os.path.join(self.targetpath, f'{self.java_name}.java')}"
self.log.info(cmd)
result = self.db_primary_root_node.sh(cmd).result()
self.log.info(result)
self.log.info("---------------3.创建表----------------------")
cmd = f"drop table if exists {self.tb_name};" \
f"create table {self.tb_name}(t_time timestamp);"
result = self.commonshpri.execut_db_sql(cmd)
self.log.info(result)
self.assertIn(self.constant.CREATE_TABLE_SUCCESS, result)
self.log.info("-------------4.运行java工具---------------------")
for index in range(50):
self.log.info(f"======round {index}========")
today = self.db_primary_root_node.sh(
"date '+%Y-%m-%d 00:00:00'").result()
yesterday = (datetime.datetime.strptime(
today, '%Y-%m-%d 00:00:00') - datetime.timedelta(days=+1)
).strftime('%Y-%m-%d 00:00:00')
tomorrow = (datetime.datetime.strptime(
today, '%Y-%m-%d 00:00:00') - datetime.timedelta(days=-1)
).strftime('%Y-%m-%d 00:00:00')
self.log.info(f"today is {today}, tomorrow is "
f"{tomorrow}, yesterday is {yesterday}")
cmd = f" java -cp " \
f"{os.path.join(self.targetpath, 'postgresql.jar')}" \
f":{self.targetpath} " \
f"{self.java_name} -F {self.properties}"
self.log.info(cmd)
insert_thread = []
for i in range(9):
insert_thread.append(ComThread(
self.common.get_sh_result,
args=(self.db_primary_root_node, cmd)))
insert_thread[i].setDaemon(True)
insert_thread[i].start()
time.sleep(2)
for i in range(9):
insert_thread[i].join(30)
result = insert_thread[i].get_result()
self.assertNotIn('error', result)
cmd = f"select * from {self.tb_name} order by 1 desc;"
insert_result = self.commonshpri.execut_db_sql(cmd)
self.log.info(insert_result)
self.assertIn("(126 rows)", insert_result)
self.assertEqual(insert_result.count('infinity'), 36)
self.assertEqual(insert_result.count('-infinity'), 18)
self.assertEqual(insert_result.count('1970-01-01 00:00:00'), 9)
self.assertEqual(insert_result.count('2020-02-29 23:59:59'), 18)
result_time = self.db_primary_root_node.sh(
"date '+%Y-%m-%d %H:%M:%S'").result()
self.log.info(result_time)
now = []
now.append((datetime.datetime.strptime(
result_time, '%Y-%m-%d %H:%M:%S') -
datetime.timedelta(minutes=1)
).strftime('%Y-%m-%d %H:%M'))
now.append((datetime.datetime.strptime(
result_time, '%Y-%m-%d %H:%M:%S') -
datetime.timedelta(minutes=-1)
).strftime('%Y-%m-%d %H:%M'))
now.append((datetime.datetime.strptime(
result_time, '%Y-%m-%d %H:%M:%S')).strftime('%Y-%m-%d %H:%M'))
self.log.info(f"now is {now}")
self.assertTrue((insert_result.count(now[0]) +
insert_result.count(now[1]) +
insert_result.count(now[2])) >= 27)
self.assertGreaterEqual(insert_result.count(tomorrow), 9)
self.assertGreaterEqual(insert_result.count(yesterday), 9)
self.assertGreaterEqual(insert_result.count(today), 9)
for line in range(2, 10):
self.assertEqual(' ', insert_result.splitlines()[line])
cmd = f"delete from {self.tb_name};"
result = self.commonshpri.execut_db_sql(cmd)
self.log.info(result)
def tearDown(self):
self.log.info('------------this is tearDown-------------')
self.log.info('------------------Clean up the environment-------------')
cmd = f"drop table if exists {self.tb_name};"
result = self.commonshpri.execut_db_sql(cmd)
self.log.info(result)
cmd = f"rm -rf {self.targetpath}"
self.log.info(cmd)
self.db_primary_root_node.sh(cmd)
self.log.info("-Opengauss_Function_JDBC_Set_Get_Object_Case0018 end-")
|
py | 1a54c64202257df6a68b8db4f7924b5fe6d9adfc | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in math_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import object_identity
def _safe_shape_div(x, y):
"""Divides `x / y` assuming `x, y >= 0`, treating `0 / 0 = 0`."""
return x // math_ops.maximum(y, 1)
@ops.RegisterGradient("ArgMax")
def _ArgMaxGrad(op, grad):
del op, grad
return [None, None]
@ops.RegisterGradient("ArgMin")
def _ArgMinGrad(op, grad):
del op, grad
return [None, None]
# TODO(rmlarsen): Implement gradient.
ops.NotDifferentiable("EuclideanNorm")
def SmartBroadcastGradientArgs(x, y, grad):
"""Optimized version of `broadcast_gradient_args` that caches results.
This implementation avoids creating `broadcast_gradient_args` ops in the case
that the input shapes are fully defined, and provides hints to the calling
code that can be used to avoid creating reduction and reshaping ops.
Args:
x: The left input tensor to a broadcasting binary op.
y: The right input tensor to a broadcasting binary op.
grad: The incoming gradient tensor for a broadcasting binary op.
Returns:
A pair of tuples, containing:
* A 3-tuple of broadcast information for x, containing:
* The shape of x (as a tuple or Tensor).
* The reduction indices for x (as a tuple or Tensor).
* A boolean, which if True, indicates that x's shape differs from grad's
shape (and so x's gradient must be reduced and/or reshaped).
* A 3-tuple of broadcast information for y, containing the respective
details for y.
"""
# NOTE: It may be productive to apply these optimizations in the eager case
# as well.
if context.executing_eagerly() or not (
isinstance(x, ops.Tensor) and isinstance(y, ops.Tensor)
and isinstance(grad, ops.Tensor)):
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
return (sx, rx, True), (sy, ry, True)
# pylint: disable=protected-access
x_shape_tuple = x._shape_tuple()
y_shape_tuple = y._shape_tuple()
grad_shape_tuple = grad._shape_tuple()
# pylint: enable=protected-access
if (x_shape_tuple is None or None in x_shape_tuple or
y_shape_tuple is None or None in y_shape_tuple):
sx = array_ops.shape_internal(x, optimize=False)
sy = array_ops.shape_internal(y, optimize=False)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
return (sx, rx, True), (sy, ry, True)
x_needs_reduction = x_shape_tuple != grad_shape_tuple
y_needs_reduction = y_shape_tuple != grad_shape_tuple
# Get the default graph rather than relying on `x.graph`, `y.graph`, or
# `grad.graph`, because these may be eager tensors.
g = ops.get_default_graph()
try:
rx, ry = g._bcast_grad_args_cache[(x_shape_tuple, y_shape_tuple)] # pylint: disable=protected-access
return (x_shape_tuple, rx, x_needs_reduction), (
y_shape_tuple, ry, y_needs_reduction)
except KeyError:
rx, ry = array_ops.broadcast_gradient_args(x_shape_tuple, y_shape_tuple)
# TODO(mrry): If this becomes a bottleneck, add a multi-output version of
# `TF_TryEvaluateConstant()`.
rx_value = tuple(c_api.TF_TryEvaluateConstant_wrapper(
rx.graph._c_graph, rx._as_tf_output())) # pylint: disable=protected-access
assert rx_value is not None
ry_value = tuple(c_api.TF_TryEvaluateConstant_wrapper(
ry.graph._c_graph, ry._as_tf_output())) # pylint: disable=protected-access
assert ry_value is not None
g._bcast_grad_args_cache[(x_shape_tuple, y_shape_tuple)] = ( # pylint: disable=protected-access
rx_value, ry_value)
return (x_shape_tuple, rx_value, x_needs_reduction), (
y_shape_tuple, ry_value, y_needs_reduction)
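# Illustrative case (hypothetical static shapes): for x with shape (3, 1) and
# y with shape (1, 4) broadcast against a (3, 4) gradient, the cached reduction
# indices are rx = (1,) and ry = (0,), and both "needs reduction" flags are
# True because neither input shape equals the gradient's shape.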
_empty_tuple = ()
def _IsScalar(x):
return x._shape_tuple() is _empty_tuple # pylint: disable=protected-access
@ops.RegisterGradient("Sum")
def _SumGrad(op, grad):
"""Gradient for Sum."""
# Fast path for when reducing to a scalar and ndims is known: adds only
# Reshape and Tile ops (and possibly a Shape).
input_0_shape = op.inputs[0]._shape_tuple() # pylint: disable=protected-access
if input_0_shape is not None:
axes = tensor_util.constant_value(op.inputs[1])
if axes is not None:
rank = len(input_0_shape)
if np.array_equal(axes, np.arange(rank)): # Reduce all dims.
if context.executing_eagerly():
ctx = context.context()
new_shape = ctx.ones_rank_cache().get(rank)
if new_shape is None:
new_shape = constant_op.constant([1] * rank, dtype=dtypes.int32)
ctx.ones_rank_cache().put(rank, new_shape)
else:
new_shape = [1] * rank
grad = array_ops.reshape(grad, new_shape)
# If shape is not fully defined (but rank is), we use Shape.
if None not in input_0_shape:
input_shape = constant_op.constant(input_0_shape, dtype=dtypes.int32)
else:
input_shape = array_ops.shape(op.inputs[0])
return [array_ops.tile(grad, input_shape), None]
elif None not in input_0_shape and not context.executing_eagerly():
# The shape and reduction indices are statically known, so we use a
# graph-level cache to avoid recomputing `reduced_shape()` for each
# invocation.
graph = ops.get_default_graph()
# Canonicalize `axes` to be a tuple of indices. The incoming
# value may be a scalar or a vector, and may include negative indices.
axes = tuple(axes.reshape(-1))
try:
output_shape_kept_dims, tile_scaling = graph._reduced_shape_cache[ # pylint: disable=protected-access
(input_0_shape, axes)]
except KeyError:
# Compute and cache `output_shape_kept_dims` and `tile_scaling`.
def EvaluateAsTuple(t):
value = c_api.TF_TryEvaluateConstant_wrapper(
t.graph._c_graph, t._as_tf_output()) # pylint: disable=protected-access
assert value is not None
return tuple(value)
output_shape_kept_dims = EvaluateAsTuple(
math_ops.reduced_shape(input_0_shape, axes))
tile_scaling = EvaluateAsTuple(
_safe_shape_div(input_0_shape, output_shape_kept_dims))
graph._reduced_shape_cache[(input_0_shape, axes)] = ( # pylint:disable=protected-access
output_shape_kept_dims, tile_scaling)
grad = array_ops.reshape(grad, output_shape_kept_dims)
return [array_ops.tile(grad, tile_scaling), None]
input_shape = array_ops.shape(op.inputs[0])
# TODO(apassos) remove this once device placement for eager ops makes more
# sense.
with ops.colocate_with(input_shape):
output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
grad = array_ops.reshape(grad, output_shape_kept_dims)
return [array_ops.tile(grad, tile_scaling), None]
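# Illustrative shapes for the general path above: summing a (2, 3) input over
# axis 1 yields output shape (2,); the incoming gradient is reshaped to (2, 1)
# and tiled by (1, 3), so every input element receives its reduction group's
# gradient.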
def _MinOrMaxGrad(op, grad):
"""Gradient for Min or Max. Amazingly it's precisely the same code."""
input_shape = array_ops.shape(op.inputs[0])
output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
y = op.outputs[0]
y = array_ops.reshape(y, output_shape_kept_dims)
grad = array_ops.reshape(grad, output_shape_kept_dims)
# Compute the number of selected (maximum or minimum) elements in each
# reduction dimension. If there are multiple minimum or maximum elements
# then the gradient will be divided between them.
indicators = math_ops.cast(math_ops.equal(y, op.inputs[0]), grad.dtype)
num_selected = array_ops.reshape(
math_ops.reduce_sum(indicators, op.inputs[1]), output_shape_kept_dims)
return [math_ops.divide(indicators, num_selected) * grad, None]
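# Small worked example (hypothetical values): reduce_max([1., 3., 3.]) = 3.;
# both maxima are "selected", so an incoming gradient of 1.0 is split evenly
# as [0., 0.5, 0.5] by the indicators / num_selected division above.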
@ops.RegisterGradient("Max")
def _MaxGrad(op, grad):
"""Gradient for Max."""
return _MinOrMaxGrad(op, grad)
@ops.RegisterGradient("Min")
def _MinGrad(op, grad):
return _MinOrMaxGrad(op, grad)
@ops.RegisterGradient("Mean")
def _MeanGrad(op, grad):
"""Gradient for Mean."""
sum_grad = _SumGrad(op, grad)[0]
input_shape = op.inputs[0]._shape_tuple() # pylint: disable=protected-access
output_shape = op.outputs[0]._shape_tuple() # pylint: disable=protected-access
if (input_shape is not None and output_shape is not None and
None not in input_shape and None not in output_shape):
input_size = np.prod(input_shape)
output_size = np.prod(output_shape)
factor = input_size // max(output_size, 1)
factor = constant_op.constant(factor, dtype=sum_grad.dtype)
else:
input_shape = array_ops.shape(op.inputs[0])
output_shape = array_ops.shape(op.outputs[0])
factor = _safe_shape_div(
math_ops.reduce_prod(input_shape), math_ops.reduce_prod(output_shape))
return math_ops.truediv(sum_grad, math_ops.cast(factor, sum_grad.dtype)), None
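# Illustrative factor (hypothetical shapes): reduce_mean over axis 1 of a
# (2, 3) input gives factor = 6 // 2 = 3, so each input element receives its
# group's gradient divided by 3.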
@ops.RegisterGradient("Prod")
def _ProdGrad(op, grad):
"""Gradient for Prod."""
# The gradient can be expressed by dividing the product by each entry of the
# input tensor, but this approach can't deal with zeros in the input.
# Here, we avoid this problem by composing the output as a product of two
# cumprod operations.
input_shape = array_ops.shape(op.inputs[0])
# Reshape reduction indices for the case where the parameter is a scalar
reduction_indices = array_ops.reshape(op.inputs[1], [-1])
# Expand grad to full input shape
output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
grad = array_ops.reshape(grad, output_shape_kept_dims)
grad = array_ops.tile(grad, tile_scaling)
# Pack all reduced dimensions into a single one, so we can perform the
# cumprod ops. If the reduction dims list is empty, it defaults to float32,
# so we need to cast here. We put all the shape-related ops on CPU to avoid
# copying back and forth, and since listdiff is CPU only.
with ops.device("/cpu:0"):
rank = array_ops.rank(op.inputs[0])
reduction_indices = (reduction_indices + rank) % rank
reduced = math_ops.cast(reduction_indices, dtypes.int32)
idx = math_ops.range(0, rank)
other, _ = array_ops.setdiff1d(idx, reduced)
perm = array_ops.concat([reduced, other], 0)
reduced_num = math_ops.reduce_prod(array_ops.gather(input_shape, reduced))
other_num = math_ops.reduce_prod(array_ops.gather(input_shape, other))
permuted = array_ops.transpose(op.inputs[0], perm)
permuted_shape = array_ops.shape(permuted)
reshaped = array_ops.reshape(permuted, (reduced_num, other_num))
# Calculate product, leaving out the current entry
left = math_ops.cumprod(reshaped, axis=0, exclusive=True)
right = math_ops.cumprod(reshaped, axis=0, exclusive=True, reverse=True)
# For complex inputs, the gradient is in the conjugate direction.
y = array_ops.reshape(
math_ops.conj(left) * math_ops.conj(right), permuted_shape)
# Invert the transpose and reshape operations.
# Make sure to set the statically known shape information through a reshape.
out = grad * array_ops.transpose(y, array_ops.invert_permutation(perm))
return array_ops.reshape(out, input_shape), None
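# Worked example of the cumprod trick (hypothetical row reduced over its only
# axis): for [2., 0., 3.], left = [1., 2., 0.] (exclusive cumprod) and
# right = [0., 3., 1.] (reverse exclusive cumprod), so left * right =
# [0., 6., 0.], which is the per-element derivative of the product even though
# the product itself is zero.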
@ops.RegisterGradient("SegmentSum")
def _SegmentSumGrad(op, grad):
"""Gradient for SegmentSum."""
return array_ops.gather(grad, op.inputs[1]), None
@ops.RegisterGradient("SegmentMean")
def _SegmentMeanGrad(op, grad):
"""Gradient for SegmentMean."""
input_rank = array_ops.rank(op.inputs[0])
ones_shape = array_ops.concat([
array_ops.shape(op.inputs[1]),
array_ops.fill(array_ops.expand_dims(input_rank - 1, 0), 1)
], 0)
ones = array_ops.fill(ones_shape, constant_op.constant(1, dtype=grad.dtype))
scaled_grad = math_ops.divide(grad, math_ops.segment_sum(ones, op.inputs[1]))
return array_ops.gather(scaled_grad, op.inputs[1]), None
@ops.RegisterGradient("SparseSegmentSum")
def _SparseSegmentSumGrad(op, grad):
"""Gradient for SparseSegmentSum."""
input_rows = array_ops.shape(op.inputs[0])[0]
return (math_ops.unsorted_segment_sum(
array_ops.gather(grad, op.inputs[2]), op.inputs[1], input_rows), None,
None)
@ops.RegisterGradient("SparseSegmentSumWithNumSegments")
def _SparseSegmentSumWithNumSegmentsGrad(op, grad):
"""Gradient for SparseSegmentSumWithNumSegments."""
input_rows = array_ops.shape(op.inputs[0])[0]
return (math_ops.unsorted_segment_sum(
array_ops.gather(grad, op.inputs[2]), op.inputs[1], input_rows), None,
None, None)
@ops.RegisterGradient("SparseSegmentMean")
def _SparseSegmentMeanGrad(op, grad):
"""Gradient for SparseSegmentMean."""
dim0 = array_ops.shape(op.inputs[0])[0]
return (math_ops.sparse_segment_mean_grad(grad, op.inputs[1], op.inputs[2],
dim0), None, None)
@ops.RegisterGradient("SparseSegmentMeanWithNumSegments")
def _SparseSegmentMeanWithNumSegmentsGrad(op, grad):
"""Gradient for SparseSegmentMeanWithNumSegments."""
dim0 = array_ops.shape(op.inputs[0])[0]
return (math_ops.sparse_segment_mean_grad(grad, op.inputs[1], op.inputs[2],
dim0), None, None, None)
@ops.RegisterGradient("SparseSegmentSqrtN")
def _SparseSegmentSqrtNGrad(op, grad):
"""Gradient for SparseSegmentSqrtN."""
dim0 = array_ops.shape(op.inputs[0])[0]
return (math_ops.sparse_segment_sqrt_n_grad(grad, op.inputs[1], op.inputs[2],
dim0), None, None)
@ops.RegisterGradient("SparseSegmentSqrtNWithNumSegments")
def _SparseSegmentSqrtNWithNumSegmentsGrad(op, grad):
"""Gradient for SparseSegmentSqrtNWithNumSegments."""
dim0 = array_ops.shape(op.inputs[0])[0]
return (math_ops.sparse_segment_sqrt_n_grad(grad, op.inputs[1], op.inputs[2],
dim0), None, None, None)
def _SegmentMinOrMaxGrad(op, grad):
""" Gradient for SegmentMin and SegmentMax. """
zeros = array_ops.zeros_like(op.inputs[0], dtype=op.inputs[0].dtype)
# Get the number of selected (minimum or maximum) elements in each segment.
gathered_outputs = array_ops.gather(op.outputs[0], op.inputs[1])
is_selected = math_ops.equal(op.inputs[0], gathered_outputs)
num_selected = math_ops.segment_sum(
math_ops.cast(is_selected, grad.dtype), op.inputs[1])
# Compute the gradient for each segment. The gradient for the ith segment is
# divided evenly among the selected elements in that segment.
weighted_grads = math_ops.divide(grad, num_selected)
gathered_grads = array_ops.gather(weighted_grads, op.inputs[1])
return array_ops.where(is_selected, gathered_grads, zeros), None
@ops.RegisterGradient("SegmentMin")
def _SegmentMinGrad(op, grad):
"""Gradient for SegmentMin."""
return _SegmentMinOrMaxGrad(op, grad)
@ops.RegisterGradient("SegmentMax")
def _SegmentMaxGrad(op, grad):
"""Gradient for SegmentMax."""
return _SegmentMinOrMaxGrad(op, grad)
def _GatherDropNegatives(params,
ids,
zero_clipped_indices=None,
is_positive=None):
""" Helper function for unsorted segment ops.
Gathers params for
positive segment ids and gathers 0 for inputs with negative segment id.
Also returns the clipped indices and a boolean mask with the same shape
as ids where a positive id is masked as true. With this, the latter two
can be passed as arguments to this function to reuse them.
"""
if zero_clipped_indices is None:
zero_clipped_indices = math_ops.maximum(ids, array_ops.zeros_like(ids))
gathered = array_ops.gather(params, zero_clipped_indices)
if is_positive is None:
is_positive = math_ops.greater_equal(ids, 0)
# tf.where(condition, x, y) requires condition to have the same shape as x
# and y.
# todo(philjd): remove this if tf.where supports broadcasting (#9284)
for _ in range(gathered.shape.ndims - is_positive.shape.ndims):
is_positive = array_ops.expand_dims(is_positive, -1)
is_positive = (
is_positive & array_ops.ones_like(gathered, dtype=dtypes.bool))
# replace gathered params of negative indices with 0
zero_slice = array_ops.zeros_like(gathered)
return (array_ops.where(is_positive, gathered, zero_slice),
zero_clipped_indices, is_positive)
def _UnsortedSegmentMinOrMaxGrad(op, grad):
""" Gradient for UnsortedSegmentMin and UnsortedSegmentMax. """
# Get the number of selected (minimum or maximum) elements in each segment.
gathered_outputs, zero_clipped_indices, is_positive = \
_GatherDropNegatives(op.outputs[0], op.inputs[1])
is_selected = math_ops.equal(op.inputs[0], gathered_outputs)
is_selected = math_ops.logical_and(is_selected, is_positive)
num_selected = math_ops.unsorted_segment_sum(
math_ops.cast(is_selected, grad.dtype), op.inputs[1], op.inputs[2])
# Compute the gradient for each segment. The gradient for the ith segment is
# divided evenly among the selected elements in that segment.
weighted_grads = math_ops.divide(grad, num_selected)
gathered_grads, _, _ = _GatherDropNegatives(weighted_grads, None,
zero_clipped_indices, is_positive)
zeros = array_ops.zeros_like(gathered_grads)
return array_ops.where(is_selected, gathered_grads, zeros), None, None
@ops.RegisterGradient("UnsortedSegmentSum")
def _UnsortedSegmentSumGrad(op, grad):
"""Gradient for UnsortedSegmentSum."""
return _GatherDropNegatives(grad, op.inputs[1])[0], None, None
@ops.RegisterGradient("UnsortedSegmentMax")
def _UnsortedSegmentMaxGrad(op, grad):
""" Gradient for UnsortedSegmentMax. """
return _UnsortedSegmentMinOrMaxGrad(op, grad)
@ops.RegisterGradient("UnsortedSegmentMin")
def _UnsortedSegmentMinGrad(op, grad):
""" Gradient for UnsortedSegmentMin. """
return _UnsortedSegmentMinOrMaxGrad(op, grad)
@ops.RegisterGradient("UnsortedSegmentProd")
def _UnsortedSegmentProdGrad(op, grad):
""" Gradient for UnsortedSegmentProd.
The gradient can be expressed for each segment by dividing the segment's
product by each element of the segment input tensor, but this approach can't
deal with zeros in the input.
Unlike reduce_prod we can't use cumsum here as individual segments may have
a different number of elements. Therefore we consider three cases:
1) A segment input contains no zeros and we can safely divide by the input
tensor.
2) A segment contains exactly one zero. Then the gradient of each input of
the segment is zero except for the 0-input, there the gradient is
the product of the remaining segment entries.
3) A segment contains at least two zeros. The gradient is zero for all
segment inputs.
"""
# Note that unsorted_segment_sum will filter out the negative indices,
# so we don't need to do a logical_and with is_positive here
is_zero = math_ops.equal(op.inputs[0], 0)
num_zeros = gen_math_ops.unsorted_segment_sum(
math_ops.cast(is_zero, dtype=dtypes.int32), op.inputs[1], op.inputs[2])
# handle case 3 and set the gradient to 0 for segments with more than one
# 0 as input
grad = array_ops.where(
math_ops.greater(num_zeros, 1), array_ops.zeros_like(grad), grad)
# replace all zeros with ones and compute the unsorted_segment_prod
non_zero_data = array_ops.where(is_zero, array_ops.ones_like(op.inputs[0]),
op.inputs[0])
non_zero_prod = gen_math_ops.unsorted_segment_prod(non_zero_data,
op.inputs[1], op.inputs[2])
# clip the indices for gather to be positive
zero_clipped_indices = math_ops.maximum(op.inputs[1],
array_ops.zeros_like(op.inputs[1]))
gathered_prod = array_ops.gather(op.outputs[0], zero_clipped_indices)
gathered_non_zero_prod = array_ops.gather(non_zero_prod, zero_clipped_indices)
prod_divided_by_el = gathered_prod / op.inputs[0] # May contain nan/inf.
# Now fetch the individual results for segments containing 0 and those that
# don't. is_zero will also fetch results for entries with negative index
# but the following gather_drop_negatives sets the corresponding entry in
# grad to 0 for these
partial_derivative = array_ops.where(is_zero, gathered_non_zero_prod,
prod_divided_by_el)
gathered_grad = _GatherDropNegatives(grad, op.inputs[1],
zero_clipped_indices)[0]
return gathered_grad * partial_derivative, None, None
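# Illustrative segment (hypothetical values): with data = [2., 0., 3.] in a
# single segment the output is 0., so case (2) applies: only the zero entry
# gets a nonzero gradient, equal to the product of the remaining entries
# (2. * 3. = 6.) times the incoming gradient.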
@ops.RegisterGradient("Abs")
def _AbsGrad(op, grad):
x = op.inputs[0]
return grad * math_ops.sign(x)
@ops.RegisterGradient("Neg")
def _NegGrad(_, grad):
"""Returns -grad."""
return -grad
@ops.RegisterGradient("Inv")
def _InvGrad(op, grad):
"""Returns -grad * (1 / x^2)."""
y = op.outputs[0] # y = 1 / x
return gen_math_ops.reciprocal_grad(y, grad)
@ops.RegisterGradient("Reciprocal")
def _ReciprocalGrad(op, grad):
"""Returns -grad * (1 / x^2)."""
y = op.outputs[0] # y = 1 / x
return gen_math_ops.reciprocal_grad(y, grad)
@ops.RegisterGradient("InvGrad")
def _InvGradGrad(op, grad):
b = op.inputs[1]
# op.output[0]: y = -b * conj(a)^2
with ops.control_dependencies([grad]):
ca = math_ops.conj(op.inputs[0])
cg = math_ops.conj(grad)
return cg * -2.0 * b * ca, gen_math_ops.reciprocal_grad(ca, grad)
@ops.RegisterGradient("ReciprocalGrad")
def _ReciprocalGradGrad(op, grad):
b = op.inputs[1]
# op.output[0]: y = -b * conj(a)^2
with ops.control_dependencies([grad]):
ca = math_ops.conj(op.inputs[0])
cg = math_ops.conj(grad)
return cg * -2.0 * b * ca, gen_math_ops.reciprocal_grad(ca, grad)
@ops.RegisterGradient("Square")
def _SquareGrad(op, grad):
x = op.inputs[0]
# Added control dependencies to prevent 2*x from being computed too early.
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
y = constant_op.constant(2.0, dtype=x.dtype)
return math_ops.multiply(grad, math_ops.multiply(x, y))
@ops.RegisterGradient("Sqrt")
def _SqrtGrad(op, grad):
y = op.outputs[0] # y = x^(1/2)
return gen_math_ops.sqrt_grad(y, grad)
@ops.RegisterGradient("SqrtGrad")
def _SqrtGradGrad(op, grad):
a = op.inputs[0]
y = op.outputs[0] # y = 0.5 * b / conj(a)
with ops.control_dependencies([grad]):
if compat.forward_compatible(2019, 9, 14):
ga = gen_math_ops.xdivy(grad, a)
return -gen_math_ops.mul_no_nan(y, math_ops.conj(ga)), 0.5 * ga
else:
ga = grad / a
return -math_ops.conj(ga) * y, 0.5 * ga
@ops.RegisterGradient("Rsqrt")
def _RsqrtGrad(op, grad):
"""Returns -0.5 * grad * conj(y)^3."""
y = op.outputs[0] # y = x^(-1/2)
return gen_math_ops.rsqrt_grad(y, grad)
@ops.RegisterGradient("RsqrtGrad")
def _RsqrtGradGrad(op, grad):
"""Returns backprop gradient for f(a,b) = -0.5 * b * conj(a)^3."""
a = op.inputs[0] # a = x^{-1/2}
b = op.inputs[1] # backprop gradient for a
with ops.control_dependencies([grad]):
ca = math_ops.conj(a)
cg = math_ops.conj(grad)
grad_a = -1.5 * cg * b * math_ops.square(ca)
grad_b = gen_math_ops.rsqrt_grad(ca, grad)
return grad_a, grad_b
@ops.RegisterGradient("Exp")
def _ExpGrad(op, grad):
"""Returns grad * exp(x)."""
y = op.outputs[0] # y = e^x
with ops.control_dependencies([grad]):
y = math_ops.conj(y)
if compat.forward_compatible(2019, 9, 14):
return math_ops.mul_no_nan(y, grad)
else:
return grad * y
@ops.RegisterGradient("Expm1")
def _Expm1Grad(op, grad):
"""Returns grad * exp(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
y = math_ops.exp(x)
if compat.forward_compatible(2019, 9, 14):
return math_ops.mul_no_nan(y, grad)
else:
return grad * y
@ops.RegisterGradient("Log")
def _LogGrad(op, grad):
"""Returns grad * (1/x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
if compat.forward_compatible(2019, 9, 14):
return gen_math_ops.xdivy(grad, x)
else:
return grad * math_ops.reciprocal(x)
@ops.RegisterGradient("Log1p")
def _Log1pGrad(op, grad):
"""Returns grad * (1/(1 + x))."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
if compat.forward_compatible(2019, 9, 14):
return gen_math_ops.xdivy(grad, 1 + x)
else:
return grad * math_ops.reciprocal(1 + x)
@ops.RegisterGradient("Xlogy")
def _XLogyGrad(op, grad):
"""Returns gradient of xlogy(x, y) with respect to x and y."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
with ops.control_dependencies([grad]):
not_zero_x = math_ops.cast(
math_ops.not_equal(x, math_ops.cast(0., dtype=x.dtype)), dtype=x.dtype)
partial_x = gen_math_ops.xlogy(not_zero_x, y)
partial_y = gen_math_ops.xdivy(x, y)
return (array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx),
array_ops.reshape(math_ops.reduce_sum(partial_y * grad, ry), sy))
@ops.RegisterGradient("Xdivy")
def _XDivyGrad(op, grad):
"""Returns gradient of xdivy(x, y) with respect to x and y."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
with ops.control_dependencies([grad]):
not_zero_x = math_ops.cast(
math_ops.not_equal(x, math_ops.cast(0., dtype=x.dtype)), dtype=x.dtype)
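    # d/dx xdivy(x, y) = 1 / y for x != 0 (and 0 at x == 0); d/dy xdivy(x, y) = -x / y^2.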
partial_x = gen_math_ops.xdivy(not_zero_x, y)
partial_y = gen_math_ops.xdivy(math_ops.negative(x), y**2)
return (array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx),
array_ops.reshape(math_ops.reduce_sum(partial_y * grad, ry), sy))
@ops.RegisterGradient("Sinh")
def _SinhGrad(op, grad):
"""Returns grad * cosh(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
return grad * math_ops.cosh(x)
@ops.RegisterGradient("Cosh")
def _CoshGrad(op, grad):
"""Returns grad * sinh(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
return grad * math_ops.sinh(x)
@ops.RegisterGradient("Tanh")
def _TanhGrad(op, grad):
"""Returns grad * (1 - tanh(x) * tanh(x))."""
y = op.outputs[0] # y = tanh(x)
with ops.control_dependencies([grad]):
y = math_ops.conj(y)
return gen_math_ops.tanh_grad(y, grad)
@ops.RegisterGradient("Asinh")
def _AsinhGrad(op, grad):
"""Returns grad * 1/cosh(y)."""
y = op.outputs[0]
with ops.control_dependencies([grad]):
y = math_ops.conj(y)
return grad / math_ops.cosh(y)
@ops.RegisterGradient("Acosh")
def _AcoshGrad(op, grad):
"""Returns grad * 1/sinh(y)."""
y = op.outputs[0]
with ops.control_dependencies([grad]):
y = math_ops.conj(y)
if compat.forward_compatible(2019, 9, 14):
return math_ops.xdivy(grad, math_ops.sinh(y))
else:
return grad / math_ops.sinh(y)
@ops.RegisterGradient("Atanh")
def _AtanhGrad(op, grad):
"""Returns grad * 1/ (1 - x^2)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
x2 = math_ops.square(x)
one = constant_op.constant(1, dtype=grad.dtype)
inv = math_ops.reciprocal(math_ops.subtract(one, x2))
return grad * inv
@ops.RegisterGradient("TanhGrad")
def _TanhGradGrad(op, grad):
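  """Returns backprop gradients for TanhGrad(a, b) = b * (1 - a^2) w.r.t. a and b."""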
with ops.control_dependencies([grad]):
a = math_ops.conj(op.inputs[0])
b = math_ops.conj(op.inputs[1])
return grad * -2.0 * b * a, gen_math_ops.tanh_grad(a, grad)
@ops.RegisterGradient("Erf")
def _ErfGrad(op, grad):
"""Returns grad * 2/sqrt(pi) * exp(-x**2)."""
x = op.inputs[0]
two_over_root_pi = constant_op.constant(2 / np.sqrt(np.pi), dtype=grad.dtype)
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
return grad * two_over_root_pi * math_ops.exp(-math_ops.square(x))
@ops.RegisterGradient("Erfc")
def _ErfcGrad(op, grad):
"""Returns -grad * 2/sqrt(pi) * exp(-x**2)."""
x = op.inputs[0]
minus_two_over_root_pi = constant_op.constant(
-2 / np.sqrt(np.pi), dtype=grad.dtype)
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
return grad * minus_two_over_root_pi * math_ops.exp(-math_ops.square(x))
@ops.RegisterGradient("Lgamma")
def _LgammaGrad(op, grad):
"""Returns grad * digamma(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
if compat.forward_compatible(2019, 9, 14):
return math_ops.mul_no_nan(math_ops.digamma(x), grad)
else:
return grad * math_ops.digamma(x)
@ops.RegisterGradient("Digamma")
def _DigammaGrad(op, grad):
"""Compute gradient of the digamma function with respect to its argument."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
partial_x = math_ops.polygamma(array_ops.constant(1, dtype=x.dtype), x)
if compat.forward_compatible(2019, 9, 14):
return math_ops.mul_no_nan(partial_x, grad)
else:
return grad * partial_x
@ops.RegisterGradient("BesselI0e")
def _BesselI0eGrad(op, grad):
"""Compute gradient of bessel_i0e(x) with respect to its argument."""
x = op.inputs[0]
y = op.outputs[0]
with ops.control_dependencies([grad]):
partial_x = (math_ops.bessel_i1e(x) - math_ops.sign(x) * y)
if compat.forward_compatible(2019, 9, 14):
return math_ops.mul_no_nan(partial_x, grad)
else:
return grad * partial_x
@ops.RegisterGradient("BesselI1e")
def _BesselI1eGrad(op, grad):
"""Compute gradient of bessel_i1e(x) with respect to its argument."""
x = op.inputs[0]
y = op.outputs[0]
with ops.control_dependencies([grad]):
# For x = 0, the correct gradient is 0.5.
# However, the main branch gives NaN because of the division by x, so
# we impute the gradient manually.
# An alternative solution is to express the gradient via bessel_i0e and
# bessel_i2e, but the latter is not yet implemented in Eigen.
eps = np.finfo(x.dtype.as_numpy_dtype).eps
zeros = array_ops.zeros_like(x)
x_is_not_tiny = math_ops.abs(x) > eps
safe_x = array_ops.where(x_is_not_tiny, x, eps + zeros)
dy_dx = math_ops.bessel_i0e(safe_x) - y * (
math_ops.sign(safe_x) + math_ops.reciprocal(safe_x))
dy_dx = array_ops.where(x_is_not_tiny, dy_dx, 0.5 + zeros)
if compat.forward_compatible(2019, 9, 14):
return math_ops.mul_no_nan(dy_dx, grad)
else:
return grad * dy_dx
@ops.RegisterGradient("Igamma")
def _IgammaGrad(op, grad):
"""Returns gradient of igamma(a, x) with respect to a and x."""
a = op.inputs[0]
x = op.inputs[1]
sa = array_ops.shape(a)
sx = array_ops.shape(x)
ra, rx = gen_array_ops.broadcast_gradient_args(sa, sx)
with ops.control_dependencies([grad]):
partial_a = gen_math_ops.igamma_grad_a(a, x)
# Perform operations in log space before summing, because Gamma(a)
# and Gamma'(a) can grow large.
partial_x = math_ops.exp(-x + (a - 1) * math_ops.log(x) -
math_ops.lgamma(a))
if compat.forward_compatible(2019, 9, 14):
return (array_ops.reshape(
math_ops.reduce_sum(math_ops.mul_no_nan(partial_a, grad), ra), sa),
array_ops.reshape(
math_ops.reduce_sum(math_ops.mul_no_nan(partial_x, grad), rx),
sx))
else:
return (array_ops.reshape(math_ops.reduce_sum(partial_a * grad, ra), sa),
array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))
@ops.RegisterGradient("Igammac")
def _IgammacGrad(op, grad):
"""Returns gradient of igammac(a, x) = 1 - igamma(a, x) w.r.t. a and x."""
igamma_grad_a, igamma_grad_x = _IgammaGrad(op, grad)
return (-igamma_grad_a, -igamma_grad_x)
@ops.RegisterGradient("Betainc")
def _BetaincGrad(op, grad):
"""Returns gradient of betainc(a, b, x) with respect to x."""
# TODO(ebrevdo): Perhaps add the derivative w.r.t. a, b
a, b, x = op.inputs
# two cases: x is a scalar and a/b are same-shaped tensors, or vice
  # versa; so it's sufficient to check against shape(a).
sa = array_ops.shape(a)
sx = array_ops.shape(x)
_, rx = gen_array_ops.broadcast_gradient_args(sa, sx)
# Perform operations in log space before summing, because terms
# can grow large.
log_beta = (
gen_math_ops.lgamma(a) + gen_math_ops.lgamma(b) -
gen_math_ops.lgamma(a + b))
partial_x = math_ops.exp((b - 1) * math_ops.log(1 - x) +
(a - 1) * math_ops.log(x) - log_beta)
# TODO(b/36815900): Mark None return values as NotImplemented
if compat.forward_compatible(2019, 9, 14):
return (
None, # da
None, # db
array_ops.reshape(
math_ops.reduce_sum(math_ops.mul_no_nan(partial_x, grad), rx), sx))
else:
return (
None, # da
None, # db
array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))
@ops.RegisterGradient("Zeta")
def _ZetaGrad(op, grad):
"""Returns gradient of zeta(x, q) with respect to x and q."""
# TODO(tillahoffmann): Add derivative with respect to x
x = op.inputs[0]
q = op.inputs[1]
# Broadcast gradients
sx = array_ops.shape(x)
sq = array_ops.shape(q)
unused_rx, rq = gen_array_ops.broadcast_gradient_args(sx, sq)
# Evaluate gradient
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
q = math_ops.conj(q)
partial_q = -x * math_ops.zeta(x + 1, q)
# TODO(b/36815900): Mark None return values as NotImplemented
if compat.forward_compatible(2019, 9, 14):
return (None,
array_ops.reshape(
math_ops.reduce_sum(math_ops.mul_no_nan(partial_q, grad), rq),
sq))
else:
return (None,
array_ops.reshape(math_ops.reduce_sum(partial_q * grad, rq), sq))
@ops.RegisterGradient("Polygamma")
def _PolygammaGrad(op, grad):
"""Returns gradient of psi(n, x) with respect to n and x."""
# TODO(tillahoffmann): Add derivative with respect to n
n = op.inputs[0]
x = op.inputs[1]
# Broadcast gradients
sn = array_ops.shape(n)
sx = array_ops.shape(x)
unused_rn, rx = gen_array_ops.broadcast_gradient_args(sn, sx)
# Evaluate gradient
with ops.control_dependencies([grad]):
n = math_ops.conj(n)
x = math_ops.conj(x)
partial_x = math_ops.polygamma(n + 1, x)
# TODO(b/36815900): Mark None return values as NotImplemented
if compat.forward_compatible(2019, 9, 14):
return (None,
array_ops.reshape(
math_ops.reduce_sum(math_ops.mul_no_nan(partial_x, grad), rx),
sx))
else:
return (None,
array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))
@ops.RegisterGradient("Sigmoid")
def _SigmoidGrad(op, grad):
"""Returns grad * sigmoid(x) * (1 - sigmoid(x))."""
y = op.outputs[0] # y = sigmoid(x)
with ops.control_dependencies([grad]):
y = math_ops.conj(y)
return gen_math_ops.sigmoid_grad(y, grad)
@ops.RegisterGradient("SigmoidGrad")
def _SigmoidGradGrad(op, grad):
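  """Returns backprop gradients for SigmoidGrad(a, b) = b * a * (1 - a) w.r.t. a and b."""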
with ops.control_dependencies([grad]):
a = math_ops.conj(op.inputs[0])
b = math_ops.conj(op.inputs[1])
gb = grad * b
return gb - 2.0 * gb * a, gen_math_ops.sigmoid_grad(a, grad)
@ops.RegisterGradient("Sign")
def _SignGrad(op, _):
"""Returns 0."""
x = op.inputs[0]
return array_ops.zeros(array_ops.shape(x), dtype=x.dtype)
@ops.RegisterGradient("Sin")
def _SinGrad(op, grad):
"""Returns grad * cos(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
return grad * math_ops.cos(x)
@ops.RegisterGradient("Cos")
def _CosGrad(op, grad):
"""Returns grad * -sin(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
return -grad * math_ops.sin(x)
@ops.RegisterGradient("Tan")
def _TanGrad(op, grad):
"""Returns grad * 1/sec^2(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
secx = math_ops.reciprocal(math_ops.cos(x))
secx2 = math_ops.square(secx)
if compat.forward_compatible(2019, 9, 14):
return math_ops.mul_no_nan(secx2, grad)
else:
return secx2 * grad
@ops.RegisterGradient("Asin")
def _AsinGrad(op, grad):
"""Returns grad * 1/sqrt(1-x^2)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
x2 = math_ops.square(x)
one = constant_op.constant(1, dtype=grad.dtype)
den = math_ops.sqrt(math_ops.subtract(one, x2))
if compat.forward_compatible(2019, 9, 14):
return math_ops.xdivy(grad, den)
else:
inv = math_ops.reciprocal(den)
return grad * inv
@ops.RegisterGradient("Acos")
def _AcosGrad(op, grad):
"""Returns grad * -1/sqrt(1-x^2)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
x2 = math_ops.square(x)
one = constant_op.constant(1, dtype=grad.dtype)
den = math_ops.sqrt(math_ops.subtract(one, x2))
if compat.forward_compatible(2019, 9, 14):
return -math_ops.xdivy(grad, den)
else:
inv = math_ops.reciprocal(den)
return -grad * inv
@ops.RegisterGradient("Atan")
def _AtanGrad(op, grad):
"""Returns grad * 1/ (1 + x^2)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
x2 = math_ops.square(x)
one = constant_op.constant(1, dtype=grad.dtype)
inv = math_ops.reciprocal(math_ops.add(one, x2))
return grad * inv
@ops.RegisterGradient("Atan2")
def _Atan2Grad(op, grad):
"""Returns grad * x / (x^2 + y^2), grad * -y / (x^2 + y^2)."""
y = op.inputs[0]
x = op.inputs[1]
with ops.control_dependencies([grad]):
if compat.forward_compatible(2019, 9, 14):
grad_inv = math_ops.xdivy(grad, (math_ops.square(x) + math_ops.square(y)))
else:
grad_inv = grad / (math_ops.square(x) + math_ops.square(y))
return x * grad_inv, -y * grad_inv
@ops.RegisterGradient("AddN")
def _AddNGrad(op, grad):
"""Copies the gradient to all inputs."""
# Not broadcasting.
return [grad] * len(op.inputs)
def _ShapesFullySpecifiedAndEqual(x, y, grad):
# pylint: disable=protected-access
x_shape = x._shape_tuple()
y_shape = y._shape_tuple()
grad_shape = grad._shape_tuple()
# pylint: enable=protected-access
return (x_shape == y_shape and x_shape == grad_shape and
x_shape is not None and None not in x_shape)
@ops.RegisterGradient("Add")
@ops.RegisterGradient("AddV2")
def _AddGrad(op, grad):
"""Gradient for Add."""
y = op.inputs[1]
skip_input_indices = None
try:
skip_input_indices = op.skip_input_indices
if skip_input_indices is not None and 1 in skip_input_indices and _IsScalar(
y):
return grad, None
except AttributeError:
# No gradient skipping, so do the full gradient computation
pass
x = op.inputs[0]
if (isinstance(grad, ops.Tensor) and
_ShapesFullySpecifiedAndEqual(x, y, grad)):
return grad, grad
(sx, rx, must_reduce_x), (sy, ry, must_reduce_y) = (
SmartBroadcastGradientArgs(x, y, grad))
if skip_input_indices is not None and 0 in skip_input_indices:
gx = None
elif not must_reduce_x:
gx = grad
else:
gx = array_ops.reshape(math_ops.reduce_sum(grad, rx), sx)
if skip_input_indices is not None and 1 in skip_input_indices:
gy = None
elif not must_reduce_y:
gy = grad
else:
gy = array_ops.reshape(math_ops.reduce_sum(grad, ry), sy)
return (gx, gy)
@ops.RegisterGradient("Sub")
def _SubGrad(op, grad):
"""Gradient for Sub."""
y = op.inputs[1]
skip_input_indices = None
try:
skip_input_indices = op.skip_input_indices
if skip_input_indices is not None and 1 in skip_input_indices and _IsScalar(
y):
return grad, None
except AttributeError:
# No gradient skipping, so do the full gradient computation
pass
x = op.inputs[0]
if (isinstance(grad, ops.Tensor) and
_ShapesFullySpecifiedAndEqual(x, y, grad)):
return grad, -grad
(sx, rx, must_reduce_x), (sy, ry, must_reduce_y) = (
SmartBroadcastGradientArgs(x, y, grad))
if skip_input_indices is not None and 0 in skip_input_indices:
gx = None
elif not must_reduce_x:
gx = grad
else:
gx = array_ops.reshape(math_ops.reduce_sum(grad, rx), sx)
if skip_input_indices is not None and 1 in skip_input_indices:
gy = None
elif not must_reduce_y:
gy = -grad
else:
gy = array_ops.reshape(math_ops.reduce_sum(-grad, ry), sy)
return (gx, gy)
@ops.RegisterGradient("Mul")
def _MulGrad(op, grad):
"""The gradient of scalar multiplication."""
y = op.inputs[1]
skip_input_indices = None
try:
skip_input_indices = op.skip_input_indices
if skip_input_indices is not None and 1 in skip_input_indices and _IsScalar(
y):
return gen_math_ops.mul(grad, math_ops.conj(y)), None
except AttributeError:
# No gradient skipping, so do the full gradient computation
pass
x = op.inputs[0]
if (isinstance(grad, ops.Tensor) and
_ShapesFullySpecifiedAndEqual(x, y, grad) and
grad.dtype in (dtypes.int32, dtypes.float32)):
return gen_math_ops.mul(grad, y), gen_math_ops.mul(grad, x)
assert x.dtype.base_dtype == y.dtype.base_dtype, (x.dtype, " vs. ", y.dtype)
(sx, rx, must_reduce_x), (sy, ry, must_reduce_y) = (
SmartBroadcastGradientArgs(x, y, grad))
x = math_ops.conj(x)
y = math_ops.conj(y)
if skip_input_indices is not None and 0 in skip_input_indices:
gx = None
elif not must_reduce_x:
gx = gen_math_ops.mul(grad, y)
else:
gx = array_ops.reshape(
math_ops.reduce_sum(gen_math_ops.mul(grad, y), rx), sx)
if skip_input_indices is not None and 1 in skip_input_indices:
gy = None
elif not must_reduce_y:
gy = gen_math_ops.mul(x, grad)
else:
gy = array_ops.reshape(
math_ops.reduce_sum(gen_math_ops.mul(x, grad), ry), sy)
return (gx, gy)
@ops.RegisterGradient("MulNoNan")
def _MulNoNanGrad(op, grad):
"""The gradient of scalar multiplication with NaN-suppression."""
x = op.inputs[0]
y = op.inputs[1]
if (isinstance(grad, ops.Tensor) and
_ShapesFullySpecifiedAndEqual(x, y, grad)):
return gen_math_ops.mul_no_nan(grad, y), gen_math_ops.mul_no_nan(x, grad)
assert x.dtype.base_dtype == y.dtype.base_dtype, (x.dtype, " vs. ", y.dtype)
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
return (array_ops.reshape(
math_ops.reduce_sum(gen_math_ops.mul_no_nan(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(gen_math_ops.mul_no_nan(x, grad), ry), sy))
@ops.RegisterGradient("Div")
def _DivGrad(op, grad):
"""The gradient for the Div operator."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
x = math_ops.conj(x)
y = math_ops.conj(y)
if compat.forward_compatible(2019, 9, 14):
return (array_ops.reshape(
math_ops.reduce_sum(math_ops.xdivy(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(
math_ops.mul_no_nan(
math_ops.divide(math_ops.divide(-x, y), y), grad), ry),
sy))
else:
return (array_ops.reshape(
math_ops.reduce_sum(math_ops.divide(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(
grad * math_ops.divide(math_ops.divide(-x, y), y), ry), sy))
@ops.RegisterGradient("FloorDiv")
def _FloorDivGrad(_, unused_grad):
"""The gradient for the FloorDiv operator."""
return None, None
@ops.RegisterGradient("FloorMod")
def _FloorModGrad(op, grad):
"""Returns grad * (1, -floor(x/y))."""
x = math_ops.conj(op.inputs[0])
y = math_ops.conj(op.inputs[1])
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
floor_xy = math_ops.floor_div(x, y)
gx = array_ops.reshape(math_ops.reduce_sum(grad, rx), sx)
gy = array_ops.reshape(
math_ops.reduce_sum(grad * math_ops.negative(floor_xy), ry), sy)
return gx, gy
@ops.RegisterGradient("TruncateDiv")
def _TruncateDivGrad(_, unused_grad):
return None, None
@ops.RegisterGradient("RealDiv")
def _RealDivGrad(op, grad):
"""RealDiv op gradient."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
x = math_ops.conj(x)
y = math_ops.conj(y)
if compat.forward_compatible(2019, 9, 14):
return (array_ops.reshape(
math_ops.reduce_sum(math_ops.xdivy(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(
math_ops.mul_no_nan(
math_ops.realdiv(math_ops.realdiv(-x, y), y), grad),
ry), sy))
else:
return (array_ops.reshape(
math_ops.reduce_sum(math_ops.realdiv(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(
grad * math_ops.realdiv(math_ops.realdiv(-x, y), y), ry),
sy))
@ops.RegisterGradient("DivNoNan")
def _DivNoNanGrad(op, grad):
"""DivNoNan op gradient."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
x = math_ops.conj(x)
y = math_ops.conj(y)
if compat.forward_compatible(2019, 9, 14):
return (array_ops.reshape(
math_ops.reduce_sum(math_ops.div_no_nan(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(
math_ops.mul_no_nan(
math_ops.div_no_nan(math_ops.div_no_nan(-x, y), y),
grad), ry), sy))
else:
return (array_ops.reshape(
math_ops.reduce_sum(math_ops.div_no_nan(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(
grad * math_ops.div_no_nan(math_ops.div_no_nan(-x, y), y),
ry), sy))
@ops.RegisterGradient("Pow")
def _PowGrad(op, grad):
"""Returns grad * (y*x^(y-1), z*log(x))."""
x = op.inputs[0]
y = op.inputs[1]
use_mul_no_nan = compat.forward_compatible(2019, 9, 14)
skip_input_indices = None
try:
skip_input_indices = op.skip_input_indices
# TODO(mrry): If `y` is a constant, we can combine `tf.sub()` and the
# constant `1` into a single constant op.
if skip_input_indices is not None and 1 in skip_input_indices and _IsScalar(
y):
x = math_ops.conj(x)
y = math_ops.conj(y)
if use_mul_no_nan:
return gen_math_ops.mul_no_nan(y * math_ops.pow(x, y - 1), grad), None
else:
return grad * y * math_ops.pow(x, y - 1), None
except AttributeError:
# No gradient skipping, so do the full gradient computation
pass
(sx, rx, must_reduce_x), (sy, ry, must_reduce_y) = (
SmartBroadcastGradientArgs(x, y, grad))
x = math_ops.conj(x)
y = math_ops.conj(y)
if skip_input_indices is None or 0 not in skip_input_indices:
if use_mul_no_nan:
gx = gen_math_ops.mul_no_nan(y * math_ops.pow(x, y - 1), grad)
else:
gx = grad * y * math_ops.pow(x, y - 1)
if must_reduce_x:
gx = array_ops.reshape(math_ops.reduce_sum(gx, rx), sx)
else:
gx = None
if skip_input_indices is None or 1 not in skip_input_indices:
z = math_ops.conj(op.outputs[0])
# Avoid false singularity at x = 0
if x.dtype.is_complex:
# real(x) < 0 is fine for the complex case
mask = math_ops.not_equal(x, 0)
else:
# There's no sensible real value to return if x < 0, so return 0
mask = x > 0
safe_x = array_ops.where(mask, x, array_ops.ones_like(x))
log_x = array_ops.where(mask, math_ops.log(safe_x), array_ops.zeros_like(x))
if use_mul_no_nan:
gy = gen_math_ops.mul_no_nan(z * log_x, grad)
else:
gy = grad * z * log_x
if must_reduce_y:
gy = array_ops.reshape(math_ops.reduce_sum(gy, ry), sy)
else:
gy = None
return gx, gy
def _MaximumMinimumGradInputOnly(op, grad, selector_op):
x = op.inputs[0]
y = op.inputs[1]
zeros = array_ops.zeros_like(grad)
xmask = selector_op(x, y)
xgrad = array_ops.where(xmask, grad, zeros)
ygrad = None # Return None for ygrad since the config allows that.
return (xgrad, ygrad)
def _MaximumMinimumGrad(op, grad, selector_op):
"""Factor out the code for the gradient of Maximum or Minimum."""
y = op.inputs[1]
skip_input_indices = None
try:
skip_input_indices = op.skip_input_indices
if skip_input_indices is not None and 1 in skip_input_indices and _IsScalar(
y):
# When we want to get gradients for the first input only, and the second
# input tensor is a scalar, we can do a much simpler calculation
return _MaximumMinimumGradInputOnly(op, grad, selector_op)
except AttributeError:
# No gradient skipping, so do the full gradient computation
pass
x = op.inputs[0]
gdtype = grad.dtype
sx = array_ops.shape(x)
sy = array_ops.shape(y)
gradshape = array_ops.shape(grad)
zeros = array_ops.zeros(gradshape, gdtype)
xmask = selector_op(x, y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
if skip_input_indices is not None and 0 in skip_input_indices:
gx = None
else:
xgrad = array_ops.where(xmask, grad, zeros)
gx = array_ops.reshape(math_ops.reduce_sum(xgrad, rx), sx)
if skip_input_indices is not None and 1 in skip_input_indices:
gy = None
else:
ygrad = array_ops.where(xmask, zeros, grad)
gy = array_ops.reshape(math_ops.reduce_sum(ygrad, ry), sy)
return (gx, gy)
@ops.RegisterGradient("Maximum")
def _MaximumGrad(op, grad):
"""Returns grad*(x > y, x <= y) with type of grad."""
return _MaximumMinimumGrad(op, grad, math_ops.greater_equal)
@ops.RegisterGradient("Minimum")
def _MinimumGrad(op, grad):
"""Returns grad*(x < y, x >= y) with type of grad."""
return _MaximumMinimumGrad(op, grad, math_ops.less_equal)
@ops.RegisterGradient("SquaredDifference")
def _SquaredDifferenceGrad(op, grad):
"""Returns the gradient for (x-y)^2."""
x = op.inputs[0]
y = op.inputs[1]
skip_input_indices = None
try:
skip_input_indices = op.skip_input_indices
except AttributeError:
# No gradient skipping, so do the full gradient computation
pass
with ops.control_dependencies([grad]):
# The parens ensure that if grad is IndexedSlices, it'll get multiplied by
# Tensor (not a number like 2.0) which causes it to convert to Tensor.
x_grad = math_ops.scalar_mul(2.0, grad) * (x - y)
if (isinstance(grad, ops.Tensor) and
_ShapesFullySpecifiedAndEqual(x, y, grad)):
return x_grad, -x_grad
(sx, rx, must_reduce_x), (sy, ry, must_reduce_y) = (
SmartBroadcastGradientArgs(x, y, grad))
if skip_input_indices is not None and 0 in skip_input_indices:
gx = None
elif must_reduce_x:
gx = array_ops.reshape(math_ops.reduce_sum(x_grad, rx), sx)
else:
gx = x_grad
if skip_input_indices is not None and 1 in skip_input_indices:
gy = None
elif must_reduce_y:
gy = -array_ops.reshape(math_ops.reduce_sum(x_grad, ry), sy)
else:
gy = -x_grad
return (gx, gy)
# Logical operations have no gradients.
ops.NotDifferentiable("Less")
ops.NotDifferentiable("LessEqual")
ops.NotDifferentiable("Greater")
ops.NotDifferentiable("GreaterEqual")
ops.NotDifferentiable("Equal")
ops.NotDifferentiable("ApproximateEqual")
ops.NotDifferentiable("NotEqual")
ops.NotDifferentiable("LogicalAnd")
ops.NotDifferentiable("LogicalOr")
ops.NotDifferentiable("LogicalNot")
@ops.RegisterGradient("Select")
def _SelectGrad(op, grad):
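  """Returns None for the condition, grad where c is True for x, and grad where c is False for y."""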
c = op.inputs[0]
x = op.inputs[1]
zeros = array_ops.zeros_like(x)
return (None, array_ops.where(c, grad, zeros), array_ops.where(
c, zeros, grad))
@ops.RegisterGradient("SelectV2")
def _SelectGradV2(op, grad):
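  """Gradient for SelectV2: like Select, but reduces the gated grads over broadcast dims of x and y."""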
c = op.inputs[0]
x = op.inputs[1]
y = op.inputs[2]
zeros = array_ops.zeros([], dtype=grad.dtype.base_dtype)
gx = array_ops.where_v2(c, grad, zeros)
x_shape = array_ops.shape(x)
output_shape = array_ops.shape(op.outputs[0])
# Reduce away broadcasted leading dims.
reduce_x, _ = gen_array_ops.broadcast_gradient_args(x_shape, output_shape)
gx = math_ops.reduce_sum(gx, keepdims=True, axis=reduce_x)
gx = array_ops.reshape(gx, x_shape)
gy = array_ops.where_v2(c, zeros, grad)
y_shape = array_ops.shape(y)
# Reduce away broadcasted leading dims.
reduce_y, _ = gen_array_ops.broadcast_gradient_args(y_shape, output_shape)
gy = math_ops.reduce_sum(gy, keepdims=True, axis=reduce_y)
gy = array_ops.reshape(gy, y_shape)
return (None, gx, gy)
def _MatMulGradAgainstFirstOnly(op, grad):
"""Gradient for MatMul, only for the first input."""
t_a = op.get_attr("transpose_a")
t_b = op.get_attr("transpose_b")
b = math_ops.conj(op.inputs[1])
if not t_a and not t_b:
grad_a = gen_math_ops.mat_mul(grad, b, transpose_b=True)
elif not t_a and t_b:
grad_a = gen_math_ops.mat_mul(grad, b)
elif t_a and not t_b:
grad_a = gen_math_ops.mat_mul(b, grad, transpose_b=True)
elif t_a and t_b:
grad_a = gen_math_ops.mat_mul(b, grad, transpose_a=True, transpose_b=True)
return grad_a, None
def _MatMulGradAgainstSecondOnly(op, grad):
"""Gradient for MatMul, only for the second input."""
t_a = op.get_attr("transpose_a")
t_b = op.get_attr("transpose_b")
a = math_ops.conj(op.inputs[0])
if not t_a and not t_b:
grad_b = gen_math_ops.mat_mul(a, grad, transpose_a=True)
elif not t_a and t_b:
grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True)
elif t_a and not t_b:
grad_b = gen_math_ops.mat_mul(a, grad)
elif t_a and t_b:
grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True, transpose_b=True)
return None, grad_b
@ops.RegisterGradient("MatMul")
def _MatMulGrad(op, grad):
"""Gradient for MatMul."""
try:
skip_input_indices = op.skip_input_indices
if skip_input_indices is not None:
if 1 in skip_input_indices:
return _MatMulGradAgainstFirstOnly(op, grad)
elif 0 in skip_input_indices:
return _MatMulGradAgainstSecondOnly(op, grad)
except AttributeError:
# No gradient skipping, so do the full gradient computation
pass
t_a = op.get_attr("transpose_a")
t_b = op.get_attr("transpose_b")
a = math_ops.conj(op.inputs[0])
b = math_ops.conj(op.inputs[1])
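  # Standard matmul gradient identities, e.g. for C = A @ B:
  #   dA = dC @ B^T and dB = A^T @ dC; the other branches adjust for transposes.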
if not t_a and not t_b:
grad_a = gen_math_ops.mat_mul(grad, b, transpose_b=True)
grad_b = gen_math_ops.mat_mul(a, grad, transpose_a=True)
elif not t_a and t_b:
grad_a = gen_math_ops.mat_mul(grad, b)
grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True)
elif t_a and not t_b:
grad_a = gen_math_ops.mat_mul(b, grad, transpose_b=True)
grad_b = gen_math_ops.mat_mul(a, grad)
elif t_a and t_b:
grad_a = gen_math_ops.mat_mul(b, grad, transpose_a=True, transpose_b=True)
grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True, transpose_b=True)
return grad_a, grad_b
@ops.RegisterGradient("SparseMatMul")
def _SparseMatMulGrad(op, grad):
"""Gradient for SparseMatMul."""
t_a = op.get_attr("transpose_a")
t_b = op.get_attr("transpose_b")
is_sparse = object_identity.ObjectIdentityDictionary()
is_sparse[op.inputs[0]] = op.get_attr("a_is_sparse")
is_sparse[op.inputs[1]] = op.get_attr("b_is_sparse")
# Use heuristic to figure out if grad might be sparse
is_sparse[grad] = not context.executing_eagerly() and (
grad.op.type == "ReluGrad")
def _SparseMatMul(t1, t2, out_dtype, transpose_a=False, transpose_b=False):
"""Helper function to create SparseMatMul op."""
assert t1 in is_sparse and t2 in is_sparse
t1_sparse = is_sparse[t1]
t2_sparse = is_sparse[t2]
if transpose_b:
t2 = array_ops.transpose(t2)
transpose_b = False
prod = math_ops.matmul(
t1,
t2,
transpose_a=transpose_a,
transpose_b=transpose_b,
a_is_sparse=t1_sparse,
b_is_sparse=t2_sparse)
if prod.dtype != out_dtype:
prod = math_ops.cast(prod, out_dtype)
return prod
dtype_a = op.inputs[0].dtype
dtype_b = op.inputs[1].dtype
if not t_a and not t_b:
return (_SparseMatMul(grad, op.inputs[1], dtype_a, transpose_b=True),
_SparseMatMul(op.inputs[0], grad, dtype_b, transpose_a=True))
elif not t_a and t_b:
return (_SparseMatMul(grad, op.inputs[1], dtype_a),
_SparseMatMul(grad, op.inputs[0], dtype_b, transpose_a=True))
elif t_a and not t_b:
return (_SparseMatMul(op.inputs[1], grad, dtype_a, transpose_b=True),
_SparseMatMul(op.inputs[0], grad, dtype_b))
elif t_a and t_b:
return (_SparseMatMul(
op.inputs[1], grad, dtype_a, transpose_a=True, transpose_b=True),
_SparseMatMul(
grad, op.inputs[0], dtype_b, transpose_a=True,
transpose_b=True))
@ops.RegisterGradient("Floor")
def _FloorGrad(_, unused_grad):
return [None]
@ops.RegisterGradient("Ceil")
def _CeilGrad(_, unused_grad):
return [None]
@ops.RegisterGradient("Round")
def _RoundGrad(_, unused_grad):
return [None]
@ops.RegisterGradient("Rint")
def _RintGrad(_, unused_grad):
# the gradient of Rint is zero
return [None]
@ops.RegisterGradient("BatchMatMul")
def _BatchMatMul(op, grad):
"""Returns the gradient of x and y given the gradient of x * y."""
x = op.inputs[0]
y = op.inputs[1]
adj_x = op.get_attr("adj_x")
adj_y = op.get_attr("adj_y")
if not adj_x:
if not adj_y:
grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=True)
grad_y = math_ops.matmul(x, grad, adjoint_a=True, adjoint_b=False)
else:
grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=False)
grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=False)
else:
if not adj_y:
grad_x = math_ops.matmul(y, grad, adjoint_a=False, adjoint_b=True)
grad_y = math_ops.matmul(x, grad, adjoint_a=False, adjoint_b=False)
else:
grad_x = math_ops.matmul(y, grad, adjoint_a=True, adjoint_b=True)
grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=True)
return grad_x, grad_y
@ops.RegisterGradient("BatchMatMulV2")
def _BatchMatMulV2(op, grad):
"""Returns the gradient of x and y given the gradient of x * y."""
x = op.inputs[0]
y = op.inputs[1]
adj_x = op.get_attr("adj_x")
adj_y = op.get_attr("adj_y")
if not adj_x:
if not adj_y:
grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=True)
grad_y = math_ops.matmul(x, grad, adjoint_a=True, adjoint_b=False)
else:
grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=False)
grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=False)
else:
if not adj_y:
grad_x = math_ops.matmul(y, grad, adjoint_a=False, adjoint_b=True)
grad_y = math_ops.matmul(x, grad, adjoint_a=False, adjoint_b=False)
else:
grad_x = math_ops.matmul(y, grad, adjoint_a=True, adjoint_b=True)
grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=True)
# Reduce along the broadcasted batch dimensions, if broadcasting is required.
shape_x_static = x.get_shape()
shape_y_static = y.get_shape()
if not (shape_x_static.is_fully_defined() and
shape_y_static.is_fully_defined() and
shape_x_static == shape_y_static):
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx[:-2], sy[:-2])
grad_x = array_ops.reshape(math_ops.reduce_sum(grad_x, rx), sx)
grad_y = array_ops.reshape(math_ops.reduce_sum(grad_y, ry), sy)
return grad_x, grad_y
ops.NotDifferentiable("Range")
ops.NotDifferentiable("LinSpace")
@ops.RegisterGradient("Complex")
def _ComplexGrad(op, grad):
"""Returns the real and imaginary components of 'grad', respectively."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
return (array_ops.reshape(math_ops.reduce_sum(math_ops.real(grad), rx), sx),
array_ops.reshape(math_ops.reduce_sum(math_ops.imag(grad), ry), sy))
@ops.RegisterGradient("Real")
def _RealGrad(_, grad):
"""Returns 'grad' as the real part and set the imaginary part 0."""
zero = constant_op.constant(0, dtype=grad.dtype)
return math_ops.complex(grad, zero)
@ops.RegisterGradient("Imag")
def _ImagGrad(_, grad):
"""Returns 'grad' as the imaginary part and set the real part 0."""
zero = constant_op.constant(0, dtype=grad.dtype)
return math_ops.complex(zero, grad)
@ops.RegisterGradient("Angle")
def _AngleGrad(op, grad):
"""Returns -grad / (Im(x) + iRe(x))"""
x = op.inputs[0]
with ops.control_dependencies([grad]):
re = math_ops.real(x)
im = math_ops.imag(x)
z = math_ops.reciprocal(math_ops.complex(im, re))
zero = constant_op.constant(0, dtype=grad.dtype)
complex_grad = math_ops.complex(grad, zero)
return -complex_grad * z
@ops.RegisterGradient("Conj")
def _ConjGrad(_, grad):
"""Returns the complex conjugate of grad."""
return math_ops.conj(grad)
@ops.RegisterGradient("ComplexAbs")
def _ComplexAbsGrad(op, grad):
"""Returns the gradient of ComplexAbs."""
return math_ops.div_no_nan(
math_ops.complex(
grad, array_ops.zeros_like(grad)) * op.inputs[0],
math_ops.complex(
op.outputs[0], array_ops.zeros_like(op.outputs[0])))
@ops.RegisterGradient("Cast")
def _CastGrad(op, grad):
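  """Casts grad back to the source dtype when both dtypes are float/complex, else None."""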
t = [
dtypes.float16, dtypes.float32, dtypes.float64, dtypes.bfloat16,
dtypes.complex64, dtypes.complex128
]
src_type = op.inputs[0].dtype.base_dtype
dst_type = grad.dtype.base_dtype
if src_type in t and dst_type in t:
return math_ops.cast(grad, src_type)
else:
return None
@ops.RegisterGradient("Cross")
def _CrossGrad(op, grad):
u = op.inputs[0]
v = op.inputs[1]
return (math_ops.cross(v, grad), math_ops.cross(grad, u))
@ops.RegisterGradient("Cumsum")
def _CumsumGrad(op, grad):
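  """Gradient of cumsum is a cumsum of grad with the reverse flag flipped."""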
axis = op.inputs[1]
exclusive = op.get_attr("exclusive")
reverse = op.get_attr("reverse")
return [
math_ops.cumsum(grad, axis, exclusive=exclusive, reverse=not reverse),
None
]
@ops.RegisterGradient("Cumprod")
def _CumprodGrad(op, grad):
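  """Gradient of cumprod: cumsum(cumprod(x) * grad) in the opposite direction, divided by x."""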
x = op.inputs[0]
axis = op.inputs[1]
exclusive = op.get_attr("exclusive")
reverse = op.get_attr("reverse")
# TODO This fails when x contains 0 and should be fixed
prod = math_ops.cumprod(x, axis, exclusive=exclusive, reverse=reverse)
out = math_ops.cumsum(
prod * grad, axis, exclusive=exclusive, reverse=not reverse)
return [out / x, None]
@ops.RegisterGradient("CumulativeLogsumexp")
def _CumulativeLogsumexpGrad(op, grad):
x = op.inputs[0]
axis = op.inputs[1]
cumulative_logsumexp = op.outputs[0]
exclusive = op.get_attr("exclusive")
reverse = op.get_attr("reverse")
# Split the incoming gradient into positive and negative part
# in order to take logs. This is required for stable results.
log_grad_positive = array_ops.where_v2(
math_ops.greater(grad, 0),
math_ops.log(grad),
grad.dtype.min)
log_grad_negative = array_ops.where_v2(
math_ops.less(grad, 0),
math_ops.log(-grad),
grad.dtype.min)
output_pos = math_ops.exp(
math_ops.cumulative_logsumexp(
log_grad_positive - cumulative_logsumexp,
axis=axis, reverse=not reverse, exclusive=exclusive) + x)
output_neg = math_ops.exp(
math_ops.cumulative_logsumexp(
log_grad_negative - cumulative_logsumexp,
axis=axis, reverse=not reverse, exclusive=exclusive) + x)
return [output_pos - output_neg, None]
@ops.RegisterGradient("NextAfter")
def _NextAfterGrad(op, grad):
"""Returns gradient of nextafter(x1, x2) with respect to x1 and x2."""
x1 = op.inputs[0]
x2 = op.inputs[1]
s_x1 = array_ops.shape(x1)
s_x2 = array_ops.shape(x2)
r_x1, r_x2 = gen_array_ops.broadcast_gradient_args(s_x1, s_x2)
with ops.control_dependencies([grad]):
partial_x1 = array_ops.ones(s_x1, dtype=x1.dtype)
partial_x2 = array_ops.zeros(s_x2, dtype=x2.dtype)
return (array_ops.reshape(
math_ops.reduce_sum(partial_x1 * grad, r_x1), s_x1),
array_ops.reshape(
math_ops.reduce_sum(partial_x2 * grad, r_x2), s_x2))
|
py | 1a54c75bc25f258f31b74621e59a64af3a5c55c1 | from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7
# importing the Kratos Library
import KratosMultiphysics
import KratosMultiphysics.ShallowWaterApplication as Shallow
# Check that KratosMultiphysics was imported in the main script
KratosMultiphysics.CheckForPreviousImport()
## Import base class file
from shallow_water_base_solver import ShallowWaterBaseSolver
def CreateSolver(model, custom_settings):
return EulerianPrimitiveVarSolver(model, custom_settings)
class EulerianPrimitiveVarSolver(ShallowWaterBaseSolver):
def __init__(self, model, custom_settings):
super(EulerianPrimitiveVarSolver, self).__init__(model, custom_settings)
# Set the element and condition names for the replace settings
self.element_name = "EulerPrimVarElement"
self.condition_name = "Condition"
self.min_buffer_size = 2
def AddDofs(self):
KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.VELOCITY_X, self.main_model_part)
KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.VELOCITY_Y, self.main_model_part)
KratosMultiphysics.VariableUtils().AddDof(Shallow.HEIGHT, self.main_model_part)
self.print_on_rank_zero("::[EulerianPrimitiveVarSolver]::", "Shallow water solver DOFs added correctly.")
def SolveSolutionStep(self):
if self._TimeBufferIsInitialized:
            # If a node and its neighbours are dry, set the ACTIVE flag to false
self.ShallowVariableUtils.SetDryWetState()
# Solve equations on mesh
is_converged = self.solver.SolveSolutionStep()
# Compute free surface
self.ShallowVariableUtils.ComputeFreeSurfaceElevation()
# If water height is negative or close to zero, reset values
self.ShallowVariableUtils.CheckDryPrimitiveVariables()
return is_converged
|
py | 1a54c9ac0f331cbd1ab7348c37076672db399df1 | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
from collections import deque
import configparser
import datetime
import os
import time
import shutil
import signal
import sys
import subprocess
import tempfile
import re
import logging
# Formatting. Default colors to empty strings.
BOLD, GREEN, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
# Make sure python thinks it can write unicode to its stdout
"\u2713".encode("utf_8").decode(sys.stdout.encoding)
TICK = "✓ "
CROSS = "✖ "
CIRCLE = "○ "
except UnicodeDecodeError:
TICK = "P "
CROSS = "x "
CIRCLE = "o "
if os.name != 'nt' or sys.getwindowsversion() >= (10, 0, 14393):
if os.name == 'nt':
import ctypes
kernel32 = ctypes.windll.kernel32
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
# Enable ascii color control to stdout
stdout = kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
stdout_mode = ctypes.c_int32()
kernel32.GetConsoleMode(stdout, ctypes.byref(stdout_mode))
kernel32.SetConsoleMode(stdout, stdout_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
# Enable ascii color control to stderr
stderr = kernel32.GetStdHandle(STD_ERROR_HANDLE)
stderr_mode = ctypes.c_int32()
kernel32.GetConsoleMode(stderr, ctypes.byref(stderr_mode))
kernel32.SetConsoleMode(stderr, stderr_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
GREEN = ('\033[0m', '\033[0;32m')
RED = ('\033[0m', '\033[0;31m')
GREY = ('\033[0m', '\033[1;30m')
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
BASE_SCRIPTS = [
# Scripts that are run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'feature_fee_estimation.py',
'wallet_hd.py',
'wallet_backup.py',
# vv Tests less than 5m vv
'mining_getblocktemplate_longpoll.py',
'feature_maxuploadtarget.py',
'feature_block.py',
'rpc_fundrawtransaction.py',
'p2p_compactblocks.py',
'feature_segwit.py',
# vv Tests less than 2m vv
'wallet_basic.py',
'wallet_labels.py',
'p2p_segwit.py',
'p2p_timeouts.py',
'wallet_dump.py',
'wallet_listtransactions.py',
# vv Tests less than 60s vv
'p2p_sendheaders.py',
'wallet_zapwallettxes.py',
'wallet_importmulti.py',
'mempool_limit.py',
'rpc_txoutproof.py',
'wallet_listreceivedby.py',
'wallet_abandonconflict.py',
'feature_csv_activation.py',
'rpc_rawtransaction.py',
'wallet_address_types.py',
'feature_bip68_sequence.py',
'p2p_feefilter.py',
'feature_reindex.py',
# vv Tests less than 30s vv
'wallet_keypool_topup.py',
'interface_zmq.py',
'interface_bitcoin_cli.py',
'mempool_resurrect.py',
'wallet_txn_doublespend.py --mineblock',
'tool_wallet.py',
'wallet_txn_clone.py',
'wallet_txn_clone.py --segwit',
'rpc_getchaintips.py',
'rpc_misc.py',
'interface_rest.py',
'mempool_spend_coinbase.py',
'mempool_reorg.py',
'mempool_persist.py',
'wallet_multiwallet.py',
'wallet_multiwallet.py --usecli',
'wallet_createwallet.py',
'wallet_createwallet.py --usecli',
'interface_http.py',
'interface_rpc.py',
'rpc_psbt.py',
'rpc_users.py',
'feature_proxy.py',
'rpc_signrawtransaction.py',
'wallet_groups.py',
'p2p_disconnect_ban.py',
'rpc_decodescript.py',
'rpc_blockchain.py',
'rpc_deprecated.py',
'wallet_disable.py',
'rpc_net.py',
'wallet_keypool.py',
'p2p_mempool.py',
'p2p_blocksonly.py',
'mining_prioritisetransaction.py',
'p2p_invalid_locator.py',
'p2p_invalid_block.py',
'p2p_invalid_messages.py',
'p2p_invalid_tx.py',
'feature_assumevalid.py',
'example_test.py',
'wallet_txn_doublespend.py',
'wallet_txn_clone.py --mineblock',
'feature_notifications.py',
'rpc_invalidateblock.py',
'feature_rbf.py',
'mempool_packages.py',
'rpc_createmultisig.py',
'feature_versionbits_warning.py',
'rpc_preciousblock.py',
'wallet_importprunedfunds.py',
'p2p_leak_tx.py',
'rpc_signmessage.py',
'wallet_balance.py',
'feature_nulldummy.py',
'mempool_accept.py',
'wallet_import_rescan.py',
'wallet_import_with_label.py',
'rpc_bind.py --ipv4',
'rpc_bind.py --ipv6',
'rpc_bind.py --nonloopback',
'mining_basic.py',
'wallet_bumpfee.py',
'rpc_named_arguments.py',
'wallet_listsinceblock.py',
'p2p_leak.py',
'wallet_encryption.py',
'wallet_scriptaddress2.py',
'feature_dersig.py',
'feature_cltv.py',
'rpc_uptime.py',
'wallet_resendwallettransactions.py',
'wallet_fallbackfee.py',
'feature_minchainwork.py',
'rpc_getblockstats.py',
'wallet_create_tx.py',
'p2p_fingerprint.py',
'feature_uacomment.py',
'wallet_coinbase_category.py',
'feature_filelock.py',
'p2p_unrequested_blocks.py',
'feature_includeconf.py',
'rpc_deriveaddresses.py',
'rpc_deriveaddresses.py --usecli',
'rpc_scantxoutset.py',
'feature_logging.py',
# 'p2p_node_network_limited.py', # incompatible with Omni
'feature_blocksdir.py',
'feature_config_args.py',
'rpc_help.py',
'feature_help.py',
'feature_shutdown.py',
'omni_reorg.py',
'omni_clientexpiry.py',
'omni_stov1.py',
'omni_freeze.py',
'omni_graceperiod.py',
'omni_createtoken.py',
'omni_freedexspec.py',
'omni_dexversionsspec.py',
'omni_basicspec.py',
'omni_reorgspec.py',
'omni_sendallspec.py',
'omni_crowdsalespec.py',
'omni_smartandmanagedspec.py',
'omni_stospec.py',
# Don't append tests at the end to avoid merge conflicts
# Put them in a random line within the section that fits their approximate run-time
]
EXTENDED_SCRIPTS = [
# These tests are not run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'feature_pruning.py',
'feature_dbcrash.py',
]
# Place EXTENDED_SCRIPTS first since it has the 3 longest running tests
ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS
NON_SCRIPTS = [
# These are python files that live in the functional tests directory, but are not test scripts.
"combine_logs.py",
"create_cache.py",
"test_runner.py",
]
def main():
# Parse arguments and pass through unrecognised args
parser = argparse.ArgumentParser(add_help=False,
usage='%(prog)s [test_runner.py options] [script options] [scripts]',
description=__doc__,
epilog='''
Help text and arguments for individual test script:''',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--combinedlogslen', '-c', type=int, default=0, metavar='n', help='On failure, print a log (of length n lines) to the console, combined from the test framework and all test nodes.')
parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
parser.add_argument('--ci', action='store_true', help='Run checks and code that are usually only enabled in a continuous integration environment')
parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
parser.add_argument('--quiet', '-q', action='store_true', help='only print dots, results summary and failure logs')
parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
parser.add_argument('--failfast', action='store_true', help='stop execution after the first test failure')
args, unknown_args = parser.parse_known_args()
# args to be passed on always start with two dashes; tests are the remaining unknown args
tests = [arg for arg in unknown_args if arg[:2] != "--"]
passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
config.read_file(open(configfile, encoding="utf8"))
passon_args.append("--configfile=%s" % configfile)
# Set up logging
logging_level = logging.INFO if args.quiet else logging.DEBUG
logging.basicConfig(format='%(message)s', level=logging_level)
# Create base test directory
tmpdir = "%s/test_runner_Ł_🏃_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
os.makedirs(tmpdir)
logging.debug("Temporary test directory at %s" % tmpdir)
enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
if not enable_bitcoind:
print("No functional tests to run.")
print("Rerun ./configure with --with-daemon and then make")
sys.exit(0)
# Build list of tests
test_list = []
if tests:
# Individual tests have been specified. Run specified tests that exist
# in the ALL_SCRIPTS list. Accept the name with or without .py extension.
tests = [test + ".py" if ".py" not in test else test for test in tests]
for test in tests:
if test in ALL_SCRIPTS:
test_list.append(test)
else:
print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], test))
elif args.extended:
# Include extended tests
test_list += ALL_SCRIPTS
else:
# Run base tests only
test_list += BASE_SCRIPTS
# Remove the test cases that the user has explicitly asked to exclude.
if args.exclude:
exclude_tests = [test.split('.py')[0] for test in args.exclude.split(',')]
for exclude_test in exclude_tests:
# Remove <test_name>.py and <test_name>.py --arg from the test list
exclude_list = [test for test in test_list if test.split('.py')[0] == exclude_test]
for exclude_item in exclude_list:
test_list.remove(exclude_item)
if not exclude_list:
print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))
if not test_list:
print("No valid test scripts specified. Check that your test is in one "
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
sys.exit(0)
if args.help:
# Print help for test_runner.py, then print help of the first script (with args removed) and exit.
parser.print_help()
subprocess.check_call([sys.executable, os.path.join(config["environment"]["SRCDIR"], 'test', 'functional', test_list[0].split()[0]), '-h'])
sys.exit(0)
check_script_list(src_dir=config["environment"]["SRCDIR"], fail_on_warn=args.ci)
check_script_prefixes()
if not args.keepcache:
shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)
run_tests(
test_list=test_list,
src_dir=config["environment"]["SRCDIR"],
build_dir=config["environment"]["BUILDDIR"],
tmpdir=tmpdir,
jobs=args.jobs,
enable_coverage=args.coverage,
args=passon_args,
combined_logs_len=args.combinedlogslen,
failfast=args.failfast,
runs_ci=args.ci,
)
def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=False, args=None, combined_logs_len=0, failfast=False, runs_ci):
args = args or []
# Warn if bitcoind is already running (unix only)
try:
if subprocess.check_output(["pidof", "omnilited"]) is not None:
print("%sWARNING!%s There is already a omnilited process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
except (OSError, subprocess.SubprocessError):
pass
# Warn if there is a cache directory
cache_dir = "%s/test/cache" % build_dir
if os.path.isdir(cache_dir):
print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))
tests_dir = src_dir + '/test/functional/'
flags = ['--cachedir={}'.format(cache_dir)] + args
if enable_coverage:
coverage = RPCCoverage()
flags.append(coverage.flag)
logging.debug("Initializing coverage directory at %s" % coverage.dir)
else:
coverage = None
if len(test_list) > 1 and jobs > 1:
# Populate cache
try:
subprocess.check_output([sys.executable, tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
except subprocess.CalledProcessError as e:
sys.stdout.buffer.write(e.output)
raise
#Run Tests
job_queue = TestHandler(
num_tests_parallel=jobs,
tests_dir=tests_dir,
tmpdir=tmpdir,
test_list=test_list,
flags=flags,
timeout_duration=40 * 60 if runs_ci else float('inf'), # in seconds
)
start_time = time.time()
test_results = []
max_len_name = len(max(test_list, key=len))
test_count = len(test_list)
for i in range(test_count):
test_result, testdir, stdout, stderr = job_queue.get_next()
test_results.append(test_result)
done_str = "{}/{} - {}{}{}".format(i + 1, test_count, BOLD[1], test_result.name, BOLD[0])
if test_result.status == "Passed":
logging.debug("%s passed, Duration: %s s" % (done_str, test_result.time))
elif test_result.status == "Skipped":
logging.debug("%s skipped" % (done_str))
else:
print("%s failed, Duration: %s s\n" % (done_str, test_result.time))
print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
if combined_logs_len and os.path.isdir(testdir):
# Print the final `combinedlogslen` lines of the combined logs
print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
print('\n============')
print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
print('============\n')
combined_logs_args = [sys.executable, os.path.join(tests_dir, 'combine_logs.py'), testdir]
if BOLD[0]:
combined_logs_args += ['--color']
combined_logs, _ = subprocess.Popen(combined_logs_args, universal_newlines=True, stdout=subprocess.PIPE).communicate()
print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))
if failfast:
logging.debug("Early exiting after test failure")
break
print_results(test_results, max_len_name, (int(time.time() - start_time)))
if coverage:
coverage.report_rpc_coverage()
logging.debug("Cleaning up coverage data")
coverage.cleanup()
# Clear up the temp directory if all subdirectories are gone
if not os.listdir(tmpdir):
os.rmdir(tmpdir)
all_passed = all(map(lambda test_result: test_result.was_successful, test_results))
# This will be a no-op unless failfast is True in which case there may be dangling
# processes which need to be killed.
job_queue.kill_and_join()
sys.exit(not all_passed)
def print_results(test_results, max_len_name, runtime):
results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
test_results.sort(key=TestResult.sort_key)
all_passed = True
time_sum = 0
for test_result in test_results:
all_passed = all_passed and test_result.was_successful
time_sum += test_result.time
test_result.padding = max_len_name
results += str(test_result)
status = TICK + "Passed" if all_passed else CROSS + "Failed"
if not all_passed:
results += RED[1]
results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
if not all_passed:
results += RED[0]
results += "Runtime: %s s\n" % (runtime)
print(results)
class TestHandler:
"""
Trigger the test scripts passed in via the list.
"""
def __init__(self, *, num_tests_parallel, tests_dir, tmpdir, test_list, flags, timeout_duration):
assert num_tests_parallel >= 1
self.num_jobs = num_tests_parallel
self.tests_dir = tests_dir
self.tmpdir = tmpdir
self.timeout_duration = timeout_duration
self.test_list = test_list
self.flags = flags
self.num_running = 0
self.jobs = []
def get_next(self):
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
test = self.test_list.pop(0)
portseed = len(self.test_list)
portseed_arg = ["--portseed={}".format(portseed)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
test_argv = test.split()
testdir = "{}/{}_{}".format(self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
tmpdir_arg = ["--tmpdir={}".format(testdir)]
self.jobs.append((test,
time.time(),
subprocess.Popen([sys.executable, self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
testdir,
log_stdout,
log_stderr))
if not self.jobs:
raise IndexError('pop from empty list')
dot_count = 0
while True:
# Return first proc that finishes
time.sleep(.5)
for job in self.jobs:
(name, start_time, proc, testdir, log_out, log_err) = job
if int(time.time() - start_time) > self.timeout_duration:
# In travis, timeout individual tests (to stop tests hanging and not providing useful output).
proc.send_signal(signal.SIGINT)
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [log_file.read().decode('utf-8') for log_file in (log_out, log_err)]
log_out.close(), log_err.close()
if proc.returncode == TEST_EXIT_PASSED and stderr == "":
status = "Passed"
elif proc.returncode == TEST_EXIT_SKIPPED:
status = "Skipped"
else:
status = "Failed"
self.num_running -= 1
self.jobs.remove(job)
clearline = '\r' + (' ' * dot_count) + '\r'
print(clearline, end='', flush=True)
dot_count = 0
return TestResult(name, status, int(time.time() - start_time)), testdir, stdout, stderr
print('.', end='', flush=True)
dot_count += 1
def kill_and_join(self):
"""Send SIGKILL to all jobs and block until all have ended."""
procs = [i[2] for i in self.jobs]
for proc in procs:
proc.kill()
for proc in procs:
proc.wait()
class TestResult():
def __init__(self, name, status, time):
self.name = name
self.status = status
self.time = time
self.padding = 0
def sort_key(self):
if self.status == "Passed":
return 0, self.name.lower()
elif self.status == "Failed":
return 2, self.name.lower()
elif self.status == "Skipped":
return 1, self.name.lower()
def __repr__(self):
if self.status == "Passed":
color = GREEN
glyph = TICK
elif self.status == "Failed":
color = RED
glyph = CROSS
elif self.status == "Skipped":
color = GREY
glyph = CIRCLE
return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]
@property
def was_successful(self):
return self.status != "Failed"
def check_script_prefixes():
"""Check that test scripts start with one of the allowed name prefixes."""
good_prefixes_re = re.compile("(example|feature|interface|mempool|mining|omni|p2p|rpc|wallet|tool)_")
bad_script_names = [script for script in ALL_SCRIPTS if good_prefixes_re.match(script) is None]
if bad_script_names:
print("%sERROR:%s %d tests not meeting naming conventions:" % (BOLD[1], BOLD[0], len(bad_script_names)))
print(" %s" % ("\n ".join(sorted(bad_script_names))))
raise AssertionError("Some tests are not following naming convention!")
def check_script_list(*, src_dir, fail_on_warn):
"""Check scripts directory.
Check that there are no scripts in the functional tests directory which are
not being run by pull-tester.py."""
script_dir = src_dir + '/test/functional/'
python_files = set([test_file for test_file in os.listdir(script_dir) if test_file.endswith(".py")])
missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS)))
if len(missed_tests) != 0:
print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
if fail_on_warn:
# On travis this warning is an error to prevent merging incomplete commits into master
sys.exit(1)
class RPCCoverage():
"""
Coverage reporting utilities for test_runner.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `litecoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: test/functional/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir=%s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % command) for command in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
        # This is shared from `test/functional/test_framework/coverage.py`
reference_filename = 'rpc_interface.txt'
coverage_file_prefix = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, reference_filename)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r', encoding="utf8") as coverage_ref_file:
all_cmds.update([line.strip() for line in coverage_ref_file.readlines()])
for root, _, files in os.walk(self.dir):
for filename in files:
if filename.startswith(coverage_file_prefix):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r', encoding="utf8") as coverage_file:
covered_cmds.update([line.strip() for line in coverage_file.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
main()
|
py | 1a54c9d4fcc67aeac937846599520cd7ad18c298 | from nonebot import on_request, get_driver
from nonebot.adapters.onebot.v11.bot import Bot
from nonebot.adapters.onebot.v11.event import GroupRequestEvent, FriendRequestEvent
from nonebot.adapters.onebot.v11.message import Message
try:
master = get_driver().config.master
except:
master = []
add_req = on_request()
@add_req.handle()
async def group_add(bot: Bot, event: GroupRequestEvent):
'''
    Handle group join requests.
'''
    if event.sub_type == "add":
        if str(event.comment) == 'ATRI -My Dear Moments-':
            await bot.set_group_add_request(flag=event.flag, sub_type='add', approve=True)
    elif event.sub_type == "invite":
        if event.user_id in master:
            await bot.set_group_add_request(flag=event.flag, sub_type='invite', approve=True)
        else:
            await bot.set_group_add_request(flag=event.flag, sub_type='invite', approve=False)
add_friend_req = on_request()
@add_friend_req.handle()
async def friend_add(bot: Bot, event: FriendRequestEvent):
'''
    Handle friend add requests.
'''
pass |
py | 1a54ca21124743b3b7489964e71b6f181c584ed1 |
from functools import partial
import threading
import os
from kivy.app import App
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.properties import ObjectProperty, StringProperty, OptionProperty
from kivy.core.window import Window
from kivy.uix.button import Button
from kivy.utils import platform
from kivy.uix.widget import Widget
from kivy.core.window import Window
from kivy.clock import Clock
from kivy.utils import platform
from electrum_ltc.base_wizard import BaseWizard
from electrum_ltc.util import is_valid_email
from . import EventsDialog
from ...i18n import _
from .password_dialog import PasswordDialog
# global Variables
is_test = (platform == "linux")
test_seed = "time taxi field recycle tiny license olive virus report rare steel portion achieve"
test_seed = "grape impose jazz bind spatial mind jelly tourist tank today holiday stomach"
test_xpub = "xpub661MyMwAqRbcEbvVtRRSjqxVnaWVUMewVzMiURAKyYratih4TtBpMypzzefmv8zUNebmNVzB3PojdC5sV2P9bDgMoo9B3SARw1MXUUfU1GL"
Builder.load_string('''
#:import Window kivy.core.window.Window
#:import _ electrum_ltc_gui.kivy.i18n._
<WizardTextInput@TextInput>
border: 4, 4, 4, 4
font_size: '15sp'
padding: '15dp', '15dp'
background_color: (1, 1, 1, 1) if self.focus else (0.454, 0.698, 0.909, 1)
foreground_color: (0.31, 0.31, 0.31, 1) if self.focus else (0.835, 0.909, 0.972, 1)
hint_text_color: self.foreground_color
background_active: 'atlas://gui/kivy/theming/light/create_act_text_active'
background_normal: 'atlas://gui/kivy/theming/light/create_act_text_active'
size_hint_y: None
height: '48sp'
<WizardButton@Button>:
root: None
size_hint: 1, None
height: '48sp'
on_press: if self.root: self.root.dispatch('on_press', self)
on_release: if self.root: self.root.dispatch('on_release', self)
<BigLabel@Label>
color: .854, .925, .984, 1
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
bold: True
<-WizardDialog>
text_color: .854, .925, .984, 1
value: ''
#auto_dismiss: False
size_hint: None, None
canvas.before:
Color:
rgba: .239, .588, .882, 1
Rectangle:
size: Window.size
crcontent: crcontent
# add electrum icon
BoxLayout:
orientation: 'vertical' if self.width < self.height else 'horizontal'
padding:
min(dp(27), self.width/32), min(dp(27), self.height/32),\
min(dp(27), self.width/32), min(dp(27), self.height/32)
spacing: '10dp'
GridLayout:
id: grid_logo
cols: 1
pos_hint: {'center_y': .5}
size_hint: 1, None
height: self.minimum_height
Label:
color: root.text_color
text: 'ELECTRUM'
size_hint: 1, None
height: self.texture_size[1] if self.opacity else 0
font_size: '33sp'
font_name: 'gui/kivy/data/fonts/tron/Tr2n.ttf'
GridLayout:
cols: 1
id: crcontent
spacing: '1dp'
Widget:
size_hint: 1, 0.3
GridLayout:
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
id: back
text: _('Back')
root: root
WizardButton:
id: next
text: _('Next')
root: root
disabled: root.value == ''
<WizardMultisigDialog>
value: 'next'
Widget
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: _("Choose the number of signatures needed to unlock funds in your wallet")
Widget
size_hint: 1, 1
GridLayout:
orientation: 'vertical'
cols: 2
spacing: '14dp'
size_hint: 1, 1
height: self.minimum_height
Label:
color: root.text_color
text: _('From {} cosigners').format(n.value)
Slider:
id: n
range: 2, 5
step: 1
value: 2
Label:
color: root.text_color
text: _('Require {} signatures').format(m.value)
Slider:
id: m
range: 1, n.value
step: 1
value: 2
<WizardChoiceDialog>
message : ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
GridLayout:
row_default_height: '48dp'
orientation: 'vertical'
id: choices
cols: 1
spacing: '14dp'
size_hint: 1, None
<WizardConfirmDialog>
message : ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
<WizardTOSDialog>
message : ''
size_hint: 1, 1
ScrollView:
size_hint: 1, 1
TextInput:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.minimum_height
text: root.message
disabled: True
<WizardEmailDialog>
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: 'Please enter your email address'
WizardTextInput:
id: email
on_text: Clock.schedule_once(root.on_text)
multiline: False
on_text_validate: Clock.schedule_once(root.on_enter)
<WizardKnownOTPDialog>
message : ''
message2: ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
WizardTextInput:
id: otp
on_text: Clock.schedule_once(root.on_text)
multiline: False
on_text_validate: Clock.schedule_once(root.on_enter)
Widget
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message2
Widget
size_hint: 1, 1
height: '48sp'
BoxLayout:
orientation: 'horizontal'
WizardButton:
id: cb
text: _('Request new secret')
on_release: root.request_new_secret()
size_hint: 1, None
WizardButton:
id: abort
text: _('Abort creation')
on_release: root.abort_wallet_creation()
size_hint: 1, None
<WizardNewOTPDialog>
message : ''
message2 : ''
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
QRCodeWidget:
id: qr
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message2
WizardTextInput:
id: otp
on_text: Clock.schedule_once(root.on_text)
multiline: False
on_text_validate: Clock.schedule_once(root.on_enter)
<MButton@Button>:
size_hint: 1, None
height: '33dp'
on_release:
self.parent.update_amount(self.text)
<WordButton@Button>:
size_hint: None, None
padding: '5dp', '5dp'
text_size: None, self.height
width: self.texture_size[0]
height: '30dp'
on_release:
self.parent.new_word(self.text)
<SeedButton@Button>:
height: dp(100)
border: 4, 4, 4, 4
halign: 'justify'
valign: 'top'
font_size: '18dp'
text_size: self.width - dp(24), self.height - dp(12)
color: .1, .1, .1, 1
background_normal: 'atlas://gui/kivy/theming/light/white_bg_round_top'
background_down: self.background_normal
size_hint_y: None
<SeedLabel@Label>:
font_size: '12sp'
text_size: self.width, None
size_hint: 1, None
height: self.texture_size[1]
halign: 'justify'
valign: 'middle'
border: 4, 4, 4, 4
<RestoreSeedDialog>
message: ''
word: ''
BigLabel:
text: "ENTER YOUR SEED PHRASE"
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input_seed
text: ''
on_text: Clock.schedule_once(root.on_text)
on_release: root.options_dialog()
SeedLabel:
text: root.message
BoxLayout:
id: suggestions
height: '35dp'
size_hint: 1, None
new_word: root.on_word
BoxLayout:
id: line1
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
MButton:
text: 'Q'
MButton:
text: 'W'
MButton:
text: 'E'
MButton:
text: 'R'
MButton:
text: 'T'
MButton:
text: 'Y'
MButton:
text: 'U'
MButton:
text: 'I'
MButton:
text: 'O'
MButton:
text: 'P'
BoxLayout:
id: line2
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
Widget:
size_hint: 0.5, None
height: '33dp'
MButton:
text: 'A'
MButton:
text: 'S'
MButton:
text: 'D'
MButton:
text: 'F'
MButton:
text: 'G'
MButton:
text: 'H'
MButton:
text: 'J'
MButton:
text: 'K'
MButton:
text: 'L'
Widget:
size_hint: 0.5, None
height: '33dp'
BoxLayout:
id: line3
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
Widget:
size_hint: 1, None
MButton:
text: 'Z'
MButton:
text: 'X'
MButton:
text: 'C'
MButton:
text: 'V'
MButton:
text: 'B'
MButton:
text: 'N'
MButton:
text: 'M'
MButton:
text: ' '
MButton:
text: '<'
<AddXpubDialog>
title: ''
message: ''
BigLabel:
text: root.title
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input
text: ''
on_text: Clock.schedule_once(root.check_text)
SeedLabel:
text: root.message
GridLayout
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
IconButton:
id: scan
height: '48sp'
on_release: root.scan_xpub()
icon: 'atlas://gui/kivy/theming/light/camera'
size_hint: 1, None
WizardButton:
text: _('Paste')
on_release: root.do_paste()
WizardButton:
text: _('Clear')
on_release: root.do_clear()
<ShowXpubDialog>
xpub: ''
message: _('Here is your master public key. Share it with your cosigners.')
BigLabel:
text: "MASTER PUBLIC KEY"
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input
text: root.xpub
SeedLabel:
text: root.message
GridLayout
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
text: _('QR code')
on_release: root.do_qr()
WizardButton:
text: _('Copy')
on_release: root.do_copy()
WizardButton:
text: _('Share')
on_release: root.do_share()
<ShowSeedDialog>
spacing: '12dp'
value: 'next'
BigLabel:
text: "PLEASE WRITE DOWN YOUR SEED PHRASE"
GridLayout:
id: grid
cols: 1
pos_hint: {'center_y': .5}
size_hint_y: None
height: self.minimum_height
orientation: 'vertical'
spacing: '12dp'
SeedButton:
text: root.seed_text
on_release: root.options_dialog()
SeedLabel:
text: root.message
<LineDialog>
BigLabel:
text: root.title
SeedLabel:
text: root.message
TextInput:
id: passphrase_input
multiline: False
size_hint: 1, None
height: '27dp'
SeedLabel:
text: root.warning
''')
class WizardDialog(EventsDialog):
''' Abstract dialog to be used as the base for all Create Account Dialogs
'''
crcontent = ObjectProperty(None)
def __init__(self, wizard, **kwargs):
super(WizardDialog, self).__init__()
self.wizard = wizard
self.ids.back.disabled = not wizard.can_go_back()
self.app = App.get_running_app()
self.run_next = kwargs['run_next']
_trigger_size_dialog = Clock.create_trigger(self._size_dialog)
Window.bind(size=_trigger_size_dialog,
rotation=_trigger_size_dialog)
_trigger_size_dialog()
self._on_release = False
def _size_dialog(self, dt):
app = App.get_running_app()
if app.ui_mode[0] == 'p':
self.size = Window.size
else:
#tablet
if app.orientation[0] == 'p':
#portrait
self.size = Window.size[0]/1.67, Window.size[1]/1.4
else:
self.size = Window.size[0]/2.5, Window.size[1]
def add_widget(self, widget, index=0):
if not self.crcontent:
super(WizardDialog, self).add_widget(widget)
else:
self.crcontent.add_widget(widget, index=index)
def on_dismiss(self):
app = App.get_running_app()
if app.wallet is None and not self._on_release:
app.stop()
def get_params(self, button):
return (None,)
def on_release(self, button):
self._on_release = True
self.close()
if not button:
self.parent.dispatch('on_wizard_complete', None)
return
if button is self.ids.back:
self.wizard.go_back()
return
params = self.get_params(button)
self.run_next(*params)
class WizardMultisigDialog(WizardDialog):
def get_params(self, button):
m = self.ids.m.value
n = self.ids.n.value
return m, n
class WizardOTPDialogBase(WizardDialog):
def get_otp(self):
otp = self.ids.otp.text
if len(otp) != 6:
return
try:
return int(otp)
except:
return
def on_text(self, dt):
self.ids.next.disabled = self.get_otp() is None
def on_enter(self, dt):
# press next
next = self.ids.next
if not next.disabled:
next.dispatch('on_release')
class WizardKnownOTPDialog(WizardOTPDialogBase):
def __init__(self, wizard, **kwargs):
WizardOTPDialogBase.__init__(self, wizard, **kwargs)
self.message = _("This wallet is already registered with TrustedCoin. To finalize wallet creation, please enter your Google Authenticator Code.")
self.message2 =_("If you have lost your Google Authenticator account, you can request a new secret. You will need to retype your seed.")
self.request_new = False
def get_params(self, button):
return (self.get_otp(), self.request_new)
def request_new_secret(self):
self.request_new = True
self.on_release(True)
def abort_wallet_creation(self):
self._on_release = True
os.unlink(self.wizard.storage.path)
self.wizard.terminate()
self.dismiss()
class WizardNewOTPDialog(WizardOTPDialogBase):
def __init__(self, wizard, **kwargs):
WizardOTPDialogBase.__init__(self, wizard, **kwargs)
otp_secret = kwargs['otp_secret']
uri = "otpauth://totp/%s?secret=%s"%('trustedcoin.com', otp_secret)
self.message = "Please scan the following QR code in Google Authenticator. You may also use the secret key: %s"%otp_secret
self.message2 = _('Then, enter your Google Authenticator code:')
self.ids.qr.set_data(uri)
def get_params(self, button):
return (self.get_otp(), False)
class WizardTOSDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.ids.next.text = 'Accept'
self.ids.next.disabled = False
self.message = kwargs['tos']
self.message2 = _('Enter your email address:')
class WizardEmailDialog(WizardDialog):
def get_params(self, button):
return (self.ids.email.text,)
def on_text(self, dt):
self.ids.next.disabled = not is_valid_email(self.ids.email.text)
def on_enter(self, dt):
# press next
next = self.ids.next
if not next.disabled:
next.dispatch('on_release')
class WizardConfirmDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(WizardConfirmDialog, self).__init__(wizard, **kwargs)
self.message = kwargs.get('message', '')
self.value = 'ok'
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(app.dispatch, 'on_back')
def get_params(self, button):
return (True,)
class WizardChoiceDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(WizardChoiceDialog, self).__init__(wizard, **kwargs)
self.message = kwargs.get('message', '')
choices = kwargs.get('choices', [])
layout = self.ids.choices
layout.bind(minimum_height=layout.setter('height'))
for action, text in choices:
l = WizardButton(text=text)
l.action = action
l.height = '48dp'
l.root = self
layout.add_widget(l)
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(app.dispatch, 'on_back')
def get_params(self, button):
return (button.action,)
class LineDialog(WizardDialog):
title = StringProperty('')
message = StringProperty('')
warning = StringProperty('')
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.ids.next.disabled = False
def get_params(self, b):
return (self.ids.passphrase_input.text,)
class ShowSeedDialog(WizardDialog):
seed_text = StringProperty('')
message = _("If you forget your PIN or lose your device, your seed phrase will be the only way to recover your funds.")
ext = False
def __init__(self, wizard, **kwargs):
super(ShowSeedDialog, self).__init__(wizard, **kwargs)
self.seed_text = kwargs['seed_text']
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(self.ids.back.dispatch, 'on_release')
def options_dialog(self):
from .seed_options import SeedOptionsDialog
def callback(status):
self.ext = status
d = SeedOptionsDialog(self.ext, callback)
d.open()
def get_params(self, b):
return (self.ext,)
class WordButton(Button):
pass
class WizardButton(Button):
pass
class RestoreSeedDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(RestoreSeedDialog, self).__init__(wizard, **kwargs)
self._test = kwargs['test']
from electrum_ltc.mnemonic import Mnemonic
from electrum_ltc.old_mnemonic import words as old_wordlist
self.words = set(Mnemonic('en').wordlist).union(set(old_wordlist))
self.ids.text_input_seed.text = test_seed if is_test else ''
self.message = _('Please type your seed phrase using the virtual keyboard.')
self.title = _('Enter Seed')
self.ext = False
def options_dialog(self):
from .seed_options import SeedOptionsDialog
def callback(status):
self.ext = status
d = SeedOptionsDialog(self.ext, callback)
d.open()
def get_suggestions(self, prefix):
for w in self.words:
if w.startswith(prefix):
yield w
def on_text(self, dt):
self.ids.next.disabled = not bool(self._test(self.get_text()))
text = self.ids.text_input_seed.text
if not text:
last_word = ''
elif text[-1] == ' ':
last_word = ''
else:
last_word = text.split(' ')[-1]
enable_space = False
self.ids.suggestions.clear_widgets()
suggestions = [x for x in self.get_suggestions(last_word)]
if last_word in suggestions:
b = WordButton(text=last_word)
self.ids.suggestions.add_widget(b)
enable_space = True
for w in suggestions:
if w != last_word and len(suggestions) < 10:
b = WordButton(text=w)
self.ids.suggestions.add_widget(b)
i = len(last_word)
p = set()
for x in suggestions:
if len(x)>i: p.add(x[i])
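        # p now holds every letter that can legally follow the typed prefix according
        # to the wordlist; the loop below greys out keys outside p (when a prefix is
        # present), and the space key is enabled only once the prefix is a complete word.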
for line in [self.ids.line1, self.ids.line2, self.ids.line3]:
for c in line.children:
if isinstance(c, Button):
if c.text in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
c.disabled = (c.text.lower() not in p) and bool(last_word)
elif c.text == ' ':
c.disabled = not enable_space
def on_word(self, w):
text = self.get_text()
words = text.split(' ')
words[-1] = w
text = ' '.join(words)
self.ids.text_input_seed.text = text + ' '
self.ids.suggestions.clear_widgets()
def get_text(self):
ti = self.ids.text_input_seed
return ' '.join(ti.text.strip().split())
def update_text(self, c):
c = c.lower()
text = self.ids.text_input_seed.text
if c == '<':
text = text[:-1]
else:
text += c
self.ids.text_input_seed.text = text
def on_parent(self, instance, value):
if value:
tis = self.ids.text_input_seed
tis.focus = True
#tis._keyboard.bind(on_key_down=self.on_key_down)
self._back = _back = partial(self.ids.back.dispatch,
'on_release')
app = App.get_running_app()
def on_key_down(self, keyboard, keycode, key, modifiers):
if keycode[0] in (13, 271):
self.on_enter()
return True
def on_enter(self):
#self._remove_keyboard()
# press next
next = self.ids.next
if not next.disabled:
next.dispatch('on_release')
def _remove_keyboard(self):
tis = self.ids.text_input_seed
if tis._keyboard:
tis._keyboard.unbind(on_key_down=self.on_key_down)
tis.focus = False
def get_params(self, b):
return (self.get_text(), False, self.ext)
class ConfirmSeedDialog(RestoreSeedDialog):
def get_params(self, b):
return (self.get_text(),)
def options_dialog(self):
pass
class ShowXpubDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.xpub = kwargs['xpub']
self.ids.next.disabled = False
def do_copy(self):
self.app._clipboard.copy(self.xpub)
def do_share(self):
self.app.do_share(self.xpub, _("Master Public Key"))
def do_qr(self):
from .qr_dialog import QRDialog
popup = QRDialog(_("Master Public Key"), self.xpub, True)
popup.open()
class AddXpubDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.is_valid = kwargs['is_valid']
self.title = kwargs['title']
self.message = kwargs['message']
self.allow_multi = kwargs.get('allow_multi', False)
def check_text(self, dt):
self.ids.next.disabled = not bool(self.is_valid(self.get_text()))
def get_text(self):
ti = self.ids.text_input
return ti.text.strip()
def get_params(self, button):
return (self.get_text(),)
def scan_xpub(self):
def on_complete(text):
if self.allow_multi:
self.ids.text_input.text += text + '\n'
else:
self.ids.text_input.text = text
self.app.scan_qr(on_complete)
def do_paste(self):
self.ids.text_input.text = test_xpub if is_test else self.app._clipboard.paste()
def do_clear(self):
self.ids.text_input.text = ''
class InstallWizard(BaseWizard, Widget):
'''
events::
        `on_wizard_complete` Fired when the wizard is done creating/restoring
        wallet(s).
'''
__events__ = ('on_wizard_complete', )
def on_wizard_complete(self, wallet):
"""overriden by main_window"""
pass
def waiting_dialog(self, task, msg, on_finished=None):
'''Perform a blocking task in the background by running the passed
method in a thread.
'''
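        # Call shape, for illustration only (`some_long_task` is a placeholder name,
        # not something defined in this module):
        #
        #   self.waiting_dialog(some_long_task, _('Please wait...'),
        #                       on_finished=lambda: None)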
def target():
# run your threaded function
try:
task()
except Exception as err:
self.show_error(str(err))
# on completion hide message
Clock.schedule_once(lambda dt: app.info_bubble.hide(now=True), -1)
if on_finished:
Clock.schedule_once(lambda dt: on_finished(), -1)
app = App.get_running_app()
app.show_info_bubble(
text=msg, icon='atlas://gui/kivy/theming/light/important',
pos=Window.center, width='200sp', arrow_pos=None, modal=True)
t = threading.Thread(target = target)
t.start()
def terminate(self, **kwargs):
self.dispatch('on_wizard_complete', self.wallet)
def choice_dialog(self, **kwargs):
choices = kwargs['choices']
if len(choices) > 1:
WizardChoiceDialog(self, **kwargs).open()
else:
f = kwargs['run_next']
f(choices[0][0])
def multisig_dialog(self, **kwargs): WizardMultisigDialog(self, **kwargs).open()
def show_seed_dialog(self, **kwargs): ShowSeedDialog(self, **kwargs).open()
def line_dialog(self, **kwargs): LineDialog(self, **kwargs).open()
def confirm_seed_dialog(self, **kwargs):
kwargs['title'] = _('Confirm Seed')
kwargs['message'] = _('Please retype your seed phrase, to confirm that you properly saved it')
ConfirmSeedDialog(self, **kwargs).open()
def restore_seed_dialog(self, **kwargs):
RestoreSeedDialog(self, **kwargs).open()
def confirm_dialog(self, **kwargs):
WizardConfirmDialog(self, **kwargs).open()
def tos_dialog(self, **kwargs):
WizardTOSDialog(self, **kwargs).open()
def email_dialog(self, **kwargs):
WizardEmailDialog(self, **kwargs).open()
def otp_dialog(self, **kwargs):
if kwargs['otp_secret']:
WizardNewOTPDialog(self, **kwargs).open()
else:
WizardKnownOTPDialog(self, **kwargs).open()
def add_xpub_dialog(self, **kwargs):
kwargs['message'] += ' ' + _('Use the camera button to scan a QR code.')
AddXpubDialog(self, **kwargs).open()
def add_cosigner_dialog(self, **kwargs):
kwargs['title'] = _("Add Cosigner") + " %d"%kwargs['index']
kwargs['message'] = _('Please paste your cosigners master public key, or scan it using the camera button.')
AddXpubDialog(self, **kwargs).open()
def show_xpub_dialog(self, **kwargs): ShowXpubDialog(self, **kwargs).open()
def show_message(self, msg): self.show_error(msg)
def show_error(self, msg):
app = App.get_running_app()
Clock.schedule_once(lambda dt: app.show_error(msg))
def request_password(self, run_next, force_disable_encrypt_cb=False):
def on_success(old_pin, pin):
assert old_pin is None
run_next(pin, False)
def on_failure():
self.show_error(_('PIN mismatch'))
self.run('request_password', run_next)
popup = PasswordDialog()
app = App.get_running_app()
popup.init(app, None, _('Choose PIN code'), on_success, on_failure, is_change=2)
popup.open()
def action_dialog(self, action, run_next):
f = getattr(self, action)
f()
|
py | 1a54cacbe7f737a78d13db54e9ad0302c6d923ee | from tkinter import *
from tkinter import messagebox
from random import choice, randint, shuffle
import pyperclip
import json
# ---------------------------- PASSWORD GENERATOR ------------------------------- #
#Password Generator Project
def generate_password():
letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
symbols = ['!', '#', '$', '%', '&', '(', ')', '*', '+']
password_letters = [choice(letters) for _ in range(randint(8, 10))]
password_symbols = [choice(symbols) for _ in range(randint(2, 4))]
password_numbers = [choice(numbers) for _ in range(randint(2, 4))]
password_list = password_letters + password_symbols + password_numbers
shuffle(password_list)
password = "".join(password_list)
password_entry.insert(0, password)
pyperclip.copy(password)
# ---------------------------- SAVE PASSWORD ------------------------------- #
def save():
website = website_entry.get()
email = email_entry.get()
password = password_entry.get()
new_data = {
website: {
"email": email,
"password": password,
}
}
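    # Shape written to data.json (illustrative values only):
    #   {"example.com": {"email": "user@example.com", "password": "aB3$kQ9&x1"}}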
if len(website) == 0 or len(password) == 0:
messagebox.showinfo(title="Oops", message="Please make sure you haven't left any fields empty.")
else:
try:
with open("Day30/data.json", "r") as data_file:
#Reading old data
data = json.load(data_file)
except FileNotFoundError:
with open("Day30/data.json", "w") as data_file:
json.dump(new_data, data_file, indent=4)
else:
#Updating old data with new data
data.update(new_data)
with open("Day30/data.json", "w") as data_file:
#Saving updated data
json.dump(data, data_file, indent=4)
finally:
website_entry.delete(0, END)
password_entry.delete(0, END)
# ---------------------------- FIND PASSWORD ------------------------------- #
def find_password():
website = website_entry.get()
try:
with open("Day30/data.json") as data_file:
data = json.load(data_file)
except FileNotFoundError:
messagebox.showinfo(title="Error", message="No Data File Found.")
else:
if website in data:
email = data[website]["email"]
password = data[website]["password"]
messagebox.showinfo(title=website, message=f"Email: {email}\nPassword: {password}")
else:
messagebox.showinfo(title="Error", message=f"No details for {website} exists.")
# ---------------------------- UI SETUP ------------------------------- #
window = Tk()
window.title("Password Manager")
window.config(padx=50, pady=50)
canvas = Canvas(height=200, width=200)
logo_img = PhotoImage(file="Day30/logo.png")
canvas.create_image(100, 100, image=logo_img)
canvas.grid(row=0, column=1)
#Labels
website_label = Label(text="Website:")
website_label.grid(row=1, column=0)
email_label = Label(text="Email/Username:")
email_label.grid(row=2, column=0)
password_label = Label(text="Password:")
password_label.grid(row=3, column=0)
#Entries
website_entry = Entry(width=21)
website_entry.grid(row=1, column=1)
website_entry.focus()
email_entry = Entry(width=35)
email_entry.grid(row=2, column=1, columnspan=2)
email_entry.insert(0, "[email protected]")
password_entry = Entry(width=21)
password_entry.grid(row=3, column=1)
# Buttons
search_button = Button(text="Search", width=13, command=find_password)
search_button.grid(row=1, column=2)
generate_password_button = Button(text="Generate Password", command=generate_password)
generate_password_button.grid(row=3, column=2)
add_button = Button(text="Add", width=36, command=save)
add_button.grid(row=4, column=1, columnspan=2)
window.mainloop() |
py | 1a54cbcbdfc62370450759b12b43d571b1290356 | from urllib.request import urlopen, Request
from urllib.parse import urlparse
from bs4 import BeautifulSoup
import re
import socket
import smtplib
import dns.resolver
import csv
import multiprocessing
TIMEOUT = 120
in_path = "alchemist_accelerator.csv"
out_path = "alchemist_accelerator_result.csv"
headers = {"User-Agent": "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Trident/5.0)"}
def getinternalLinks(bsObj, includeUrl):
includeUrl = urlparse(includeUrl).scheme + "://" + urlparse(includeUrl).netloc
internalLinks = []
for link in bsObj.findAll("a", href=re.compile("^(/|.*" + includeUrl + ")")):
if link.attrs['href'] is not None:
if link.attrs['href'].startswith("/"):
internalLinks.append(includeUrl + link.attrs['href'])
else:
internalLinks.append(link.attrs['href'])
return internalLinks
def getAllInternalLinks(siteUrl):
try:
req = Request(siteUrl, headers=headers)
html = urlopen(req, timeout=20)
domain = urlparse(siteUrl).scheme + "://" + urlparse(siteUrl).netloc
bsObj = BeautifulSoup(html, "html.parser")
internalLinks = getinternalLinks(bsObj, domain)
for link in internalLinks:
if link not in allIntLinks:
allIntLinks.add(link)
print(link)
except:
pass
def verify_email(email):
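    # Checks deliverability by resolving the domain's MX record, connecting to that
    # mail host over SMTP and issuing RCPT TO for the address; a 250 reply is
    # treated as a valid mailbox.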
records = dns.resolver.query(email.split('@')[-1], 'MX')
mxRecord = records[0].exchange
mxRecord = str(mxRecord)
host = socket.gethostname()
server = smtplib.SMTP()
server.set_debuglevel(0)
server.connect(mxRecord)
server.helo(host)
server.mail('[email protected]')
code, message = server.rcpt(str(email))
server.quit()
if code == 250:
return True
else:
return False
def extractEmails(allIntLinks, return_dict):
for intLink in allIntLinks:
try:
req = Request(intLink, headers=headers)
html = urlopen(req, timeout=20).read().decode("utf-8")
regex = r"([a-zA-Z0-9_.+-]+@[a-pr-zA-PRZ0-9-]+\.[a-zA-Z0-9-.]+)"
for email in re.findall(regex, html):
email = email.lower()
if email not in allEmails:
if not (email.endswith(('.', '.png', '.jpg', '.JPG', '.jpeg', '.gif', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.x', '.webm', '.webp', '.svg', "example.com", "email.com", "yourdomain.com", "yourself.com", "domain.com")) or "sentry" in email):
# if verify_email(email): # takes a long time
allEmails.add(email)
except:
pass
return_dict[0] = "\n".join(list(allEmails))
manager = multiprocessing.Manager()
return_dict = manager.dict()
with open(in_path, "r") as f:
fieldnames = ['portfolio', 'website', 'year', 'summary']
reader = csv.DictReader(f, fieldnames=fieldnames)
next(reader)
with open(out_path, "w") as f1:
fieldnames1 = ['portfolio', 'website', 'year', 'emails', 'summary']
writer = csv.DictWriter(f1, fieldnames=fieldnames1)
writer.writeheader()
idx = 1
for row in reader:
print(str(idx) + ". " + row['portfolio'])
allIntLinks = set()
allEmails = set()
return_dict[0] = ""
print(row['website'])
if len(row['website']):
allIntLinks.add(row['website'])
getAllInternalLinks(row['website'])
p = multiprocessing.Process(target=extractEmails, args=(allIntLinks, return_dict))
p.start()
p.join(TIMEOUT)
if p.is_alive():
print("Time out!")
p.terminate()
emails = return_dict.values()[0]
row['emails'] = emails
row['portfolio'] = row['portfolio'].strip()
writer.writerow(row)
f1.flush()
print(emails)
idx += 1 |
py | 1a54ccfba8c526e2ed32e521e8b3670e353e2e5a | #!/usr/bin/env python3
import subprocess
import os
import tempfile
import shutil
import codecs
import sys
import time
import json
def makeDirs(path) :
os.makedirs(path, exist_ok = True)
def writeFile(fileName, content) :
file = codecs.open(fileName, "w", "utf-8")
file.write(str(content))
file.close()
def executeCommand(*args) :
process = subprocess.Popen(
args,
stdin = subprocess.PIPE,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
universal_newlines=True
)
cliStdout, cliStderr = process.communicate(input = None)
returncode = process.poll()
return (cliStdout + "\n" + cliStderr).strip()
def decodeJson(content) :
try :
return json.loads(content)
except Exception as e:
return None
def die(message) :
print(message + "\n")
sys.exit(1)
def getNodePort(nodeIndex) :
return 20000 + nodeIndex
def getNodeRpcPort(nodeIndex) :
return 30000 + nodeIndex
def getNodeListenAddress(nodeIndex) :
return '127.0.0.1:%d' % (getNodePort(nodeIndex))
def getNodeAlias(nodeIndex) :
return 'node%d' % (nodeIndex)
def checkError(result) :
if result['error'] :
print('executeCli error: %s, command: %s' % ( result['output'], result['command'] ))
# command utility functions
def getUnspent(node, address) :
if address == None :
return -1
json = '["' + address + '"]'
result = node.executeCli('listunspent', 0, 999999, json)
if result['error'] :
print('getUnspent error: ' + result['output'])
return -1
amount = 0
for item in result['json'] :
amount += item['amount']
return amount
def getBlockCount(node) :
result = node.executeCli('getblockcount')
if result['error'] :
print('getBlockCount error: ' + result['output'])
return None
return result['json']
class Node :
def __init__(self, app, nodeIndex) :
self._app = app
self._nodeIndex = nodeIndex
self._nodePath = os.path.join(self._app.getRootPath(), 'node%d' % (nodeIndex))
        self._rpcUser = 'rpcuser%d' % (nodeIndex)
        self._rpcPassword = 'rpcpassword%d' % (nodeIndex)
self._iqcashd = self._app.getIqcashd()
self._iqcashCli = self._app.getIqcashCli()
self._daemonProcess = None
def createDataDir(self, nodeCount, masterNodePrivateKey = None) :
makeDirs(self._nodePath)
writeFile(
os.path.join(self._nodePath, 'iqcash.conf'),
self._generateIqcashConf(nodeCount, masterNodePrivateKey)
)
def startNode(self) :
self._daemonProcess = subprocess.Popen([ self._iqcashd, '-datadir=' + self._nodePath, '-daemon' ])
def stopNode(self) :
self.executeCli('stop')
time.sleep(0.1)
if self._daemonProcess != None :
self._daemonProcess.kill()
time.sleep(2)
def executeCli(self, *args) :
normalizedArgs = []
for arg in args :
normalizedArgs.append(str(arg))
output = executeCommand(self._iqcashCli, '-datadir=' + self._nodePath, *normalizedArgs)
command = ' '.join(normalizedArgs)
if output.find('error') >= 0 :
return {
'error' : True,
'output' : output,
'json' : None,
'command' : command,
}
else :
json = decodeJson(output)
if json == None :
json = output
return {
'error' : False,
'output' : output,
'json' : json,
'command' : command,
}
def waitNodeStarting(self, timeoutSeconds = 15) :
startTime = time.time()
while time.time() - startTime < timeoutSeconds :
if getBlockCount(self) != None :
return True
time.sleep(1)
print('waitNodeStarting failed')
return False
def _generateIqcashConf(self, nodeCount, masterNodePrivateKey) :
result = ""
result += "regtest=1\n"
result += "server=1\n"
result += "debug=1\n"
result += "debug=net\n"
result += "debug=iqcash\n"
result += "rpcuser=%s\n" % (self._rpcUser)
result += "rpcpassword=%s\n" % (self._rpcPassword)
result += "port=%d\n" % (getNodePort(self._nodeIndex))
result += "rpcport=%d\n" % (getNodeRpcPort(self._nodeIndex))
result += "listenonion=0\n"
result += "txindex=1\n"
result += "externalip=%s\n" % getNodeListenAddress(self._nodeIndex)
result += "budgetvotemode=suggest\n"
for i in range(nodeCount) :
if i == self._nodeIndex :
continue
result += "addnode=%s\n" % getNodeListenAddress(i)
if masterNodePrivateKey != None :
result += "masternode=1\n"
result += "masternodeprivkey=%s\n" % (masterNodePrivateKey)
result += "masternodeaddr=%s\n" % getNodeListenAddress(i)
return result
def writeMasterNodeConfig(self, config) :
writeFile(
os.path.join(self._nodePath, 'regtest', 'masternode.conf'),
config
)
def isMasterNodeSynced(self) :
json = self.executeCli('mnsync', 'status')['json']
if json == None :
return False
if json['RequestedMasternodeAssets'] > 100 :
return True
return False
def dataDirExist(self) :
return os.path.exists(self._nodePath)
class Application :
def __init__(self) :
self._nodeCount = 4
self._nodeList = []
self._budgetCycle = 864
self._removeFolderAfterExit = not True
def run(self) :
self._setup()
try :
self._doRun()
finally :
self._cleanup()
def _setup(self) :
self._rootPath = self._makeRootPath()
makeDirs(self._rootPath)
print('Root path: %s' % (self._rootPath))
self._iqcashd = os.getenv('IQCASHD', None)
if not self._iqcashd :
die('Undefined IQCASHD')
self._iqcashCli = os.getenv('IQCASHCLI', None)
if not self._iqcashCli :
die('Undefined IQCASH')
print('iqcashd: %s' % (self._iqcashd))
def _cleanup(self) :
self._stopAllNodes()
if self._removeFolderAfterExit :
shutil.rmtree(self._rootPath)
def _doRun(self) :
self._createNodes()
node = self._nodeList[0]
self._mineBlocks(node, 200)
address = node.executeCli('getnewaddress')['json']
address = node.executeCli('getnewaddress')['json']
print('Before budget: ' + str(getUnspent(node, address)))
blockCount = getBlockCount(node)
superBlock = blockCount - blockCount % self._budgetCycle + self._budgetCycle
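        # Worked example (illustrative numbers): with blockCount = 300 and a budget
        # cycle of 864, superBlock = 300 - 300 % 864 + 864 = 864, i.e. the next
        # cycle boundary after the current height.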
wtx = node.executeCli('preparebudget', 'ppp1', 'http://test1.com', 5, superBlock, address, 100)['json']
print('preparebudget: ' + wtx)
self._mineBlocks(node, 100)
hash = node.executeCli('submitbudget', 'ppp1', 'http://test1.com', 5, superBlock, address, 100, wtx)['json']
print('submitbudget: ' + hash)
self._mineBlocks(node, 100)
result = node.executeCli('getbudgetinfo')
for i in range(1, self._nodeCount) :
result = self._nodeList[i].executeCli('mnbudgetvote', 'local', hash, 'yes')
print(result['output'])
for i in range(self._nodeCount) :
masterNode = self._nodeList[i]
blockCount = getBlockCount(node)
blocksToMine = self._budgetCycle - blockCount % self._budgetCycle - 1
if blocksToMine == 0 :
blocksToMine = self._budgetCycle
blocksToMine = blocksToMine - 1
self._mineBlocks(masterNode, blocksToMine)
previousBlockCount = getBlockCount(masterNode)
if previousBlockCount == None :
continue
self._mineBlocks(masterNode, 1)
self._mineBlocks(masterNode, 1)
self._mineBlocks(masterNode, 1)
newBlockCount = getBlockCount(masterNode)
print(
'During super block: previousBlockCount=%d newBlockCount=%d expect=%d'
% (previousBlockCount, newBlockCount, previousBlockCount + 3)
)
self._mineBlocks(masterNode, 100)
#print(node.executeCli('getbudgetinfo')['output'])
print('After budget: ' + str(getUnspent(node, address)))
def _mineBlocks(self, node, count) :
node.executeCli('setgenerate', 'true', count)
self._syncAllNodes()
def _createNodes(self) :
nodesExist = True
for i in range(0, self._nodeCount) :
if not Node(self, i).dataDirExist() :
nodesExist = False
break
if nodesExist :
print("All nodes data dirs exist, resuming")
for i in range(0, self._nodeCount) :
node = Node(self, i)
self._nodeList.append(node)
else :
controllingNode = self._createControllingNode()
for i in range(1, self._nodeCount) :
node = Node(self, i)
self._nodeList.append(node)
key = controllingNode['masterNodePrivateKeyList'][i - 1]
node.createDataDir(self._nodeCount, key)
for node in reversed(self._nodeList) :
node.startNode()
node.waitNodeStarting()
for node in self._nodeList :
self._mineBlocks(node, 200)
time.sleep(3)
self._syncMasterNodes()
for i in range(1, self._nodeCount) :
result = self._nodeList[0].executeCli('startmasternode', 'alias', 'false', getNodeAlias(i))
print(result['output'])
def _createControllingNode(self) :
node = Node(self, 0)
self._nodeList.append(node)
node.createDataDir(self._nodeCount)
node.startNode()
node.waitNodeStarting()
self._mineBlocks(node, 200)
masterNodePrivateKeyList = []
masterNodeConfig = ''
for i in range(1, self._nodeCount) :
key = node.executeCli('masternode', 'genkey')['json']
masterNodePrivateKeyList.append(key)
nodeName = getNodeAlias(i)
# Intended to generate address twice
address = node.executeCli('getnewaddress')['json']
address = node.executeCli('getnewaddress')['json']
result = node.executeCli('sendtoaddress', address, 10000)
#print(getUnspent(node, address))
checkError(result)
tx = result['json']
outputs = node.executeCli('masternode', 'outputs')
outputsList = outputs['json']
txIndex = 0
for o in outputsList :
if o['txhash'] == tx :
txIndex = o['outputidx']
break
masterNodeConfig += "%s %s %s %s %s\n" % (nodeName, getNodeListenAddress(i), key, tx, str(txIndex))
self._mineBlocks(node, 100)
node.writeMasterNodeConfig(masterNodeConfig)
node.stopNode()
print('Created controlling node')
return {
'node' : node,
'masterNodePrivateKeyList' : masterNodePrivateKeyList,
}
def _stopAllNodes(self) :
for node in self._nodeList :
node.stopNode()
def _syncAllNodes(self, timeoutSeconds = 60) :
if not self._syncBlocks(timeoutSeconds) :
return False
if not self._syncMemPools(timeoutSeconds) :
return False
return True
def _syncBlocks(self, timeoutSeconds) :
startTime = time.time()
printError = True
while time.time() - startTime < timeoutSeconds :
tips = []
for node in self._nodeList :
result = node.executeCli('getbestblockhash')
tips.append(result['json'])
if tips[-1] == None :
if printError :
print('getbestblockhash error: %s' % (result['output']))
printError = False
if tips == [ tips[0] ]*len(tips):
return True
time.sleep(1)
print(tips)
print('_syncBlocks failed')
return False
def _syncMemPools(self, timeoutSeconds) :
startTime = time.time()
while time.time() - startTime < timeoutSeconds :
pool = set(self._nodeList[0].executeCli('getrawmempool')['json'])
matchedCount = 1
for i in range(1, len(self._nodeList)):
if set(self._nodeList[i].executeCli('getrawmempool')['json']) == pool :
matchedCount = matchedCount + 1
if matchedCount == len(self._nodeList):
return True
time.sleep(1)
print('_syncMemPools failed')
return False
def _syncMasterNodes(self, timeoutSeconds = 60) :
return # it never works, don't waste time to try
startTime = time.time()
while timeoutSeconds < 0 or time.time() - startTime < timeoutSeconds :
allSynced = True
for i in range(1, len(self._nodeList)):
if not self._nodeList[i].isMasterNodeSynced() :
#print('MN %d status %s' % (i, self._nodeList[i].executeCli('mnsync', 'status')['output']))
allSynced = False
break
if allSynced :
return True
time.sleep(1)
print('_syncMasterNodes failed')
return False
def _makeRootPath(self) :
return '/tmp/testbudget/'
return tempfile.mkdtemp(
suffix = None,
prefix = 'testbudget_',
dir = None
)
def getRootPath(self) :
return self._rootPath
def getIqcashd(self) :
return self._iqcashd
def getIqcashCli(self) :
return self._iqcashCli
if __name__ == '__main__':
Application().run()
|
py | 1a54d0aef058d4384006e969f75e4928f766ab7d | # -*- coding: utf-8 -*-
# Definition of item pipelines
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from w3lib.html import remove_tags, remove_tags_with_content
from scrapy.exceptions import DropItem
class RemoveTagsPipeline(object):
"""removing formatting tags (span, a, ...) from extracted paragraphs"""
def process_item(self, item, spider):
ps = [remove_tags(remove_tags_with_content(p, ('script', ))).strip().replace(u'\xa0', u' ')
for p in item['text']]
item['text'] = '\n'.join(ps)
# additional stripping for description
if item['description']:
item['description'] = item['description'].strip()
return item
class DropIfEmptyFieldPipeline(object):
def process_item(self, item, spider):
if not item['text']:
raise DropItem()
else:
return item
|
py | 1a54d0bf152d45b7863c9c5415cdeb50ee231d84 | import logging
import os
import time
import h5py
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
from implicit.datasets import _download
log = logging.getLogger("implicit")
URL = 'https://github.com/benfred/recommender_data/releases/download/v1.0/reddit.hdf5'
def get_reddit():
""" Returns the reddit dataset, downloading locally if necessary.
This dataset was released here:
https://www.reddit.com/r/redditdev/comments/dtg4j/want_to_help_reddit_build_a_recommender_a_public/
and contains 23M up/down votes from 44K users on 3.4M links.
    Returns a CSR matrix of (item, user, rating)."""
filename = os.path.join(_download.LOCAL_CACHE_DIR, "reddit.hdf5")
if not os.path.isfile(filename):
log.info("Downloading dataset to '%s'", filename)
_download.download_file(URL, filename)
else:
log.info("Using cached dataset at '%s'", filename)
with h5py.File(filename, 'r') as f:
m = f.get('item_user_ratings')
return csr_matrix((m.get('data'), m.get('indices'), m.get('indptr')))
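# Illustrative usage (assumes the download above succeeds; the orientation is the
# one stated in the docstring - rows are items, columns are users):
#
#   ratings = get_reddit()
#   print(ratings.shape, ratings.nnz)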
def generate_dataset(filename, outputfilename):
""" Generates a hdf5 reddit datasetfile from the raw datafiles found at:
https://www.reddit.com/r/redditdev/comments/dtg4j/want_to_help_reddit_build_a_recommender_a_public/
You shouldn't have to run this yourself, and can instead just download the
    output using the 'get_reddit' function.
"""
data = _read_dataframe(filename)
_hfd5_from_dataframe(data, outputfilename)
def _read_dataframe(filename):
""" Reads the original dataset TSV as a pandas dataframe """
# delay importing this to avoid another dependency
import pandas
# read in triples of user/artist/playcount from the input dataset
# get a model based off the input params
start = time.time()
log.debug("reading data from %s", filename)
data = pandas.read_table(filename, usecols=[0, 1, 3], names=['user', 'item', 'rating'])
# map each artist and user to a unique numeric value
data['user'] = data['user'].astype("category")
data['item'] = data['item'].astype("category")
# store as a CSR matrix
log.debug("read data file in %s", time.time() - start)
return data
def _hfd5_from_dataframe(data, outputfilename):
ratings = coo_matrix((data['rating'].astype(np.float32),
(data['item'].cat.codes.copy(),
data['user'].cat.codes.copy()))).tocsr()
print(repr(ratings))
print(repr(ratings.indices))
print(repr(ratings.indptr))
with h5py.File(outputfilename, "w") as f:
g = f.create_group('item_user_ratings')
g.create_dataset("data", data=ratings.data)
g.create_dataset("indptr", data=ratings.indptr)
g.create_dataset("indices", data=ratings.indices)
# Note: not saving itemid strings or userid strings here
# they are just salted hashes, and only lead to bloat/slowness for no benefit.
|
py | 1a54d0d99e5544ceef7b0e6ba2f33b7be474df61 | import numpy as np
from scipy import ndimage
import tifffile as tiff
import matplotlib.pyplot as plt
import pandas as pd
from enum import Enum
from skimage.transform import resize
# Worldview-3 - Panchromatic (3349, 3338): 400nm - 800nm
# Worldview-3 RGB (3350, 3338)
# Worldview-3 - 8 Multispectral bands (838, 835):
# Coastal: 400 - 450 nm (0, QGIS: 1, WV-3-Band-no:2) Red: 630 - 690 nm (4, QGIS: 5, WV-3-Band-no:6)
# Blue: 450 - 510 nm (1, QGIS: 2, WV-3-Band-no:3) Red Edge: 705 - 745 nm (5, QGIS: 6, WV-3-Band-no:7)
# Green: 510 - 580 nm (2, QGIS: 3, WV-3-Band-no:4) Near-IR1: 770 - 895 nm (6, QGIS: 7, WV-3-Band-no:8)
# Yellow: 585 - 625 nm (3, QGIS: 4, WV-3-Band-no:5) Near-IR2: 860 - 1040 nm (7, QGIS: 8, WV-3-Band-no:9)
# NIR - Near Infra Red: 750nm - 1400nm
# MIR - Mid Infra Red: 3000nm - 8000nm
# Worldview-3 - 8 SWIR bands (134, 133):
# SWIR-1: 1195 - 1225 nm SWIR-5: 2145 - 2185 nm
# SWIR-2: 1550 - 1590 nm SWIR-6: 2185 - 2225 nm
# SWIR-3: 1640 - 1680 nm SWIR-7: 2235 - 2285 nm
# SWIR-4: 1710 - 1750 nm SWIR-8: 2295 - 2365 nm
class WV3ms(Enum):
COASTAL = 0
BLUE = 1
GREEN = 2
YELLOW = 3
RED = 4
REDEDGE = 5
NEARIR1 = 6
NEARIR2 = 7
class WV3swir(Enum):
SWIR_1 = 0
SWIR_2 = 1
SWIR_3 = 2
SWIR_4 = 3
SWIR_5 = 4
SWIR_6 = 5
SWIR_7 = 6
SWIR_8 = 7
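# Example band access using the enums above (the image ID is a made-up placeholder;
# the path layout matches display() further down in this file):
#
#   m = tiff.imread('sixteen_band/{}_M.tif'.format('6010_0_0'))  # shape (8, 838, 835)
#   red = m[WV3ms.RED.value, :, :]                               # 630 - 690 nm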
CCCI_THRESHOLD_U = 0.5
CCCI_THRESHOLD_L = -4
FAUX_CCCI_THRESHOLD = 0.11
# CCCI_SWIR_THRESHOLD = 1.03
CCCI_SWIR_THRESHOLD = .94
NDWI_THRESHOLD = 0.07
NDVI_THRESHOLD = 0.07
def stretch_8bit(bands, lower_percent=2, higher_percent=98, depth=3):
# contrast enhancement as per QGIS Stretch to MinMax
# note that input image range is 0 .. 1
out = np.zeros_like(bands).astype(np.float32)
for i in range(depth):
a = 0
b = 1
if depth == 1:
c = np.percentile(bands[:, :], lower_percent)
d = np.percentile(bands[:, :], higher_percent)
t = a + (bands[:, :] - c) * (b - a) / (d - c)
else:
c = np.percentile(bands[:, :, i], lower_percent)
d = np.percentile(bands[:, :, i], higher_percent)
t = a + (bands[:, :, i] - c) * (b - a) / (d - c)
t[t < a] = a
t[t > b] = b
if depth == 1:
out[:, :] = t
else:
out[:, :, i] = t
return out.astype(np.float32)
def EVI_index(msdata):
# Enhanced Vegetation Index
NIR2 = msdata[WV3ms.NEARIR2.value, :, :].astype(np.float32)
R = msdata[WV3ms.RED.value, :, :].astype(np.float32)
CB = msdata[WV3ms.COASTAL.value, :, :].astype(np.float32)
# EVI = 2.5 * (NIR2 - R)/(NIR2 + 6.0*R - 7.5*CB + 1.0)
a = 2.5 * (NIR2 - R)
b = NIR2 + 6.0*R - 7.5*CB + 1.0
with np.errstate(divide='ignore', invalid='ignore'):
EVI = np.true_divide(a, b)
EVI[EVI == np.inf] = 0
EVI = np.nan_to_num(EVI)
return EVI
def SAVI_index(msdata):
# Soil Adjusted Vegetation Index
NIR1 = msdata[WV3ms.NEARIR1.value, :, :].astype(np.float32)
R = msdata[WV3ms.RED.value, :, :].astype(np.float32)
# The value of L varies by the amount or cover of green vegetation: in very high vegetation regions,
# L=0; and in areas with no green vegetation, L=1. Generally, an L=0.5 works well in most situations
# and is the default value used. When L=0, then SAVI = NDVI.
L = 0.5
# SAVI = (1 + L) * (NIR1 - R)/(NIR1 + R + L)
a = (1 + L) * (NIR1 - R)
b = NIR1 + R + L
with np.errstate(divide='ignore', invalid='ignore'):
SAVI = np.true_divide(a, b)
SAVI[SAVI == np.inf] = 0
SAVI = np.nan_to_num(SAVI)
return SAVI
def faux_CCCI_index(msdata, rgbdata):
RE = resize(msdata[WV3ms.REDEDGE.value, :, :], (rgbdata.shape[0], rgbdata.shape[1]),
mode='constant', preserve_range=False)
NIR2 = resize(msdata[WV3ms.NEARIR2.value, :, :], (rgbdata.shape[0], rgbdata.shape[1]),
mode='constant', preserve_range=False)
R = rgbdata[:, :, 0]
# resize: note that with the default preserve_range=False the input image is
# converted according to the conventions of img_as_float (values in [0, 1])
    # from the original 11-bit range [0, 2047]. preserve_range=True should be used.
    # faux_CCCI_index only works with preserve_range=False - reason unknown
# Canopy Chlorophyll Content Index
# CCCI = ((NIR2 - RE) / (NIR2 + RE)) / ((NIR2 - R) / (NIR2 + R))
a = NIR2 - RE
b = NIR2 + RE
# c = NIR2 - R
# d = NIR2 + R
c = R * (-1)
d = R
with np.errstate(divide='ignore', invalid='ignore'):
e = np.true_divide(a, b)
e[e == np.inf] = 0
e = np.nan_to_num(e)
f = np.true_divide(c, d)
f[f == np.inf] = 0
f = np.nan_to_num(f)
CCCI = np.true_divide(e, f)
CCCI[CCCI == np.inf] = 0
CCCI = np.nan_to_num(CCCI)
return CCCI
def CCCI_NIR2_index(msdata):
# Canopy Chlorophyll Content Index
# uses NIR2 rather than SWIR_1
RE = msdata[WV3ms.REDEDGE.value, :, :].astype(np.float32)
NIR2 = msdata[WV3ms.NEARIR2.value, :, :].astype(np.float32)
R = msdata[WV3ms.RED.value, :, :].astype(np.float32)
    # CCCI = ((NIR2 - RE)/(NIR2 + RE)) / ((NIR2 - R)/(NIR2 + R))
a = NIR2 - RE
b = NIR2 + RE
c = NIR2 - R
d = NIR2 + R
with np.errstate(divide='ignore', invalid='ignore'):
e = np.true_divide(a, b)
e[e == np.inf] = 0
e = np.nan_to_num(e)
f = np.true_divide(c, d)
f[f == np.inf] = 0
f = np.nan_to_num(f)
CCCI = np.true_divide(e, f)
CCCI[CCCI == np.inf] = 0
CCCI = np.nan_to_num(CCCI)
return CCCI
def CCCI_SWIR_index(msdata, swirdata):
# Canopy Chlorophyll Content Index
# uses SWIR_1
RE = msdata[WV3ms.REDEDGE.value, :, :].astype(np.float32)
SWIR1 = resize(swirdata[WV3swir.SWIR_1.value, :, :], (msdata.shape[1], msdata.shape[2]),
mode='constant', preserve_range=True).astype(np.float32)
R = msdata[WV3ms.RED.value, :, :].astype(np.float32)
    # CCCI = ((SWIR1 - RE)/(SWIR1 + RE)) / ((SWIR1 - R)/(SWIR1 + R))
a = SWIR1 - RE
b = SWIR1 + RE
c = SWIR1 - R
d = SWIR1 + R
with np.errstate(divide='ignore', invalid='ignore'):
e = np.true_divide(a, b)
e[e == np.inf] = 0
e = np.nan_to_num(e)
f = np.true_divide(c, d)
f[f == np.inf] = 0
f = np.nan_to_num(f)
CCCI = np.true_divide(e, f)
CCCI[CCCI == np.inf] = 0
CCCI = np.nan_to_num(CCCI)
return CCCI
def NDWI_index(msdata):
# Normalized Difference Water Index
    # Uses McFeeters' NDWI based on MODIS band 2 and band 4
G = msdata[WV3ms.GREEN.value, :, :].astype(np.float32)
NIR1 = msdata[WV3ms.NEARIR1.value, :, :].astype(np.float32)
# NDWI = (G - NIR1)/(G + NIR1)
a = G - NIR1
b = G + NIR1
with np.errstate(divide='ignore', invalid='ignore'):
NDWI = np.true_divide(a, b)
NDWI[NDWI == np.inf] = 0
NDWI = np.nan_to_num(NDWI)
return NDWI
def NDVI_index(msdata):
# Normalized Difference Vegetation Index
R = msdata[WV3ms.RED.value, :, :].astype(np.float32)
NIR1 = msdata[WV3ms.NEARIR1.value, :, :].astype(np.float32)
# NDVI = (NIR1 - R)/(NIR1 + R )
a = NIR1 - R
b = NIR1 + R
with np.errstate(divide='ignore', invalid='ignore'):
NDVI = np.true_divide(a, b)
NDVI[NDVI == np.inf] = 0
NDVI = np.nan_to_num(NDVI)
return NDVI
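# Worked example for NDVI (illustrative reflectances): a vegetated pixel with
# NIR1 = 0.50 and R = 0.10 gives (0.50 - 0.10) / (0.50 + 0.10) ~= 0.67, well above
# NDVI_THRESHOLD = 0.07, while bare soil with NIR1 close to R lands near 0.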
def display(IM_ID):
# read rgb and m bands
# tifffile RGB = ndarray shape (3, 3350, 3338) i.e. (colour, row, col)
# [0] = red, [1] = green, [2] = blue, 16 bit depth
rgb = tiff.imread('three_band/{}.tif'.format(IM_ID))
# change shape to regular (3350, 3338, 3) i.e. (row, col, colour)
rgb = np.rollaxis(rgb, 0, 3)
# tifffile M = ndarray shape (8, 838, 835) i.e. (spectrum, row, col)
m = tiff.imread('sixteen_band/{}_M.tif'.format(IM_ID))
# tiffile panchrom = ndarray shape (3349, 3338) i.e. (row, col)
panchrom = tiff.imread('sixteen_band/{}_P.tif'.format(IM_ID))
# tiffile SWIR = ndarray shape (8, 134, 133) i.e. (spectrum, row, col)
swir = tiff.imread('sixteen_band/{}_A.tif'.format(IM_ID))
# get our indices
myFauxCCCI = faux_CCCI_index(m, rgb)
myCCCI = CCCI_NIR2_index(m)
mySwirCCCI = CCCI_SWIR_index(m, swir)
myNDWI = NDWI_index(m)
myNDVI = NDVI_index(m)
myEVI = EVI_index(m)
mySAVI = SAVI_index(m)
# you can look on histogram and pick your favorite threshold value
# ccci_binary = (myCCCI < CCCI_THRESHOLD).astype(np.float32)
ccci_binary_1 = (myCCCI < CCCI_THRESHOLD_U)
ccci_binary_2 = (myCCCI > CCCI_THRESHOLD_L)
ccci_binary_3 = np.logical_and(ccci_binary_1, ccci_binary_2)
ccci_binary_4 = np.logical_not(ccci_binary_3)
ccci_binary_5 = ndimage.binary_opening(ccci_binary_4)
ccci_binary = ndimage.binary_closing(ccci_binary_5).astype(np.float32)
ndwi_binary = (myNDWI > NDWI_THRESHOLD).astype(np.float32)
    ndvi_binary = (myNDVI > NDVI_THRESHOLD).astype(np.float32)
faux_ccci_binary = (myFauxCCCI > FAUX_CCCI_THRESHOLD).astype(np.float32)
ccci_swir_binary = (mySwirCCCI > CCCI_SWIR_THRESHOLD).astype(np.float32)
fig, axes = plt.subplots(ncols=5, nrows=2, figsize=(18, 9))
ax = axes.ravel()
ax[0].imshow(ccci_binary, cmap='binary_r')
ax[0].set_title('CCCI NIR 2 Mask')
ax[0].axis('off')
ax[1].imshow(ndwi_binary, cmap='binary_r')
ax[1].set_title('NDWI Mask')
ax[1].axis('off')
ax[2].imshow(ndvi_binary, cmap='binary_r')
ax[2].set_title('NDVI Mask')
ax[2].axis('off')
ax[3].imshow(faux_ccci_binary, cmap='binary_r')
ax[3].set_title('Faux CCCI Mask')
ax[3].axis('off')
ax[4].imshow(ccci_swir_binary, cmap='binary_r')
ax[4].set_title('CCCI SWIR 1 Mask')
ax[4].axis('off')
hist, bins = np.histogram(myCCCI, range=(-2, 2), bins=50)
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
ax[5].set_title('CCCI NIR 2 Histogram')
ax[5].bar(center, hist, align='center', width=width)
hist, bins = np.histogram(myNDWI, bins=50)
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
ax[6].set_title('NDWI Histogram')
ax[6].bar(center, hist, align='center', width=width)
hist, bins = np.histogram(myNDVI, bins=50)
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
ax[7].set_title('NDVI Histogram')
ax[7].bar(center, hist, align='center', width=width)
hist, bins = np.histogram(myFauxCCCI, range=(-.4, .4), bins=50)
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
ax[8].set_title('Faux CCCI Histogram')
ax[8].bar(center, hist, align='center', width=width)
hist, bins = np.histogram(mySwirCCCI, range=(.4, 1.2), bins=50)
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
ax[9].set_title('CCCI SWIR 1 Histogram')
ax[9].bar(center, hist, align='center', width=width)
plt.tight_layout()
plt.show()
# fig, axes = plt.subplots(ncols=2, nrows=1, figsize=(18, 10))
# ax = axes.ravel()
# ax[0].imshow(stretch_8bit(rgb))
# ax[0].set_title('RGB {}'.format(IM_ID))
# ax[0].axis('off')
# ax[1].imshow(stretch_8bit(panchrom, depth=1), cmap='gray')
# ax[1].set_title('Panchromatic {}'.format(IM_ID))
# ax[1].axis('off')
# plt.tight_layout()
# plt.show()
fig, axes = plt.subplots(ncols=3, nrows=2, figsize=(18, 10))
ax = axes.ravel()
ax[0].imshow(myCCCI, vmin=-.5, vmax=.5)
ax[0].set_title('CCCI NIR 2')
ax[0].axis('off')
ax[1].imshow(myNDWI, vmin=-.3, vmax=.3)
ax[1].set_title('NDWI')
ax[1].axis('off')
ax[2].imshow(myNDVI)
ax[2].set_title('NDVI')
ax[2].axis('off')
ax[3].imshow(myEVI, vmin=-.5, vmax=.5)
ax[3].set_title('EVI')
ax[3].axis('off')
ax[4].imshow(mySAVI)
ax[4].set_title('SAVI')
ax[4].axis('off')
ax[5].imshow(mySwirCCCI, vmin=0.6, vmax=1.2)
ax[5].set_title('CCCI SWIR 1')
ax[5].axis('off')
plt.tight_layout()
plt.show()
# -----Main------
data = pd.read_csv('train_wkt_v4.csv')
data = data[data.MultipolygonWKT != 'MULTIPOLYGON EMPTY']
# display('6150_3_4')
# use training data images for waterway
for IMG_ID in data[data.ClassType == 7].ImageId:
display(IMG_ID)
# test images
# take some pictures from test
waterway_test = ['6080_4_3', '6080_4_0',
'6080_1_3', '6080_1_1',
'6150_3_4', '6050_2_1']
for IMG_ID in waterway_test:
display(IMG_ID)
|
py | 1a54d36e15b232e0ea521f8c422e827fd574f5a6 | """Base classes for all estimators."""
# Author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import copy
import warnings
from collections import defaultdict
import platform
import inspect
import re
import numpy as np
from . import __version__
from .utils import _IS_32BIT
_DEFAULT_TAGS = {
'non_deterministic': False,
'requires_positive_X': False,
'requires_positive_y': False,
'X_types': ['2darray'],
'poor_score': False,
'no_validation': False,
'multioutput': False,
"allow_nan": False,
'stateless': False,
'multilabel': False,
'_skip_test': False,
'multioutput_only': False,
'binary_only': False,
'requires_fit': True}
def clone(estimator, safe=True):
"""Constructs a new estimator with the same parameters.
Clone does a deep copy of the model in an estimator
without actually copying attached data. It yields a new estimator
with the same parameters that has not been fit on any data.
Parameters
----------
estimator : estimator object, or list, tuple or set of objects
The estimator or group of estimators to be cloned
safe : boolean, optional
If safe is false, clone will fall back to a deep copy on objects
that are not estimators.
"""
estimator_type = type(estimator)
# XXX: not handling dictionaries
if estimator_type in (list, tuple, set, frozenset):
return estimator_type([clone(e, safe=safe) for e in estimator])
elif not hasattr(estimator, 'get_params') or isinstance(estimator, type):
if not safe:
return copy.deepcopy(estimator)
else:
raise TypeError("Cannot clone object '%s' (type %s): "
"it does not seem to be a scikit-learn estimator "
"as it does not implement a 'get_params' methods."
% (repr(estimator), type(estimator)))
klass = estimator.__class__
new_object_params = estimator.get_params(deep=False)
for name, param in new_object_params.items():
new_object_params[name] = clone(param, safe=False)
new_object = klass(**new_object_params)
params_set = new_object.get_params(deep=False)
# quick sanity check of the parameters of the clone
for name in new_object_params:
param1 = new_object_params[name]
param2 = params_set[name]
if param1 is not param2:
raise RuntimeError('Cannot clone object %s, as the constructor '
'either does not set or modifies parameter %s' %
(estimator, name))
return new_object
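# Illustrative sketch (not part of the original module): ``clone`` copies the
# constructor parameters of an estimator but drops anything learned from data.
# ``_ToyEstimator`` is a hypothetical class defined only for this demonstration.
def _demo_clone_usage():
    class _ToyEstimator(BaseEstimator):
        def __init__(self, alpha=1.0, fit_intercept=True):
            self.alpha = alpha
            self.fit_intercept = fit_intercept

    est = _ToyEstimator(alpha=0.5)
    est.coef_ = "something learned from data"  # fitted attributes are not cloned
    new_est = clone(est)
    assert new_est.get_params()["alpha"] == 0.5
    assert not hasattr(new_est, "coef_")
    return new_est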
def _pprint(params, offset=0, printer=repr):
"""Pretty print the dictionary 'params'
Parameters
----------
params : dict
The dictionary to pretty print
offset : int
The offset in characters to add at the begin of each line.
printer : callable
The function to convert entries to strings, typically
the builtin str or repr
"""
# Do a multi-line justified repr:
options = np.get_printoptions()
np.set_printoptions(precision=5, threshold=64, edgeitems=2)
params_list = list()
this_line_length = offset
line_sep = ',\n' + (1 + offset // 2) * ' '
for i, (k, v) in enumerate(sorted(params.items())):
if type(v) is float:
# use str for representing floating point numbers
# this way we get consistent representation across
# architectures and versions.
this_repr = '%s=%s' % (k, str(v))
else:
# use repr of the rest
this_repr = '%s=%s' % (k, printer(v))
if len(this_repr) > 500:
this_repr = this_repr[:300] + '...' + this_repr[-100:]
if i > 0:
if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr):
params_list.append(line_sep)
this_line_length = len(line_sep)
else:
params_list.append(', ')
this_line_length += 2
params_list.append(this_repr)
this_line_length += len(this_repr)
np.set_printoptions(**options)
lines = ''.join(params_list)
# Strip trailing space to avoid nightmare in doctests
lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n'))
return lines
class BaseEstimator:
"""Base class for all estimators in scikit-learn
Notes
-----
All estimators should specify all the parameters that can be set
at the class level in their ``__init__`` as explicit keyword
arguments (no ``*args`` or ``**kwargs``).
"""
@classmethod
def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
init_signature = inspect.signature(init)
# Consider the constructor parameters excluding 'self'
parameters = [p for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD]
for p in parameters:
if p.kind == p.VAR_POSITIONAL:
raise RuntimeError("scikit-learn estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s with constructor %s doesn't "
" follow this convention."
% (cls, init_signature))
# Extract and sort argument names excluding 'self'
return sorted([p.name for p in parameters])
def get_params(self, deep=True):
"""
Get parameters for this estimator.
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
for key in self._get_param_names():
try:
value = getattr(self, key)
except AttributeError:
warnings.warn('From version 0.24, get_params will raise an '
'AttributeError if a parameter cannot be '
'retrieved as an instance attribute. Previously '
'it would return None.',
FutureWarning)
value = None
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""
Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The latter have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Parameters
----------
**params : dict
Estimator parameters.
Returns
-------
self : object
Estimator instance.
"""
if not params:
# Simple optimization to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
nested_params = defaultdict(dict) # grouped by prefix
for key, value in params.items():
key, delim, sub_key = key.partition('__')
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self))
if delim:
nested_params[key][sub_key] = value
else:
setattr(self, key, value)
valid_params[key] = value
for key, sub_params in nested_params.items():
valid_params[key].set_params(**sub_params)
return self
def __repr__(self, N_CHAR_MAX=700):
# N_CHAR_MAX is the (approximate) maximum number of non-blank
# characters to render. We pass it as an optional parameter to ease
# the tests.
from .utils._pprint import _EstimatorPrettyPrinter
N_MAX_ELEMENTS_TO_SHOW = 30 # number of elements to show in sequences
# use ellipsis for sequences with a lot of elements
pp = _EstimatorPrettyPrinter(
compact=True, indent=1, indent_at_name=True,
n_max_elements_to_show=N_MAX_ELEMENTS_TO_SHOW)
repr_ = pp.pformat(self)
# Use bruteforce ellipsis when there are a lot of non-blank characters
n_nonblank = len(''.join(repr_.split()))
if n_nonblank > N_CHAR_MAX:
lim = N_CHAR_MAX // 2 # apprx number of chars to keep on both ends
regex = r'^(\s*\S){%d}' % lim
# The regex '^(\s*\S){%d}' % n
# matches from the start of the string until the nth non-blank
# character:
# - ^ matches the start of string
# - (pattern){n} matches n repetitions of pattern
# - \s*\S matches a non-blank char following zero or more blanks
left_lim = re.match(regex, repr_).end()
right_lim = re.match(regex, repr_[::-1]).end()
if '\n' in repr_[left_lim:-right_lim]:
# The left side and right side aren't on the same line.
# To avoid weird cuts, e.g.:
# categoric...ore',
# we need to start the right side with an appropriate newline
# character so that it renders properly as:
# categoric...
# handle_unknown='ignore',
# so we add [^\n]*\n which matches until the next \n
regex += r'[^\n]*\n'
right_lim = re.match(regex, repr_[::-1]).end()
ellipsis = '...'
if left_lim + len(ellipsis) < len(repr_) - right_lim:
# Only add ellipsis if it results in a shorter repr
repr_ = repr_[:left_lim] + '...' + repr_[-right_lim:]
return repr_
def __getstate__(self):
try:
state = super().__getstate__()
except AttributeError:
state = self.__dict__.copy()
if type(self).__module__.startswith('sklearn.'):
return dict(state.items(), _sklearn_version=__version__)
else:
return state
def __setstate__(self, state):
if type(self).__module__.startswith('sklearn.'):
pickle_version = state.pop("_sklearn_version", "pre-0.18")
if pickle_version != __version__:
warnings.warn(
"Trying to unpickle estimator {0} from version {1} when "
"using version {2}. This might lead to breaking code or "
"invalid results. Use at your own risk.".format(
self.__class__.__name__, pickle_version, __version__),
UserWarning)
try:
super().__setstate__(state)
except AttributeError:
self.__dict__.update(state)
def _more_tags(self):
return _DEFAULT_TAGS
def _get_tags(self):
collected_tags = {}
for base_class in reversed(inspect.getmro(self.__class__)):
if hasattr(base_class, '_more_tags'):
# need the if because mixins might not have _more_tags
# but might do redundant work in estimators
# (i.e. calling more tags on BaseEstimator multiple times)
more_tags = base_class._more_tags(self)
collected_tags.update(more_tags)
return collected_tags
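# Illustrative sketch (not part of the original module): the
# ``<component>__<parameter>`` syntax exposed by ``get_params(deep=True)`` and
# understood by ``set_params``. Both classes are hypothetical toys defined only
# for this demonstration.
def _demo_nested_params():
    class _Inner(BaseEstimator):
        def __init__(self, C=1.0):
            self.C = C

    class _Wrapper(BaseEstimator):
        def __init__(self, estimator=None):
            self.estimator = estimator

    wrapper = _Wrapper(estimator=_Inner(C=1.0))
    # deep=True prefixes the inner estimator's parameters with its attribute name
    assert "estimator__C" in wrapper.get_params(deep=True)
    # the same prefixed name routes the update to the nested estimator
    wrapper.set_params(estimator__C=10.0)
    assert wrapper.estimator.C == 10.0
    return wrapper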
class ClassifierMixin:
"""Mixin class for all classifiers in scikit-learn."""
_estimator_type = "classifier"
def score(self, X, y, sample_weight=None):
"""
Return the mean accuracy on the given test data and labels.
In multi-label classification, this is the subset accuracy
which is a harsh metric since you require for each sample that
each label set be correctly predicted.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
True labels for X.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
Mean accuracy of self.predict(X) wrt. y.
"""
from .metrics import accuracy_score
return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
class RegressorMixin:
"""Mixin class for all regression estimators in scikit-learn."""
_estimator_type = "regressor"
def score(self, X, y, sample_weight=None):
"""Returns the coefficient of determination R^2 of the prediction.
The coefficient R^2 is defined as (1 - u/v), where u is the residual
sum of squares ((y_true - y_pred) ** 2).sum() and v is the total
sum of squares ((y_true - y_true.mean()) ** 2).sum().
The best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get a R^2 score of 0.0.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples. For some estimators this may be a
precomputed kernel matrix instead, shape = (n_samples,
            n_samples_fitted), where n_samples_fitted is the number of
samples used in the fitting for the estimator.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
True values for X.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
R^2 of self.predict(X) wrt. y.
Notes
-----
The R2 score used when calling ``score`` on a regressor will use
``multioutput='uniform_average'`` from version 0.23 to keep consistent
with :func:`~sklearn.metrics.r2_score`. This will influence the
``score`` method of all the multioutput regressors (except for
:class:`~sklearn.multioutput.MultiOutputRegressor`). To specify the
default value manually and avoid the warning, please either call
:func:`~sklearn.metrics.r2_score` directly or make a custom scorer with
:func:`~sklearn.metrics.make_scorer` (the built-in scorer ``'r2'`` uses
``multioutput='uniform_average'``).
"""
from .metrics import r2_score
from .metrics._regression import _check_reg_targets
y_pred = self.predict(X)
# XXX: Remove the check in 0.23
y_type, _, _, _ = _check_reg_targets(y, y_pred, None)
if y_type == 'continuous-multioutput':
warnings.warn("The default value of multioutput (not exposed in "
"score method) will change from 'variance_weighted' "
"to 'uniform_average' in 0.23 to keep consistent "
"with 'metrics.r2_score'. To specify the default "
"value manually and avoid the warning, please "
"either call 'metrics.r2_score' directly or make a "
"custom scorer with 'metrics.make_scorer' (the "
"built-in scorer 'r2' uses "
"multioutput='uniform_average').", FutureWarning)
return r2_score(y, y_pred, sample_weight=sample_weight,
multioutput='variance_weighted')
class ClusterMixin:
"""Mixin class for all cluster estimators in scikit-learn."""
_estimator_type = "clusterer"
def fit_predict(self, X, y=None):
"""
Perform clustering on X and returns cluster labels.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Input data.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
labels : ndarray, shape (n_samples,)
Cluster labels.
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
self.fit(X)
return self.labels_
class BiclusterMixin:
"""Mixin class for all bicluster estimators in scikit-learn"""
@property
def biclusters_(self):
"""Convenient way to get row and column indicators together.
Returns the ``rows_`` and ``columns_`` members.
"""
return self.rows_, self.columns_
def get_indices(self, i):
"""Row and column indices of the i'th bicluster.
Only works if ``rows_`` and ``columns_`` attributes exist.
Parameters
----------
i : int
The index of the cluster.
Returns
-------
row_ind : np.array, dtype=np.intp
Indices of rows in the dataset that belong to the bicluster.
col_ind : np.array, dtype=np.intp
Indices of columns in the dataset that belong to the bicluster.
"""
rows = self.rows_[i]
columns = self.columns_[i]
return np.nonzero(rows)[0], np.nonzero(columns)[0]
def get_shape(self, i):
"""Shape of the i'th bicluster.
Parameters
----------
i : int
The index of the cluster.
Returns
-------
shape : (int, int)
Number of rows and columns (resp.) in the bicluster.
"""
indices = self.get_indices(i)
return tuple(len(i) for i in indices)
def get_submatrix(self, i, data):
"""Returns the submatrix corresponding to bicluster `i`.
Parameters
----------
i : int
The index of the cluster.
data : array
The data.
Returns
-------
submatrix : array
The submatrix corresponding to bicluster i.
Notes
-----
Works with sparse matrices. Only works if ``rows_`` and
``columns_`` attributes exist.
"""
from .utils.validation import check_array
data = check_array(data, accept_sparse='csr')
row_ind, col_ind = self.get_indices(i)
return data[row_ind[:, np.newaxis], col_ind]
class TransformerMixin:
"""Mixin class for all transformers in scikit-learn."""
def fit_transform(self, X, y=None, **fit_params):
"""
Fit to data, then transform it.
Fits transformer to X and y with optional parameters fit_params
and returns a transformed version of X.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Training set.
y : numpy array of shape [n_samples]
Target values.
**fit_params : dict
Additional fit parameters.
Returns
-------
X_new : numpy array of shape [n_samples, n_features_new]
Transformed array.
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
if y is None:
# fit method of arity 1 (unsupervised transformation)
return self.fit(X, **fit_params).transform(X)
else:
# fit method of arity 2 (supervised transformation)
return self.fit(X, y, **fit_params).transform(X)
class DensityMixin:
"""Mixin class for all density estimators in scikit-learn."""
_estimator_type = "DensityEstimator"
def score(self, X, y=None):
"""Returns the score of the model on the data X
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
score : float
"""
pass
class OutlierMixin:
"""Mixin class for all outlier detection estimators in scikit-learn."""
_estimator_type = "outlier_detector"
def fit_predict(self, X, y=None):
"""Perform fit on X and returns labels for X.
Returns -1 for outliers and 1 for inliers.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Input data.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
y : ndarray, shape (n_samples,)
1 for inliers, -1 for outliers.
"""
        # override for transductive outlier detectors like LocalOutlierFactor
return self.fit(X).predict(X)
class MetaEstimatorMixin:
_required_parameters = ["estimator"]
"""Mixin class for all meta estimators in scikit-learn."""
class MultiOutputMixin:
"""Mixin to mark estimators that support multioutput."""
def _more_tags(self):
return {'multioutput': True}
class _UnstableArchMixin:
"""Mark estimators that are non-determinstic on 32bit or PowerPC"""
def _more_tags(self):
return {'non_deterministic': (
_IS_32BIT or platform.machine().startswith(('ppc', 'powerpc')))}
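# Illustrative sketch (not part of the original module): how ``_get_tags``
# merges ``_more_tags`` overrides along the MRO on top of ``_DEFAULT_TAGS``.
# ``_ToyMultiOutput`` is a hypothetical class defined only for demonstration.
def _demo_tag_collection():
    class _ToyMultiOutput(MultiOutputMixin, BaseEstimator):
        pass

    tags = _ToyMultiOutput()._get_tags()
    assert tags['multioutput'] is True   # overridden by MultiOutputMixin
    assert tags['requires_fit'] is True  # inherited default from _DEFAULT_TAGS
    return tags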
def is_classifier(estimator):
"""Returns True if the given estimator is (probably) a classifier.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if estimator is a classifier and False otherwise.
"""
return getattr(estimator, "_estimator_type", None) == "classifier"
def is_regressor(estimator):
"""Returns True if the given estimator is (probably) a regressor.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if estimator is a regressor and False otherwise.
"""
return getattr(estimator, "_estimator_type", None) == "regressor"
def is_outlier_detector(estimator):
"""Returns True if the given estimator is (probably) an outlier detector.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if estimator is an outlier detector and False otherwise.
"""
return getattr(estimator, "_estimator_type", None) == "outlier_detector"
|
py | 1a54d3859b175b3345617a2b9daa7818294a47ab | #!/usr/bin/env python
"""
Helper functions to measure things like noise covariances from data files.
"""
import numpy as np
import matplotlib.pyplot as P  # assumed alias for the plotting calls (P.hist/P.plot/P.show) below
#../hera_data/zen.2458042.14789.xx.HH.uvOR_0_1.npz
def load_hera_data(fname):
"""
Load data and RFI flags from a pre-processed numpy file generated by
hera_extract_data.py.
"""
dat = np.load(fname)
return dat['d'], dat['w']
def estimate_noise_cov(d, w):
"""
Estimate frequency-frequency noise covariance by performing finite
differences on the data in the time direction and then averaging.
Currently assumes a diagonal noise covariance. The real and imaginary noise
covariances are estimated separately. The results are turned into a smooth
function by fitting a Chebyshev polynomial.
"""
# Create masked array
#mask = np.logical_not( w.astype(bool) )
mask = np.logical_not( w.astype(bool) )
d_masked = np.ma.array(d, mask=mask)
# Difference masked array in the time direction. Assuming that the signal
# is almost constant between neighbouring time samples, these values should
# essentially be the difference of the noise in neighbouring time samples
diff_d = np.ma.diff(d_masked, axis=0)
#sigma = np.std(diff_d, axis=0)
idxs = np.where(np.abs(diff_d) < 0.1)
sigma = np.std(diff_d[idxs].real)
idxs = np.where(np.abs(diff_d.imag) < 0.1)
sigma_i = np.std(diff_d[idxs].imag)
    print(diff_d)
    print(sigma, sigma_i)
P.hist(diff_d.flatten(), bins=100, range=(-0.2, 0.2), normed=True)
xx = np.linspace(-0.1, 0.1, 500)
P.plot(xx, np.exp(-0.5*(xx/sigma)**2.)/np.sqrt(2.*np.pi)/sigma, 'r-', lw=1.8)
P.show()
return sigma, np.std(diff_d.real, axis=0), np.std(diff_d.imag, axis=0)
    # The two fragments below were left over in the original file and reference
    # undefined names (x, avg_r2, ww2); they are kept commented out as a
    # reminder of the Chebyshev smoothing step mentioned in the docstring.
    # sqrtNr
    # ch = np.polynomial.chebyshev.chebfit(x, avg_r2, deg=18, w=ww2)
#d, w = load_hera_data("../hera_data/zen.2458042.14789.xx.HH.uvOR_0_1.npz")
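# Illustrative usage sketch: the file name is the example path from the comment
# above and is assumed to exist locally; this function is not called anywhere.
def _example_noise_estimate(fname="../hera_data/zen.2458042.14789.xx.HH.uvOR_0_1.npz"):
    d, w = load_hera_data(fname)
    return estimate_noise_cov(d, w)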
|
py | 1a54d4d70d96b218bd587457a70346365976c733 | """In this module we define the OrderByNameMixin.
The OrderByNameMixin adds ordering by the name parameter to a Class"""
class OrderByNameMixin:
"""The OrderByNameMixin adds ordering by the name parameter to a Class"""
def __lt__(self, other):
if hasattr(other, "name"):
return self.name.casefold() < other.name.casefold()
return True
def __eq__(self, other):
if hasattr(other, "name"):
return self.name == other.name
return False
|
py | 1a54d51eb4fb7b9238419a97eeada1cf99a112fc | import numpy as np
import pandas as pd  # needed for pd.DataFrame in calcWarmups
WARMRATE=0.5
class NSB:
def apply(self,data):
self.df = data
def iterrt(self):
df = self.df
# First we'll calculate the hourly RT for NSB: nrt
df['nrt']=0.0
rt=22
newtonK=0.036
for i,row in df.iterrows():
rtsp = row.nrtsp
delta = float(rtsp-rt)
#print("D: %f"%delta)
if delta > 0:
# increase temperature by delta to a max of 1
if delta > 1: delta = WARMRATE
elif delta < 0:
# newton's law of cooling for T=+1
rt1 = row.OAT + (rt-row.OAT)*np.exp(-newtonK)
CDRATE = rt1-rt
#print("CD: %f"%CDRATE)
if delta < CDRATE: delta = CDRATE
rt = rt + delta
df.nrt.at[i]= rt
#print("%d rtsp=%f rt=%f delta=%f oat=%f" % (i.hour,rtsp,rt,delta,row.OAT))
def calchds(self):
df = self.df
# Now calculate the sum of delta-T for BAU vs NSB (heating only)
df['bauhd']=df.rt-df.OAT # hourly bau delta T
df['nsbhd']=df.nrt-df.OAT # hourly nsb delta T
# Eliminate negative delta T
df.loc[df['bauhd']<0,'bauhd']=0
df.loc[df['nsbhd']<0,'nsbhd']=0
#project savings
self.heatDTsaved = (df.bauhd.sum() - df.nsbhd.sum())
self.heatfracsaved = self.heatDTsaved/df.bauhd.sum()
print("Heating savings: %.2f%%"%(100*heatfracsaved))
#df.nsbhd.describe()
#df.bauhd.describe()
    @staticmethod
    def saveDT(row):
RTN=row.nrt
RTB=row.rt
OAT=row.OAT
# Computes the fractional reduction in delta T for NSB as compared to BAU
DTN=RTN-OAT
DTB=RTB-OAT
if(DTB<=0): return 0 # There are no savings possible if the BAU DT indicates no heating.
if(DTN<=0): DTN=0 # Best we can do is turn off the heat and save 100%
return (DTB-DTN)/DTB
def calcCoolAndSS(self):
# Categorize periods and calculate new heating load for each period
        df = self.df
        h = df[(df.hload != 0)].dropna()
# Set default values for cooling
h['cooldown']= h.nrt > h.nrtsp
h.loc[h.cooldown,'savefrac'] = 1
h.loc[h.cooldown,'nhload'] = 0
#compute savings fraction and hload for the Steady State periods
# I think there's an error here.
h['ss']= h.nrt == h.nrtsp
#h['savefrac'] =
        h.loc[h.ss, 'savefrac'] = h[h.ss].apply(self.saveDT, axis=1)
h.loc[h.ss,'nhload']=h[h.ss]['hload'] * (1-h[h.ss]['savefrac'])
#now get ready for the warmup rows
h['warmup']= h.nrt < h.nrtsp
self.h = h
def calcWarmups(self):
h = self.h
# calculate the expected total NSB heating load, and the number of hours over which it should be distributed
hw = pd.DataFrame(h[h.warmup])
# Count the whole and fractional hours where warmup occurs
hw['whrs'] = hw.nrtsp - hw.nrt # This is correct where DT<=1
hw.loc[hw.whrs>1,'whrs'] = 1 # Because whrs is 1 where DT>1
whrs = hw.whrs.sum()
# Total Warmup heating is expected NSB total, less expected NSB at SS.
        expectedHnsb = h.hload.sum() * (1 - self.heatfracsaved)
expectedHss = h[h.ss].hload.sum()
expectHwarmup = expectedHnsb - expectedHss
# Now distribute the warmup load
HwarmPerWhr = expectHwarmup / whrs
h.loc[hw.index,'nhload'] = (hw.whrs * HwarmPerWhr)
# There is an error here:
h.nhload.sum() / h.hload.sum()
|
py | 1a54d593883477680b21310de519be8a7ae15bcb |
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import re
from base64 import b64decode
from flask import Flask, Response, request
from netaddr import IPAddress, IPSet
from typing import Callable, Any, cast, Dict, Tuple
from math import ceil
import dateparser
''' GLOBAL VARIABLES '''
INTEGRATION_NAME: str = 'Export Indicators Service'
PAGE_SIZE: int = 200
APP: Flask = Flask('demisto-export_iocs')
CTX_VALUES_KEY: str = 'dmst_export_iocs_values'
CTX_MIMETYPE_KEY: str = 'dmst_export_iocs_mimetype'
FORMAT_CSV: str = 'csv'
FORMAT_TEXT: str = 'text'
FORMAT_JSON_SEQ: str = 'json-seq'
FORMAT_JSON: str = 'json'
FORMAT_ARG_MWG = 'mwg'
FORMAT_ARG_PANOSURL = 'panosurl'
FORMAT_ARG_BLUECOAT = 'bluecoat'
FORMAT_ARG_PROXYSG = 'proxysg'
FORMAT_MWG: str = 'McAfee Web Gateway'
FORMAT_PROXYSG: str = "Symantec ProxySG"
FORMAT_PANOSURL: str = "PAN-OS URL"
FORMAT_XSOAR_JSON: str = 'XSOAR json'
FORMAT_ARG_XSOAR_JSON: str = 'xsoar-json'
FORMAT_XSOAR_JSON_SEQ: str = 'XSOAR json-seq'
FORAMT_ARG_XSOAR_JSON_SEQ: str = 'xsoar-seq'
FORMAT_XSOAR_CSV: str = 'XSOAR csv'
FORMAT_ARG_XSOAR_CSV: str = 'xsoar-csv'
MWG_TYPE_OPTIONS = ["string", "applcontrol", "dimension", "category", "ip", "mediatype", "number", "regex"]
CTX_FORMAT_ERR_MSG: str = 'Please provide a valid format from: text, json, json-seq, csv, mgw, panosurl and proxysg'
CTX_LIMIT_ERR_MSG: str = 'Please provide a valid integer for List Size'
CTX_OFFSET_ERR_MSG: str = 'Please provide a valid integer for Starting Index'
CTX_MWG_TYPE_ERR_MSG: str = 'The McAfee Web Gateway type can only be one of the following: string,' \
' applcontrol, dimension, category, ip, mediatype, number, regex'
CTX_COLLAPSE_ERR_MSG: str = "The Collapse parameter can only take one of the following: 0 - Don't Collapse, " \
                            "1 - Collapse to Ranges, 2 - Collapse to CIDRs"
CTX_MISSING_REFRESH_ERR_MSG: str = 'Refresh Rate must be "number date_range_unit", examples: (2 hours, 4 minutes, ' \
'6 months, 1 day, etc.)'
CTX_NO_URLS_IN_PROXYSG_FORMAT = 'ProxySG format only outputs URLs - no URLs found in the current query'
MIMETYPE_JSON_SEQ: str = 'application/json-seq'
MIMETYPE_JSON: str = 'application/json'
MIMETYPE_CSV: str = 'text/csv'
MIMETYPE_TEXT: str = 'text/plain'
DONT_COLLAPSE = "Don't Collapse"
COLLAPSE_TO_CIDR = "To CIDRs"
COLLAPSE_TO_RANGES = "To Ranges"
SORT_ASCENDING = 'asc'
SORT_DESCENDING = 'desc'
_PROTOCOL_REMOVAL = re.compile(r'^(?:[a-z]+:)*//')
_PORT_REMOVAL = re.compile(r'^([a-z0-9\-\.]+)(?:\:[0-9]+)*')
_INVALID_TOKEN_REMOVAL = re.compile(r'(?:[^\./+=\?&]+\*[^\./+=\?&]*)|(?:[^\./+=\?&]*\*[^\./+=\?&]+)')
_BROAD_PATTERN = re.compile(r'^(?:\*\.)+[a-zA-Z]+(?::[0-9]+)?$')
'''Request Arguments Class'''
class RequestArguments:
def __init__(self, query: str, out_format: str = FORMAT_TEXT, limit: int = 10000, offset: int = 0,
mwg_type: str = 'string', strip_port: bool = False, drop_invalids: bool = False,
category_default: str = 'bc_category', category_attribute: str = '',
collapse_ips: str = DONT_COLLAPSE, csv_text: bool = False, sort_field: str = '',
sort_order: str = ''):
self.query = query
self.out_format = out_format
self.limit = limit
self.offset = offset
self.mwg_type = mwg_type
self.strip_port = strip_port
self.drop_invalids = drop_invalids
self.category_default = category_default
self.category_attribute = [] # type:List
self.collapse_ips = collapse_ips
self.csv_text = csv_text
self.sort_field = sort_field
self.sort_order = sort_order
if category_attribute is not None:
category_attribute_list = category_attribute.split(',')
if len(category_attribute_list) != 1 or '' not in category_attribute_list:
self.category_attribute = category_attribute_list
def is_request_change(self, last_update_data: Dict):
if self.limit != last_update_data.get('last_limit'):
return True
elif self.offset != last_update_data.get('last_offset'):
return True
elif self.out_format != last_update_data.get('last_format'):
return True
elif self.mwg_type != last_update_data.get('mwg_type'):
return True
elif self.drop_invalids != last_update_data.get('drop_invalids'):
return True
elif self.strip_port != last_update_data.get('strip_port'):
return True
elif self.category_default != last_update_data.get('category_default'):
return True
elif self.category_attribute != last_update_data.get('category_attribute'):
return True
elif self.collapse_ips != last_update_data.get('collapse_ips'):
return True
elif self.csv_text != last_update_data.get('csv_text'):
return True
elif self.sort_field != last_update_data.get('sort_field'):
return True
elif self.sort_order != last_update_data.get('sort_order'):
return True
return False
''' HELPER FUNCTIONS '''
def list_to_str(inp_list: list, delimiter: str = ',', map_func: Callable = str) -> str:
"""
Transforms a list to an str, with a custom delimiter between each list item
"""
str_res = ""
if inp_list:
if isinstance(inp_list, list):
str_res = delimiter.join(map(map_func, inp_list))
else:
raise AttributeError('Invalid inp_list provided to list_to_str')
return str_res
def sort_iocs(request_args: RequestArguments, iocs: list) -> list:
"""
Sorts the IoCs according to the sort field and order.
Returns: Sorted List of IoCs, if sorting arguments are defined.
"""
try:
if request_args.sort_field:
if request_args.sort_order == SORT_ASCENDING:
return sorted(iocs, key=lambda ioc: ioc[request_args.sort_field], reverse=False)
elif request_args.sort_order == SORT_DESCENDING:
return sorted(iocs, key=lambda ioc: ioc[request_args.sort_field], reverse=True)
except KeyError:
demisto.debug('ExportIndicators - Could not sort IoCs, please verify that you entered the correct field name.\n'
f'Field used: {request_args.sort_field}')
except Exception as e:
demisto.debug(f'ExportIndicators - Could not sort IoCs due to an unknown error.\n{e}')
return iocs
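# Illustrative sketch (not part of the original integration): sorting a small
# list of IoC dicts by their ``value`` field; the field name and values here
# are examples only.
def _demo_sort_iocs():
    args = RequestArguments(query='', sort_field='value', sort_order=SORT_ASCENDING)
    iocs = [{'value': 'b.example.com'}, {'value': 'a.example.com'}]
    return sort_iocs(args, iocs)  # -> [{'value': 'a.example.com'}, {'value': 'b.example.com'}]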
def refresh_outbound_context(request_args: RequestArguments, on_demand: bool = False) -> str:
"""
Refresh the values and format using an indicator_query to call demisto.searchIndicators
Update integration cache only in case of running on demand
Returns: List(IoCs in output format)
"""
now = datetime.now()
# poll indicators into list from demisto
iocs = find_indicators_with_limit(request_args.query, request_args.limit, request_args.offset)
iocs = sort_iocs(request_args, iocs)
out_dict, actual_indicator_amount = create_values_for_returned_dict(iocs, request_args)
    # in CSV format the first line is the "indicator" header, so don't count it
if request_args.out_format in [FORMAT_CSV, FORMAT_XSOAR_CSV]:
actual_indicator_amount = actual_indicator_amount - 1
# re-polling in case formatting or ip collapse caused a lack in results
while actual_indicator_amount < request_args.limit:
# from where to start the new poll and how many results should be fetched
new_offset = len(iocs) + request_args.offset + actual_indicator_amount - 1
new_limit = request_args.limit - actual_indicator_amount
# poll additional indicators into list from demisto
new_iocs = find_indicators_with_limit(request_args.query, new_limit, new_offset)
# in case no additional indicators exist - exit
if len(new_iocs) == 0:
break
# add the new results to the existing results
iocs += new_iocs
iocs = sort_iocs(request_args, iocs)
# reformat the output
out_dict, actual_indicator_amount = create_values_for_returned_dict(iocs, request_args)
if request_args.out_format == FORMAT_CSV:
actual_indicator_amount = actual_indicator_amount - 1
if request_args.out_format == FORMAT_JSON:
out_dict[CTX_MIMETYPE_KEY] = MIMETYPE_JSON
elif request_args.out_format in [FORMAT_CSV, FORMAT_XSOAR_CSV]:
if request_args.csv_text:
out_dict[CTX_MIMETYPE_KEY] = MIMETYPE_TEXT
else:
out_dict[CTX_MIMETYPE_KEY] = MIMETYPE_CSV
elif request_args.out_format in [FORMAT_JSON_SEQ, FORMAT_XSOAR_JSON_SEQ]:
out_dict[CTX_MIMETYPE_KEY] = MIMETYPE_JSON_SEQ
else:
out_dict[CTX_MIMETYPE_KEY] = MIMETYPE_TEXT
if on_demand:
set_integration_context({
"last_output": out_dict,
'last_run': date_to_timestamp(now),
'last_limit': request_args.limit,
'last_offset': request_args.offset,
'last_format': request_args.out_format,
'last_query': request_args.query,
'current_iocs': iocs,
'mwg_type': request_args.mwg_type,
'drop_invalids': request_args.drop_invalids,
'strip_port': request_args.strip_port,
'category_default': request_args.category_default,
'category_attribute': request_args.category_attribute,
'collapse_ips': request_args.collapse_ips,
'csv_text': request_args.csv_text,
'sort_field': request_args.sort_field,
'sort_order': request_args.sort_order,
})
return out_dict[CTX_VALUES_KEY]
def find_indicators_with_limit(indicator_query: str, limit: int, offset: int) -> list:
"""
Finds indicators using demisto.searchIndicators
"""
# calculate the starting page (each page holds 200 entries)
if offset:
next_page = int(offset / PAGE_SIZE)
# set the offset from the starting page
offset_in_page = offset - (PAGE_SIZE * next_page)
else:
next_page = 0
offset_in_page = 0
iocs, _ = find_indicators_with_limit_loop(indicator_query, limit, next_page=next_page)
# if offset in page is bigger than the amount of results returned return empty list
if len(iocs) <= offset_in_page:
return []
return iocs[offset_in_page:limit + offset_in_page]
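# Worked example of the page/offset arithmetic above (PAGE_SIZE is 200): an
# offset of 450 gives next_page = 450 // 200 = 2 and
# offset_in_page = 450 - 200 * 2 = 50, so polling starts at page 2 and the
# first 50 entries of that page are skipped.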
def find_indicators_with_limit_loop(indicator_query: str, limit: int, total_fetched: int = 0, next_page: int = 0,
last_found_len: int = PAGE_SIZE):
"""
Finds indicators using while loop with demisto.searchIndicators, and returns result and last page
"""
iocs: List[dict] = []
if not last_found_len:
last_found_len = total_fetched
search_indicators = IndicatorsSearcher(page=next_page)
while last_found_len == PAGE_SIZE and limit and total_fetched < limit:
fetched_iocs = search_indicators.search_indicators_by_version(query=indicator_query, size=PAGE_SIZE).get('iocs')
iocs.extend(fetched_iocs)
last_found_len = len(fetched_iocs)
total_fetched += last_found_len
return iocs, search_indicators.page
def ip_groups_to_cidrs(ip_range_groups: list):
"""Collapse ip groups list to CIDRs
Args:
ip_range_groups (list): a list of lists containing connected IPs
Returns:
list. a list of CIDRs.
"""
ip_ranges = [] # type:List
for cidr in ip_range_groups:
# handle single ips
if len(cidr) == 1:
# CIDR with a single IP appears with "/32" suffix so handle them differently
ip_ranges.append(str(cidr[0]))
continue
ip_ranges.append(str(cidr))
return ip_ranges
def ip_groups_to_ranges(ip_range_groups: list):
"""Collapse ip groups list to ranges.
Args:
ip_range_groups (list): a list of lists containing connected IPs
Returns:
list. a list of Ranges.
"""
ip_ranges = [] # type:List
for group in ip_range_groups:
# handle single ips
if len(group) == 1:
ip_ranges.append(str(group[0]))
continue
ip_ranges.append(str(group))
return ip_ranges
def ips_to_ranges(ips: list, collapse_ips: str):
"""Collapse IPs to Ranges or CIDRs.
Args:
ips (list): a list of IP strings.
collapse_ips (str): Whether to collapse to Ranges or CIDRs.
Returns:
list. a list to Ranges or CIDRs.
"""
if collapse_ips == COLLAPSE_TO_RANGES:
ips_range_groups = IPSet(ips).iter_ipranges()
return ip_groups_to_ranges(ips_range_groups)
else:
cidrs = IPSet(ips).iter_cidrs()
return ip_groups_to_cidrs(cidrs)
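# Illustrative sketch (not part of the original integration): how a small run
# of contiguous IPv4 addresses collapses under each mode; the addresses are
# examples only.
def _demo_ips_to_ranges():
    ips = [IPAddress('1.1.1.1'), IPAddress('1.1.1.2'), IPAddress('1.1.1.3')]
    as_ranges = ips_to_ranges(ips, COLLAPSE_TO_RANGES)  # -> ['1.1.1.1-1.1.1.3']
    as_cidrs = ips_to_ranges(ips, COLLAPSE_TO_CIDR)     # -> ['1.1.1.1', '1.1.1.2/31']
    return as_ranges, as_cidrs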
def panos_url_formatting(iocs: list, drop_invalids: bool, strip_port: bool):
formatted_indicators = [] # type:List
for indicator_data in iocs:
# only format URLs and Domains
indicator = indicator_data.get('value')
if not indicator:
continue
if indicator_data.get('indicator_type') in ['URL', 'Domain', 'DomainGlob']:
indicator = indicator.lower()
# remove initial protocol - http/https/ftp/ftps etc
indicator = _PROTOCOL_REMOVAL.sub('', indicator)
indicator_with_port = indicator
# remove port from indicator - from demisto.com:369/rest/of/path -> demisto.com/rest/of/path
indicator = _PORT_REMOVAL.sub(r'\g<1>', indicator)
# check if removing the port changed something about the indicator
if indicator != indicator_with_port and not strip_port:
# if port was in the indicator and strip_port param not set - ignore the indicator
continue
with_invalid_tokens_indicator = indicator
# remove invalid tokens from indicator
indicator = _INVALID_TOKEN_REMOVAL.sub('*', indicator)
# check if the indicator held invalid tokens
if with_invalid_tokens_indicator != indicator:
# invalid tokens in indicator- if drop_invalids is set - ignore the indicator
if drop_invalids:
continue
# check if after removing the tokens the indicator is too broad if so - ignore
# example of too broad terms: "*.paloalto", "*.*.paloalto", "*.paloalto:60"
hostname = indicator
if '/' in hostname:
hostname, _ = hostname.split('/', 1)
if _BROAD_PATTERN.match(hostname) is not None:
continue
# for PAN-OS "*.domain.com" does not match "domain.com" - we should provide both
if indicator.startswith('*.'):
formatted_indicators.append(indicator[2:])
formatted_indicators.append(indicator)
return {CTX_VALUES_KEY: list_to_str(formatted_indicators, '\n')}, len(formatted_indicators)
def create_json_out_format(iocs: list):
formatted_indicators = [] # type:List
for indicator_data in iocs:
if indicator_data.get("value"):
json_format_indicator = json_format_single_indicator(indicator_data)
formatted_indicators.append(json_format_indicator)
return {CTX_VALUES_KEY: json.dumps(formatted_indicators)}
def json_format_single_indicator(indicator: dict):
json_format_indicator = {
"indicator": indicator.get("value")
}
indicator.pop("value", None)
json_format_indicator["value"] = indicator
return json_format_indicator
def add_indicator_to_category(indicator, category, category_dict):
if category in category_dict.keys():
category_dict[category].append(indicator)
else:
category_dict[category] = [indicator]
return category_dict
def create_proxysg_out_format(iocs: list, category_attribute: list, category_default: str = 'bc_category'):
formatted_indicators = ''
category_dict = {} # type:Dict
num_of_returned_indicators = 0
for indicator in iocs:
if indicator.get('indicator_type') in ['URL', 'Domain', 'DomainGlob'] and indicator.get('value'):
indicator_proxysg_category = indicator.get('proxysgcategory')
            # if a ProxySG Category is set and it is in the category_attribute list, or the
            # attribute list is empty, then add the indicator to its category list
if indicator_proxysg_category is not None and \
(indicator_proxysg_category in category_attribute or len(category_attribute) == 0):
category_dict = add_indicator_to_category(indicator.get('value'), indicator_proxysg_category,
category_dict)
else:
# if ProxySG Category is not set or does not exist in the category_attribute list
category_dict = add_indicator_to_category(indicator.get('value'), category_default, category_dict)
for category, indicator_list in category_dict.items():
sub_output_string = f"define category {category}\n"
sub_output_string += list_to_str(indicator_list, '\n')
sub_output_string += "\nend\n"
formatted_indicators += sub_output_string
num_of_returned_indicators = num_of_returned_indicators + len(indicator_list)
if len(formatted_indicators) == 0:
raise Exception(CTX_NO_URLS_IN_PROXYSG_FORMAT)
return {CTX_VALUES_KEY: formatted_indicators}, num_of_returned_indicators
def create_mwg_out_format(iocs: list, mwg_type: str) -> dict:
formatted_indicators = [] # type:List
for indicator in iocs:
if not indicator.get('value'):
continue
value = "\"" + indicator.get('value') + "\""
sources = indicator.get('sourceBrands')
if sources:
sources_string = "\"" + ','.join(sources) + "\""
else:
sources_string = "\"from CORTEX XSOAR\""
formatted_indicators.append(value + " " + sources_string)
string_formatted_indicators = list_to_str(formatted_indicators, '\n')
if isinstance(mwg_type, list):
mwg_type = mwg_type[0]
string_formatted_indicators = "type=" + mwg_type + "\n" + string_formatted_indicators
return {CTX_VALUES_KEY: string_formatted_indicators}
def create_values_for_returned_dict(iocs: list, request_args: RequestArguments) -> Tuple[dict, int]:
"""
Create a dictionary for output values using the selected format (json, json-seq, text, csv, McAfee Web Gateway,
Symantec ProxySG, panosurl)
"""
if request_args.out_format == FORMAT_PANOSURL:
return panos_url_formatting(iocs, request_args.drop_invalids, request_args.strip_port)
if request_args.out_format == FORMAT_PROXYSG:
return create_proxysg_out_format(iocs, request_args.category_attribute, request_args.category_default)
if request_args.out_format == FORMAT_MWG:
return create_mwg_out_format(iocs, request_args.mwg_type), len(iocs)
if request_args.out_format == FORMAT_JSON:
return create_json_out_format(iocs), len(iocs)
if request_args.out_format == FORMAT_XSOAR_JSON:
iocs_list = [ioc for ioc in iocs]
return {CTX_VALUES_KEY: json.dumps(iocs_list)}, len(iocs)
else:
ipv4_formatted_indicators = []
ipv6_formatted_indicators = []
formatted_indicators = []
if request_args.out_format == FORMAT_XSOAR_CSV and len(iocs) > 0: # add csv keys as first item
headers = list(iocs[0].keys())
formatted_indicators.append(list_to_str(headers))
elif request_args.out_format == FORMAT_CSV and len(iocs) > 0:
formatted_indicators.append('indicator')
for ioc in iocs:
value = ioc.get('value')
type = ioc.get('indicator_type')
if value:
if request_args.out_format in [FORMAT_TEXT, FORMAT_CSV]:
if type == 'IP' and request_args.collapse_ips != DONT_COLLAPSE:
ipv4_formatted_indicators.append(IPAddress(value))
elif type == 'IPv6' and request_args.collapse_ips != DONT_COLLAPSE:
ipv6_formatted_indicators.append(IPAddress(value))
else:
formatted_indicators.append(value)
elif request_args.out_format == FORMAT_XSOAR_JSON_SEQ:
formatted_indicators.append(json.dumps(ioc))
elif request_args.out_format == FORMAT_JSON_SEQ:
json_format_indicator = json_format_single_indicator(ioc)
formatted_indicators.append(json.dumps(json_format_indicator))
elif request_args.out_format == FORMAT_XSOAR_CSV:
# wrap csv values with " to escape them
values = list(ioc.values())
formatted_indicators.append(list_to_str(values, map_func=lambda val: f'"{val}"'))
if len(ipv4_formatted_indicators) > 0:
ipv4_formatted_indicators = ips_to_ranges(ipv4_formatted_indicators, request_args.collapse_ips)
formatted_indicators.extend(ipv4_formatted_indicators)
if len(ipv6_formatted_indicators) > 0:
ipv6_formatted_indicators = ips_to_ranges(ipv6_formatted_indicators, request_args.collapse_ips)
formatted_indicators.extend(ipv6_formatted_indicators)
return {CTX_VALUES_KEY: list_to_str(formatted_indicators, '\n')}, len(formatted_indicators)
def get_outbound_mimetype() -> str:
"""Returns the mimetype of the export_iocs"""
ctx = get_integration_context().get('last_output', {})
return ctx.get(CTX_MIMETYPE_KEY, 'text/plain')
def get_outbound_ioc_values(on_demand, request_args: RequestArguments,
last_update_data=None, cache_refresh_rate=None) -> str:
"""
Get the ioc list to return in the list
"""
if last_update_data is None:
last_update_data = {}
last_update = last_update_data.get('last_run')
last_query = last_update_data.get('last_query')
current_iocs = last_update_data.get('current_iocs')
# on_demand ignores cache
if on_demand:
if request_args.is_request_change(last_update_data):
values_str = get_ioc_values_str_from_context(request_args=request_args, iocs=current_iocs)
else:
values_str = get_ioc_values_str_from_context(request_args=request_args)
else:
if last_update:
# takes the cache_refresh_rate amount of time back since run time.
cache_time, _ = parse_date_range(cache_refresh_rate, to_timestamp=True)
if last_update <= cache_time or request_args.is_request_change(last_update_data) or \
request_args.query != last_query:
values_str = refresh_outbound_context(request_args=request_args)
else:
values_str = get_ioc_values_str_from_context(request_args=request_args)
else:
values_str = refresh_outbound_context(request_args)
return values_str
def get_ioc_values_str_from_context(request_args: RequestArguments, iocs=None) -> str:
"""
Extracts output values from cache
"""
if iocs:
if request_args.offset > len(iocs):
return ''
iocs = iocs[request_args.offset: request_args.limit + request_args.offset]
returned_dict, _ = create_values_for_returned_dict(iocs, request_args=request_args)
current_cache = get_integration_context()
current_cache['last_output'] = returned_dict
set_integration_context(current_cache)
else:
returned_dict = get_integration_context().get('last_output', {})
return returned_dict.get(CTX_VALUES_KEY, '')
def try_parse_integer(int_to_parse: Any, err_msg: str) -> int:
"""
Tries to parse an integer, and if fails will throw DemistoException with given err_msg
"""
try:
res = int(int_to_parse)
except (TypeError, ValueError):
raise DemistoException(err_msg)
return res
def validate_basic_authentication(headers: dict, username: str, password: str) -> bool:
"""
Checks whether the authentication is valid.
:param headers: The headers of the http request
:param username: The integration's username
:param password: The integration's password
:return: Boolean which indicates whether the authentication is valid or not
"""
credentials: str = headers.get('Authorization', '')
if not credentials or 'Basic ' not in credentials:
return False
encoded_credentials: str = credentials.split('Basic ')[1]
credentials: str = b64decode(encoded_credentials).decode('utf-8')
if ':' not in credentials:
return False
credentials_list = credentials.split(':')
if len(credentials_list) != 2:
return False
user, pwd = credentials_list
return user == username and pwd == password
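# Illustrative sketch (not part of the original integration): the header shape
# the validator expects; the username and password are placeholders.
def _demo_basic_auth():
    from base64 import b64encode
    headers = {'Authorization': 'Basic ' + b64encode(b'user:secret').decode('utf-8')}
    return validate_basic_authentication(headers, 'user', 'secret')  # -> True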
''' ROUTE FUNCTIONS '''
def get_request_args(params):
limit = try_parse_integer(request.args.get('n', params.get('list_size', 10000)), CTX_LIMIT_ERR_MSG)
offset = try_parse_integer(request.args.get('s', 0), CTX_OFFSET_ERR_MSG)
out_format = request.args.get('v', params.get('format', 'text'))
query = request.args.get('q', params.get('indicators_query'))
mwg_type = request.args.get('t', params.get('mwg_type', "string"))
strip_port = request.args.get('sp', params.get('strip_port', False))
drop_invalids = request.args.get('di', params.get('drop_invalids', False))
category_default = request.args.get('cd', params.get('category_default', 'bc_category'))
category_attribute = request.args.get('ca', params.get('category_attribute', ''))
collapse_ips = request.args.get('tr', params.get('collapse_ips', DONT_COLLAPSE))
csv_text = request.args.get('tx', params.get('csv_text', False))
sort_field = request.args.get('sf', params.get('sort_field'))
sort_order = request.args.get('so', params.get('sort_order'))
# handle flags
if strip_port is not None and strip_port == '':
strip_port = True
if drop_invalids is not None and drop_invalids == '':
drop_invalids = True
if csv_text is not None and csv_text == '':
csv_text = True
if collapse_ips is not None and collapse_ips not in [DONT_COLLAPSE, COLLAPSE_TO_CIDR, COLLAPSE_TO_RANGES]:
collapse_ips = try_parse_integer(collapse_ips, CTX_COLLAPSE_ERR_MSG)
if collapse_ips == 0:
collapse_ips = DONT_COLLAPSE
elif collapse_ips == 1:
collapse_ips = COLLAPSE_TO_RANGES
elif collapse_ips == 2:
collapse_ips = COLLAPSE_TO_CIDR
# prevent given empty params
if len(query) == 0:
query = params.get('indicators_query')
if len(out_format) == 0:
out_format = params.get('format', 'text')
if out_format not in [FORMAT_PROXYSG, FORMAT_PANOSURL, FORMAT_TEXT, FORMAT_JSON, FORMAT_CSV,
FORMAT_JSON_SEQ, FORMAT_MWG, FORMAT_ARG_BLUECOAT, FORMAT_ARG_MWG, FORMAT_ARG_PANOSURL,
FORMAT_ARG_PROXYSG, FORMAT_ARG_PANOSURL, FORMAT_XSOAR_JSON, FORMAT_ARG_XSOAR_JSON,
FORMAT_XSOAR_JSON_SEQ, FORAMT_ARG_XSOAR_JSON_SEQ, FORMAT_XSOAR_CSV, FORMAT_ARG_XSOAR_CSV]:
raise DemistoException(CTX_FORMAT_ERR_MSG)
elif out_format in [FORMAT_ARG_PROXYSG, FORMAT_ARG_BLUECOAT]:
out_format = FORMAT_PROXYSG
elif out_format == FORMAT_ARG_MWG:
out_format = FORMAT_MWG
elif out_format == FORMAT_ARG_PANOSURL:
out_format = FORMAT_PANOSURL
elif out_format == FORMAT_ARG_XSOAR_JSON:
out_format = FORMAT_XSOAR_JSON
elif out_format == FORAMT_ARG_XSOAR_JSON_SEQ:
out_format = FORMAT_XSOAR_JSON_SEQ
elif out_format == FORMAT_ARG_XSOAR_CSV:
out_format = FORMAT_XSOAR_CSV
if out_format == FORMAT_MWG:
if mwg_type not in MWG_TYPE_OPTIONS:
raise DemistoException(CTX_MWG_TYPE_ERR_MSG)
return RequestArguments(query, out_format, limit, offset, mwg_type, strip_port, drop_invalids, category_default,
category_attribute, collapse_ips, csv_text, sort_field, sort_order)
@APP.route('/', methods=['GET'])
def route_list_values() -> Response:
"""
Main handler for values saved in the integration context
"""
try:
params = demisto.params()
credentials = params.get('credentials') if params.get('credentials') else {}
username: str = credentials.get('identifier', '')
password: str = credentials.get('password', '')
if username and password:
headers: dict = cast(Dict[Any, Any], request.headers)
if not validate_basic_authentication(headers, username, password):
err_msg: str = 'Basic authentication failed. Make sure you are using the right credentials.'
demisto.debug(err_msg)
return Response(err_msg, status=401, mimetype='text/plain', headers=[
('WWW-Authenticate', 'Basic realm="Login Required"'),
])
request_args = get_request_args(params)
created = datetime.now(timezone.utc)
cache_refresh_rate = params.get('cache_refresh_rate')
values = get_outbound_ioc_values(
on_demand=params.get('on_demand'),
last_update_data=get_integration_context(),
cache_refresh_rate=cache_refresh_rate,
request_args=request_args
)
query_time = (datetime.now(timezone.utc) - created).total_seconds()
if not get_integration_context() and params.get('on_demand'):
values = 'You are running in On-Demand mode - please run !eis-update command to initialize the ' \
'export process'
elif not values:
values = "No Results Found For the Query"
# if the case there are strings to add to the EDL, add them if the output type is text
if request_args.out_format == FORMAT_TEXT:
append_str = params.get("append_string")
prepend_str = params.get("prepend_string")
if append_str:
append_str = append_str.replace("\\n", "\n")
values = f"{values}{append_str}"
if prepend_str:
prepend_str = prepend_str.replace("\\n", "\n")
values = f"{prepend_str}\n{values}"
mimetype = get_outbound_mimetype()
list_size = 0
if values.strip():
list_size = values.count('\n') + 1 # add 1 as last line doesn't have a \n
max_age = ceil((datetime.now() - dateparser.parse(cache_refresh_rate)).total_seconds()) # type: ignore[operator]
demisto.debug(f'Returning exported indicators list of size: [{list_size}], created: [{created}], '
f'query time seconds: [{query_time}], max age: [{max_age}]')
resp = Response(values, status=200, mimetype=mimetype, headers=[
('X-ExportIndicators-Created', created.isoformat()),
('X-ExportIndicators-Query-Time-Secs', "{:.3f}".format(query_time)),
('X-ExportIndicators-Size', str(list_size))
])
resp.cache_control.max_age = max_age
resp.cache_control[
'stale-if-error'] = '600' # number of seconds we are willing to serve stale content when there is an error
return resp
except Exception:
return Response(traceback.format_exc(), status=400, mimetype='text/plain')
''' COMMAND FUNCTIONS '''
def test_module(args, params):
"""
Validates:
1. Valid port.
2. Valid cache_refresh_rate
"""
get_params_port(params)
on_demand = params.get('on_demand', None)
if not on_demand:
try_parse_integer(params.get('list_size'), CTX_LIMIT_ERR_MSG) # validate export_iocs Size was set
query = params.get('indicators_query') # validate indicators_query isn't empty
if not query:
raise ValueError('"Indicator Query" is required. Provide a valid query.')
cache_refresh_rate = params.get('cache_refresh_rate', '')
if not cache_refresh_rate:
raise ValueError(CTX_MISSING_REFRESH_ERR_MSG)
# validate cache_refresh_rate value
range_split = cache_refresh_rate.split(' ')
if len(range_split) != 2:
raise ValueError(CTX_MISSING_REFRESH_ERR_MSG)
try_parse_integer(range_split[0], 'Invalid time value for the Refresh Rate. Must be a valid integer.')
if not range_split[1] in ['minute', 'minutes', 'hour', 'hours', 'day', 'days', 'month', 'months', 'year',
'years']:
raise ValueError(
'Invalid time unit for the Refresh Rate. Must be minutes, hours, days, months, or years.')
parse_date_range(cache_refresh_rate, to_timestamp=True)
run_long_running(params, is_test=True)
return 'ok'
def update_outbound_command(args, params):
"""
Updates the export_iocs values and format on demand
"""
on_demand = params.get('on_demand')
if not on_demand:
raise DemistoException(
'"Update exported IOCs On Demand" is off. If you want to update manually please toggle it on.')
limit = try_parse_integer(args.get('list_size', params.get('list_size')), CTX_LIMIT_ERR_MSG)
print_indicators = args.get('print_indicators')
query = args.get('query')
# in case no query is entered take the query in the integration params
if not query:
query = params.get('indicators_query')
out_format = args.get('format')
offset = try_parse_integer(args.get('offset', 0), CTX_OFFSET_ERR_MSG)
mwg_type = args.get('mwg_type')
strip_port = args.get('strip_port') == 'True'
drop_invalids = args.get('drop_invalids') == 'True'
category_attribute = args.get('category_attribute')
category_default = args.get('category_default')
collapse_ips = args.get('collapse_ips')
csv_text = args.get('csv_text') == 'True'
sort_field = args.get('sort_field')
sort_order = args.get('sort_order')
request_args = RequestArguments(query, out_format, limit, offset, mwg_type, strip_port, drop_invalids,
category_default, category_attribute, collapse_ips, csv_text, sort_field, sort_order)
indicators = refresh_outbound_context(request_args, on_demand=on_demand)
if indicators:
hr = tableToMarkdown('List was updated successfully with the following values', indicators,
['Indicators']) if print_indicators == 'true' else 'List was updated successfully'
else:
hr = "No Results Found For the Query"
return CommandResults(readable_output=hr, raw_response=indicators)
def main():
"""
Main
"""
params = demisto.params()
credentials = params.get('credentials') if params.get('credentials') else {}
username: str = credentials.get('identifier', '')
password: str = credentials.get('password', '')
if (username and not password) or (password and not username):
err_msg: str = 'If using credentials, both username and password should be provided.'
demisto.debug(err_msg)
raise DemistoException(err_msg)
command = demisto.command()
demisto.debug('Command being called is {}'.format(command))
commands = {
'test-module': test_module,
'eis-update': update_outbound_command
}
try:
if command == 'long-running-execution':
run_long_running(params)
elif command in commands:
return_results(commands[command](demisto.args(), params))
else:
raise NotImplementedError(f'Command "{command}" is not implemented.')
except Exception as e:
demisto.error(traceback.format_exc())
err_msg = f'Error in {INTEGRATION_NAME} Integration [{e}]'
return_error(err_msg)
from NGINXApiModule import * # noqa: E402
if __name__ in ['__main__', '__builtin__', 'builtins']:
main()
|
py | 1a54d5a8df6fed96f84e659c2f023638fc99dc1e |
'''
Command line args
no filtering
sink: check if a file exists
'''
'''
Created by Paul E. Black and William Mentzer 2020
This software was developed at the National Institute of Standards and Technology
by employees of the Federal Government in the course of their official duties.
Pursuant to title 17 Section 105 of the United States Code the software is not
subject to copyright protection and are in the public domain.
We would appreciate acknowledgment if the software is used.
Paul E. Black [email protected]
William Mentzer [email protected]
'''
import math
import os
import sys
def main():
tainted_2 = None
tainted_3 = None
tainted_2 = sys.argv[1]
tainted_3 = tainted_2
if(1==1):
{}
elif(not 1==1):
# No filtering (sanitization)
tainted_3 = tainted_2
#flaw
os.path.exists(tainted_3)
if __name__ == '__main__':
main() |
py | 1a54d6caf82dae24247a36f6c7dd90441f5333a0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
sys.path = [
os.path.abspath('../..'),
os.path.abspath('../../bin')
] + sys.path
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '2.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.graphviz',
'sphinx.ext.viewcode',
'openstackdocstheme'
]
# General information about the project.
openstackdocs_repo_name = u'openstack/python-monascaclient'
project = u'Monasca Client Dev Docs'
openstackdocs_use_storyboard = True
copyright = u'2014-present, OpenStack Foundation'
author = u'OpenStack Foundation'
openstackdocs_auto_name = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
'common',
'doc',
'documentation',
'etc',
'java'
]
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'
# If false, no index is generated.
html_use_index = True
# If false, no module index is generated.
html_use_modindex = True
# Output file base name for HTML help builder.
htmlhelp_basename = 'python-monascaclientdoc'
# -- Options for LaTeX output ---------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'python-monascaclient.tex', u'python-monascaclient Documentation',
u'Openstack Foundation \\textless{}[email protected]\\textgreater{}', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'python-monascaclient', u'python-monascaclient Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'python-monascaclient', u'python-monascaclient Documentation',
author, 'python-monascaclient', 'Rest-API to collect logs from your cloud.',
'Miscellaneous'),
]
|
py | 1a54d7413c494987176872c88963f255fb51d175 | from django import forms
from django.utils.translation import gettext_lazy as _
from i18nfield.forms import I18nModelForm
from pretalx.common.mixins.forms import ReadOnlyFlag
from pretalx.mail.context import get_context_explanation
from pretalx.mail.models import MailTemplate, QueuedMail
from pretalx.person.models import User
class MailTemplateForm(ReadOnlyFlag, I18nModelForm):
def __init__(self, *args, event=None, **kwargs):
self.event = event
if event:
kwargs['locales'] = event.locales
super().__init__(*args, **kwargs)
def clean_text(self):
text = self.cleaned_data['text']
if self.instance and self.instance.id:
_is_template_with_submission_context = self.instance in [
t
for t in self.instance.event.fixed_templates
if t != self.event.update_template
]
if _is_template_with_submission_context:
context = {item['name']: 'test' for item in get_context_explanation()}
try:
for language, local_text in text.data.items():
local_text.format(**context)
except KeyError as e:
msg = _('Unknown template key: "{key}", locale: {locale}').format(
key=e.args[0], locale=language
)
raise forms.ValidationError(msg)
return text
class Meta:
model = MailTemplate
fields = ['subject', 'text', 'reply_to', 'bcc']
class MailDetailForm(ReadOnlyFlag, forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not self.instance or not self.instance.to_users.all().count():
self.fields.pop('to_users')
else:
self.fields['to_users'].queryset = self.instance.to_users.all()
self.fields['to_users'].required = False
def clean(self, *args, **kwargs):
cleaned_data = super().clean(*args, **kwargs)
if not cleaned_data['to'] and not cleaned_data.get('to_users'):
self.add_error('to', forms.ValidationError(_('An email needs to have at least one recipient.')))
return cleaned_data
def save(self, *args, **kwargs):
obj = super().save(*args, **kwargs)
if self.has_changed() and 'to' in self.changed_data:
addresses = list(set(a.strip().lower() for a in (obj.to or '').split(',') if a.strip()))
for address in addresses:
user = User.objects.filter(email__iexact=address).first()
if user:
addresses.remove(address)
obj.to_users.add(user)
addresses = ','.join(addresses) if addresses else ''
obj.to = addresses
obj.save()
return obj
class Meta:
model = QueuedMail
fields = ['to', 'to_users', 'reply_to', 'cc', 'bcc', 'subject', 'text']
widgets = {'to_users': forms.CheckboxSelectMultiple}
class WriteMailForm(forms.ModelForm):
recipients = forms.MultipleChoiceField(
label=_('Recipient groups'),
choices=(
(
'submitted',
_(
'Everyone with submission(s) that have not been accepted/rejected yet'
),
),
(
'accepted',
_('All accepted speakers (who have not confirmed their talk yet)'),
),
('confirmed', _('All confirmed speakers')),
('rejected', _('All rejected speakers')),
('reviewers', _('All reviewers in your team')),
),
widget=forms.CheckboxSelectMultiple,
required=False,
)
tracks = forms.MultipleChoiceField(label=_('All submissions in these tracks'), required=False)
submission_types = forms.MultipleChoiceField(label=_('All submissions of these types'), required=False)
submissions = forms.MultipleChoiceField(required=False)
additional_recipients = forms.CharField(
label=_('Recipients'),
required=False,
help_text=_('One email address or several addresses separated by commas.'),
)
reply_to = forms.CharField(required=False)
def __init__(self, event, **kwargs):
super().__init__(**kwargs)
self.fields['submissions'].choices = [
(sub.code, sub.title) for sub in event.submissions.all()
]
if event.settings.use_tracks and event.tracks.all().exists():
self.fields['tracks'].choices = [
(track.pk, track.name) for track in event.tracks.all()
]
else:
del self.fields['tracks']
self.fields['submission_types'].choices = [
(submission_type.pk, submission_type.name) for submission_type in event.submission_types.all()
]
self.fields['text'].help_text = _(
'Please note: Placeholders will not be substituted, this is an upcoming feature. '
'Leave no placeholders in this field.'
)
class Meta:
model = QueuedMail
fields = ['cc', 'bcc', 'subject', 'text']
|
py | 1a54d7bac248d2f649cef65d98e690951fd4d843 | # -*- coding: utf-8 -*-
import hashlib, json, os, sys, socket, traceback, time, struct, collections
from datetime import datetime, timedelta
from struct import calcsize
from google.protobuf.json_format import MessageToJson
from threading import RLock
from futu.common.conn_mng import *
from futu.common.sys_config import *
from futu.common.pbjson import json2pb
ProtoInfo = collections.namedtuple('ProtoInfo', ['proto_id', 'serial_no'])
def get_message_head_len():
return calcsize(MESSAGE_HEAD_FMT)
def check_date_str_format(s, default_time="00:00:00"):
"""Check the format of date string"""
try:
str_fmt = s
if ":" not in s:
str_fmt = '{} {}'.format(s, default_time)
dt_obj = datetime.strptime(str_fmt, "%Y-%m-%d %H:%M:%S")
return RET_OK, dt_obj
except ValueError:
error_str = ERROR_STR_PREFIX + "wrong time or time format"
return RET_ERROR, error_str
def normalize_date_format(date_str, default_time="00:00:00"):
"""normalize the format of data"""
ret_code, ret_data = check_date_str_format(date_str, default_time)
if ret_code != RET_OK:
return ret_code, ret_data
return RET_OK, ret_data.strftime("%Y-%m-%d %H:%M:%S")
def normalize_start_end_date(start, end, delta_days=0, default_time_start="00:00:00", default_time_end="23:59:59", prefer_end_now=True):
"""
:param start:
:param end:
:param delta_days:
:param default_time_start:
:param default_time_end:
    :param prefer_end_now: if True and both start and end are None, end is set to the current time; if False, start is set to the current time instead
:return:
"""
if start is not None and is_str(start) is False:
error_str = ERROR_STR_PREFIX + "the type of start param is wrong"
return RET_ERROR, error_str, None, None
if end is not None and is_str(end) is False:
error_str = ERROR_STR_PREFIX + "the type of end param is wrong"
return RET_ERROR, error_str, None, None
dt_start = None
dt_end = None
delta = timedelta(days=delta_days)
hour_end, min_end, sec_end = [int(x) for x in default_time_end.split(':')]
hour_start, min_start, sec_start = [int(x) for x in default_time_start.split(':')]
if start:
ret_code, ret_data = check_date_str_format(start, default_time_start)
if ret_code != RET_OK:
return ret_code, ret_data, start, end
dt_start = ret_data
if end:
ret_code, ret_data = check_date_str_format(end, default_time_end)
if ret_code != RET_OK:
return ret_code, ret_data, start, end
dt_end = ret_data
if end and not start:
dt_tmp = dt_end - delta
dt_start = datetime(year=dt_tmp.year, month=dt_tmp.month, day=dt_tmp.day, hour=hour_start, minute=min_start, second=sec_start)
if start and not end:
dt_tmp = dt_start + delta
dt_end = datetime(year=dt_tmp.year, month=dt_tmp.month, day=dt_tmp.day, hour=hour_end, minute=min_end, second=sec_end)
if not start and not end:
if prefer_end_now:
dt_now = datetime.now()
dt_end = datetime(year=dt_now.year, month=dt_now.month, day=dt_now.day, hour=hour_end, minute=min_end, second=sec_end)
dt_tmp = dt_end - delta
dt_start = datetime(year=dt_tmp.year, month=dt_tmp.month, day=dt_tmp.day, hour=hour_start, minute=min_start, second=sec_start)
else:
dt_now = datetime.now()
dt_start = datetime(year=dt_now.year, month=dt_now.month, day=dt_now.day, hour=hour_start, minute=min_start,
second=sec_start)
dt_tmp = dt_start + delta
dt_end = datetime(year=dt_tmp.year, month=dt_tmp.month, day=dt_tmp.day, hour=hour_end, minute=min_end,
second=sec_end)
start = dt_start.strftime("%Y-%m-%d %H:%M:%S")
end = dt_end.strftime("%Y-%m-%d %H:%M:%S")
return RET_OK, '', start, end
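# Illustrative sketch (added for clarity, not part of the original module); the
# dates below are assumptions. It shows how delta_days and prefer_end_now fill
# in a missing boundary:
#
#   ret, msg, start, end = normalize_start_end_date('2019-01-01', None, delta_days=7)
#   # -> start == '2019-01-01 00:00:00', end == '2019-01-08 23:59:59'
#
#   ret, msg, start, end = normalize_start_end_date(None, None, delta_days=7)
#   # -> prefer_end_now=True, so end is today at 23:59:59 and start is 7 days earlier at 00:00:00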
def extract_pls_rsp(rsp_str):
"""Extract the response of PLS"""
try:
rsp = json.loads(rsp_str)
except ValueError:
traceback.print_exc()
err = sys.exc_info()[1]
err_str = ERROR_STR_PREFIX + str(err)
return RET_ERROR, err_str, None
error_code = int(rsp['retType'])
if error_code != 1:
error_str = ERROR_STR_PREFIX + rsp['retMsg']
return RET_ERROR, error_str, None
return RET_OK, "", rsp
def split_stock_str(stock_str_param):
"""split the stock string"""
stock_str = str(stock_str_param)
split_loc = stock_str.find(".")
    '''Do not use the built-in str.split() here: it cannot handle some stock
    strings correctly, for instance US..DJI, where the dot itself is part of
    the original code.'''
if 0 <= split_loc < len(
stock_str) - 1 and stock_str[0:split_loc] in MKT_MAP:
market_str = stock_str[0:split_loc]
market_code = MKT_MAP[market_str]
partial_stock_str = stock_str[split_loc + 1:]
return RET_OK, (market_code, partial_stock_str)
else:
error_str = ERROR_STR_PREFIX + "format of %s is wrong. (US.AAPL, HK.00700, SZ.000001)" % stock_str
return RET_ERROR, error_str
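# Illustrative sketch (not in the original source), assuming MKT_MAP contains
# the usual 'HK'/'US' keys:
#
#   split_stock_str('HK.00700')  # -> (RET_OK, (MKT_MAP['HK'], '00700'))
#   split_stock_str('US..DJI')   # -> (RET_OK, (MKT_MAP['US'], '.DJI')); the extra dot stays in the code
#   split_stock_str('00700')     # -> (RET_ERROR, error message) because the market prefix is missing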
def merge_qot_mkt_stock_str(qot_mkt, partial_stock_str):
"""
Merge the string of stocks
:param market: market code
:param partial_stock_str: original stock code string. i.e. "AAPL","00700", "000001"
:return: unified representation of a stock code. i.e. "US.AAPL", "HK.00700", "SZ.000001"
"""
market_str = QUOTE.REV_MKT_MAP[qot_mkt]
stock_str = '.'.join([market_str, partial_stock_str])
return stock_str
def merge_trd_mkt_stock_str(trd_mkt, partial_stock_str):
"""
Merge the string of stocks
:param market: market code
:param partial_stock_str: original stock code string. i.e. "AAPL","00700", "000001"
:return: unified representation of a stock code. i.e. "US.AAPL", "HK.00700", "SZ.000001"
"""
mkt_qot = Market.NONE
mkt = TRADE.REV_TRD_MKT_MAP[trd_mkt] if trd_mkt in TRADE.REV_TRD_MKT_MAP else TrdMarket.NONE
if mkt == TrdMarket.HK:
mkt_qot = Market.HK
elif mkt == TrdMarket.US:
mkt_qot = Market.US
elif mkt == TrdMarket.HKCC or mkt == TrdMarket.CN:
if partial_stock_str.startswith('6') or partial_stock_str.startswith('9'):
mkt_qot = Market.SH
else:
mkt_qot = Market.SZ
else:
raise Exception("merge_trd_mkt_stock_str: unknown trd_mkt.")
return merge_qot_mkt_stock_str(MKT_MAP[mkt_qot], partial_stock_str)
def str2binary(s):
"""
Transfer string to binary
:param s: string content to be transformed to binary
:return: binary
"""
return s.encode('utf-8')
def is_str(obj):
if sys.version_info.major == 3:
return isinstance(obj, str) or isinstance(obj, bytes)
else:
return isinstance(obj, basestring)
def price_to_str_int1000(price):
    return str(int(round(float(price) * 1000, 0))) if str(price) != '' else ''
# 1000*int price to float val
def int1000_price_to_float(price):
    return round(float(price) / 1000.0, 3) if str(price) != '' else float(0)
# 10^9 int price to float val
def int10_9_price_to_float(price):
    return round(float(price) / float(10**9), 3) if str(price) != '' else float(0)
# Deduplicate and normalize a list argument.
def unique_and_normalize_list(lst):
ret = []
if not lst:
return ret
tmp = lst if isinstance(lst, list) else [lst]
[ret.append(x) for x in tmp if x not in ret]
return ret
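# Illustrative sketch (added, not in the original source): duplicates are
# dropped while first-seen order is preserved, and a scalar is wrapped in a list.
#
#   unique_and_normalize_list(['HK.00700', 'US.AAPL', 'HK.00700'])  # -> ['HK.00700', 'US.AAPL']
#   unique_and_normalize_list('HK.00700')                           # -> ['HK.00700']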
def md5_transform(raw_str):
h1 = hashlib.md5()
h1.update(raw_str.encode(encoding='utf-8'))
return h1.hexdigest()
g_unique_id = int(time.time() % 10000)
g_unique_lock = RLock()
def get_unique_id32():
global g_unique_id
with g_unique_lock:
g_unique_id += 1
if g_unique_id >= 4294967295:
g_unique_id = int(time.time() % 10000)
ret_id = g_unique_id
return ret_id
class ProtobufMap(dict):
created_protobuf_map = {}
def __init__(self):
""" InitConnect = 1001 # 初始化连接 """
from futu.common.pb.InitConnect_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.InitConnect] = Response()
""" GetGlobalState = 1002 # 获取全局状态 """
from futu.common.pb.GetGlobalState_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.GetGlobalState] = Response()
""" Notify = 1003 # 通知推送 """
from futu.common.pb.Notify_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Notify] = Response()
""" KeepAlive = 1004 # 通知推送 """
from futu.common.pb.KeepAlive_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.KeepAlive] = Response()
""" GetUserInfo = 1005 # 获取全局状态 """
from futu.common.pb.GetUserInfo_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.GetUserInfo] = Response()
""" GetUserInfo = 1006 # 获取用户信息 """
from futu.common.pb.Verification_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Verification] = Response()
""" Trd_GetAccList = 2001 # 获取业务账户列表 """
from futu.common.pb.Trd_GetAccList_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Trd_GetAccList] = Response()
""" Trd_UnlockTrade = 2005 # 解锁或锁定交易 """
from futu.common.pb.Trd_UnlockTrade_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Trd_UnlockTrade] = Response()
""" Trd_SubAccPush = 2008 # 订阅业务账户的交易推送数据 """
from futu.common.pb.Trd_SubAccPush_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Trd_SubAccPush] = Response()
""" Trd_GetFunds = 2101 # 获取账户资金 """
from futu.common.pb.Trd_GetFunds_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Trd_GetFunds] = Response()
""" Trd_GetPositionList = 2102 # 获取账户持仓 """
from futu.common.pb.Trd_GetPositionList_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Trd_GetPositionList] = Response()
""" Trd_GetOrderList = 2201 # 获取订单列表 """
from futu.common.pb.Trd_GetOrderList_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Trd_GetOrderList] = Response()
""" Trd_PlaceOrder = 2202 # 下单 """
from futu.common.pb.Trd_PlaceOrder_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Trd_PlaceOrder] = Response()
""" Trd_ModifyOrder = 2205 # 修改订单 """
from futu.common.pb.Trd_ModifyOrder_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Trd_ModifyOrder] = Response()
""" Trd_UpdateOrder = 2208 # 订单状态变动通知(推送) """
from futu.common.pb.Trd_UpdateOrder_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Trd_UpdateOrder] = Response()
""" Trd_GetOrderFillList = 2211 # 获取成交列表 """
from futu.common.pb.Trd_GetOrderFillList_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Trd_GetOrderFillList] = Response()
""" Trd_UpdateOrderFill = 2218 # 成交通知(推送) """
from futu.common.pb.Trd_UpdateOrderFill_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Trd_UpdateOrderFill] = Response()
""" Trd_GetHistoryOrderList = 2221 # 获取历史订单列表 """
from futu.common.pb.Trd_GetHistoryOrderList_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Trd_GetHistoryOrderList] = Response()
""" Trd_GetHistoryOrderFillList = 2222 # 获取历史成交列表 """
from futu.common.pb.Trd_GetHistoryOrderFillList_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Trd_GetHistoryOrderFillList] = Response()
""" Qot_Sub = 3001 # 订阅或者反订阅 """
from futu.common.pb.Qot_Sub_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Qot_Sub] = Response()
""" Qot_RegQotPush = 3002 # 注册推送 """
from futu.common.pb.Qot_RegQotPush_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Qot_RegQotPush] = Response()
""" Qot_GetSubInfo = 3003 # 获取订阅信息 """
from futu.common.pb.Qot_GetSubInfo_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Qot_GetSubInfo] = Response()
""" Qot_GetBasicQot = 3004 # 获取股票基本行情 """
from futu.common.pb.Qot_GetBasicQot_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Qot_GetBasicQot] = Response()
""" Qot_UpdateBasicQot = 3005 # 推送股票基本行情 """
from futu.common.pb.Qot_UpdateBasicQot_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Qot_UpdateBasicQot] = Response()
""" Qot_GetKL = 3006 # 获取K线 """
from futu.common.pb.Qot_GetKL_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Qot_GetKL] = Response()
""" Qot_UpdateKL = 3007 # 推送K线 """
from futu.common.pb.Qot_UpdateKL_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Qot_UpdateKL] = Response()
""" Qot_GetRT = 3008 # 获取分时 """
from futu.common.pb.Qot_GetRT_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Qot_GetRT] = Response()
""" Qot_UpdateRT = 3009 # 推送分时 """
from futu.common.pb.Qot_UpdateRT_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Qot_UpdateRT] = Response()
""" Qot_GetTicker = 3010 # 获取逐笔 """
from futu.common.pb.Qot_GetTicker_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Qot_GetTicker] = Response()
""" Qot_UpdateTicker = 3011 # 推送逐笔 """
from futu.common.pb.Qot_UpdateTicker_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Qot_UpdateTicker] = Response()
""" Qot_GetOrderBook = 3012 # 获取买卖盘 """
from futu.common.pb.Qot_GetOrderBook_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Qot_GetOrderBook] = Response()
""" Qot_UpdateOrderBook = 3013 # 推送买卖盘 """
from futu.common.pb.Qot_UpdateOrderBook_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Qot_UpdateOrderBook] = Response()
""" Qot_GetBroker = 3014 # 获取经纪队列 """
from futu.common.pb.Qot_GetBroker_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Qot_GetBroker] = Response()
""" Qot_UpdateBroker = 3015 # 推送经纪队列 """
from futu.common.pb.Qot_UpdateBroker_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Qot_UpdateBroker] = Response()
""" Qot_GetHistoryKL = 3100 # 获取历史K线 """
from futu.common.pb.Qot_GetHistoryKL_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Qot_GetHistoryKL] = Response()
""" Qot_GetHistoryKLPoints = 3101 # 获取多只股票历史单点K线 """
from futu.common.pb.Qot_GetHistoryKLPoints_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Qot_GetHistoryKLPoints] = Response()
""" Qot_GetRehab = 3102 # 获取复权信息 """
from futu.common.pb.Qot_GetRehab_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Qot_GetRehab] = Response()
""" Qot_GetTradeDate = 3200 # 获取市场交易日 """
from futu.common.pb.Qot_GetTradeDate_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Qot_GetTradeDate] = Response()
""" Qot_GetSuspend = 3201 # 获取股票停牌信息 """
from futu.common.pb.Qot_GetSuspend_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Qot_GetSuspend] = Response()
""" Qot_GetStaticInfo = 3202 # 获取股票列表 """
from futu.common.pb.Qot_GetStaticInfo_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Qot_GetStaticInfo] = Response()
""" Qot_GetSecuritySnapshot = 3203 # 获取股票快照 """
from futu.common.pb.Qot_GetSecuritySnapshot_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Qot_GetSecuritySnapshot] = Response()
""" Qot_GetPlateSet = 3204 # 获取板块集合下的板块 """
from futu.common.pb.Qot_GetPlateSet_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Qot_GetPlateSet] = Response()
""" Qot_GetPlateSecurity = 3205 # 获取板块下的股票 """
from futu.common.pb.Qot_GetPlateSecurity_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Qot_GetPlateSecurity] = Response()
""" Trd_GetMaxTrdQtys = 2111 查询最大买卖数量 """
from futu.common.pb.Trd_GetMaxTrdQtys_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Trd_GetAccTradingInfo] = Response()
""" Qot_GetReference = 3206 获取正股相关股票,暂时只有窝轮"""
from futu.common.pb.Qot_GetReference_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Qot_GetReference] = Response()
""" Qot_GetOwnerPlate = 3207 获取股票所属板块"""
from futu.common.pb.Qot_GetOwnerPlate_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Qot_GetOwnerPlate] = Response()
""" Qot_GetOwnerPlate = 3208 获取高管持股变动"""
from futu.common.pb.Qot_GetHoldingChangeList_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Qot_GetHoldingChangeList] = Response()
from futu.common.pb.Qot_RequestHistoryKL_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Qot_RequestHistoryKL] = Response()
from futu.common.pb.Qot_GetOptionChain_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Qot_GetOptionChain] = Response()
""" Qot_GetOrderDetail = 3016 获取委托明细 """
from futu.common.pb.Qot_GetOrderDetail_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Qot_GetOrderDetail] = Response()
""" Qot_UpdateOrderDetail = 3017 推送委托明细 """
from futu.common.pb.Qot_UpdateOrderDetail_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Qot_UpdateOrderDetail] = Response()
""" Qot_GetWarrantData = 3210 获取涡轮 """
from futu.common.pb.Qot_GetWarrant_pb2 import Response as GetWarrantPBResponse
ProtobufMap.created_protobuf_map[ProtoId.Qot_GetWarrantData] = GetWarrantPBResponse()
""" Qot_GetOrderDetail = 3104 已使用过的额度 """
from futu.common.pb.Qot_RequestHistoryKLQuota_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Qot_RequestHistoryKLQuota] = Response()
"""获取除权信息"""
from futu.common.pb.Qot_RequestRehab_pb2 import Response
ProtobufMap.created_protobuf_map[ProtoId.Qot_RequestRehab] = Response()
def __getitem__(self, key):
return ProtobufMap.created_protobuf_map[key] if key in ProtobufMap.created_protobuf_map else None
pb_map = ProtobufMap()
def binary2str(b, proto_id, proto_fmt_type):
"""
Transfer binary to string
:param b: binary content to be transformed to string
:return: string
"""
if proto_fmt_type == ProtoFMT.Json:
return b.decode('utf-8')
elif proto_fmt_type == ProtoFMT.Protobuf:
rsp = pb_map[proto_id]
if IS_PY2:
rsp.ParseFromString(str(b))
else:
rsp.ParseFromString(b)
return MessageToJson(rsp)
else:
raise Exception("binary2str: unknown proto format.")
def binary2pb(b, proto_id, proto_fmt_type):
"""
Transfer binary to pb message
:param b: binary content to be transformed to pb message
:return: pb message
"""
rsp = pb_map[proto_id]
if rsp is None:
return None
if proto_fmt_type == ProtoFMT.Json:
return json2pb(type(rsp), b.decode('utf-8'))
elif proto_fmt_type == ProtoFMT.Protobuf:
rsp = type(rsp)()
# logger.debug((proto_id))
if IS_PY2:
rsp.ParseFromString(str(b))
else:
rsp.ParseFromString(b)
return rsp
else:
raise Exception("binary2str: unknown proto format.")
def pack_pb_req(pb_req, proto_id, conn_id, serial_no=0):
proto_fmt = SysConfig.get_proto_fmt()
serial_no = serial_no if serial_no else get_unique_id32()
is_encrypt = FutuConnMng.is_conn_encrypt(conn_id)
if proto_fmt == ProtoFMT.Json:
req_json = MessageToJson(pb_req)
ret, msg, req = _joint_head(proto_id, proto_fmt, len(req_json),
req_json.encode(), conn_id, serial_no, is_encrypt)
return ret, msg, req
elif proto_fmt == ProtoFMT.Protobuf:
ret, msg, req = _joint_head(proto_id, proto_fmt, pb_req.ByteSize(), pb_req, conn_id, serial_no, is_encrypt)
return ret, msg, req
else:
error_str = ERROR_STR_PREFIX + 'unknown protocol format, %d' % proto_fmt
return RET_ERROR, error_str, None
def _joint_head(proto_id, proto_fmt_type, body_len, str_body, conn_id, serial_no, is_encrypt):
# sha20 = b'00000000000000000000'
reserve8 = b'\x00\x00\x00\x00\x00\x00\x00\x00'
if proto_fmt_type == ProtoFMT.Protobuf:
str_body = str_body.SerializeToString()
if type(str_body) is not bytes:
str_body = bytes_utf8(str_body)
sha20 = hashlib.sha1(str_body).digest()
    # The InitConnect request must be encrypted with RSA.
try:
if proto_id == ProtoId.InitConnect:
if SysConfig.INIT_RSA_FILE != '':
str_body = RsaCrypt.encrypt(str_body)
body_len = len(str_body)
else:
if is_encrypt:
ret, msg, str_body = FutuConnMng.encrypt_conn_data(conn_id, str_body)
body_len = len(str_body)
if ret != RET_OK:
return ret, msg, str_body
except Exception as e:
return RET_ERROR, str(e), None
fmt = "%s%ds" % (MESSAGE_HEAD_FMT, body_len)
bin_head = struct.pack(fmt, b'F', b'T', proto_id, proto_fmt_type,
API_PROTO_VER, serial_no, body_len, sha20, reserve8, str_body)
return RET_OK, "", bin_head
def parse_head(head_bytes):
head_dict = {}
head_dict['head_1'], head_dict['head_2'], head_dict['proto_id'], \
head_dict['proto_fmt_type'], head_dict['proto_ver'], \
head_dict['serial_no'], head_dict['body_len'], head_dict['sha20'], \
head_dict['reserve8'], = struct.unpack(MESSAGE_HEAD_FMT, head_bytes)
return head_dict
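# Illustrative sketch (not in the original source): a request packed by
# pack_pb_req() starts with a fixed-size header laid out by MESSAGE_HEAD_FMT,
# and parse_head() recovers its fields from those leading bytes. pb_req,
# conn_id and the use of ProtoId.GetGlobalState are assumptions for the example.
#
#   ret, msg, req_bytes = pack_pb_req(pb_req, ProtoId.GetGlobalState, conn_id)
#   head = parse_head(req_bytes[:get_message_head_len()])
#   # head['proto_id'] == ProtoId.GetGlobalState; head['body_len'] is the (possibly encrypted) body size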
def parse_proto_info(head_bytes):
unpacked = struct.unpack(MESSAGE_HEAD_FMT, head_bytes)
return ProtoInfo(unpacked[2], unpacked[5])
def decrypt_rsp_body(rsp_body, head_dict, conn_id, is_encrypt):
ret_code = RET_OK
msg = ''
sha20 = head_dict['sha20']
proto_id = head_dict['proto_id']
if is_encrypt:
try:
if proto_id == ProtoId.InitConnect:
rsp_body = RsaCrypt.decrypt(rsp_body)
else:
ret_code, msg, decrypt_data = FutuConnMng.decrypt_conn_data(conn_id, rsp_body)
rsp_body = decrypt_data
except Exception as e:
msg = sys.exc_info()[1]
ret_code = RET_ERROR
# check sha20
if ret_code == RET_OK:
sha20_check = hashlib.sha1(rsp_body).digest()
if sha20_check != sha20:
ret_code = RET_ERROR
msg = "proto id:{} conn_id:{} check sha error!".format(proto_id, conn_id)
return ret_code, msg, rsp_body
def make_from_namedtuple(t, **kwargs):
"""
    t is a namedtuple; return a copy of t with some of its fields updated to the values given in kwargs.
:param t:
:param kwargs:
:return:
"""
d = t._asdict()
d.update(kwargs)
cls = type(t)
return cls(**d) |
py | 1a54d884c4211a0e6cc4bbb0f6577d0bce1d7137 | #!/usr/bin/env python
# Lint as: python3
from absl.testing import absltest
from grr_response_core.lib.rdfvalues import osquery as rdf_osquery
from grr_response_server.gui.api_plugins import osquery as api_osquery
class UtilsTest(absltest.TestCase):
"""Test for osquery utils."""
def testListToCSVBytes(self):
output_bytes = api_osquery._LineToCsvBytes(["a", "b", "c", "d"])
output_text = output_bytes.decode("utf-8")
self.assertEqual("a,b,c,d\r\n", output_text)
def testSomeTextToCsvBytes(self):
table = rdf_osquery.OsqueryTable()
table.header.columns.append(rdf_osquery.OsqueryColumn(name="A"))
table.header.columns.append(rdf_osquery.OsqueryColumn(name="B"))
table.rows.append(rdf_osquery.OsqueryRow(values=["1-A", "1-B"]))
table.rows.append(rdf_osquery.OsqueryRow(values=["2-A", "2-B"]))
result = rdf_osquery.OsqueryResult()
result.table = table
output_bytes = api_osquery._ParseToCsvBytes([result])
output_text = list(map(lambda b: b.decode("utf-8"), output_bytes))
self.assertListEqual(["A,B\r\n", "1-A,1-B\r\n", "2-A,2-B\r\n"], output_text)
def testTextWithCommasToCsvBytes(self):
table = rdf_osquery.OsqueryTable()
table.header.columns.append(rdf_osquery.OsqueryColumn(name="c,o,l,u,m,n"))
table.rows.append(rdf_osquery.OsqueryRow(values=["c,e,l,l"]))
result = rdf_osquery.OsqueryResult()
result.table = table
output_bytes = api_osquery._ParseToCsvBytes([result])
output_text = list(map(lambda b: b.decode("utf-8"), output_bytes))
self.assertListEqual(["\"c,o,l,u,m,n\"\r\n", "\"c,e,l,l\"\r\n"],
output_text)
if __name__ == "__main__":
absltest.main()
|
py | 1a54d94bd3e3e59b01da3aae0a95ea302b73a0c8 | # -*- coding: utf-8 -*-
from Crypto.Cipher import AES
class BaseWeChatCipher:
def __init__(self, cipher):
self.cipher = cipher
def encrypt(self, plaintext):
return self.cipher.encrypt(plaintext)
def decrypt(self, ciphertext):
return self.cipher.decrypt(ciphertext)
class WeChatCipher(BaseWeChatCipher):
def __init__(self, key, iv=None):
iv = iv or key[:16]
super().__init__(AES.new(key, AES.MODE_CBC, iv))
class AesEcbCipher(BaseWeChatCipher):
def __init__(self, key):
super().__init__(AES.new(key, AES.MODE_ECB))
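# Illustrative sketch (added, not part of the original module): both wrappers
# work on raw AES blocks, so the caller must supply a 16/24/32-byte key and a
# plaintext padded to a multiple of 16 bytes. The key and payload are assumptions.
#
#   key = b'0123456789abcdef'                 # 16-byte key
#   ciphertext = WeChatCipher(key).encrypt(b'16-byte payload!')
#   assert WeChatCipher(key).decrypt(ciphertext) == b'16-byte payload!'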
|
py | 1a54db51f4c1c1cb4a1ea35e7dcc0cccecc0937f | # -*- coding: utf-8 -*-
"""
Package setup
"""
import sys
import os
import io
import setuptools
from setuptools import setup
def read(fname):
with io.open(
os.path.join(os.path.dirname(__file__), fname), encoding="utf-8"
) as _in:
return _in.read()
if __name__ == "__main__":
import versioneer
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="kitchen",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description="Manipulate counts matrix files and cook scRNA-seq data from command line",
long_description=long_description,
author="Cody Heiser",
author_email="[email protected]",
url="https://github.com/codyheiser/kitchen",
install_requires=read("requirements.txt").splitlines(),
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Topic :: Scientific/Engineering",
],
python_requires=">=3.6",
entry_points={
"console_scripts": ["kitchen = kitchen.kitchen:main"]
},
)
|
py | 1a54db8f9d17d859e92feba6e289b50a9ae8a3d1 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds a text ad with ad parameters.
To get ad groups, run get_ad_groups.py. To get keywords, run add_keywords.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
from googleads import adwords
AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
CRITERION_ID = 'INSERT_KEYWORD_CRITERION_ID_HERE'
def main(client, ad_group_id, criterion_id):
# Initialize appropriate service.
ad_group_ad_service = client.GetService('AdGroupAdService', version='v201601')
ad_param_service = client.GetService('AdParamService', version='v201601')
# Construct operations for adding text ad object and add to an ad group.
operations = [{
'operator': 'ADD',
'operand': {
'xsi_type': 'AdGroupAd',
'adGroupId': ad_group_id,
'ad': {
'xsi_type': 'TextAd',
'finalUrls': ['http://www.example.com'],
'displayUrl': 'example.com',
'description1': 'Low-gravity fun for {param1:cheap}.',
'description2': 'Only {param2:a few} seats left!',
'headline': 'Luxury Mars Cruises'
},
'status': 'ENABLED'
}
}]
ads = ad_group_ad_service.mutate(operations)['value']
# Display results.
for ad in ads:
print ('Text ad with id \'%s\' was successfully added to an ad group with '
'id \'%s\'.' % (ad['adGroupId'], ad['ad']['id']))
# Construct operations for setting ad parameters.
operations = [
{
'operator': 'SET',
'operand': {
'adGroupId': ad_group_id,
'criterionId': criterion_id,
'insertionText': u'£100',
'paramIndex': '1'
}
},
{
'operator': 'SET',
'operand': {
'adGroupId': ad_group_id,
'criterionId': criterion_id,
'insertionText': '50',
'paramIndex': '2'
}
}
]
ad_params = ad_param_service.mutate(operations)
# Display results.
for ad_param in ad_params:
print ('Ad parameter with text \'%s\' was successfully set for criterion '
'with id \'%s\' and ad group id \'%s\'.'
% (ad_param['insertionText'], ad_param['criterionId'],
ad_param['adGroupId']))
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, AD_GROUP_ID, CRITERION_ID)
|
py | 1a54dc2841e703c1b5db576a9876b233da61244c | # Generated by Django 2.2.9 on 2020-01-04 21:20
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Human',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('age', models.IntegerField(blank=True, null=True)),
('is_female', models.BooleanField(default=True)),
('body_temp', models.DecimalField(blank=True, decimal_places=2, max_digits=15, null=True)),
('birth_date', models.DateField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='Person',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Pet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='PizzaOrder',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(choices=[('ORDERED', 'Ordered'), ('COOKING', 'Cooking'), ('COMPLETE', 'Complete')], max_length=64)),
],
),
migrations.CreateModel(
name='Owner',
fields=[
('person_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='tests.Person')),
('pet', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='tests.Pet')),
],
bases=('tests.person',),
),
]
|
py | 1a54dc4277611825e135dea49683f11e6eb7af39 |
from ..ch09.adaptable_heap_priority_queue import AdaptableHeapPriorityQueue
def shortest_path_lengths(g, src):
"""Compute shortest-path distances from src to reachable vertices of g.
Graph g can be undirected or directed, but must be weighted such that
e.element() returns a numeric weight for each edge e.
Return dictionary mapping each reachable vertex to its distance from src.
"""
d = {} # d[v] is upper bound from s to v
cloud = {} # map reachable v to its d[v] value
pq = AdaptableHeapPriorityQueue() # vertex v will have key d[v]
pqlocator = {} # map from vertex to its pq locator
# for each vertex v of the graph, add an entry to the priority queue, with
# the source having distance 0 and all others having infinite distance
for v in g.vertices():
if v is src:
d[v] = 0
else:
d[v] = float('inf') # syntax for positive infinity
pqlocator[v] = pq.add(d[v], v) # save locator for future updates
while not pq.is_empty():
key, u = pq.remove_min()
cloud[u] = key # its correct d[u] value
del pqlocator[u] # u is no longer in pq
for e in g.incident_edges(u): # outgoing edges (u,v)
v = e.opposite(u)
if v not in cloud:
# perform relaxation step on edge (u,v)
wgt = e.element()
if d[u] + wgt < d[v]: # better path to v?
d[v] = d[u] + wgt # update the distance
pq.update(pqlocator[v], d[v], v) # update the pq entry
return cloud # only includes reachable vertices
def shortest_path_tree(g, s, d):
"""Reconstruct shortest-path tree rooted at vertex s, given distance map d.
Return tree as a map from each reachable vertex v (other than s) to the
edge e=(u,v) that is used to reach v from its parent u in the tree.
"""
tree = {}
for v in d:
if v is not s:
for e in g.incident_edges(v, False):
u = e.opposite(v)
wgt = e.element()
if d[v] == d[u] + wgt:
tree[v] = e # edge e is used to reach v
return tree
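# Illustrative sketch (not part of the original module): typical usage with a
# weighted Graph instance g and a start vertex src (both assumptions here).
#
#   d = shortest_path_lengths(g, src)      # distance map for reachable vertices
#   tree = shortest_path_tree(g, src, d)   # edge used to reach each vertex other than src
#   for v, e in tree.items():
#       print(v.element(), 'reached via an edge of weight', e.element())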
|
py | 1a54dc465638320a4a50b1bdc04f6cd82d0003f3 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=12
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=6
c.append(cirq.Z.on(input_qubit[3])) # number=7
c.append(cirq.H.on(input_qubit[0])) # number=9
c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=10
c.append(cirq.H.on(input_qubit[0])) # number=11
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq145.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() |
py | 1a54de36000a0ea102da754ce2e0b63160e8bac1 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import nltk
import pytest
from mxnet.gluon.data import DataLoader
from ..question_answering.data_pipeline import SQuADDataLoaderTransformer
@pytest.mark.remote_required
@pytest.mark.serial
def test_data_loader_able_to_read_spacy(squad_dev_and_vocab_spacy_provider):
_, _, train_dataset, dev_dataset, word_vocab, char_vocab = squad_dev_and_vocab_spacy_provider
dataloader = DataLoader(train_dataset.transform(SQuADDataLoaderTransformer()), batch_size=1)
assert word_vocab is not None
assert char_vocab is not None
for record_index, context, query, context_char, query_char, begin, end in dataloader:
assert record_index is not None
assert context is not None
assert query is not None
assert context_char is not None
assert query_char is not None
assert begin is not None
assert end is not None
break
def test_data_loader_able_to_read_nltk(squad_dev_and_vocab_nltk_provider):
nltk.download('punkt')
_, _, train_dataset, dev_dataset, word_vocab, char_vocab = squad_dev_and_vocab_nltk_provider
dataloader = DataLoader(train_dataset.transform(SQuADDataLoaderTransformer()), batch_size=1)
assert word_vocab is not None
assert char_vocab is not None
for record_index, context, query, context_char, query_char, begin, end in dataloader:
assert record_index is not None
assert context is not None
assert query is not None
assert context_char is not None
assert query_char is not None
assert begin is not None
assert end is not None
break
|
py | 1a54debe366f883157b147032beff57eae2bb490 | """
Parser and serializer for file formats supported by compare-locales library:
https://hg.mozilla.org/l10n/compare-locales/
"""
from __future__ import absolute_import
import logging
from collections import OrderedDict
from compare_locales import (
parser,
serializer,
)
from pontoon.sync import SyncError
from pontoon.sync.exceptions import ParseError
from pontoon.sync.formats.base import ParsedResource
from pontoon.sync.utils import create_parent_directory
from pontoon.sync.vcs.models import VCSTranslation
log = logging.getLogger(__name__)
class CompareLocalesEntity(VCSTranslation):
"""
Represents an entity in a file handled by compare-locales.
"""
def __init__(self, key, string, comment, order):
self.key = key
self.source_string = string
self.source_string_plural = ''
self.strings = {None: self.source_string} if self.source_string is not None else {}
self.comments = comment.val.split('\n') if comment else []
self.order = order
self.fuzzy = False
self.source = []
class CompareLocalesResource(ParsedResource):
def __init__(self, path, source_resource=None):
self.path = path
self.entities = OrderedDict() # Preserve entity order.
self.source_resource = source_resource
try:
self.parser = parser.getParser(self.path)
except UserWarning as err:
raise ParseError(err)
self.parsed_objects = []
# A monolingual l10n file might not contain all entities, but the code
# expects ParsedResource to contain representations of all of them. So
# when parsing the l10n resource, we first create empty entity for each
# source resource entity.
if source_resource:
for key, entity in source_resource.entities.items():
self.entities[key] = CompareLocalesEntity(
entity.key,
None,
None,
None,
)
try:
self.parser.readFile(self.path)
except IOError as err:
# If the file doesn't exist, but we have a source resource,
# we can keep going, we'll just not have any translations.
if source_resource:
return
else:
raise ParseError(err)
self.parsed_objects = list(self.parser.walk())
order = 0
for entity in self.parsed_objects:
if isinstance(entity, parser.Entity):
self.entities[entity.key] = CompareLocalesEntity(
entity.key,
entity.unwrap(),
entity.pre_comment,
order,
)
order += 1
@property
def translations(self):
return sorted(self.entities.values(), key=lambda e: e.order)
def save(self, locale):
if not self.source_resource:
raise SyncError(
'Cannot save resource {0}: No source resource given.'
.format(self.path)
)
# A dictionary of new translations
new_l10n = {
key: entity.strings[None] if entity.strings else None
for key, entity in self.entities.items()
}
# Create parent folders if necessary
create_parent_directory(self.path)
with open(self.path, 'w') as output_file:
log.debug('Saving file: %s', self.path)
output_file.write(
serializer.serialize(
self.path,
self.source_resource.parsed_objects,
self.parsed_objects,
new_l10n,
)
)
def parse(path, source_path=None, locale=None):
if source_path is not None:
source_resource = CompareLocalesResource(source_path)
else:
source_resource = None
return CompareLocalesResource(path, source_resource)
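# Illustrative sketch (added for clarity; the paths, entity key and locale
# object are assumptions): parse a localized file against its source resource,
# update one translation and write it back through compare-locales' serializer.
#
#   resource = parse('de/app.properties', source_path='en-US/app.properties')
#   resource.entities['greeting'].strings[None] = 'Hallo Welt'
#   resource.save(locale)   # locale is a Pontoon Locale object in real callers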
|
py | 1a54e04fa69cb1a8d4d14c368d80343af5b34d32 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import hanlp
"""
File: source/inputters/voc.py
"""
# Default word tokens
PAD_token = 0 # Used for padding short sentences
# SOS_token = 1 # Start-of-sentence token
# EOS_token = 2 # End-of-sentence token
# UNK_token = 3 # unknown token
#
# tokenizer = None
#
# def getTokenizer(tokenizer):
tokenizer = hanlp.load("PKU_NAME_MERGED_SIX_MONTHS_CONVSEG")
class Voc:
"""
    Voc vocabulary class.
    @param name vocabulary name
"""
def __init__(self, name):
self.name = name
self.trimmed = False
self.word2index = {}
self.word2count = {}
self.index2word = {PAD_token: "PAD"}
self.num_words = 4 # Count SOS, EOS, PAD, UNK
def add_sentence(self, sentence):
self.add_word(tokenizer(sentence))
def add_word(self, word_list):
for word in word_list:
if word not in self.word2index:
self.word2index[word] = self.num_words
self.word2count[word] = 1
self.index2word[self.num_words] = word
self.num_words += 1
else:
self.word2count[word] += 1
# Remove words below a certain count threshold
def trim(self, min_count):
if self.trimmed:
return
self.trimmed = True
keep_words = []
for k, v in self.word2count.items():
if v >= min_count:
keep_words.append(k)
print('keep_words {} / {} = {:.4f}'.format(
len(keep_words), len(self.word2index), len(keep_words) / len(self.word2index)
))
# Reinitialize dictionaries
self.word2index = {}
self.word2count = {}
self.index2word = {PAD_token: "PAD"}
self.num_words = 4 # Count default tokens
        self.add_word(keep_words)  # re-add the kept words as a list (add_word expects a word list)
def load(self, file_path):
"""
        Load the vocabulary from a file.
        @param file_path: path to the vocabulary file
@return:
"""
with open(file_path, 'r', encoding='utf-8') as f:
for line in f:
attr = line.split()
index = int(attr[0])
word = attr[1]
self.word2index[word] = index
self.word2count[word] = 1
self.index2word[index] = word
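# Illustrative sketch (added, not in the original file); the sentences are
# assumptions. It builds a vocabulary from tokenized sentences and trims rare words.
#
#   voc = Voc('demo')
#   voc.add_sentence('今天天气不错')   # any Chinese sentence; HanLP's PKU tokenizer splits it into words
#   voc.add_sentence('今天心情不错')
#   voc.trim(min_count=2)            # keep only tokens that appear at least twice
#   print(voc.num_words, voc.word2index)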
|
py | 1a54e0ec4870da2231752bc8f39e2b977d599a6b | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import range
from builtins import super
import mock
import string
import unittest
import random
from pprint import pprint
from bhive import Hive
from bhive.exceptions import (
InsufficientAuthorityError,
MissingKeyError,
InvalidWifError,
WalletLocked
)
from bhiveapi import exceptions
from bhive.amount import Amount
from bhive.witness import Witness
from bhive.account import Account
from bhive.instance import set_shared_hive_instance, shared_hive_instance
from bhive.blockchain import Blockchain
from bhive.block import Block
from bhive.memo import Memo
from bhive.transactionbuilder import TransactionBuilder
from bhivebase.operations import Transfer
from bhivegraphenebase.account import PasswordKey, PrivateKey, PublicKey
from bhive.utils import parse_time, formatTimedelta
from bhiveapi.rpcutils import NumRetriesReached
from bhive.nodelist import NodeList
# Py3 compatibility
import sys
core_unit = "STX"
class Testcases(unittest.TestCase):
@classmethod
def setUpClass(cls):
nodelist = NodeList()
# hv = shared_hive_instance()
# hv.config.refreshBackup()
# nodes = nodelist.get_testnet()
cls.nodes = nodelist.get_nodes()
cls.bts = Hive(
node=cls.nodes,
nobroadcast=True,
num_retries=10,
expiration=120,
)
# from getpass import getpass
# self.bts.wallet.unlock(getpass())
cls.bts.set_default_account("bhive")
# Test account "bhive"
cls.active_key = "5Jt2wTfhUt5GkZHV1HYVfkEaJ6XnY8D2iA4qjtK9nnGXAhThM3w"
cls.posting_key = "5Jh1Gtu2j4Yi16TfhoDmg8Qj3ULcgRi7A49JXdfUUTVPkaFaRKz"
cls.memo_key = "5KPbCuocX26aMxN9CDPdUex4wCbfw9NoT5P7UhcqgDwxXa47bit"
# Test account "bhive1"
cls.active_key1 = "5Jo9SinzpdAiCDLDJVwuN7K5JcusKmzFnHpEAtPoBHaC1B5RDUd"
cls.posting_key1 = "5JGNhDXuDLusTR3nbmpWAw4dcmE8WfSM8odzqcQ6mDhJHP8YkQo"
cls.memo_key1 = "5KA2ddfAffjfRFoe1UhQjJtKnGsBn9xcsdPQTfMt1fQuErDAkWr"
cls.active_private_key_of_bhive4 = '5JkZZEUWrDsu3pYF7aknSo7BLJx7VfxB3SaRtQaHhsPouDYjxzi'
cls.active_private_key_of_bhive5 = '5Hvbm9VjRbd1B3ft8Lm81csaqQudwFwPGdiRKrCmTKcomFS3Z9J'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
raise unittest.SkipTest()
hv = self.bts
hv.nobroadcast = True
hv.wallet.wipe(True)
hv.wallet.create("123")
hv.wallet.unlock("123")
hv.wallet.addPrivateKey(self.active_key1)
hv.wallet.addPrivateKey(self.memo_key1)
hv.wallet.addPrivateKey(self.posting_key1)
hv.wallet.addPrivateKey(self.active_key)
hv.wallet.addPrivateKey(self.memo_key)
hv.wallet.addPrivateKey(self.posting_key)
hv.wallet.addPrivateKey(self.active_private_key_of_bhive4)
hv.wallet.addPrivateKey(self.active_private_key_of_bhive5)
@classmethod
def tearDownClass(cls):
hv = shared_hive_instance()
hv.config.recover_with_latest_backup()
def test_wallet_keys(self):
hv = self.bts
hv.wallet.unlock("123")
priv_key = hv.wallet.getPrivateKeyForPublicKey(str(PrivateKey(self.posting_key, prefix=hv.prefix).pubkey))
self.assertEqual(str(priv_key), self.posting_key)
priv_key = hv.wallet.getKeyForAccount("bhive", "active")
self.assertEqual(str(priv_key), self.active_key)
priv_key = hv.wallet.getKeyForAccount("bhive1", "posting")
self.assertEqual(str(priv_key), self.posting_key1)
priv_key = hv.wallet.getPrivateKeyForPublicKey(str(PrivateKey(self.active_private_key_of_bhive4, prefix=hv.prefix).pubkey))
self.assertEqual(str(priv_key), self.active_private_key_of_bhive4)
priv_key = hv.wallet.getKeyForAccount("bhive4", "active")
self.assertEqual(str(priv_key), self.active_private_key_of_bhive4)
priv_key = hv.wallet.getPrivateKeyForPublicKey(str(PrivateKey(self.active_private_key_of_bhive5, prefix=hv.prefix).pubkey))
self.assertEqual(str(priv_key), self.active_private_key_of_bhive5)
priv_key = hv.wallet.getKeyForAccount("bhive5", "active")
self.assertEqual(str(priv_key), self.active_private_key_of_bhive5)
def test_transfer(self):
bts = self.bts
bts.nobroadcast = False
bts.wallet.unlock("123")
# bts.wallet.addPrivateKey(self.active_key)
# bts.prefix ="STX"
acc = Account("bhive", hive_instance=bts)
tx = acc.transfer(
"bhive1", 1.33, "HBD", memo="Foobar")
self.assertEqual(
tx["operations"][0][0],
"transfer"
)
self.assertEqual(len(tx['signatures']), 1)
op = tx["operations"][0][1]
self.assertIn("memo", op)
self.assertEqual(op["from"], "bhive")
self.assertEqual(op["to"], "bhive1")
amount = Amount(op["amount"], hive_instance=bts)
self.assertEqual(float(amount), 1.33)
bts.nobroadcast = True
def test_transfer_memo(self):
bts = self.bts
bts.nobroadcast = False
bts.wallet.unlock("123")
acc = Account("bhive", hive_instance=bts)
tx = acc.transfer(
"bhive1", 1.33, "HBD", memo="#Foobar")
self.assertEqual(
tx["operations"][0][0],
"transfer"
)
op = tx["operations"][0][1]
self.assertIn("memo", op)
self.assertIn("#", op["memo"])
m = Memo(from_account=op["from"], to_account=op["to"], hive_instance=bts)
memo = m.decrypt(op["memo"])
self.assertEqual(memo, "Foobar")
self.assertEqual(op["from"], "bhive")
self.assertEqual(op["to"], "bhive1")
amount = Amount(op["amount"], hive_instance=bts)
self.assertEqual(float(amount), 1.33)
bts.nobroadcast = True
def test_transfer_1of1(self):
hive = self.bts
hive.nobroadcast = False
tx = TransactionBuilder(use_condenser_api=True, hive_instance=hive)
tx.appendOps(Transfer(**{"from": 'bhive',
"to": 'bhive1',
"amount": Amount("0.01 HIVE", hive_instance=hive),
"memo": '1 of 1 transaction'}))
self.assertEqual(
tx["operations"][0]["type"],
"transfer_operation"
)
tx.appendWif(self.active_key)
tx.sign()
tx.sign()
self.assertEqual(len(tx['signatures']), 1)
tx.broadcast()
hive.nobroadcast = True
def test_transfer_2of2_simple(self):
        # Send a 2 of 2 transaction from bhive5 which needs bhive4's cosign to send funds
hive = self.bts
hive.nobroadcast = False
tx = TransactionBuilder(use_condenser_api=True, hive_instance=hive)
tx.appendOps(Transfer(**{"from": 'bhive5',
"to": 'bhive1',
"amount": Amount("0.01 HIVE", hive_instance=hive),
"memo": '2 of 2 simple transaction'}))
tx.appendWif(self.active_private_key_of_bhive5)
tx.sign()
tx.clearWifs()
tx.appendWif(self.active_private_key_of_bhive4)
tx.sign(reconstruct_tx=False)
self.assertEqual(len(tx['signatures']), 2)
tx.broadcast()
hive.nobroadcast = True
def test_transfer_2of2_wallet(self):
# Send a 2 of 2 transaction from bhive5 which needs bhive4's cosign to send
# priv key of bhive5 and bhive4 are stored in the wallet
# appendSigner fetches both keys and signs automatically with both keys.
hive = self.bts
hive.nobroadcast = False
hive.wallet.unlock("123")
tx = TransactionBuilder(use_condenser_api=True, hive_instance=hive)
tx.appendOps(Transfer(**{"from": 'bhive5',
"to": 'bhive1',
"amount": Amount("0.01 HIVE", hive_instance=hive),
"memo": '2 of 2 serialized/deserialized transaction'}))
tx.appendSigner("bhive5", "active")
tx.sign()
self.assertEqual(len(tx['signatures']), 2)
tx.broadcast()
hive.nobroadcast = True
def test_transfer_2of2_serialized_deserialized(self):
# Send a 2 of 2 transaction from bhive5 which needs bhive4's cosign to send
# funds but sign the transaction with bhive5's key and then serialize the transaction
# and deserialize the transaction. After that, sign with bhive4's key.
hive = self.bts
hive.nobroadcast = False
hive.wallet.unlock("123")
# hive.wallet.removeAccount("bhive4")
hive.wallet.removePrivateKeyFromPublicKey(str(PublicKey(self.active_private_key_of_bhive4, prefix=core_unit)))
tx = TransactionBuilder(use_condenser_api=True, hive_instance=hive)
tx.appendOps(Transfer(**{"from": 'bhive5',
"to": 'bhive1',
"amount": Amount("0.01 HIVE", hive_instance=hive),
"memo": '2 of 2 serialized/deserialized transaction'}))
tx.appendSigner("bhive5", "active")
tx.addSigningInformation("bhive5", "active")
tx.sign()
tx.clearWifs()
self.assertEqual(len(tx['signatures']), 1)
# hive.wallet.removeAccount("bhive5")
hive.wallet.removePrivateKeyFromPublicKey(str(PublicKey(self.active_private_key_of_bhive5, prefix=core_unit)))
tx_json = tx.json()
del tx
new_tx = TransactionBuilder(tx=tx_json, hive_instance=hive)
self.assertEqual(len(new_tx['signatures']), 1)
hive.wallet.addPrivateKey(self.active_private_key_of_bhive4)
new_tx.appendMissingSignatures()
new_tx.sign(reconstruct_tx=False)
self.assertEqual(len(new_tx['signatures']), 2)
new_tx.broadcast()
hive.nobroadcast = True
def test_transfer_2of2_offline(self):
# Send a 2 of 2 transaction from bhive5 which needs bhive4's cosign to send
# funds but sign the transaction with bhive5's key and then serialize the transaction
# and deserialize the transaction. After that, sign with bhive4's key.
hive = self.bts
hive.nobroadcast = False
hive.wallet.unlock("123")
# hive.wallet.removeAccount("bhive4")
hive.wallet.removePrivateKeyFromPublicKey(str(PublicKey(self.active_private_key_of_bhive4, prefix=core_unit)))
tx = TransactionBuilder(use_condenser_api=True, hive_instance=hive)
tx.appendOps(Transfer(**{"from": 'bhive5',
"to": 'bhive',
"amount": Amount("0.01 HIVE", hive_instance=hive),
"memo": '2 of 2 serialized/deserialized transaction'}))
tx.appendSigner("bhive5", "active")
tx.addSigningInformation("bhive5", "active")
tx.sign()
tx.clearWifs()
self.assertEqual(len(tx['signatures']), 1)
# hive.wallet.removeAccount("bhive5")
hive.wallet.removePrivateKeyFromPublicKey(str(PublicKey(self.active_private_key_of_bhive5, prefix=core_unit)))
hive.wallet.addPrivateKey(self.active_private_key_of_bhive4)
tx.appendMissingSignatures()
tx.sign(reconstruct_tx=False)
self.assertEqual(len(tx['signatures']), 2)
tx.broadcast()
hive.nobroadcast = True
hive.wallet.addPrivateKey(self.active_private_key_of_bhive5)
def test_transfer_2of2_wif(self):
nodelist = NodeList()
        # Send a 2 of 2 transaction from bhive5 which needs bhive4's cosign to send
        # funds but sign the transaction with bhive5's key and then serialize the transaction
# and deserialize the transaction. After that, sign with bhive4's key.
hive = Hive(
node=self.nodes,
num_retries=10,
keys=[self.active_private_key_of_bhive5],
expiration=360,
)
tx = TransactionBuilder(use_condenser_api=True, hive_instance=hive)
tx.appendOps(Transfer(**{"from": 'bhive5',
"to": 'bhive',
"amount": Amount("0.01 HIVE", hive_instance=hive),
"memo": '2 of 2 serialized/deserialized transaction'}))
tx.appendSigner("bhive5", "active")
tx.addSigningInformation("bhive5", "active")
tx.sign()
tx.clearWifs()
self.assertEqual(len(tx['signatures']), 1)
tx_json = tx.json()
del hive
del tx
hive = Hive(
node=self.nodes,
num_retries=10,
keys=[self.active_private_key_of_bhive4],
expiration=360,
)
new_tx = TransactionBuilder(tx=tx_json, hive_instance=hive)
new_tx.appendMissingSignatures()
new_tx.sign(reconstruct_tx=False)
self.assertEqual(len(new_tx['signatures']), 2)
new_tx.broadcast()
def test_verifyAuthority(self):
hv = self.bts
hv.wallet.unlock("123")
tx = TransactionBuilder(use_condenser_api=True, hive_instance=hv)
tx.appendOps(Transfer(**{"from": "bhive",
"to": "bhive1",
"amount": Amount("1.300 HBD", hive_instance=hv),
"memo": "Foobar"}))
account = Account("bhive", hive_instance=hv)
tx.appendSigner(account, "active")
self.assertTrue(len(tx.wifs) > 0)
tx.sign()
tx.verify_authority()
self.assertTrue(len(tx["signatures"]) > 0)
def test_create_account(self):
bts = self.bts
name = ''.join(random.choice(string.ascii_lowercase) for _ in range(12))
key1 = PrivateKey()
key2 = PrivateKey()
key3 = PrivateKey()
key4 = PrivateKey()
key5 = PrivateKey()
tx = bts.create_account(
name,
creator="bhive",
owner_key=format(key1.pubkey, core_unit),
active_key=format(key2.pubkey, core_unit),
posting_key=format(key3.pubkey, core_unit),
memo_key=format(key4.pubkey, core_unit),
additional_owner_keys=[format(key5.pubkey, core_unit)],
additional_active_keys=[format(key5.pubkey, core_unit)],
additional_owner_accounts=["bhive1"], # 1.2.0
additional_active_accounts=["bhive1"],
storekeys=False
)
self.assertEqual(
tx["operations"][0][0],
"account_create"
)
op = tx["operations"][0][1]
role = "active"
self.assertIn(
format(key5.pubkey, core_unit),
[x[0] for x in op[role]["key_auths"]])
self.assertIn(
format(key5.pubkey, core_unit),
[x[0] for x in op[role]["key_auths"]])
self.assertIn(
"bhive1",
[x[0] for x in op[role]["account_auths"]])
role = "owner"
self.assertIn(
format(key5.pubkey, core_unit),
[x[0] for x in op[role]["key_auths"]])
self.assertIn(
format(key5.pubkey, core_unit),
[x[0] for x in op[role]["key_auths"]])
self.assertIn(
"bhive1",
[x[0] for x in op[role]["account_auths"]])
self.assertEqual(
op["creator"],
"bhive")
def test_connect(self):
nodelist = NodeList()
self.bts.connect(node=self.nodes)
bts = self.bts
self.assertEqual(bts.prefix, "STX")
def test_set_default_account(self):
self.bts.set_default_account("bhive")
def test_info(self):
info = self.bts.info()
for key in ['current_witness',
'head_block_id',
'head_block_number',
'id',
'last_irreversible_block_num',
'total_pow',
'time']:
self.assertTrue(key in info)
def test_finalizeOps(self):
bts = self.bts
tx1 = bts.new_tx()
tx2 = bts.new_tx()
acc = Account("bhive", hive_instance=bts)
acc.transfer("bhive1", 1, "HIVE", append_to=tx1)
acc.transfer("bhive1", 2, "HIVE", append_to=tx2)
acc.transfer("bhive1", 3, "HIVE", append_to=tx1)
tx1 = tx1.json()
tx2 = tx2.json()
ops1 = tx1["operations"]
ops2 = tx2["operations"]
self.assertEqual(len(ops1), 2)
self.assertEqual(len(ops2), 1)
def test_weight_threshold(self):
bts = self.bts
auth = {'account_auths': [['test', 1]],
'extensions': [],
'key_auths': [
['STX55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n', 1],
['STX7GM9YXcsoAJAgKbqW2oVj7bnNXFNL4pk9NugqKWPmuhoEDbkDv', 1]],
'weight_threshold': 3} # threshold fine
bts._test_weights_treshold(auth)
auth = {'account_auths': [['test', 1]],
'extensions': [],
'key_auths': [
['STX55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n', 1],
['STX7GM9YXcsoAJAgKbqW2oVj7bnNXFNL4pk9NugqKWPmuhoEDbkDv', 1]],
'weight_threshold': 4} # too high
with self.assertRaises(ValueError):
bts._test_weights_treshold(auth)
def test_allow(self):
bts = self.bts
self.assertIn(bts.prefix, "STX")
acc = Account("bhive", hive_instance=bts)
self.assertIn(acc.hive.prefix, "STX")
tx = acc.allow(
"STX55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n",
account="bhive",
weight=1,
threshold=1,
permission="active",
)
self.assertEqual(
(tx["operations"][0][0]),
"account_update"
)
op = tx["operations"][0][1]
self.assertIn("active", op)
self.assertIn(
["STX55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n", '1'],
op["active"]["key_auths"])
self.assertEqual(op["active"]["weight_threshold"], 1)
def test_disallow(self):
bts = self.bts
acc = Account("bhive", hive_instance=bts)
if sys.version > '3':
_assertRaisesRegex = self.assertRaisesRegex
else:
_assertRaisesRegex = self.assertRaisesRegexp
with _assertRaisesRegex(ValueError, ".*Changes nothing.*"):
acc.disallow(
"STX55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n",
weight=1,
threshold=1,
permission="active"
)
with _assertRaisesRegex(ValueError, ".*Changes nothing!.*"):
acc.disallow(
"STX6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV",
weight=1,
threshold=1,
permission="active"
)
def test_update_memo_key(self):
bts = self.bts
bts.wallet.unlock("123")
self.assertEqual(bts.prefix, "STX")
acc = Account("bhive", hive_instance=bts)
tx = acc.update_memo_key("STX55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n")
self.assertEqual(
(tx["operations"][0][0]),
"account_update"
)
op = tx["operations"][0][1]
self.assertEqual(
op["memo_key"],
"STX55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n")
def test_approvewitness(self):
bts = self.bts
w = Account("bhive", hive_instance=bts)
tx = w.approvewitness("bhive1")
self.assertEqual(
(tx["operations"][0][0]),
"account_witness_vote"
)
op = tx["operations"][0][1]
self.assertIn(
"bhive1",
op["witness"])
def test_appendWif(self):
nodelist = NodeList()
hv = Hive(node=self.nodes,
nobroadcast=True,
expiration=120,
num_retries=10)
tx = TransactionBuilder(use_condenser_api=True, hive_instance=hv)
tx.appendOps(Transfer(**{"from": "bhive",
"to": "bhive1",
"amount": Amount("1 HIVE", hive_instance=hv),
"memo": ""}))
with self.assertRaises(
MissingKeyError
):
tx.sign()
with self.assertRaises(
InvalidWifError
):
tx.appendWif("abcdefg")
tx.appendWif(self.active_key)
tx.sign()
self.assertTrue(len(tx["signatures"]) > 0)
def test_appendSigner(self):
nodelist = NodeList()
hv = Hive(node=self.nodes,
keys=[self.active_key],
nobroadcast=True,
expiration=120,
num_retries=10)
tx = TransactionBuilder(use_condenser_api=True, hive_instance=hv)
tx.appendOps(Transfer(**{"from": "bhive",
"to": "bhive1",
"amount": Amount("1 HIVE", hive_instance=hv),
"memo": ""}))
account = Account("bhive", hive_instance=hv)
with self.assertRaises(
AssertionError
):
tx.appendSigner(account, "abcdefg")
tx.appendSigner(account, "active")
self.assertTrue(len(tx.wifs) > 0)
tx.sign()
self.assertTrue(len(tx["signatures"]) > 0)
def test_verifyAuthorityException(self):
nodelist = NodeList()
hv = Hive(node=self.nodes,
keys=[self.posting_key],
nobroadcast=True,
expiration=120,
num_retries=10)
tx = TransactionBuilder(use_condenser_api=True, hive_instance=hv)
tx.appendOps(Transfer(**{"from": "bhive",
"to": "bhive1",
"amount": Amount("1 HIVE", hive_instance=hv),
"memo": ""}))
account = Account("bhive2", hive_instance=hv)
tx.appendSigner(account, "active")
tx.appendWif(self.posting_key)
self.assertTrue(len(tx.wifs) > 0)
tx.sign()
with self.assertRaises(
exceptions.MissingRequiredActiveAuthority
):
tx.verify_authority()
self.assertTrue(len(tx["signatures"]) > 0)
def test_Transfer_broadcast(self):
nodelist = NodeList()
hv = Hive(node=self.nodes,
keys=[self.active_key],
nobroadcast=True,
expiration=120,
num_retries=10)
tx = TransactionBuilder(use_condenser_api=True, expiration=10, hive_instance=hv)
tx.appendOps(Transfer(**{"from": "bhive",
"to": "bhive1",
"amount": Amount("1 HIVE", hive_instance=hv),
"memo": ""}))
tx.appendSigner("bhive", "active")
tx.sign()
tx.broadcast()
def test_TransactionConstructor(self):
hv = self.bts
opTransfer = Transfer(**{"from": "bhive",
"to": "bhive1",
"amount": Amount("1 HIVE", hive_instance=hv),
"memo": ""})
tx1 = TransactionBuilder(use_condenser_api=True, hive_instance=hv)
tx1.appendOps(opTransfer)
tx = TransactionBuilder(tx1, hive_instance=hv)
self.assertFalse(tx.is_empty())
self.assertTrue(len(tx.list_operations()) == 1)
self.assertTrue(repr(tx) is not None)
self.assertTrue(str(tx) is not None)
account = Account("bhive", hive_instance=hv)
tx.appendSigner(account, "active")
self.assertTrue(len(tx.wifs) > 0)
tx.sign()
self.assertTrue(len(tx["signatures"]) > 0)
def test_follow_active_key(self):
nodelist = NodeList()
hv = Hive(node=self.nodes,
keys=[self.active_key],
nobroadcast=True,
expiration=120,
num_retries=10)
account = Account("bhive", hive_instance=hv)
account.follow("bhive1")
def test_follow_posting_key(self):
nodelist = NodeList()
hv = Hive(node=self.nodes,
keys=[self.posting_key],
nobroadcast=True,
expiration=120,
num_retries=10)
account = Account("bhive", hive_instance=hv)
account.follow("bhive1")
|
py | 1a54e12e5205f4355fa6e276a04e9c3dd8594e40 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from airflow.contrib.hooks.wasb_hook import WasbHook
from airflow.operators.sensors import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class WasbBlobSensor(BaseSensorOperator):
"""
Waits for a blob to arrive on Azure Blob Storage.
:param container_name: Name of the container.
:type container_name: str
:param blob_name: Name of the blob.
:type blob_name: str
:param wasb_conn_id: Reference to the wasb connection.
:type wasb_conn_id: str
:param check_options: Optional keyword arguments that
`WasbHook.check_for_blob()` takes.
:type check_options: dict
"""
template_fields = ('container_name', 'blob_name')
@apply_defaults
def __init__(self, container_name, blob_name,
wasb_conn_id='wasb_default', check_options=None, *args,
**kwargs):
super(WasbBlobSensor, self).__init__(*args, **kwargs)
if check_options is None:
check_options = {}
self.wasb_conn_id = wasb_conn_id
self.container_name = container_name
self.blob_name = blob_name
self.check_options = check_options
def poke(self, context):
self.logger.info(
'Poking for blob: {self.blob_name}\n'
'in wasb://{self.container_name}'.format(**locals())
)
hook = WasbHook(wasb_conn_id=self.wasb_conn_id)
return hook.check_for_blob(self.container_name, self.blob_name,
**self.check_options)
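# Illustrative usage only (task, container and blob names below are placeholders,
# not part of this module). Inside a DAG definition the sensor would typically be
# wired up as:
#
#     wait_for_blob = WasbBlobSensor(
#         task_id='wait_for_input_blob',
#         container_name='my-container',
#         blob_name='incoming/data.csv',
#         wasb_conn_id='wasb_default',
#         poke_interval=60,
#         dag=dag)
#
# The scheduler then calls poke() repeatedly until the blob exists.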
class WasbPrefixSensor(BaseSensorOperator):
"""
Waits for blobs matching a prefix to arrive on Azure Blob Storage.
:param container_name: Name of the container.
:type container_name: str
:param prefix: Prefix of the blob.
:type prefix: str
:param wasb_conn_id: Reference to the wasb connection.
:type wasb_conn_id: str
:param check_options: Optional keyword arguments that
`WasbHook.check_for_prefix()` takes.
:type check_options: dict
"""
template_fields = ('container_name', 'prefix')
@apply_defaults
def __init__(self, container_name, prefix, wasb_conn_id='wasb_default',
check_options=None, *args, **kwargs):
super(WasbPrefixSensor, self).__init__(*args, **kwargs)
if check_options is None:
check_options = {}
self.wasb_conn_id = wasb_conn_id
self.container_name = container_name
self.prefix = prefix
self.check_options = check_options
def poke(self, context):
self.logger.info(
'Poking for prefix: {self.prefix}\n'
'in wasb://{self.container_name}'.format(**locals())
)
hook = WasbHook(wasb_conn_id=self.wasb_conn_id)
return hook.check_for_prefix(self.container_name, self.prefix,
**self.check_options)
|
py | 1a54e18a0bf3c50ed3375f2b256f1e45a11d120a | from typing import Dict, Optional, Tuple
from kale.types.blockchain_format.program import Program, INFINITE_COST
from kale.types.condition_opcodes import ConditionOpcode
from kale.types.spend_bundle import SpendBundle
from kale.util.condition_tools import conditions_dict_for_solution
from kale.wallet.cc_wallet import cc_utils
from kale.wallet.trade_record import TradeRecord
from kale.wallet.trading.trade_status import TradeStatus
def trade_status_ui_string(status: TradeStatus):
if status is TradeStatus.PENDING_CONFIRM:
return "Pending Confirmation"
elif status is TradeStatus.CANCELED:
return "Canceled"
elif status is TradeStatus.CONFIRMED:
return "Confirmed"
elif status is TradeStatus.PENDING_CANCEL:
return "Pending Cancellation"
elif status is TradeStatus.FAILED:
return "Failed"
elif status is TradeStatus.PENDING_ACCEPT:
return "Pending"
def trade_record_to_dict(record: TradeRecord) -> Dict:
"""Convenience function to return only part of trade record we care about and show correct status to the ui"""
result = {}
result["trade_id"] = record.trade_id.hex()
result["sent"] = record.sent
result["my_offer"] = record.my_offer
result["created_at_time"] = record.created_at_time
result["accepted_at_time"] = record.accepted_at_time
result["confirmed_at_index"] = record.confirmed_at_index
result["status"] = trade_status_ui_string(TradeStatus(record.status))
success, offer_dict, error = get_discrepancies_for_spend_bundle(record.spend_bundle)
if success is False or offer_dict is None:
raise ValueError(error)
result["offer_dict"] = offer_dict
return result
# Returns the relative difference in value between the amount outputted by a puzzle and solution and a coin's amount
def get_output_discrepancy_for_puzzle_and_solution(coin, puzzle, solution):
discrepancy = coin.amount - get_output_amount_for_puzzle_and_solution(puzzle, solution)
return discrepancy
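# For example (illustrative numbers): a coin of amount 100 whose solution creates
# outputs totalling 60 has a discrepancy of +40 (value offered), while outputs
# totalling 130 would give a discrepancy of -30 (value requested).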
# Returns the amount of value outputted by a puzzle and solution
def get_output_amount_for_puzzle_and_solution(puzzle: Program, solution: Program) -> int:
error, conditions, cost = conditions_dict_for_solution(puzzle, solution, INFINITE_COST)
total = 0
if conditions:
for _ in conditions.get(ConditionOpcode.CREATE_COIN, []):
total += Program.to(_.vars[1]).as_int()
return total
def get_discrepancies_for_spend_bundle(
trade_offer: SpendBundle,
) -> Tuple[bool, Optional[Dict], Optional[Exception]]:
try:
cc_discrepancies: Dict[str, int] = dict()
for coinsol in trade_offer.coin_spends:
puzzle: Program = Program.from_bytes(bytes(coinsol.puzzle_reveal))
solution: Program = Program.from_bytes(bytes(coinsol.solution))
# work out the deficits between coin amount and expected output for each
r = cc_utils.uncurry_cc(puzzle)
if r:
# Calculate output amounts
mod_hash, genesis_checker, inner_puzzle = r
innersol = solution.first()
total = get_output_amount_for_puzzle_and_solution(inner_puzzle, innersol)
colour = bytes(genesis_checker).hex()
if colour in cc_discrepancies:
cc_discrepancies[colour] += coinsol.coin.amount - total
else:
cc_discrepancies[colour] = coinsol.coin.amount - total
else:
coin_amount = coinsol.coin.amount
out_amount = get_output_amount_for_puzzle_and_solution(puzzle, solution)
diff = coin_amount - out_amount
if "kale" in cc_discrepancies:
cc_discrepancies["kale"] = cc_discrepancies["kale"] + diff
else:
cc_discrepancies["kale"] = diff
return True, cc_discrepancies, None
except Exception as e:
return False, None, e
|
py | 1a54e18a1aee091009d70c35ee4cf0d4fd7136c3 | import xarray as xr
from matplotlib import pyplot as plt
from matplotlib import dates as mdates
def _get_labels(da, label):
# Add legend label
if label is None:
try:
# Set the label for each line so that they can
# be returned by Legend.get_legend_handles_labels()
label = da.attrs['label']
except KeyError:
pass
if not isinstance(label, list):
label = [label]
return label
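# Illustrative call for plot() below (variable names are placeholders): given one
# or more DataArrays with a time coordinate, e.g.
#
#     ax = plot([da_bx, da_by], labels=['Bx', 'By'], ylabel='B (nT)')
#
# each array is drawn on the same axes, the x-axis is formatted as dates, and a
# frameless legend is placed to the right of the plot.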
def plot(data, ax=None, labels=None, legend=True,
title='', xaxis='on', xlabel=None, ylabel=None, **kwargs):
# Make sure we can iterate over the data arrays,
# not the data within them
if isinstance(data, xr.DataArray):
data = [data]
if labels is None:
labels = [None]*len(data)
# Get a set of axes in which to plot
if ax is None:
ax = plt.axes()
# Plot each data array
lines = []
for da, label in zip(data, labels):
da_lines = da.plot(ax=ax)
da_labels = _get_labels(da, label)
for da_line, da_label in zip(da_lines, da_labels):
da_line.set_label(da_label)
        lines.extend(da_lines)  # extend, not append(*...): da.plot() may return several lines
# Annotate axes
ax.set_title(title)
if xaxis == 'on':
ax.set_xlabel(xlabel)
locator = mdates.AutoDateLocator()
formatter = mdates.ConciseDateFormatter(locator)
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(formatter)
for tick in ax.get_xticklabels():
tick.set_rotation(45)
else:
ax.set_xticklabels([])
ax.set_xlabel('')
if ylabel is None:
try:
ax.set_ylabel('{0}\n({1})'.format(data[0].attrs['ylabel'],
data[0].attrs['units'])
)
except KeyError:
pass
else:
ax.set_ylabel(ylabel)
# Add a legend
if legend:
# Create the legend outside the right-most axes
leg = ax.legend(bbox_to_anchor=(1.05, 1),
borderaxespad=0.0,
frameon=False,
handlelength=0,
handletextpad=0,
loc='upper left')
# Color the text the same as the lines
for line, text in zip(lines, leg.get_texts()):
text.set_color(line.get_color())
return ax |
py | 1a54e1a0ef9cd7acefbe2c75c4943c1fd6236563 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_wireless_controller_hotspot20_anqp_network_auth_type
short_description: Configure network authentication type in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify wireless_controller_hotspot20 feature and anqp_network_auth_type category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
wireless_controller_hotspot20_anqp_network_auth_type:
description:
- Configure network authentication type.
default: null
type: dict
suboptions:
auth_type:
description:
- Network authentication type.
type: str
choices:
- acceptance-of-terms
- online-enrollment
- http-redirection
- dns-redirection
name:
description:
- Authentication type name.
required: true
type: str
url:
description:
- Redirect URL.
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure network authentication type.
fortios_wireless_controller_hotspot20_anqp_network_auth_type:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
wireless_controller_hotspot20_anqp_network_auth_type:
auth_type: "acceptance-of-terms"
name: "default_name_4"
url: "myurl.com"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_wireless_controller_hotspot20_anqp_network_auth_type_data(json):
option_list = ['auth_type', 'name', 'url']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
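# Note: underscore_to_hyphen() below recursively rewrites dictionary keys from the
# Ansible-style underscore form to the FortiOS API hyphen form, e.g.
# {'auth_type': 'online-enrollment'} -> {'auth-type': 'online-enrollment'};
# only keys are renamed, values are passed through unchanged.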
def underscore_to_hyphen(data):
    if isinstance(data, list):
        # Re-assign by index so converted copies are stored back in the list
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)
    elif isinstance(data, dict):
        new_data = {}
        for k, v in data.items():
            new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
        data = new_data
    return data
def wireless_controller_hotspot20_anqp_network_auth_type(data, fos):
vdom = data['vdom']
state = data['state']
wireless_controller_hotspot20_anqp_network_auth_type_data = data['wireless_controller_hotspot20_anqp_network_auth_type']
filtered_data = \
underscore_to_hyphen(filter_wireless_controller_hotspot20_anqp_network_auth_type_data(wireless_controller_hotspot20_anqp_network_auth_type_data))
if state == "present":
return fos.set('wireless-controller.hotspot20',
'anqp-network-auth-type',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('wireless-controller.hotspot20',
'anqp-network-auth-type',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_wireless_controller_hotspot20(data, fos):
if data['wireless_controller_hotspot20_anqp_network_auth_type']:
resp = wireless_controller_hotspot20_anqp_network_auth_type(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"wireless_controller_hotspot20_anqp_network_auth_type": {
"required": False, "type": "dict", "default": None,
"options": {
"auth_type": {"required": False, "type": "str",
"choices": ["acceptance-of-terms", "online-enrollment", "http-redirection",
"dns-redirection"]},
"name": {"required": True, "type": "str"},
"url": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_wireless_controller_hotspot20(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_wireless_controller_hotspot20(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
py | 1a54e1a30a5cd3f3501c014210e1ebb564301136 | # -*- coding: utf-8 -*-
# @Time : 2022/3/31 13:52
# @Author : ZhaoXiangPeng
# @File : create_setting.py
import os
import shutil
class CreateSetting:
def create(self):
if os.path.exists('setting.py'):
            confirm = input("The configuration file already exists. Overwrite it? (y/n) ")
            if confirm != "y":
                print("Overwrite cancelled, exiting.")
                return
template_file_path = os.path.abspath(
os.path.join(__file__, "../../../templates/project_template/setting.py")
)
shutil.copy(template_file_path, "./", follow_symlinks=False)
print("配置文件生成成功, 如有旧版setting文件(settings.py), 请删除.")
|
py | 1a54e284a2f41d3d57ce57f1833583deac6d28f3 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/crafting/station/shared_food_tool.iff"
result.attribute_template_id = 1
result.stfName("crafting","food_tool_name")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
py | 1a54e45813190de7e5dfa4296833e81450aa178e | # https://www.hackerrank.com/challenges/maximize-it/problem
import itertools
(K, N) = map(int, input().split())
L = list()
for i in range(K):
    l = list(map(int, input().split()))
    n = l[0]
    L.append(l[1:])
    assert len(L[i]) == n
S_max = 0
L_max = None
# Brute force: try every combination of one element per list
for l in itertools.product(*L):
    s = sum([x**2 for x in l]) % N
    if s > S_max:
        S_max = s
        L_max = l
print(S_max)
|
py | 1a54e4817135f1b95752590fe7dc017076137b70 | # coding=utf-8
"""
2
2 1 4 3
4
1 2 0 2
"""
def solver(n, init, m, nums):
ret = []
for i in nums:
gap = 2**i
        init = reverseNum(init, gap)  # reverse each consecutive block of length gap
cur = computeReversePair(init)
ret.append(cur)
return ret
def reverseNum(init, gap):
ret = []
for i in range(0, len(init), gap):
c = init[i:i+gap][::-1]
ret.extend(c)
# print(ret, gap)
return ret
def computeReversePair(init):
count = 0
for i in range(len(init)-1):
for j in range(i, len(init)):
if init[i] > init[j]:
count += 1
return count
def test():
n = 2
init = [2,1,4,3]
m = 4
nums = [1,2,0,2]
ret = solver(n, init, m, nums)
print(ret)
if __name__ == '__main__':
test() |
py | 1a54e49c24b9376d789b42b42d0087eca2e412b6 | from constants import token
import discord
import discordSuperUtils
from bot.core.bot import DiscordSuperUtilsBot
def main():
bot = DiscordSuperUtilsBot(token, command_prefix=".", intents=discord.Intents.all())
discordSuperUtils.CommandHinter(bot)
bot.run()
if __name__ == "__main__":
main()
|
py | 1a54e4d370d9194bf9d97fa8e84314c66cc46367 | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from textwrap import dedent
from typing import Dict
from twitter.common.collections import OrderedSet
from pants.option.config import Config
from pants.testutil.test_base import TestBase
from pants.util.contextutil import temporary_file
class ConfigTest(TestBase):
def _setup_config(self, config1_content: str, config2_content: str, *, suffix: str) -> Config:
with temporary_file(binary_mode=False, suffix=suffix) as config1, \
temporary_file(binary_mode=False, suffix=suffix) as config2:
config1.write(config1_content)
config1.close()
config2.write(config2_content)
config2.close()
parsed_config = Config.load(
config_paths=[config1.name, config2.name], seed_values={"buildroot": self.build_root}
)
assert [config1.name, config2.name] == parsed_config.sources()
return parsed_config
def setUp(self) -> None:
ini1_content = dedent(
"""
[DEFAULT]
name: foo
answer: 42
scale: 1.2
path: /a/b/%(answer)s
embed: %(path)s::foo
disclaimer:
Let it be known
that.
[a]
list: [1, 2, 3, %(answer)s]
list2: +[7, 8, 9]
[b]
preempt: True
[b.nested]
dict: {
'a': 1,
'b': %(answer)s,
'c': ['%(answer)s', '%(answer)s'],
}
[b.nested.nested-again]
movie: inception
"""
)
ini2_content = dedent(
"""
[a]
fast: True
[b]
preempt: False
[c.child]
no_values_in_parent: True
[defined_section]
"""
)
self.config = self._setup_config(ini1_content, ini2_content, suffix=".ini")
self.default_seed_values = Config._determine_seed_values(
seed_values={"buildroot": self.build_root},
)
self.default_file1_values = {
"name": "foo",
"answer": "42",
"scale": "1.2",
"path": "/a/b/42",
"embed": "/a/b/42::foo",
"disclaimer": "\nLet it be known\nthat.",
}
self.expected_file1_options = {
"a": {
"list": "[1, 2, 3, 42]",
"list2": "+[7, 8, 9]",
},
"b": {
"preempt": "True",
},
"b.nested": {
"dict": "{\n'a': 1,\n'b': 42,\n'c': ['42', '42'],\n}"
},
"b.nested.nested-again": {
"movie": "inception",
},
}
self.expected_file2_options: Dict[str, Dict[str, str]] = {
"a": {
"fast": "True",
},
"b": {
"preempt": "False",
},
"c.child": {
"no_values_in_parent": "True",
},
"defined_section": {},
}
self.expected_combined_values: Dict[str, Dict[str, str]] = {
**self.expected_file1_options,
**self.expected_file2_options,
"a": {
**self.expected_file2_options["a"], **self.expected_file1_options["a"],
},
}
def test_sections(self) -> None:
expected_sections = list(
OrderedSet([*self.expected_file2_options.keys(), *self.expected_file1_options.keys()])
)
assert self.config.sections() == expected_sections
for section in expected_sections:
assert self.config.has_section(section) is True
# We should only look at explicitly defined sections. For example, if `cache.java` is defined
# but `cache` is not, then `cache` should not be included in the sections.
assert self.config.has_section('c') is False
def test_has_option(self) -> None:
# Check has all DEFAULT values
for default_option in (*self.default_seed_values.keys(), *self.default_file1_values.keys()):
assert self.config.has_option(section="DEFAULT", option=default_option) is True
# Check every explicitly defined section has its options + the seed defaults
for section, options in self.expected_combined_values.items():
for option in (*options, *self.default_seed_values):
assert self.config.has_option(section=section, option=option) is True
# Check every section for file1 also has file1's DEFAULT values
for section in self.expected_file1_options:
for option in self.default_file1_values:
assert self.config.has_option(section=section, option=option) is True
# Check that file1's DEFAULT values don't apply to sections only defined in file2
sections_only_in_file2 = set(self.expected_file2_options.keys()) - set(
self.expected_file1_options.keys()
)
for section in sections_only_in_file2:
for option in self.default_file1_values:
assert self.config.has_option(section=section, option=option) is False
# Check that non-existent options are False
nonexistent_options = {
"DEFAULT": "fake",
"a": "fake",
"b": "fast",
}
for section, option in nonexistent_options.items():
assert self.config.has_option(section=section, option=option) is False
def test_list_all_options(self) -> None:
# This is used in `options_bootstrapper.py` to validate that every option is recognized.
file1_config = self.config.configs()[1]
file2_config = self.config.configs()[0]
for section, options in self.expected_file1_options.items():
assert file1_config.values.options(section=section) == [
*options.keys(), *self.default_seed_values.keys(), *self.default_file1_values.keys(),
]
for section, options in self.expected_file2_options.items():
assert file2_config.values.options(section=section) == [
*options.keys(), *self.default_seed_values.keys()]
def test_default_values(self) -> None:
# This is used in `options_bootstrapper.py` to ignore default values when validating options.
file1_config = self.config.configs()[1]
file2_config = self.config.configs()[0]
# NB: string interpolation should only happen when calling _ConfigValues.get_value(). The
# values for _ConfigValues.defaults() are not yet interpolated.
default_file1_values_unexpanded = {
**self.default_file1_values, "path": "/a/b/%(answer)s", "embed": "%(path)s::foo",
}
assert file1_config.values.defaults() == {
**self.default_seed_values, **default_file1_values_unexpanded,
}
assert file2_config.values.defaults() == self.default_seed_values
def test_get(self) -> None:
# Check the DEFAULT section
for option, value in {**self.default_seed_values, **self.default_file1_values}.items():
assert self.config.get(section="DEFAULT", option=option) == value
# Check the combined values, including that each section has the default seed values
for section, section_values in self.expected_combined_values.items():
for option, value in {**section_values, **self.default_seed_values}.items():
assert self.config.get(section=section, option=option) == value
# Check that each section from file1 also has file1's default values
for section in self.expected_file1_options:
for option, value in self.default_file1_values.items():
assert self.config.get(section=section, option=option) == value
def check_defaults(default: str) -> None:
assert self.config.get(section='c', option='fast') is None
assert self.config.get(section='c', option='preempt', default=None) is None
assert self.config.get(section='c', option='jake', default=default) == default
check_defaults('')
check_defaults('42')
def test_empty(self) -> None:
config = Config.load([])
assert config.sections() == []
assert config.sources() == []
assert config.has_section("DEFAULT") is False
assert config.has_option(section="DEFAULT", option="name") is False
|
py | 1a54e5658dd91de43d6a7a6254a94b0cd7fcabc6 | # Autogenerated from KST: please remove this line if doing any edits by hand!
import unittest
from repeat_eos_struct import _schema
class TestRepeatEosStruct(unittest.TestCase):
def test_repeat_eos_struct(self):
r = _schema.parse_file('src/repeat_eos_struct.bin')
self.assertEqual(len(r.chunks), 2)
self.assertEqual(r.chunks[0].offset, 0)
self.assertEqual(r.chunks[0].len, 66)
self.assertEqual(r.chunks[1].offset, 66)
self.assertEqual(r.chunks[1].len, 2069)
|
py | 1a54e62abb48b56448ec4853753342e3b689d7db | # Copyright (c) 2017 The Verde Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
#
# This code is part of the Fatiando a Terra project (https://www.fatiando.org)
#
"""
Test the utility functions.
"""
from unittest import mock
import numpy as np
import numpy.testing as npt
import pytest
import xarray as xr
from scipy.spatial import cKDTree
from .. import utils
from ..coordinates import grid_coordinates, scatter_points
from ..utils import (
dummy_jit,
get_ndim_horizontal_coords,
grid_to_table,
kdtree,
make_xarray_grid,
meshgrid_from_1d,
meshgrid_to_1d,
parse_engine,
partition_by_sum,
)
def test_parse_engine():
"Check that it works for common input"
assert parse_engine("numba") == "numba"
assert parse_engine("numpy") == "numpy"
with mock.patch.object(utils, "numba", None):
assert parse_engine("auto") == "numpy"
with mock.patch.object(utils, "numba", mock.MagicMock()):
assert parse_engine("auto") == "numba"
def test_parse_engine_fails():
"Check that the exception is raised for invalid engines"
with pytest.raises(ValueError):
parse_engine("some invalid engine")
def test_dummy_jit():
"Make sure the dummy function raises an exception"
@dummy_jit(target="cpt")
def function():
"Some random function"
return 0
with pytest.raises(RuntimeError):
function()
def test_kdtree():
"Test that the kdtree returned works for query"
coords = grid_coordinates((-10, 0, 0, 20), spacing=1)
for use_pykdtree in [True, False]:
tree = kdtree(coords, use_pykdtree=use_pykdtree)
dist, labels = tree.query(np.array([[-10, 0.1]]))
assert labels.size == 1
assert labels[0] == 0
npt.assert_allclose(dist, 0.1)
if not use_pykdtree:
assert isinstance(tree, cKDTree)
def test_grid_to_table_order():
"Check that coordinates are in the right order when converting to tables"
lon, lat = grid_coordinates(region=(1, 10, -10, -1), shape=(3, 4))
data = lon ** 2
# If the DataArray is created with coords in an order that doesn't match
# the dims (which is valid), we were getting it wrong because we were
# relying on the order of the coords instead of dims. This test would have
# caught that bug.
grid = xr.DataArray(
data=data,
coords={"longitude": lon[0, :], "latitude": lat[:, 0]},
dims=("latitude", "longitude"),
).to_dataset(name="field")
table = grid_to_table(grid)
true_lat = [-10, -10, -10, -10, -5.5, -5.5, -5.5, -5.5, -1, -1, -1, -1]
true_lon = [1, 4, 7, 10, 1, 4, 7, 10, 1, 4, 7, 10]
true_field = [1, 16, 49, 100, 1, 16, 49, 100, 1, 16, 49, 100]
npt.assert_allclose(true_lat, table.latitude)
npt.assert_allclose(true_lon, table.longitude)
npt.assert_allclose(true_field, table.field)
def test_partition_by_sum_fails_size():
"Should raise an exception if given more parts than elements."
with pytest.raises(ValueError) as error:
partition_by_sum(np.arange(10), 11)
assert "array of size 10 into 11 parts" in str(error)
def test_partition_by_sum_fails_no_partitions():
"Should raise an exception if could not find unique partition points"
with pytest.raises(ValueError) as error:
partition_by_sum(np.arange(10), 8)
assert "Could not find partition points" in str(error)
def test_make_xarray_grid():
"""
Check if xarray.Dataset is correctly created
"""
region = (-10, -5, 6, 10)
spacing = 1
coordinates = grid_coordinates(region, spacing=spacing)
data = np.ones_like(coordinates[0])
grid = make_xarray_grid(coordinates, data, data_names="dummy")
npt.assert_allclose(grid.easting, [-10, -9, -8, -7, -6, -5])
npt.assert_allclose(grid.northing, [6, 7, 8, 9, 10])
npt.assert_allclose(grid.dummy, 1)
assert grid.dummy.shape == (5, 6)
# Change dims
grid = make_xarray_grid(
coordinates, data, data_names="dummy", dims=("latitude", "longitude")
)
npt.assert_allclose(grid.longitude, [-10, -9, -8, -7, -6, -5])
npt.assert_allclose(grid.latitude, [6, 7, 8, 9, 10])
npt.assert_allclose(grid.dummy, 1)
assert grid.dummy.shape == (5, 6)
def test_make_xarray_grid_multiple_data():
"""
Check if xarray.Dataset with multiple data is correctly created
"""
region = (-10, -5, 6, 10)
spacing = 1
coordinates = grid_coordinates(region, spacing=spacing)
data_arrays = tuple(i * np.ones_like(coordinates[0]) for i in range(1, 4))
data_names = list("data_{}".format(i) for i in range(1, 4))
dataset = make_xarray_grid(coordinates, data_arrays, data_names=data_names)
npt.assert_allclose(dataset.easting, [-10, -9, -8, -7, -6, -5])
npt.assert_allclose(dataset.northing, [6, 7, 8, 9, 10])
for i in range(1, 4):
npt.assert_allclose(dataset["data_{}".format(i)], i)
assert dataset["data_{}".format(i)].shape == (5, 6)
def test_make_xarray_grid_no_data():
"""
Check if the function creates a xarray.Dataset with no data
"""
region = (-10, -5, 6, 10)
spacing = 1
coordinates = grid_coordinates(region, spacing=spacing)
dataset = make_xarray_grid(coordinates, data=None, data_names=None)
# Check if no data is present in the grid
assert len(dataset.data_vars) == 0
# Check if coordinates are in the grid
npt.assert_allclose(dataset.easting, [-10, -9, -8, -7, -6, -5])
npt.assert_allclose(dataset.northing, [6, 7, 8, 9, 10])
def test_make_xarray_grid_extra_coords():
"""
Check if xarray.Dataset with extra coords is correctly created
"""
region = (-10, -5, 6, 10)
spacing = 1
extra_coords = [1, 2]
coordinates = grid_coordinates(region, spacing=spacing, extra_coords=extra_coords)
data = np.ones_like(coordinates[0])
dataset = make_xarray_grid(
coordinates,
data,
data_names="dummy",
extra_coords_names=["upward", "time"],
)
npt.assert_allclose(dataset.easting, [-10, -9, -8, -7, -6, -5])
npt.assert_allclose(dataset.northing, [6, 7, 8, 9, 10])
npt.assert_allclose(dataset.upward, 1)
npt.assert_allclose(dataset.time, 2)
npt.assert_allclose(dataset.dummy, 1)
assert dataset.dummy.shape == (5, 6)
assert dataset.upward.shape == (5, 6)
assert dataset.time.shape == (5, 6)
def test_make_xarray_grid_invalid_names():
"""
Check if errors are raise after invalid data names
"""
region = (-10, -5, 6, 10)
spacing = 1
coordinates = grid_coordinates(region, spacing=spacing)
# Single data, multiple data_name
data = np.ones_like(coordinates[0])
with pytest.raises(ValueError):
make_xarray_grid(coordinates, data, data_names=["bla_1", "bla_2"])
# data_names equal to None
with pytest.raises(ValueError):
make_xarray_grid(coordinates, data, data_names=None)
# Multiple data, single data_name
data = tuple(i * np.ones_like(coordinates[0]) for i in (1, 2))
with pytest.raises(ValueError):
make_xarray_grid(coordinates, data, data_names="blabla")
def test_make_xarray_grid_invalid_extra_coords():
"""
Check if errors are raise after invalid extra coords
"""
region = (-10, -5, 6, 10)
spacing = 1
# No extra coords, extra_coords_name should be ignored
coordinates = grid_coordinates(region, spacing=spacing)
data = np.ones_like(coordinates[0])
make_xarray_grid(coordinates, data, data_names="dummy", extra_coords_names="upward")
# Single extra coords, extra_coords_name equal to None
coordinates = grid_coordinates(region, spacing=spacing, extra_coords=1)
data = np.ones_like(coordinates[0])
with pytest.raises(ValueError):
make_xarray_grid(coordinates, data, data_names="dummy", extra_coords_names=None)
# Multiple extra coords, single extra_coords_name as a str
coordinates = grid_coordinates(region, spacing=spacing, extra_coords=[1, 2])
data = np.ones_like(coordinates[0])
with pytest.raises(ValueError):
make_xarray_grid(
coordinates, data, data_names="dummy", extra_coords_names="upward"
)
# Multiple extra coords, multiple extra_coords_name but not equal
coordinates = grid_coordinates(region, spacing=spacing, extra_coords=[1, 2, 3])
data = np.ones_like(coordinates[0])
with pytest.raises(ValueError):
make_xarray_grid(
coordinates, data, data_names="dummy", extra_coords_names=["upward", "time"]
)
def test_make_xarray_grid_invalid_2d_coordinates():
"""
Check if error is raised if invaild 2d coordinates array are passed
"""
region = (-10, -5, 6, 10)
spacing = 1
easting, northing = grid_coordinates(region, spacing=spacing)
# Change only one element of the easting array
easting[2, 2] = -1000
data = np.ones_like(easting)
with pytest.raises(ValueError):
make_xarray_grid((easting, northing), data, data_names="dummy")
def test_make_xarray_grid_coordinates_as_1d_arrays():
"""
Check if it can handle coordinates as 1d-arrays
"""
region = (-10, -5, 6, 10)
easting = np.linspace(*region[:2], 6, dtype=float)
northing = np.linspace(*region[2:], 5, dtype=float)
data = np.ones((northing.size, easting.size))
grid = make_xarray_grid((easting, northing), data, data_names="dummy")
npt.assert_allclose(grid.easting, [-10, -9, -8, -7, -6, -5])
npt.assert_allclose(grid.northing, [6, 7, 8, 9, 10])
npt.assert_allclose(grid.dummy, 1)
assert grid.dummy.shape == (5, 6)
def test_make_xarray_grid_invalid_mixed_coordinates():
"""
Check if error is raised when horizontal coordinates have mixed dimensions
"""
region = (-10, -5, 6, 10)
spacing = 1
easting, northing = grid_coordinates(region, spacing=spacing)
data = np.ones_like(easting)
# easting is 1d, but northing is 2d
with pytest.raises(ValueError):
make_xarray_grid((easting[0, :], northing), data, data_names="dummy")
# northing is 1d, but easting is 2d
with pytest.raises(ValueError):
make_xarray_grid((easting, northing[:, 0]), data, data_names="dummy")
def test_meshgrid_to_1d_invalid():
"""
Check if error is raised after invalid meshgrid
"""
region = (-10, -5, 6, 10)
# Modify one element of easting
easting, northing = grid_coordinates(region=region, spacing=1)
easting[2, 2] = -9999
with pytest.raises(ValueError):
meshgrid_to_1d((easting, northing))
# Modify one element of northing
easting, northing = grid_coordinates(region=region, spacing=1)
northing[2, 3] = -9999
with pytest.raises(ValueError):
meshgrid_to_1d((easting, northing))
# Pass invalid shapes
easting = np.arange(16).reshape(4, 4)
northing = np.arange(9).reshape(3, 3)
with pytest.raises(ValueError):
meshgrid_to_1d((easting, northing))
# Pass 1d arrays
easting = np.linspace(0, 10, 11)
northing = np.linspace(-4, -4, 9)
with pytest.raises(ValueError):
meshgrid_to_1d((easting, northing))
def test_meshgrid_from_1d_invalid():
"""
Check if error is raised after non 1d arrays passed to meshgrid_from_1d
"""
coordinates = grid_coordinates(region=(0, 10, -5, 5), shape=(11, 11))
with pytest.raises(ValueError):
meshgrid_from_1d(coordinates)
def test_check_ndim_easting_northing():
"""
Test if check_ndim_easting_northing works as expected
"""
# Easting and northing as 1d arrays
easting, northing = scatter_points((-5, 5, 0, 4), 50, random_state=42)
assert get_ndim_horizontal_coords(easting, northing) == 1
# Easting and northing as 2d arrays
easting, northing = grid_coordinates((-5, 5, 0, 4), spacing=1)
assert get_ndim_horizontal_coords(easting, northing) == 2
# Check if error is raised after easting and northing with different ndims
easting = np.linspace(0, 5, 6)
northing = np.linspace(-5, 5, 16).reshape(4, 4)
with pytest.raises(ValueError):
get_ndim_horizontal_coords(easting, northing)
|
py | 1a54e62d8b3dfd0d22871c9e516626d185b162cb | import warnings
import pytest
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_warns, assert_array_equal,
temppath,
)
from numpy.core.tests._locales import CommaDecimalPointLocale
LD_INFO = np.finfo(np.longdouble)
longdouble_longer_than_double = (LD_INFO.eps < np.finfo(np.double).eps)
_o = 1 + LD_INFO.eps
string_to_longdouble_inaccurate = (_o != np.longdouble(repr(_o)))
del _o
def test_scalar_extraction():
"""Confirm that extracting a value doesn't convert to python float"""
o = 1 + LD_INFO.eps
a = np.array([o, o, o])
assert_equal(a[1], o)
# Conversions string -> long double
# 0.1 not exactly representable in base 2 floating point.
repr_precision = len(repr(np.longdouble(0.1)))
# +2 from macro block starting around line 842 in scalartypes.c.src.
@pytest.mark.skipif(LD_INFO.precision + 2 >= repr_precision,
reason="repr precision not enough to show eps")
def test_repr_roundtrip():
# We will only see eps in repr if within printing precision.
o = 1 + LD_INFO.eps
assert_equal(np.longdouble(repr(o)), o, "repr was %s" % repr(o))
@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l")
def test_repr_roundtrip_bytes():
o = 1 + LD_INFO.eps
assert_equal(np.longdouble(repr(o).encode("ascii")), o)
@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l")
@pytest.mark.parametrize("strtype", (np.str_, np.bytes_, str, bytes))
def test_array_and_stringlike_roundtrip(strtype):
"""
Test that string representations of long-double roundtrip both
for array casting and scalar coercion, see also gh-15608.
"""
o = 1 + LD_INFO.eps
if strtype in (np.bytes_, bytes):
o_str = strtype(repr(o).encode("ascii"))
else:
o_str = strtype(repr(o))
# Test that `o` is correctly coerced from the string-like
assert o == np.longdouble(o_str)
# Test that arrays also roundtrip correctly:
o_strarr = np.asarray([o] * 3, dtype=strtype)
assert (o == o_strarr.astype(np.longdouble)).all()
# And array coercion and casting to string give the same as scalar repr:
assert (o_strarr == o_str).all()
assert (np.asarray([o] * 3).astype(strtype) == o_str).all()
def test_bogus_string():
assert_raises(ValueError, np.longdouble, "spam")
assert_raises(ValueError, np.longdouble, "1.0 flub")
@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l")
def test_fromstring():
o = 1 + LD_INFO.eps
s = (" " + repr(o))*5
a = np.array([o]*5)
assert_equal(np.fromstring(s, sep=" ", dtype=np.longdouble), a,
err_msg="reading '%s'" % s)
def test_fromstring_complex():
for ctype in ["complex", "cdouble", "cfloat"]:
# Check spacing between separator
assert_equal(np.fromstring("1, 2 , 3 ,4", sep=",", dtype=ctype),
np.array([1., 2., 3., 4.]))
# Real component not specified
assert_equal(np.fromstring("1j, -2j, 3j, 4e1j", sep=",", dtype=ctype),
np.array([1.j, -2.j, 3.j, 40.j]))
# Both components specified
assert_equal(np.fromstring("1+1j,2-2j, -3+3j, -4e1+4j", sep=",", dtype=ctype),
np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. + 4j]))
# Spaces at wrong places
with assert_warns(DeprecationWarning):
assert_equal(np.fromstring("1+2 j,3", dtype=ctype, sep=","),
np.array([1.]))
with assert_warns(DeprecationWarning):
assert_equal(np.fromstring("1+ 2j,3", dtype=ctype, sep=","),
np.array([1.]))
with assert_warns(DeprecationWarning):
assert_equal(np.fromstring("1 +2j,3", dtype=ctype, sep=","),
np.array([1.]))
with assert_warns(DeprecationWarning):
assert_equal(np.fromstring("1+j", dtype=ctype, sep=","),
np.array([1.]))
with assert_warns(DeprecationWarning):
assert_equal(np.fromstring("1+", dtype=ctype, sep=","),
np.array([1.]))
with assert_warns(DeprecationWarning):
assert_equal(np.fromstring("1j+1", dtype=ctype, sep=","),
np.array([1j]))
def test_fromstring_bogus():
with assert_warns(DeprecationWarning):
assert_equal(np.fromstring("1. 2. 3. flop 4.", dtype=float, sep=" "),
np.array([1., 2., 3.]))
def test_fromstring_empty():
with assert_warns(DeprecationWarning):
assert_equal(np.fromstring("xxxxx", sep="x"),
np.array([]))
def test_fromstring_missing():
with assert_warns(DeprecationWarning):
assert_equal(np.fromstring("1xx3x4x5x6", sep="x"),
np.array([1]))
class TestFileBased:
ldbl = 1 + LD_INFO.eps
tgt = np.array([ldbl]*5)
out = ''.join([repr(t) + '\n' for t in tgt])
def test_fromfile_bogus(self):
with temppath() as path:
with open(path, 'wt') as f:
f.write("1. 2. 3. flop 4.\n")
with assert_warns(DeprecationWarning):
res = np.fromfile(path, dtype=float, sep=" ")
assert_equal(res, np.array([1., 2., 3.]))
def test_fromfile_complex(self):
for ctype in ["complex", "cdouble", "cfloat"]:
# Check spacing between separator and only real component specified
with temppath() as path:
with open(path, 'wt') as f:
f.write("1, 2 , 3 ,4\n")
res = np.fromfile(path, dtype=ctype, sep=",")
assert_equal(res, np.array([1., 2., 3., 4.]))
# Real component not specified
with temppath() as path:
with open(path, 'wt') as f:
f.write("1j, -2j, 3j, 4e1j\n")
res = np.fromfile(path, dtype=ctype, sep=",")
assert_equal(res, np.array([1.j, -2.j, 3.j, 40.j]))
# Both components specified
with temppath() as path:
with open(path, 'wt') as f:
f.write("1+1j,2-2j, -3+3j, -4e1+4j\n")
res = np.fromfile(path, dtype=ctype, sep=",")
assert_equal(res, np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. + 4j]))
# Spaces at wrong places
with temppath() as path:
with open(path, 'wt') as f:
f.write("1+2 j,3\n")
with assert_warns(DeprecationWarning):
res = np.fromfile(path, dtype=ctype, sep=",")
assert_equal(res, np.array([1.]))
# Spaces at wrong places
with temppath() as path:
with open(path, 'wt') as f:
f.write("1+ 2j,3\n")
with assert_warns(DeprecationWarning):
res = np.fromfile(path, dtype=ctype, sep=",")
assert_equal(res, np.array([1.]))
# Spaces at wrong places
with temppath() as path:
with open(path, 'wt') as f:
f.write("1 +2j,3\n")
with assert_warns(DeprecationWarning):
res = np.fromfile(path, dtype=ctype, sep=",")
assert_equal(res, np.array([1.]))
# Spaces at wrong places
with temppath() as path:
with open(path, 'wt') as f:
f.write("1+j\n")
with assert_warns(DeprecationWarning):
res = np.fromfile(path, dtype=ctype, sep=",")
assert_equal(res, np.array([1.]))
# Spaces at wrong places
with temppath() as path:
with open(path, 'wt') as f:
f.write("1+\n")
with assert_warns(DeprecationWarning):
res = np.fromfile(path, dtype=ctype, sep=",")
assert_equal(res, np.array([1.]))
# Spaces at wrong places
with temppath() as path:
with open(path, 'wt') as f:
f.write("1j+1\n")
with assert_warns(DeprecationWarning):
res = np.fromfile(path, dtype=ctype, sep=",")
assert_equal(res, np.array([1.j]))
@pytest.mark.skipif(string_to_longdouble_inaccurate,
reason="Need strtold_l")
def test_fromfile(self):
with temppath() as path:
with open(path, 'wt') as f:
f.write(self.out)
res = np.fromfile(path, dtype=np.longdouble, sep="\n")
assert_equal(res, self.tgt)
@pytest.mark.skipif(string_to_longdouble_inaccurate,
reason="Need strtold_l")
def test_genfromtxt(self):
with temppath() as path:
with open(path, 'wt') as f:
f.write(self.out)
res = np.genfromtxt(path, dtype=np.longdouble)
assert_equal(res, self.tgt)
@pytest.mark.skipif(string_to_longdouble_inaccurate,
reason="Need strtold_l")
def test_loadtxt(self):
with temppath() as path:
with open(path, 'wt') as f:
f.write(self.out)
res = np.loadtxt(path, dtype=np.longdouble)
assert_equal(res, self.tgt)
@pytest.mark.skipif(string_to_longdouble_inaccurate,
reason="Need strtold_l")
def test_tofile_roundtrip(self):
with temppath() as path:
self.tgt.tofile(path, sep=" ")
res = np.fromfile(path, dtype=np.longdouble, sep=" ")
assert_equal(res, self.tgt)
# Conversions long double -> string
def test_repr_exact():
o = 1 + LD_INFO.eps
assert_(repr(o) != '1')
@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376")
@pytest.mark.skipif(string_to_longdouble_inaccurate,
reason="Need strtold_l")
def test_format():
o = 1 + LD_INFO.eps
assert_("{0:.40g}".format(o) != '1')
@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376")
@pytest.mark.skipif(string_to_longdouble_inaccurate,
reason="Need strtold_l")
def test_percent():
o = 1 + LD_INFO.eps
assert_("%.40g" % o != '1')
@pytest.mark.skipif(longdouble_longer_than_double,
reason="array repr problem")
@pytest.mark.skipif(string_to_longdouble_inaccurate,
reason="Need strtold_l")
def test_array_repr():
o = 1 + LD_INFO.eps
a = np.array([o])
b = np.array([1], dtype=np.longdouble)
if not np.all(a != b):
raise ValueError("precision loss creating arrays")
assert_(repr(a) != repr(b))
#
# Locale tests: scalar types formatting should be independent of the locale
#
class TestCommaDecimalPointLocale(CommaDecimalPointLocale):
def test_repr_roundtrip_foreign(self):
o = 1.5
assert_equal(o, np.longdouble(repr(o)))
def test_fromstring_foreign_repr(self):
f = 1.234
a = np.fromstring(repr(f), dtype=float, sep=" ")
assert_equal(a[0], f)
def test_fromstring_best_effort_float(self):
with assert_warns(DeprecationWarning):
assert_equal(np.fromstring("1,234", dtype=float, sep=" "),
np.array([1.]))
def test_fromstring_best_effort(self):
with assert_warns(DeprecationWarning):
assert_equal(np.fromstring("1,234", dtype=np.longdouble, sep=" "),
np.array([1.]))
def test_fromstring_foreign(self):
s = "1.234"
a = np.fromstring(s, dtype=np.longdouble, sep=" ")
assert_equal(a[0], np.longdouble(s))
def test_fromstring_foreign_sep(self):
a = np.array([1, 2, 3, 4])
b = np.fromstring("1,2,3,4,", dtype=np.longdouble, sep=",")
assert_array_equal(a, b)
def test_fromstring_foreign_value(self):
with assert_warns(DeprecationWarning):
b = np.fromstring("1,234", dtype=np.longdouble, sep=" ")
assert_array_equal(b[0], 1)
@pytest.mark.parametrize("int_val", [
# cases discussed in gh-10723
# and gh-9968
2 ** 1024, 0])
def test_longdouble_from_int(int_val):
# for issue gh-9968
str_val = str(int_val)
# we'll expect a RuntimeWarning on platforms
# with np.longdouble equivalent to np.double
# for large integer input
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
# can be inf==inf on some platforms
assert np.longdouble(int_val) == np.longdouble(str_val)
# we can't directly compare the int and
# max longdouble value on all platforms
if np.allclose(np.finfo(np.longdouble).max,
np.finfo(np.double).max) and w:
assert w[0].category is RuntimeWarning
@pytest.mark.parametrize("bool_val", [
True, False])
def test_longdouble_from_bool(bool_val):
assert np.longdouble(bool_val) == np.longdouble(int(bool_val))
|
py | 1a54e6c5b8c308b7fac84442464c3a851df2fb4d | from datetime import datetime
from telegram import base, messageentity, userprofilephotos
import imageText
from io import BytesIO
import random
userKey = "userDict"
'''
This is the key needed to access the
user dictionary on the context.bot_data
dictionary.
'''
randomKey = "randomMsg"
'''
This is the key needed to access the
random # of messages on the context.chat_data
dictionary.
'''
rndLowerBound = 1
'''
The smallest possible number that
generateRandom() will return.
'''
rndUpperBound = 7
'''
The largest possible number that
generateRandom() will return.
'''
def printTime(textToPrint):
now = datetime.now()
current_time = now.strftime("[%Y/%m/%d - %r]")
print(current_time, textToPrint)
def isMessageFromAGroup(typeOfMessage):
return "group" in typeOfMessage or "channel" in typeOfMessage
def DictHasElems(pDict):
    """Checks whether a dictionary is not empty."""
    return bool(pDict)
def getMentions(entitiesDict: dict[messageentity.MessageEntity, str], typeToSearch: str):
    # entitiesDict maps MessageEntity objects to their text, as returned by
    # Message.parse_entities(); return the text of the first entity of the
    # requested type, or None if there is none.
    for entity, text in entitiesDict.items():
        if(entity.type == typeToSearch):
            return text
    return None
def validMessageLength(message: str, mention: str):
message = removeMention(message, mention)
msgLen = len(message)
return (0 < msgLen) and (msgLen < 500)
def userIDFromUsername(username: str, userDict: dict):
    validUsername = username[1:]  # The username in the dictionary does not contain
                                  # the "@" at the beginning, so it is stripped here
                                  # to form a valid dictionary key.
if(validUsername in userDict):
return userDict[validUsername]
else:
return None
def generateRandom():
return random.randint(rndLowerBound, rndUpperBound)
def getUserIdFromBotData(mention: str, bot_data:dict):
if userKey in bot_data:
return userIDFromUsername(mention, bot_data[userKey])
else:
return None
def shouldProcessImage(mention, bot_data, chat_data):
msgsToNextPicture = 0
if (randomKey not in chat_data):
msgsToNextPicture = generateRandom()
else:
msgsToNextPicture = chat_data[randomKey] - 1
if (msgsToNextPicture < 1 and userKey in bot_data):
userId = userIDFromUsername(mention, bot_data[userKey])
if (userId):
chat_data[randomKey] = generateRandom()
return userId
else:
chat_data[randomKey] = msgsToNextPicture
return None
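# Illustrative trace of the countdown in shouldProcessImage (an example only,
# not additional logic): assume generateRandom() returned 3 for this chat.
#   message 1: randomKey missing -> countdown set to 3, returns None
#   message 2: 3 - 1 = 2         -> stored, returns None
#   message 3: 2 - 1 = 1         -> stored, returns None
#   message 4: 1 - 1 = 0 (< 1)   -> the mentioned user's id is looked up; if found,
#              a fresh random countdown is stored and the id is returned, otherwise
#              the countdown stays at or below zero and the lookup is retried on
#              the next message.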
def addUserIDToDict(messageUser, userDict):
userDict[messageUser.username] = messageUser.id
return userDict
def processUser(messageUser, bot_data):
if(not messageUser.is_bot):
if(userKey not in bot_data):
newUserDict = {}
bot_data[userKey] = addUserIDToDict(messageUser, newUserDict)
elif(messageUser.username not in bot_data[userKey]):
bot_data[userKey] = addUserIDToDict(messageUser, bot_data[userKey])
def removeMention(textMessage: str, mention: str):
baseText = textMessage.replace(mention, "").replace("\n", "").strip()
return baseText.replace(" ", " ") #This makes sure no extra whitespaces are in the message
def processImage(userProfilePic: userprofilephotos, textMessage: str, mention: str, invert=False, name=""):
if(userProfilePic.total_count > 0):
        profilePicture = userProfilePic.photos[0][-1].get_file()  # The highest-resolution version of the user's profile picture.
photoByteArr = profilePicture.download_as_bytearray()
oldImageBArr = BytesIO(photoByteArr)
img = imageText.createImage(oldImageBArr)
if not invert:
imageText.addTextToProfilePicture(img, removeMention(textMessage, mention))
else:
img = imageText.addTextToInverseProfilePicture(img, textMessage, name)
newImageBArr = BytesIO()
newImageBArr.name = "response.jpg"
img.save(newImageBArr, "PNG")
newImageBArr.seek(0)
return newImageBArr
return None
if __name__ == "__main__":
pass
|
py | 1a54e788888e97e5f6379a0cdd5d84accf0eaf22 | import math, random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autograd
import torch.nn.functional as F
USE_CUDA = torch.cuda.is_available()
Variable = lambda *args, **kwargs: autograd.Variable(*args, **kwargs).cuda() if USE_CUDA else autograd.Variable(*args, **kwargs)
class Encoder(nn.Module):
def __init__(self, din=32, hidden_dim=128):
super(Encoder, self).__init__()
self.fc = nn.Linear(din, hidden_dim)
def forward(self, x):
embedding = F.relu(self.fc(x))
return embedding
class AttModel(nn.Module):
def __init__(self, n_node, din, hidden_dim, dout):
super(AttModel, self).__init__()
self.fcv = nn.Linear(din, hidden_dim)
self.fck = nn.Linear(din, hidden_dim)
self.fcq = nn.Linear(din, hidden_dim)
self.fcout = nn.Linear(hidden_dim, dout)
    def forward(self, x, mask):
        # x:    (batch, n_agent, din) node features
        # mask: (batch, n_agent, n_agent) adjacency mask (1 = neighbour, 0 = no edge)
        v = F.relu(self.fcv(x))
        q = F.relu(self.fcq(x))
        k = F.relu(self.fck(x)).permute(0,2,1)
        # Masked dot-product attention: non-neighbours get a large negative score
        # before the softmax so their weight is effectively zero.
        att = F.softmax(torch.mul(torch.bmm(q,k), mask) - 9e15*(1 - mask),dim=2)
        out = torch.bmm(att,v)
        #out = torch.add(out,v)
        out = F.relu(self.fcout(out))
        return out
class Q_Net(nn.Module):
def __init__(self, hidden_dim, dout):
super(Q_Net, self).__init__()
self.fc = nn.Linear(hidden_dim, dout)
def forward(self, x):
q = self.fc(x)
return q
class DGN(nn.Module):
def __init__(self,n_agent,num_inputs,hidden_dim,num_actions):
super(DGN, self).__init__()
self.encoder = Encoder(num_inputs,hidden_dim)
self.att_1 = AttModel(n_agent,hidden_dim,hidden_dim,hidden_dim)
self.att_2 = AttModel(n_agent,hidden_dim,hidden_dim,hidden_dim)
self.q_net = Q_Net(hidden_dim,num_actions)
def forward(self, x, mask):
h1 = self.encoder(x)
h2 = self.att_1(h1, mask)
h3 = self.att_2(h2, mask)
q = self.q_net(h3)
return q
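# Minimal smoke test for the model above (illustrative only: the batch size,
# agent count, feature and action dimensions are arbitrary assumptions, not
# values taken from any experiment or config in this repository).
if __name__ == "__main__":
    n_agent, n_obs, hidden, n_action = 5, 32, 128, 4
    model = DGN(n_agent, n_obs, hidden, n_action)
    obs = torch.rand(1, n_agent, n_obs)      # (batch, agents, features)
    mask = torch.ones(1, n_agent, n_agent)   # fully connected communication graph
    q_values = model(obs, mask)              # -> shape (1, n_agent, n_action)
    print(q_values.shape)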
|
py | 1a54e92ff7c96df78afaac6538857793cc46bcda | import os
import blend_pproc_render
""" SET THESE AS ARGUMENTS """
""" SET THESE AS ARGUMENTS """
# where to look for the meshes. Use multiple paths if there are more folders to look into
meshes_prefixes = ["C:/Users/benatti/codes/synchrono/src/data/vehicle/sedan/", 'C:/Users/benatti/codes/synchrono/src/data/simpleRural/' ]
# directory where the generated images are saved
out_dir = os.path.dirname(os.path.realpath(__file__)) + '/rendered_images/'
# paths where the post-process outputs (.dat, .chpf) are searched for. There may be multiple files
# in different directories describing different bodies of the same timestep (co-simulation), but the
# file prefixes must match: e.g. path1/001.dat, path1/001.chpf and path2/001.dat are processed together.
# Note: in Follow and Lookat modes the tracked object must come from the first path in the list.
#datadir = os.path.dirname(os.path.realpath(__file__))
datadir = ['./', 'C:/Users/benatti/codes/blender/NADS/dat_files/']
# resolution: 'HIGH', 'MID', 'LOW': divides the default 3840x2160 resolution by 1, 4 or 16 respectively
res = 'MID'  # or 'LOW'
# Camera options: 'Follow', 'Fixed', 'Lookat'
camera_mode = "Fixed" # 'Fixed' 'Lookat'
# If true, sky is added
use_sky = True
# camera position (unused in follow mode)
camera_pos = (200,0,200)
# Camera target data: some keys might be unused depending on the type
target = dict([
# ID of the body to target (lookat and follow modes).
('bodyid' , 0),
# ID of the shape on the body to target (lookat and follow modes). The body might have many shapes
('shapetypeid' , 5),
# name of the mesh on the body to target (lookat and follow modes). The body might have many vis meshes
('name' , 'sedan_chassis_vis'),
# Point to look at. Used only by Fixed
('position', (0,0,-10)),
    # Offset, relative to the target, from which the camera looks at it. Used only in Follow mode
('distfrom', (-15.5,-5,1.5))
])
# point light origin
light_loc=(10, 50, 50)
# light intensity
light_energy=53000
# 'up' axis
axis_up = 'Z'
blend_pproc_render.bl_render(meshes_prefixes, out_dir, datadir, res, camera_mode, use_sky, camera_pos, target, axis_up, light_loc, light_energy)
|
py | 1a54e941385d6f99609db84605dd12d38db171b1 | from mermer.consensus.constants import ConsensusConstants
from mermer.consensus.pos_quality import _expected_plot_size
from mermer.types.blockchain_format.sized_bytes import bytes32
from mermer.util.hash import std_hash
from mermer.util.ints import uint8, uint64, uint128
def is_overflow_block(constants: ConsensusConstants, signage_point_index: uint8) -> bool:
if signage_point_index >= constants.NUM_SPS_SUB_SLOT:
raise ValueError("SP index too high")
return signage_point_index >= constants.NUM_SPS_SUB_SLOT - constants.NUM_SP_INTERVALS_EXTRA
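# For example (illustrative constants, not necessarily this chain's values): with
# NUM_SPS_SUB_SLOT = 64 and NUM_SP_INTERVALS_EXTRA = 3, signage point indices
# 61, 62 and 63 mark overflow blocks, while an index of 64 or more raises ValueError.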
def calculate_sp_interval_iters(constants: ConsensusConstants, sub_slot_iters: uint64) -> uint64:
assert sub_slot_iters % constants.NUM_SPS_SUB_SLOT == 0
return uint64(sub_slot_iters // constants.NUM_SPS_SUB_SLOT)
def calculate_sp_iters(constants: ConsensusConstants, sub_slot_iters: uint64, signage_point_index: uint8) -> uint64:
if signage_point_index >= constants.NUM_SPS_SUB_SLOT:
raise ValueError("SP index too high")
return uint64(calculate_sp_interval_iters(constants, sub_slot_iters) * signage_point_index)
def calculate_ip_iters(
constants: ConsensusConstants,
sub_slot_iters: uint64,
signage_point_index: uint8,
required_iters: uint64,
) -> uint64:
# Note that the SSI is for the block passed in, which might be in the previous epoch
sp_iters = calculate_sp_iters(constants, sub_slot_iters, signage_point_index)
sp_interval_iters: uint64 = calculate_sp_interval_iters(constants, sub_slot_iters)
if sp_iters % sp_interval_iters != 0 or sp_iters >= sub_slot_iters:
raise ValueError(f"Invalid sp iters {sp_iters} for this ssi {sub_slot_iters}")
if required_iters >= sp_interval_iters or required_iters == 0:
raise ValueError(
f"Required iters {required_iters} is not below the sp interval iters {sp_interval_iters} "
f"{sub_slot_iters} or not >0."
)
return uint64((sp_iters + constants.NUM_SP_INTERVALS_EXTRA * sp_interval_iters + required_iters) % sub_slot_iters)
def calculate_iterations_quality(
difficulty_constant_factor: uint128,
quality_string: bytes32,
size: int,
difficulty: uint64,
cc_sp_output_hash: bytes32,
) -> uint64:
"""
Calculates the number of iterations from the quality. This is derives as the difficulty times the constant factor
times a random number between 0 and 1 (based on quality string), divided by plot size.
"""
sp_quality_string: bytes32 = std_hash(quality_string + cc_sp_output_hash)
iters = uint64(
int(difficulty)
* int(difficulty_constant_factor)
* int.from_bytes(sp_quality_string, "big", signed=False)
// (int(pow(2, 256)) * int(_expected_plot_size(size)))
)
return max(iters, uint64(1))
|
py | 1a54ea8a790030d6d03b1ed063edea35d59ef766 | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._ssh_public_keys_operations import build_create_request, build_delete_request, build_generate_key_pair_request, build_get_request, build_list_by_resource_group_request, build_list_by_subscription_request, build_update_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SshPublicKeysOperations:
"""SshPublicKeysOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2021_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list_by_subscription(
self,
**kwargs: Any
) -> AsyncIterable["_models.SshPublicKeysGroupListResult"]:
"""Lists all of the SSH public keys in the subscription. Use the nextLink property in the response
to get the next page of SSH public keys.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SshPublicKeysGroupListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2021_11_01.models.SshPublicKeysGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2021-11-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.SshPublicKeysGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_subscription.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SshPublicKeysGroupListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/sshPublicKeys"} # type: ignore
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.SshPublicKeysGroupListResult"]:
"""Lists all of the SSH public keys in the specified resource group. Use the nextLink property in
the response to get the next page of SSH public keys.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SshPublicKeysGroupListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2021_11_01.models.SshPublicKeysGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2021-11-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.SshPublicKeysGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SshPublicKeysGroupListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys"} # type: ignore
@distributed_trace_async
async def create(
self,
resource_group_name: str,
ssh_public_key_name: str,
parameters: "_models.SshPublicKeyResource",
**kwargs: Any
) -> "_models.SshPublicKeyResource":
"""Creates a new SSH public key resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ssh_public_key_name: The name of the SSH public key.
:type ssh_public_key_name: str
:param parameters: Parameters supplied to create the SSH public key.
:type parameters: ~azure.mgmt.compute.v2021_11_01.models.SshPublicKeyResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SshPublicKeyResource, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_11_01.models.SshPublicKeyResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SshPublicKeyResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-11-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'SshPublicKeyResource')
request = build_create_request(
resource_group_name=resource_group_name,
ssh_public_key_name=ssh_public_key_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.create.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('SshPublicKeyResource', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('SshPublicKeyResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}"} # type: ignore
@distributed_trace_async
async def update(
self,
resource_group_name: str,
ssh_public_key_name: str,
parameters: "_models.SshPublicKeyUpdateResource",
**kwargs: Any
) -> "_models.SshPublicKeyResource":
"""Updates a new SSH public key resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ssh_public_key_name: The name of the SSH public key.
:type ssh_public_key_name: str
:param parameters: Parameters supplied to update the SSH public key.
:type parameters: ~azure.mgmt.compute.v2021_11_01.models.SshPublicKeyUpdateResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SshPublicKeyResource, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_11_01.models.SshPublicKeyResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SshPublicKeyResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-11-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'SshPublicKeyUpdateResource')
request = build_update_request(
resource_group_name=resource_group_name,
ssh_public_key_name=ssh_public_key_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SshPublicKeyResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}"} # type: ignore
@distributed_trace_async
async def delete( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
ssh_public_key_name: str,
**kwargs: Any
) -> None:
"""Delete an SSH public key.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ssh_public_key_name: The name of the SSH public key.
:type ssh_public_key_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-11-01") # type: str
request = build_delete_request(
resource_group_name=resource_group_name,
ssh_public_key_name=ssh_public_key_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}"} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
ssh_public_key_name: str,
**kwargs: Any
) -> "_models.SshPublicKeyResource":
"""Retrieves information about an SSH public key.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ssh_public_key_name: The name of the SSH public key.
:type ssh_public_key_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SshPublicKeyResource, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_11_01.models.SshPublicKeyResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SshPublicKeyResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-11-01") # type: str
request = build_get_request(
resource_group_name=resource_group_name,
ssh_public_key_name=ssh_public_key_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SshPublicKeyResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}"} # type: ignore
@distributed_trace_async
async def generate_key_pair(
self,
resource_group_name: str,
ssh_public_key_name: str,
**kwargs: Any
) -> "_models.SshPublicKeyGenerateKeyPairResult":
"""Generates and returns a public/private key pair and populates the SSH public key resource with
the public key. The length of the key will be 3072 bits. This operation can only be performed
once per SSH public key resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ssh_public_key_name: The name of the SSH public key.
:type ssh_public_key_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SshPublicKeyGenerateKeyPairResult, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_11_01.models.SshPublicKeyGenerateKeyPairResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SshPublicKeyGenerateKeyPairResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-11-01") # type: str
request = build_generate_key_pair_request(
resource_group_name=resource_group_name,
ssh_public_key_name=ssh_public_key_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.generate_key_pair.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SshPublicKeyGenerateKeyPairResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
generate_key_pair.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}/generateKeyPair"} # type: ignore
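# Illustrative usage sketch (an assumption about the usual SDK wiring, not part of
# this generated module): the async ComputeManagementClient attaches an instance of
# this class as `client.ssh_public_keys`, so listing keys typically looks like:
#
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.compute.aio import ComputeManagementClient
#
#     async def list_keys():
#         async with DefaultAzureCredential() as credential:
#             async with ComputeManagementClient(credential, "<subscription-id>") as client:
#                 async for key in client.ssh_public_keys.list_by_resource_group("<resource-group>"):
#                     print(key.name)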
|
py | 1a54ec1244350933f0333a247afe59c60910d3d8 | #!/usr/bin/env python3
import wellmap
from pytest_unordered import unordered
from .param_helpers import *
class MockPathManager:
def check_named_plates(self, names):
pass
def get_index_for_only_plate(self):
return {'path': '/path/to/data'}
def get_index_for_named_plate(self, name):
return {'plate': name, 'path': f'/path/to/{name.lower()}'}
@parametrize_from_file(
schema=Schema({
'config': with_py.eval,
**with_wellmap.error_or({
'expected': with_nan.eval,
}),
}),
)
def test_table_from_config(config, expected, error):
with error:
df = wellmap.table_from_config(config, MockPathManager())
assert df.to_dict('records') == unordered(expected)
|
py | 1a54ec270ca92d990c47b0f5af1ce20bd98aebfa | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import hashlib
import json
import logging
import os
import shutil
import subprocess
import zipfile
from pathlib import Path
import yaml
from graphscope.analytical.udf.utils import InMemoryZip
from graphscope.framework.app import AppAssets
from graphscope.framework.app import AppDAGNode
from graphscope.framework.app import check_argument
from graphscope.framework.context import create_context_node
from graphscope.framework.dag import DAGNode
from graphscope.framework.dag_utils import bind_app
from graphscope.framework.errors import InvalidArgumentError
from graphscope.framework.graph import Graph
from graphscope.framework.utils import get_tempdir
from graphscope.proto import graph_def_pb2
__all__ = ["JavaApp"]
logger = logging.getLogger("graphscope")
# runtime workspace
try:
WORKSPACE = os.environ["GRAPHSCOPE_RUNTIME"]
except KeyError:
WORKSPACE = os.path.join(get_tempdir(), "gs")
DEFAULT_GS_CONFIG_FILE = ".gs_conf.yaml"
POSSIBLE_APP_TYPES = [
"default_property",
"parallel_property",
"default_simple",
"parallel_simple",
]
def _parse_user_app(java_app_class: str, java_jar_full_path: str):
_java_app_type = ""
_frag_param_str = ""
_java_inner_context_type = ""
_java_executable = "java"
if shutil.which("java") is None:
if os.environ.get("JAVA_HOME", None) is not None:
_java_executable = os.path.join(os.environ.get("JAVA_HOME"), "bin", "java")
if not os.path.isfile(_java_executable) or not os.access(
_java_executable, os.X_OK
):
raise RuntimeError(
"Java executable not found, you shall install a java runtime."
)
parse_user_app_cmd = [
_java_executable,
"-cp",
"{}".format(java_jar_full_path),
"com.alibaba.graphscope.utils.AppBaseParser",
java_app_class,
]
logger.info(" ".join(parse_user_app_cmd))
parse_user_app_process = subprocess.Popen(
parse_user_app_cmd,
env=os.environ.copy(),
encoding="utf-8",
errors="replace",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
bufsize=1,
)
out, err = parse_user_app_process.communicate()
logger.info(err)
for line in out.split("\n"):
logger.info(line)
if len(line) == 0:
continue
if line.find("DefaultPropertyApp") != -1:
_java_app_type = "default_property"
elif line.find("ParallelPropertyApp") != -1:
_java_app_type = "parallel_property"
elif line.find("DefaultAppBase") != -1:
_java_app_type = "default_simple"
elif line.find("ParallelAppBase") != -1:
_java_app_type = "parallel_simple"
elif line.find("Error") != -1:
            raise Exception("Error occurred while verifying user app")
elif line.find("TypeParams") != -1:
_frag_param_str = line.split(":")[-1].strip()
elif line.find("ContextType") != -1:
_java_inner_context_type = line.split(":")[-1].strip()
logger.info(
"Java app type: {}, frag type str: {}, ctx type: {}".format(
_java_app_type, _frag_param_str, _java_inner_context_type
)
)
parse_user_app_process.wait()
return _java_app_type, _frag_param_str, _java_inner_context_type
def _type_param_consistent(graph_actual_type_param, java_app_type_param):
    if java_app_type_param == "java.lang.Long":
        if graph_actual_type_param in {"uint64_t", "int64_t"}:
            return True
        return False
    if java_app_type_param == "java.lang.Double":
        if graph_actual_type_param in {"double"}:
            return True
        return False
    if java_app_type_param == "java.lang.Integer":
        if graph_actual_type_param in {"int32_t", "uint32_t"}:
            return True
        return False
    return False
class JavaApp(AppAssets):
"""A class represents a java app assert node in a DAG that holds the jar file.
It holds neccessary resouces to run a java app, including java class path, the gar
file which consists jar and configuration yaml, and the specified java class.
On creating a JavaApp, graphscope will try to load the specified java class, and parse
the Base class for your app, and the base class for your Context Class. This operation
requires a java runtime environment installed in your client machine where your graphscope
session is created.
To run your app, provide `JavaApp` with a property or projected graph and your querying args.
"""
def __init__(self, full_jar_path: str, java_app_class: str):
"""Init JavaApp with the full path of your `jar` file and the fully-qualified name of your
app class.
Args:
full_jar_path (str): The path where the jar file exists.
java_app_class (str): the fully-qualified name of your app class.
"""
self._java_app_class = java_app_class
self._full_jar_path = full_jar_path
self._jar_name = Path(self._full_jar_path).name
gar = self._pack_jar(self._full_jar_path)
gs_config = {
"app": [
{
"algo": "java_app",
"type": "java_pie",
"java_jar_path": self._full_jar_path,
"java_app_class": self.java_app_class,
}
]
}
# extract java app type with help of java class.
self._java_app_type, self._frag_param_str, _java_ctx_type = _parse_user_app(
java_app_class, full_jar_path
)
# For four different java type, we use two different driver class
if self._java_app_type not in POSSIBLE_APP_TYPES:
raise RuntimeError("Unexpected app type: {}".format(self._java_app_type))
if self._java_app_type.find("property") != -1:
gs_config["app"][0]["compatible_graph"] = ["vineyard::ArrowFragment"]
else:
gs_config["app"][0]["compatible_graph"] = ["gs::ArrowProjectedFragment"]
gs_config["app"][0]["context_type"] = _java_ctx_type
if self._java_app_type == "default_property":
gs_config["app"][0][
"driver_header"
] = "apps/java_pie/java_pie_property_default_app.h"
gs_config["app"][0]["class_name"] = "gs::JavaPIEPropertyDefaultApp"
elif self._java_app_type == "parallel_property":
gs_config["app"][0][
"driver_header"
] = "apps/java_pie/java_pie_property_parallel_app.h"
gs_config["app"][0]["class_name"] = "gs::JavaPIEPropertyParallelApp"
elif self._java_app_type == "default_simple":
gs_config["app"][0][
"driver_header"
] = "apps/java_pie/java_pie_projected_default_app.h"
gs_config["app"][0]["class_name"] = "gs::JavaPIEProjectedDefaultApp"
elif self._java_app_type == "parallel_simple":
gs_config["app"][0][
"driver_header"
] = "apps/java_pie/java_pie_projected_parallel_app.h"
gs_config["app"][0]["class_name"] = "gs::JavaPIEProjectedParallelApp"
else:
raise Exception(
"Unrecognizable java app type: {}".format(self._java_app_type)
)
gar.append(DEFAULT_GS_CONFIG_FILE, yaml.dump(gs_config))
super().__init__("java_app", _java_ctx_type, gar.read_bytes())
    # Override is_compatible to make sure the graph's type params are consistent with the Java app.
def is_compatible(self, graph):
splited = graph.template_str.split("<")
java_app_type_params = self.frag_param_str.split(",")
num_type_params = 0
if len(splited) != 2:
raise Exception(
"Unrecoginizable graph template str: {}".format(graph.template_str)
)
if splited[0] == "vineyard::ArrowFragment":
if self.java_app_type.find("property") == -1:
logger.error("Expected property app")
return False
if len(java_app_type_params) != 1:
logger.error("Expected one type params.")
return False
num_type_params = 1
if splited[1] == "gs::ArrowProjectedFragment":
if self.java_app_type.find("simple") == -1:
logger.error("Expected simple app")
return False
if len(java_app_type_params) != 4:
logger.error("Expected 4 type params")
return False
num_type_params = 4
graph_actual_type_params = splited[1][:-1].split(",")
for i in range(0, num_type_params):
graph_actual_type_param = graph_actual_type_params[i]
java_app_type_param = java_app_type_params[i]
if not _type_param_consistent(graph_actual_type_param, java_app_type_param):
return False
return True
def _pack_jar(self, full_jar_path: str):
garfile = InMemoryZip()
if not os.path.exists(full_jar_path):
raise FileNotFoundError("Jar file not found in {}.".format(full_jar_path))
if not full_jar_path.endswith(".jar") or not zipfile.is_zipfile(full_jar_path):
raise KeyError(
"{} is not a jar file, please feed your packed jar file to JavaApp.".format(
full_jar_path
)
)
tmp_jar_file = open(full_jar_path, "rb")
jar_bytes = tmp_jar_file.read()
if len(jar_bytes) <= 0:
raise KeyError("Expect a non-empty Jar.")
garfile.append("{}".format(full_jar_path.split("/")[-1]), jar_bytes)
return garfile
def signature(self):
s = hashlib.sha256()
s.update(
f"{self.type}.{self._full_jar_path}.{self.java_app_class}".encode("utf-8")
)
s.update(self.gar)
return s.hexdigest()
@property
def java_app_class(self):
return self._java_app_class
@property
def jar_name(self):
return self._jar_name
@property
def java_app_type(self):
return self._java_app_type
@property
def frag_param_str(self):
return self._frag_param_str
def __call__(self, graph: Graph, *args, **kwargs):
kwargs_extend = dict(app_class=self.java_app_class, **kwargs)
if not hasattr(graph, "graph_type"):
raise InvalidArgumentError("Missing graph_type attribute in graph object.")
if (
self.java_app_type.find("simple") != -1
and graph.graph_type == graph_def_pb2.ARROW_PROPERTY
):
graph = graph._project_to_simple()
app_ = graph.session._wrapper(JavaAppDagNode(graph, self))
return app_(*args, **kwargs_extend)
class JavaAppDagNode(AppDAGNode):
"""retrict app assets to javaAppAssets"""
def __init__(self, graph: Graph, app_assets: JavaApp):
self._graph = graph
self._app_assets = app_assets
self._session = graph.session
if not self._app_assets.is_compatible(self._graph):
raise Exception(
"No compactiable app and graph: {} and {}".format(
self._app_assets.java_app_type, self._graph.template_str
)
)
self._op = bind_app(graph, self._app_assets)
# add op to dag
self._session.dag.add_op(self._app_assets.op)
self._session.dag.add_op(self._op)
def _convert_arrow_frag_for_java(self, cpp_frag_str: str):
"""Convert vineyard::ArrowFragment<OID,VID> to gs::ArrowFragmentDefault<OID>"""
res = cpp_frag_str.split(",")[0] + ">"
return res.replace("<", "Default<", 1).replace("vineyard", "gs")
def __call__(self, *args, **kwargs):
"""When called, check arguments based on app type, Then do build and query.
Raises:
InvalidArgumentError: If app_type is None,
or positional argument found when app_type not `cpp_pie`.
Returns:
:class:`Context`: Query context, include running results of the app.
"""
check_argument(self._app_assets.type == "java_pie", "expect java_pie app")
if not isinstance(self._graph, DAGNode) and not self._graph.loaded():
raise RuntimeError("The graph is not loaded")
check_argument(not args, "Only support using keyword arguments in cython app.")
if self._app_assets.java_app_type.find("property") != -1:
frag_name_for_java = self._convert_arrow_frag_for_java(
self._graph.template_str
)
logger.info(
"Set frag name to {}, {}".format(
self._graph.template_str, frag_name_for_java
)
)
else:
frag_name_for_java = self._graph.template_str
        # Get the number of workers on each host, so we can determine the Java memory settings.
kwargs_extend = dict(
frag_name=frag_name_for_java,
jar_name=self._app_assets.jar_name,
**kwargs,
)
logger.info("dumping to json {}".format(json.dumps(kwargs_extend)))
return create_context_node(
self._app_assets.context_type, self, self._graph, json.dumps(kwargs_extend)
)
|
py | 1a54edece939210c7571c646a36aca0ce7a0fb56 | from datasette.plugins import DEFAULT_PLUGINS
from datasette.utils import detect_json1
from .fixtures import ( # noqa
app_client,
app_client_no_files,
app_client_with_hash,
app_client_shorter_time_limit,
app_client_larger_cache_size,
app_client_returned_rows_matches_page_size,
app_client_two_attached_databases,
app_client_two_attached_databases_one_immutable,
app_client_conflicting_database_names,
app_client_with_cors,
app_client_with_dot,
app_client_immutable_and_inspect_file,
generate_compound_rows,
generate_sortable_rows,
make_app_client,
EXPECTED_PLUGINS,
METADATA,
)
import json
import pytest
import sys
import urllib
def test_homepage(app_client):
response = app_client.get("/.json")
assert response.status == 200
assert "application/json; charset=utf-8" == response.headers["content-type"]
assert response.json.keys() == {"fixtures": 0}.keys()
d = response.json["fixtures"]
assert d["name"] == "fixtures"
assert d["tables_count"] == 24
assert len(d["tables_and_views_truncated"]) == 5
assert d["tables_and_views_more"] is True
# 4 hidden FTS tables + no_primary_key (hidden in metadata)
assert d["hidden_tables_count"] == 5
# 201 in no_primary_key, plus 5 in other hidden tables:
assert d["hidden_table_rows_sum"] == 206
assert d["views_count"] == 4
def test_homepage_sort_by_relationships(app_client):
response = app_client.get("/.json?_sort=relationships")
assert response.status == 200
tables = [
t["name"] for t in response.json["fixtures"]["tables_and_views_truncated"]
]
assert [
"simple_primary_key",
"complex_foreign_keys",
"roadside_attraction_characteristics",
"searchable_tags",
"foreign_key_references",
] == tables
def test_database_page(app_client):
response = app_client.get("/fixtures.json")
data = response.json
assert "fixtures" == data["database"]
assert [
{
"name": "123_starts_with_digits",
"columns": ["content"],
"primary_keys": [],
"count": 0,
"hidden": False,
"fts_table": None,
"foreign_keys": {"incoming": [], "outgoing": []},
"private": False,
},
{
"name": "Table With Space In Name",
"columns": ["pk", "content"],
"primary_keys": ["pk"],
"count": 0,
"hidden": False,
"fts_table": None,
"foreign_keys": {"incoming": [], "outgoing": []},
"private": False,
},
{
"name": "attraction_characteristic",
"columns": ["pk", "name"],
"primary_keys": ["pk"],
"count": 2,
"hidden": False,
"fts_table": None,
"foreign_keys": {
"incoming": [
{
"other_table": "roadside_attraction_characteristics",
"column": "pk",
"other_column": "characteristic_id",
}
],
"outgoing": [],
},
"private": False,
},
{
"name": "binary_data",
"columns": ["data"],
"primary_keys": [],
"count": 1,
"hidden": False,
"fts_table": None,
"foreign_keys": {"incoming": [], "outgoing": []},
"private": False,
},
{
"name": "complex_foreign_keys",
"columns": ["pk", "f1", "f2", "f3"],
"primary_keys": ["pk"],
"count": 1,
"hidden": False,
"fts_table": None,
"foreign_keys": {
"incoming": [],
"outgoing": [
{
"other_table": "simple_primary_key",
"column": "f3",
"other_column": "id",
},
{
"other_table": "simple_primary_key",
"column": "f2",
"other_column": "id",
},
{
"other_table": "simple_primary_key",
"column": "f1",
"other_column": "id",
},
],
},
"private": False,
},
{
"name": "compound_primary_key",
"columns": ["pk1", "pk2", "content"],
"primary_keys": ["pk1", "pk2"],
"count": 1,
"hidden": False,
"fts_table": None,
"foreign_keys": {"incoming": [], "outgoing": []},
"private": False,
},
{
"name": "compound_three_primary_keys",
"columns": ["pk1", "pk2", "pk3", "content"],
"primary_keys": ["pk1", "pk2", "pk3"],
"count": 1001,
"hidden": False,
"fts_table": None,
"foreign_keys": {"incoming": [], "outgoing": []},
"private": False,
},
{
"name": "custom_foreign_key_label",
"columns": ["pk", "foreign_key_with_custom_label"],
"primary_keys": ["pk"],
"count": 1,
"hidden": False,
"fts_table": None,
"foreign_keys": {
"incoming": [],
"outgoing": [
{
"other_table": "primary_key_multiple_columns_explicit_label",
"column": "foreign_key_with_custom_label",
"other_column": "id",
}
],
},
"private": False,
},
{
"name": "facet_cities",
"columns": ["id", "name"],
"primary_keys": ["id"],
"count": 4,
"hidden": False,
"fts_table": None,
"foreign_keys": {
"incoming": [
{
"other_table": "facetable",
"column": "id",
"other_column": "city_id",
}
],
"outgoing": [],
},
"private": False,
},
{
"name": "facetable",
"columns": [
"pk",
"created",
"planet_int",
"on_earth",
"state",
"city_id",
"neighborhood",
"tags",
"complex_array",
"distinct_some_null",
],
"primary_keys": ["pk"],
"count": 15,
"hidden": False,
"fts_table": None,
"foreign_keys": {
"incoming": [],
"outgoing": [
{
"other_table": "facet_cities",
"column": "city_id",
"other_column": "id",
}
],
},
"private": False,
},
{
"name": "foreign_key_references",
"columns": ["pk", "foreign_key_with_label", "foreign_key_with_no_label"],
"primary_keys": ["pk"],
"count": 2,
"hidden": False,
"fts_table": None,
"foreign_keys": {
"incoming": [],
"outgoing": [
{
"other_table": "primary_key_multiple_columns",
"column": "foreign_key_with_no_label",
"other_column": "id",
},
{
"other_table": "simple_primary_key",
"column": "foreign_key_with_label",
"other_column": "id",
},
],
},
"private": False,
},
{
"name": "infinity",
"columns": ["value"],
"primary_keys": [],
"count": 3,
"hidden": False,
"fts_table": None,
"foreign_keys": {"incoming": [], "outgoing": []},
"private": False,
},
{
"name": "primary_key_multiple_columns",
"columns": ["id", "content", "content2"],
"primary_keys": ["id"],
"count": 1,
"hidden": False,
"fts_table": None,
"foreign_keys": {
"incoming": [
{
"other_table": "foreign_key_references",
"column": "id",
"other_column": "foreign_key_with_no_label",
}
],
"outgoing": [],
},
"private": False,
},
{
"name": "primary_key_multiple_columns_explicit_label",
"columns": ["id", "content", "content2"],
"primary_keys": ["id"],
"count": 1,
"hidden": False,
"fts_table": None,
"foreign_keys": {
"incoming": [
{
"other_table": "custom_foreign_key_label",
"column": "id",
"other_column": "foreign_key_with_custom_label",
}
],
"outgoing": [],
},
"private": False,
},
{
"name": "roadside_attraction_characteristics",
"columns": ["attraction_id", "characteristic_id"],
"primary_keys": [],
"count": 5,
"hidden": False,
"fts_table": None,
"foreign_keys": {
"incoming": [],
"outgoing": [
{
"other_table": "attraction_characteristic",
"column": "characteristic_id",
"other_column": "pk",
},
{
"other_table": "roadside_attractions",
"column": "attraction_id",
"other_column": "pk",
},
],
},
"private": False,
},
{
"name": "roadside_attractions",
"columns": ["pk", "name", "address", "latitude", "longitude"],
"primary_keys": ["pk"],
"count": 4,
"hidden": False,
"fts_table": None,
"foreign_keys": {
"incoming": [
{
"other_table": "roadside_attraction_characteristics",
"column": "pk",
"other_column": "attraction_id",
}
],
"outgoing": [],
},
"private": False,
},
{
"name": "searchable",
"columns": ["pk", "text1", "text2", "name with . and spaces"],
"primary_keys": ["pk"],
"count": 2,
"hidden": False,
"fts_table": "searchable_fts",
"foreign_keys": {
"incoming": [
{
"other_table": "searchable_tags",
"column": "pk",
"other_column": "searchable_id",
}
],
"outgoing": [],
},
"private": False,
},
{
"name": "searchable_tags",
"columns": ["searchable_id", "tag"],
"primary_keys": ["searchable_id", "tag"],
"count": 2,
"hidden": False,
"fts_table": None,
"foreign_keys": {
"incoming": [],
"outgoing": [
{"other_table": "tags", "column": "tag", "other_column": "tag"},
{
"other_table": "searchable",
"column": "searchable_id",
"other_column": "pk",
},
],
},
"private": False,
},
{
"name": "select",
"columns": ["group", "having", "and", "json"],
"primary_keys": [],
"count": 1,
"hidden": False,
"fts_table": None,
"foreign_keys": {"incoming": [], "outgoing": []},
"private": False,
},
{
"name": "simple_primary_key",
"columns": ["id", "content"],
"primary_keys": ["id"],
"count": 4,
"hidden": False,
"fts_table": None,
"foreign_keys": {
"incoming": [
{
"other_table": "foreign_key_references",
"column": "id",
"other_column": "foreign_key_with_label",
},
{
"other_table": "complex_foreign_keys",
"column": "id",
"other_column": "f3",
},
{
"other_table": "complex_foreign_keys",
"column": "id",
"other_column": "f2",
},
{
"other_table": "complex_foreign_keys",
"column": "id",
"other_column": "f1",
},
],
"outgoing": [],
},
"private": False,
},
{
"name": "sortable",
"columns": [
"pk1",
"pk2",
"content",
"sortable",
"sortable_with_nulls",
"sortable_with_nulls_2",
"text",
],
"primary_keys": ["pk1", "pk2"],
"count": 201,
"hidden": False,
"fts_table": None,
"foreign_keys": {"incoming": [], "outgoing": []},
"private": False,
},
{
"name": "table/with/slashes.csv",
"columns": ["pk", "content"],
"primary_keys": ["pk"],
"count": 1,
"hidden": False,
"fts_table": None,
"foreign_keys": {"incoming": [], "outgoing": []},
"private": False,
},
{
"name": "tags",
"columns": ["tag"],
"primary_keys": ["tag"],
"count": 2,
"hidden": False,
"fts_table": None,
"foreign_keys": {
"incoming": [
{
"other_table": "searchable_tags",
"column": "tag",
"other_column": "tag",
}
],
"outgoing": [],
},
"private": False,
},
{
"name": "units",
"columns": ["pk", "distance", "frequency"],
"primary_keys": ["pk"],
"count": 3,
"hidden": False,
"fts_table": None,
"foreign_keys": {"incoming": [], "outgoing": []},
"private": False,
},
{
"name": "no_primary_key",
"columns": ["content", "a", "b", "c"],
"primary_keys": [],
"count": 201,
"hidden": True,
"fts_table": None,
"foreign_keys": {"incoming": [], "outgoing": []},
"private": False,
},
{
"name": "searchable_fts",
"columns": ["text1", "text2", "name with . and spaces", "content"],
"primary_keys": [],
"count": 2,
"hidden": True,
"fts_table": "searchable_fts",
"foreign_keys": {"incoming": [], "outgoing": []},
"private": False,
},
{
"name": "searchable_fts_content",
"columns": [
"docid",
"c0text1",
"c1text2",
"c2name with . and spaces",
"c3content",
],
"primary_keys": ["docid"],
"count": 2,
"hidden": True,
"fts_table": None,
"foreign_keys": {"incoming": [], "outgoing": []},
"private": False,
},
{
"name": "searchable_fts_segdir",
"columns": [
"level",
"idx",
"start_block",
"leaves_end_block",
"end_block",
"root",
],
"primary_keys": ["level", "idx"],
"count": 1,
"hidden": True,
"fts_table": None,
"foreign_keys": {"incoming": [], "outgoing": []},
"private": False,
},
{
"name": "searchable_fts_segments",
"columns": ["blockid", "block"],
"primary_keys": ["blockid"],
"count": 0,
"hidden": True,
"fts_table": None,
"foreign_keys": {"incoming": [], "outgoing": []},
"private": False,
},
] == data["tables"]
def test_no_files_uses_memory_database(app_client_no_files):
response = app_client_no_files.get("/.json")
assert response.status == 200
assert {
":memory:": {
"hash": None,
"color": "f7935d",
"hidden_table_rows_sum": 0,
"hidden_tables_count": 0,
"name": ":memory:",
"show_table_row_counts": False,
"path": "/:memory:",
"table_rows_sum": 0,
"tables_count": 0,
"tables_and_views_more": False,
"tables_and_views_truncated": [],
"views_count": 0,
"private": False,
}
} == response.json
# Try that SQL query
response = app_client_no_files.get(
"/:memory:.json?sql=select+sqlite_version()&_shape=array"
)
assert 1 == len(response.json)
assert ["sqlite_version()"] == list(response.json[0].keys())
def test_database_page_for_database_with_dot_in_name(app_client_with_dot):
response = app_client_with_dot.get("/fixtures.dot.json")
assert 200 == response.status
def test_custom_sql(app_client):
response = app_client.get(
"/fixtures.json?sql=select+content+from+simple_primary_key&_shape=objects"
)
data = response.json
assert {"sql": "select content from simple_primary_key", "params": {}} == data[
"query"
]
assert [
{"content": "hello"},
{"content": "world"},
{"content": ""},
{"content": "RENDER_CELL_DEMO"},
] == data["rows"]
assert ["content"] == data["columns"]
assert "fixtures" == data["database"]
assert not data["truncated"]
def test_canned_query_with_named_parameter(app_client):
response = app_client.get("/fixtures/neighborhood_search.json?text=town")
assert [
["Corktown", "Detroit", "MI"],
["Downtown", "Los Angeles", "CA"],
["Downtown", "Detroit", "MI"],
["Greektown", "Detroit", "MI"],
["Koreatown", "Los Angeles", "CA"],
["Mexicantown", "Detroit", "MI"],
] == response.json["rows"]
def test_sql_time_limit(app_client_shorter_time_limit):
response = app_client_shorter_time_limit.get("/fixtures.json?sql=select+sleep(0.5)")
assert 400 == response.status
assert "SQL Interrupted" == response.json["title"]
def test_custom_sql_time_limit(app_client):
response = app_client.get("/fixtures.json?sql=select+sleep(0.01)")
assert 200 == response.status
response = app_client.get("/fixtures.json?sql=select+sleep(0.01)&_timelimit=5")
assert 400 == response.status
assert "SQL Interrupted" == response.json["title"]
def test_invalid_custom_sql(app_client):
response = app_client.get("/fixtures.json?sql=.schema")
assert response.status == 400
assert response.json["ok"] is False
assert "Statement must be a SELECT" == response.json["error"]
def test_table_json(app_client):
response = app_client.get("/fixtures/simple_primary_key.json?_shape=objects")
assert response.status == 200
data = response.json
assert (
data["query"]["sql"]
== "select id, content from simple_primary_key order by id limit 51"
)
assert data["query"]["params"] == {}
assert data["rows"] == [
{"id": "1", "content": "hello"},
{"id": "2", "content": "world"},
{"id": "3", "content": ""},
{"id": "4", "content": "RENDER_CELL_DEMO"},
]
def test_table_not_exists_json(app_client):
assert {
"ok": False,
"error": "Table not found: blah",
"status": 404,
"title": None,
} == app_client.get("/fixtures/blah.json").json
def test_jsono_redirects_to_shape_objects(app_client_with_hash):
response_1 = app_client_with_hash.get(
"/fixtures/simple_primary_key.jsono", allow_redirects=False
)
response = app_client_with_hash.get(
response_1.headers["Location"], allow_redirects=False
)
assert response.status == 302
assert response.headers["Location"].endswith("?_shape=objects")
def test_table_shape_arrays(app_client):
response = app_client.get("/fixtures/simple_primary_key.json?_shape=arrays")
assert [
["1", "hello"],
["2", "world"],
["3", ""],
["4", "RENDER_CELL_DEMO"],
] == response.json["rows"]
def test_table_shape_arrayfirst(app_client):
response = app_client.get(
"/fixtures.json?"
+ urllib.parse.urlencode(
{
"sql": "select content from simple_primary_key order by id",
"_shape": "arrayfirst",
}
)
)
assert ["hello", "world", "", "RENDER_CELL_DEMO"] == response.json
def test_table_shape_objects(app_client):
response = app_client.get("/fixtures/simple_primary_key.json?_shape=objects")
assert [
{"id": "1", "content": "hello"},
{"id": "2", "content": "world"},
{"id": "3", "content": ""},
{"id": "4", "content": "RENDER_CELL_DEMO"},
] == response.json["rows"]
def test_table_shape_array(app_client):
response = app_client.get("/fixtures/simple_primary_key.json?_shape=array")
assert [
{"id": "1", "content": "hello"},
{"id": "2", "content": "world"},
{"id": "3", "content": ""},
{"id": "4", "content": "RENDER_CELL_DEMO"},
] == response.json
def test_table_shape_array_nl(app_client):
response = app_client.get("/fixtures/simple_primary_key.json?_shape=array&_nl=on")
lines = response.text.split("\n")
results = [json.loads(line) for line in lines]
assert [
{"id": "1", "content": "hello"},
{"id": "2", "content": "world"},
{"id": "3", "content": ""},
{"id": "4", "content": "RENDER_CELL_DEMO"},
] == results
def test_table_shape_invalid(app_client):
response = app_client.get("/fixtures/simple_primary_key.json?_shape=invalid")
assert {
"ok": False,
"error": "Invalid _shape: invalid",
"status": 400,
"title": None,
} == response.json
def test_table_shape_object(app_client):
response = app_client.get("/fixtures/simple_primary_key.json?_shape=object")
assert {
"1": {"id": "1", "content": "hello"},
"2": {"id": "2", "content": "world"},
"3": {"id": "3", "content": ""},
"4": {"id": "4", "content": "RENDER_CELL_DEMO"},
} == response.json
def test_table_shape_object_compound_primary_Key(app_client):
response = app_client.get("/fixtures/compound_primary_key.json?_shape=object")
assert {"a,b": {"pk1": "a", "pk2": "b", "content": "c"}} == response.json
def test_table_with_slashes_in_name(app_client):
response = app_client.get(
"/fixtures/table%2Fwith%2Fslashes.csv?_shape=objects&_format=json"
)
assert response.status == 200
data = response.json
assert data["rows"] == [{"pk": "3", "content": "hey"}]
def test_table_with_reserved_word_name(app_client):
response = app_client.get("/fixtures/select.json?_shape=objects")
assert response.status == 200
data = response.json
assert data["rows"] == [
{
"rowid": 1,
"group": "group",
"having": "having",
"and": "and",
"json": '{"href": "http://example.com/", "label":"Example"}',
}
]
@pytest.mark.parametrize(
"path,expected_rows,expected_pages",
[
("/fixtures/no_primary_key.json", 201, 5),
("/fixtures/paginated_view.json", 201, 9),
("/fixtures/no_primary_key.json?_size=25", 201, 9),
("/fixtures/paginated_view.json?_size=50", 201, 5),
("/fixtures/paginated_view.json?_size=max", 201, 3),
("/fixtures/123_starts_with_digits.json", 0, 1),
# Ensure faceting doesn't break pagination:
("/fixtures/compound_three_primary_keys.json?_facet=pk1", 1001, 21),
# Paginating while sorted by an expanded foreign key should work
(
"/fixtures/roadside_attraction_characteristics.json?_size=2&_sort=attraction_id&_labels=on",
5,
3,
),
],
)
def test_paginate_tables_and_views(app_client, path, expected_rows, expected_pages):
fetched = []
count = 0
while path:
response = app_client.get(path)
assert 200 == response.status
count += 1
fetched.extend(response.json["rows"])
path = response.json["next_url"]
if path:
assert urllib.parse.urlencode({"_next": response.json["next"]}) in path
path = path.replace("http://localhost", "")
assert count < 30, "Possible infinite loop detected"
assert expected_rows == len(fetched)
assert expected_pages == count
@pytest.mark.parametrize(
"path,expected_error",
[
("/fixtures/no_primary_key.json?_size=-4", "_size must be a positive integer"),
("/fixtures/no_primary_key.json?_size=dog", "_size must be a positive integer"),
("/fixtures/no_primary_key.json?_size=1001", "_size must be <= 100"),
],
)
def test_validate_page_size(app_client, path, expected_error):
response = app_client.get(path)
assert expected_error == response.json["error"]
assert 400 == response.status
def test_page_size_zero(app_client):
"For _size=0 we return the counts, empty rows and no continuation token"
response = app_client.get("/fixtures/no_primary_key.json?_size=0")
assert 200 == response.status
assert [] == response.json["rows"]
assert 201 == response.json["filtered_table_rows_count"]
assert None is response.json["next"]
assert None is response.json["next_url"]
def test_paginate_compound_keys(app_client):
fetched = []
path = "/fixtures/compound_three_primary_keys.json?_shape=objects"
page = 0
while path:
page += 1
response = app_client.get(path)
fetched.extend(response.json["rows"])
path = response.json["next_url"]
if path:
path = path.replace("http://localhost", "")
assert page < 100
assert 1001 == len(fetched)
assert 21 == page
# Should be correctly ordered
contents = [f["content"] for f in fetched]
expected = [r[3] for r in generate_compound_rows(1001)]
assert expected == contents
def test_paginate_compound_keys_with_extra_filters(app_client):
fetched = []
path = (
"/fixtures/compound_three_primary_keys.json?content__contains=d&_shape=objects"
)
page = 0
while path:
page += 1
assert page < 100
response = app_client.get(path)
fetched.extend(response.json["rows"])
path = response.json["next_url"]
if path:
path = path.replace("http://localhost", "")
assert 2 == page
expected = [r[3] for r in generate_compound_rows(1001) if "d" in r[3]]
assert expected == [f["content"] for f in fetched]
@pytest.mark.parametrize(
"query_string,sort_key,human_description_en",
[
("_sort=sortable", lambda row: row["sortable"], "sorted by sortable"),
(
"_sort_desc=sortable",
lambda row: -row["sortable"],
"sorted by sortable descending",
),
(
"_sort=sortable_with_nulls",
lambda row: (
1 if row["sortable_with_nulls"] is not None else 0,
row["sortable_with_nulls"],
),
"sorted by sortable_with_nulls",
),
(
"_sort_desc=sortable_with_nulls",
lambda row: (
1 if row["sortable_with_nulls"] is None else 0,
-row["sortable_with_nulls"]
if row["sortable_with_nulls"] is not None
else 0,
row["content"],
),
"sorted by sortable_with_nulls descending",
),
# text column contains '$null' - ensure it doesn't confuse pagination:
("_sort=text", lambda row: row["text"], "sorted by text"),
],
)
def test_sortable(app_client, query_string, sort_key, human_description_en):
path = "/fixtures/sortable.json?_shape=objects&{}".format(query_string)
fetched = []
page = 0
while path:
page += 1
assert page < 100
response = app_client.get(path)
assert human_description_en == response.json["human_description_en"]
fetched.extend(response.json["rows"])
path = response.json["next_url"]
if path:
path = path.replace("http://localhost", "")
assert 5 == page
expected = list(generate_sortable_rows(201))
expected.sort(key=sort_key)
assert [r["content"] for r in expected] == [r["content"] for r in fetched]
def test_sortable_and_filtered(app_client):
path = (
"/fixtures/sortable.json"
"?content__contains=d&_sort_desc=sortable&_shape=objects"
)
response = app_client.get(path)
fetched = response.json["rows"]
assert (
'where content contains "d" sorted by sortable descending'
== response.json["human_description_en"]
)
expected = [row for row in generate_sortable_rows(201) if "d" in row["content"]]
assert len(expected) == response.json["filtered_table_rows_count"]
expected.sort(key=lambda row: -row["sortable"])
assert [r["content"] for r in expected] == [r["content"] for r in fetched]
def test_sortable_argument_errors(app_client):
response = app_client.get("/fixtures/sortable.json?_sort=badcolumn")
assert "Cannot sort table by badcolumn" == response.json["error"]
response = app_client.get("/fixtures/sortable.json?_sort_desc=badcolumn2")
assert "Cannot sort table by badcolumn2" == response.json["error"]
response = app_client.get(
"/fixtures/sortable.json?_sort=sortable_with_nulls&_sort_desc=sortable"
)
assert "Cannot use _sort and _sort_desc at the same time" == response.json["error"]
def test_sortable_columns_metadata(app_client):
response = app_client.get("/fixtures/sortable.json?_sort=content")
assert "Cannot sort table by content" == response.json["error"]
# no_primary_key has ALL sort options disabled
for column in ("content", "a", "b", "c"):
response = app_client.get("/fixtures/sortable.json?_sort={}".format(column))
assert "Cannot sort table by {}".format(column) == response.json["error"]
@pytest.mark.parametrize(
"path,expected_rows",
[
(
"/fixtures/searchable.json?_search=dog",
[
[1, "barry cat", "terry dog", "panther"],
[2, "terry dog", "sara weasel", "puma"],
],
),
(
# Special keyword shouldn't break FTS query
"/fixtures/searchable.json?_search=AND",
[],
),
(
# Without _searchmode=raw this should return no results
"/fixtures/searchable.json?_search=te*+AND+do*",
[],
),
(
# _searchmode=raw
"/fixtures/searchable.json?_search=te*+AND+do*&_searchmode=raw",
[
[1, "barry cat", "terry dog", "panther"],
[2, "terry dog", "sara weasel", "puma"],
],
),
(
"/fixtures/searchable.json?_search=weasel",
[[2, "terry dog", "sara weasel", "puma"]],
),
(
"/fixtures/searchable.json?_search_text2=dog",
[[1, "barry cat", "terry dog", "panther"]],
),
(
"/fixtures/searchable.json?_search_name%20with%20.%20and%20spaces=panther",
[[1, "barry cat", "terry dog", "panther"]],
),
],
)
def test_searchable(app_client, path, expected_rows):
response = app_client.get(path)
assert expected_rows == response.json["rows"]
@pytest.mark.parametrize(
"path,expected_rows",
[
(
"/fixtures/searchable_view_configured_by_metadata.json?_search=weasel",
[[2, "terry dog", "sara weasel", "puma"]],
),
# This should return all results because search is not configured:
(
"/fixtures/searchable_view.json?_search=weasel",
[
[1, "barry cat", "terry dog", "panther"],
[2, "terry dog", "sara weasel", "puma"],
],
),
(
"/fixtures/searchable_view.json?_search=weasel&_fts_table=searchable_fts&_fts_pk=pk",
[[2, "terry dog", "sara weasel", "puma"]],
),
],
)
def test_searchable_views(app_client, path, expected_rows):
response = app_client.get(path)
assert expected_rows == response.json["rows"]
def test_searchable_invalid_column(app_client):
response = app_client.get("/fixtures/searchable.json?_search_invalid=x")
assert 400 == response.status
assert {
"ok": False,
"error": "Cannot search by that column",
"status": 400,
"title": None,
} == response.json
@pytest.mark.parametrize(
"path,expected_rows",
[
("/fixtures/simple_primary_key.json?content=hello", [["1", "hello"]]),
(
"/fixtures/simple_primary_key.json?content__contains=o",
[["1", "hello"], ["2", "world"], ["4", "RENDER_CELL_DEMO"]],
),
("/fixtures/simple_primary_key.json?content__exact=", [["3", ""]]),
(
"/fixtures/simple_primary_key.json?content__not=world",
[["1", "hello"], ["3", ""], ["4", "RENDER_CELL_DEMO"]],
),
],
)
def test_table_filter_queries(app_client, path, expected_rows):
response = app_client.get(path)
assert expected_rows == response.json["rows"]
def test_table_filter_queries_multiple_of_same_type(app_client):
response = app_client.get(
"/fixtures/simple_primary_key.json?content__not=world&content__not=hello"
)
assert [["3", ""], ["4", "RENDER_CELL_DEMO"]] == response.json["rows"]
@pytest.mark.skipif(not detect_json1(), reason="Requires the SQLite json1 module")
def test_table_filter_json_arraycontains(app_client):
response = app_client.get("/fixtures/facetable.json?tags__arraycontains=tag1")
assert [
[
1,
"2019-01-14 08:00:00",
1,
1,
"CA",
1,
"Mission",
'["tag1", "tag2"]',
'[{"foo": "bar"}]',
"one",
],
[
2,
"2019-01-14 08:00:00",
1,
1,
"CA",
1,
"Dogpatch",
'["tag1", "tag3"]',
"[]",
"two",
],
] == response.json["rows"]
def test_table_filter_extra_where(app_client):
response = app_client.get("/fixtures/facetable.json?_where=neighborhood='Dogpatch'")
assert [
[
2,
"2019-01-14 08:00:00",
1,
1,
"CA",
1,
"Dogpatch",
'["tag1", "tag3"]',
"[]",
"two",
]
] == response.json["rows"]
def test_table_filter_extra_where_invalid(app_client):
response = app_client.get("/fixtures/facetable.json?_where=neighborhood=Dogpatch'")
assert 400 == response.status
assert "Invalid SQL" == response.json["title"]
def test_table_filter_extra_where_disabled_if_no_sql_allowed():
with make_app_client(metadata={"allow_sql": {}}) as client:
response = client.get("/fixtures/facetable.json?_where=neighborhood='Dogpatch'")
assert 403 == response.status
assert "_where= is not allowed" == response.json["error"]
def test_table_through(app_client):
# Just the museums:
response = app_client.get(
'/fixtures/roadside_attractions.json?_through={"table":"roadside_attraction_characteristics","column":"characteristic_id","value":"1"}'
)
assert [
[
3,
"Burlingame Museum of PEZ Memorabilia",
"214 California Drive, Burlingame, CA 94010",
37.5793,
-122.3442,
],
[
4,
"Bigfoot Discovery Museum",
"5497 Highway 9, Felton, CA 95018",
37.0414,
-122.0725,
],
] == response.json["rows"]
assert (
'where roadside_attraction_characteristics.characteristic_id = "1"'
== response.json["human_description_en"]
)
def test_max_returned_rows(app_client):
response = app_client.get("/fixtures.json?sql=select+content+from+no_primary_key")
data = response.json
assert {"sql": "select content from no_primary_key", "params": {}} == data["query"]
assert data["truncated"]
assert 100 == len(data["rows"])
def test_view(app_client):
response = app_client.get("/fixtures/simple_view.json?_shape=objects")
assert response.status == 200
data = response.json
assert data["rows"] == [
{"upper_content": "HELLO", "content": "hello"},
{"upper_content": "WORLD", "content": "world"},
{"upper_content": "", "content": ""},
{"upper_content": "RENDER_CELL_DEMO", "content": "RENDER_CELL_DEMO"},
]
def test_row(app_client):
response = app_client.get("/fixtures/simple_primary_key/1.json?_shape=objects")
assert response.status == 200
assert [{"id": "1", "content": "hello"}] == response.json["rows"]
def test_row_format_in_querystring(app_client):
# regression test for https://github.com/simonw/datasette/issues/563
response = app_client.get(
"/fixtures/simple_primary_key/1?_format=json&_shape=objects"
)
assert response.status == 200
assert [{"id": "1", "content": "hello"}] == response.json["rows"]
def test_row_strange_table_name(app_client):
response = app_client.get(
"/fixtures/table%2Fwith%2Fslashes.csv/3.json?_shape=objects"
)
assert response.status == 200
assert [{"pk": "3", "content": "hey"}] == response.json["rows"]
def test_row_foreign_key_tables(app_client):
response = app_client.get(
"/fixtures/simple_primary_key/1.json?_extras=foreign_key_tables"
)
assert response.status == 200
assert [
{
"column": "id",
"count": 1,
"other_column": "foreign_key_with_label",
"other_table": "foreign_key_references",
},
{
"column": "id",
"count": 1,
"other_column": "f3",
"other_table": "complex_foreign_keys",
},
{
"column": "id",
"count": 0,
"other_column": "f2",
"other_table": "complex_foreign_keys",
},
{
"column": "id",
"count": 1,
"other_column": "f1",
"other_table": "complex_foreign_keys",
},
] == response.json["foreign_key_tables"]
def test_unit_filters(app_client):
response = app_client.get(
"/fixtures/units.json?distance__lt=75km&frequency__gt=1kHz"
)
assert response.status == 200
data = response.json
assert data["units"]["distance"] == "m"
assert data["units"]["frequency"] == "Hz"
assert len(data["rows"]) == 1
assert data["rows"][0][0] == 2
def test_databases_json(app_client_two_attached_databases_one_immutable):
response = app_client_two_attached_databases_one_immutable.get("/-/databases.json")
databases = response.json
assert 2 == len(databases)
extra_database, fixtures_database = databases
assert "extra database" == extra_database["name"]
assert None == extra_database["hash"]
assert True == extra_database["is_mutable"]
assert False == extra_database["is_memory"]
assert "fixtures" == fixtures_database["name"]
assert fixtures_database["hash"] is not None
assert False == fixtures_database["is_mutable"]
assert False == fixtures_database["is_memory"]
def test_metadata_json(app_client):
response = app_client.get("/-/metadata.json")
assert METADATA == response.json
def test_threads_json(app_client):
response = app_client.get("/-/threads.json")
expected_keys = {"threads", "num_threads"}
if sys.version_info >= (3, 7, 0):
expected_keys.update({"tasks", "num_tasks"})
assert expected_keys == set(response.json.keys())
def test_plugins_json(app_client):
response = app_client.get("/-/plugins.json")
assert EXPECTED_PLUGINS == sorted(response.json, key=lambda p: p["name"])
# Try with ?all=1
response = app_client.get("/-/plugins.json?all=1")
names = {p["name"] for p in response.json}
assert names.issuperset(p["name"] for p in EXPECTED_PLUGINS)
assert names.issuperset(DEFAULT_PLUGINS)
def test_versions_json(app_client):
response = app_client.get("/-/versions.json")
assert "python" in response.json
assert "3.0" == response.json.get("asgi")
assert "version" in response.json["python"]
assert "full" in response.json["python"]
assert "datasette" in response.json
assert "version" in response.json["datasette"]
assert "sqlite" in response.json
assert "version" in response.json["sqlite"]
assert "fts_versions" in response.json["sqlite"]
assert "compile_options" in response.json["sqlite"]
def test_config_json(app_client):
response = app_client.get("/-/config.json")
assert {
"default_page_size": 50,
"default_facet_size": 30,
"facet_suggest_time_limit_ms": 50,
"facet_time_limit_ms": 200,
"max_returned_rows": 100,
"sql_time_limit_ms": 200,
"allow_download": True,
"allow_facet": True,
"suggest_facets": True,
"default_cache_ttl": 5,
"default_cache_ttl_hashed": 365 * 24 * 60 * 60,
"num_sql_threads": 3,
"cache_size_kb": 0,
"allow_csv_stream": True,
"max_csv_mb": 100,
"truncate_cells_html": 2048,
"force_https_urls": False,
"hash_urls": False,
"template_debug": False,
"base_url": "/",
} == response.json
def test_page_size_matching_max_returned_rows(
app_client_returned_rows_matches_page_size,
):
fetched = []
path = "/fixtures/no_primary_key.json"
while path:
response = app_client_returned_rows_matches_page_size.get(path)
fetched.extend(response.json["rows"])
assert len(response.json["rows"]) in (1, 50)
path = response.json["next_url"]
if path:
path = path.replace("http://localhost", "")
assert 201 == len(fetched)
@pytest.mark.parametrize(
"path,expected_facet_results",
[
(
"/fixtures/facetable.json?_facet=state&_facet=city_id",
{
"state": {
"name": "state",
"hideable": True,
"type": "column",
"toggle_url": "/fixtures/facetable.json?_facet=city_id",
"results": [
{
"value": "CA",
"label": "CA",
"count": 10,
"toggle_url": "_facet=state&_facet=city_id&state=CA",
"selected": False,
},
{
"value": "MI",
"label": "MI",
"count": 4,
"toggle_url": "_facet=state&_facet=city_id&state=MI",
"selected": False,
},
{
"value": "MC",
"label": "MC",
"count": 1,
"toggle_url": "_facet=state&_facet=city_id&state=MC",
"selected": False,
},
],
"truncated": False,
},
"city_id": {
"name": "city_id",
"hideable": True,
"type": "column",
"toggle_url": "/fixtures/facetable.json?_facet=state",
"results": [
{
"value": 1,
"label": "San Francisco",
"count": 6,
"toggle_url": "_facet=state&_facet=city_id&city_id=1",
"selected": False,
},
{
"value": 2,
"label": "Los Angeles",
"count": 4,
"toggle_url": "_facet=state&_facet=city_id&city_id=2",
"selected": False,
},
{
"value": 3,
"label": "Detroit",
"count": 4,
"toggle_url": "_facet=state&_facet=city_id&city_id=3",
"selected": False,
},
{
"value": 4,
"label": "Memnonia",
"count": 1,
"toggle_url": "_facet=state&_facet=city_id&city_id=4",
"selected": False,
},
],
"truncated": False,
},
},
),
(
"/fixtures/facetable.json?_facet=state&_facet=city_id&state=MI",
{
"state": {
"name": "state",
"hideable": True,
"type": "column",
"toggle_url": "/fixtures/facetable.json?_facet=city_id&state=MI",
"results": [
{
"value": "MI",
"label": "MI",
"count": 4,
"selected": True,
"toggle_url": "_facet=state&_facet=city_id",
}
],
"truncated": False,
},
"city_id": {
"name": "city_id",
"hideable": True,
"type": "column",
"toggle_url": "/fixtures/facetable.json?_facet=state&state=MI",
"results": [
{
"value": 3,
"label": "Detroit",
"count": 4,
"selected": False,
"toggle_url": "_facet=state&_facet=city_id&state=MI&city_id=3",
}
],
"truncated": False,
},
},
),
(
"/fixtures/facetable.json?_facet=planet_int",
{
"planet_int": {
"name": "planet_int",
"hideable": True,
"type": "column",
"toggle_url": "/fixtures/facetable.json",
"results": [
{
"value": 1,
"label": 1,
"count": 14,
"selected": False,
"toggle_url": "_facet=planet_int&planet_int=1",
},
{
"value": 2,
"label": 2,
"count": 1,
"selected": False,
"toggle_url": "_facet=planet_int&planet_int=2",
},
],
"truncated": False,
}
},
),
(
# planet_int is an integer field:
"/fixtures/facetable.json?_facet=planet_int&planet_int=1",
{
"planet_int": {
"name": "planet_int",
"hideable": True,
"type": "column",
"toggle_url": "/fixtures/facetable.json?planet_int=1",
"results": [
{
"value": 1,
"label": 1,
"count": 14,
"selected": True,
"toggle_url": "_facet=planet_int",
}
],
"truncated": False,
}
},
),
],
)
def test_facets(app_client, path, expected_facet_results):
response = app_client.get(path)
facet_results = response.json["facet_results"]
# We only compare the querystring portion of the toggle_url
for facet_name, facet_info in facet_results.items():
assert facet_name == facet_info["name"]
assert False is facet_info["truncated"]
for facet_value in facet_info["results"]:
facet_value["toggle_url"] = facet_value["toggle_url"].split("?")[1]
assert expected_facet_results == facet_results
def test_suggested_facets(app_client):
suggestions = [
{
"name": suggestion["name"],
"querystring": suggestion["toggle_url"].split("?")[-1],
}
for suggestion in app_client.get("/fixtures/facetable.json").json[
"suggested_facets"
]
]
expected = [
{"name": "created", "querystring": "_facet=created"},
{"name": "planet_int", "querystring": "_facet=planet_int"},
{"name": "on_earth", "querystring": "_facet=on_earth"},
{"name": "state", "querystring": "_facet=state"},
{"name": "city_id", "querystring": "_facet=city_id"},
{"name": "neighborhood", "querystring": "_facet=neighborhood"},
{"name": "tags", "querystring": "_facet=tags"},
{"name": "complex_array", "querystring": "_facet=complex_array"},
{"name": "created", "querystring": "_facet_date=created"},
]
if detect_json1():
expected.append({"name": "tags", "querystring": "_facet_array=tags"})
assert expected == suggestions
def test_allow_facet_off():
with make_app_client(config={"allow_facet": False}) as client:
assert 400 == client.get("/fixtures/facetable.json?_facet=planet_int").status
# Should not suggest any facets either:
assert [] == client.get("/fixtures/facetable.json").json["suggested_facets"]
def test_suggest_facets_off():
with make_app_client(config={"suggest_facets": False}) as client:
# Now suggested_facets should be []
assert [] == client.get("/fixtures/facetable.json").json["suggested_facets"]
def test_expand_labels(app_client):
response = app_client.get(
"/fixtures/facetable.json?_shape=object&_labels=1&_size=2"
"&neighborhood__contains=c"
)
assert {
"2": {
"pk": 2,
"created": "2019-01-14 08:00:00",
"planet_int": 1,
"on_earth": 1,
"state": "CA",
"city_id": {"value": 1, "label": "San Francisco"},
"neighborhood": "Dogpatch",
"tags": '["tag1", "tag3"]',
"complex_array": "[]",
"distinct_some_null": "two",
},
"13": {
"pk": 13,
"created": "2019-01-17 08:00:00",
"planet_int": 1,
"on_earth": 1,
"state": "MI",
"city_id": {"value": 3, "label": "Detroit"},
"neighborhood": "Corktown",
"tags": "[]",
"complex_array": "[]",
"distinct_some_null": None,
},
} == response.json
def test_expand_label(app_client):
response = app_client.get(
"/fixtures/foreign_key_references.json?_shape=object"
"&_label=foreign_key_with_label&_size=1"
)
assert {
"1": {
"pk": "1",
"foreign_key_with_label": {"value": "1", "label": "hello"},
"foreign_key_with_no_label": "1",
}
} == response.json
@pytest.mark.parametrize(
"path,expected_cache_control",
[
("/fixtures/facetable.json", "max-age=5"),
("/fixtures/facetable.json?_ttl=invalid", "max-age=5"),
("/fixtures/facetable.json?_ttl=10", "max-age=10"),
("/fixtures/facetable.json?_ttl=0", "no-cache"),
],
)
def test_ttl_parameter(app_client, path, expected_cache_control):
response = app_client.get(path)
assert expected_cache_control == response.headers["Cache-Control"]
@pytest.mark.parametrize(
"path,expected_redirect",
[
("/fixtures/facetable.json?_hash=1", "/fixtures-HASH/facetable.json"),
(
"/fixtures/facetable.json?city_id=1&_hash=1",
"/fixtures-HASH/facetable.json?city_id=1",
),
],
)
def test_hash_parameter(
app_client_two_attached_databases_one_immutable, path, expected_redirect
):
# First get the current hash for the fixtures database
current_hash = app_client_two_attached_databases_one_immutable.ds.databases[
"fixtures"
].hash[:7]
response = app_client_two_attached_databases_one_immutable.get(
path, allow_redirects=False
)
assert response.status == 302
location = response.headers["Location"]
assert expected_redirect.replace("HASH", current_hash) == location
def test_hash_parameter_ignored_for_mutable_databases(app_client):
path = "/fixtures/facetable.json?_hash=1"
response = app_client.get(path, allow_redirects=False)
assert response.status == 200
test_json_columns_default_expected = [
{"intval": 1, "strval": "s", "floatval": 0.5, "jsonval": '{"foo": "bar"}'}
]
@pytest.mark.parametrize(
"extra_args,expected",
[
("", test_json_columns_default_expected),
("&_json=intval", test_json_columns_default_expected),
("&_json=strval", test_json_columns_default_expected),
("&_json=floatval", test_json_columns_default_expected),
(
"&_json=jsonval",
[{"intval": 1, "strval": "s", "floatval": 0.5, "jsonval": {"foo": "bar"}}],
),
],
)
def test_json_columns(app_client, extra_args, expected):
sql = """
select 1 as intval, "s" as strval, 0.5 as floatval,
'{"foo": "bar"}' as jsonval
"""
path = "/fixtures.json?" + urllib.parse.urlencode({"sql": sql, "_shape": "array"})
path += extra_args
response = app_client.get(path)
assert expected == response.json
def test_config_cache_size(app_client_larger_cache_size):
response = app_client_larger_cache_size.get("/fixtures/pragma_cache_size.json")
assert [[-2500]] == response.json["rows"]
def test_config_force_https_urls():
with make_app_client(config={"force_https_urls": True}) as client:
response = client.get("/fixtures/facetable.json?_size=3&_facet=state")
assert response.json["next_url"].startswith("https://")
assert response.json["facet_results"]["state"]["results"][0][
"toggle_url"
].startswith("https://")
assert response.json["suggested_facets"][0]["toggle_url"].startswith("https://")
# Also confirm that request.url and request.scheme are set correctly
response = client.get("/")
assert client.ds._last_request.url.startswith("https://")
assert client.ds._last_request.scheme == "https"
def test_infinity_returned_as_null(app_client):
response = app_client.get("/fixtures/infinity.json?_shape=array")
assert [
{"rowid": 1, "value": None},
{"rowid": 2, "value": None},
{"rowid": 3, "value": 1.5},
] == response.json
def test_infinity_returned_as_invalid_json_if_requested(app_client):
response = app_client.get("/fixtures/infinity.json?_shape=array&_json_infinity=1")
assert [
{"rowid": 1, "value": float("inf")},
{"rowid": 2, "value": float("-inf")},
{"rowid": 3, "value": 1.5},
] == response.json
def test_custom_query_with_unicode_characters(app_client):
response = app_client.get("/fixtures/𝐜𝐢𝐭𝐢𝐞𝐬.json?_shape=array")
assert [{"id": 1, "name": "San Francisco"}] == response.json
def test_trace(app_client):
response = app_client.get("/fixtures/simple_primary_key.json?_trace=1")
data = response.json
assert "_trace" in data
trace_info = data["_trace"]
assert isinstance(trace_info["request_duration_ms"], float)
assert isinstance(trace_info["sum_trace_duration_ms"], float)
assert isinstance(trace_info["num_traces"], int)
assert isinstance(trace_info["traces"], list)
assert len(trace_info["traces"]) == trace_info["num_traces"]
for trace in trace_info["traces"]:
assert isinstance(trace["type"], str)
assert isinstance(trace["start"], float)
assert isinstance(trace["end"], float)
assert trace["duration_ms"] == (trace["end"] - trace["start"]) * 1000
assert isinstance(trace["traceback"], list)
assert isinstance(trace["database"], str)
assert isinstance(trace["sql"], str)
assert isinstance(trace["params"], (list, dict, None.__class__))
@pytest.mark.parametrize(
"path,status_code",
[
("/fixtures.json", 200),
("/fixtures/no_primary_key.json", 200),
# A 400 invalid SQL query should still have the header:
("/fixtures.json?sql=select+blah", 400),
],
)
def test_cors(app_client_with_cors, path, status_code):
response = app_client_with_cors.get(path)
assert response.status == status_code
assert "*" == response.headers["Access-Control-Allow-Origin"]
@pytest.mark.parametrize(
"path",
(
"/",
".json",
"/searchable",
"/searchable.json",
"/searchable_view",
"/searchable_view.json",
),
)
def test_database_with_space_in_name(app_client_two_attached_databases, path):
response = app_client_two_attached_databases.get("/extra database" + path)
assert response.status == 200
def test_common_prefix_database_names(app_client_conflicting_database_names):
# https://github.com/simonw/datasette/issues/597
assert ["fixtures", "foo", "foo-bar"] == [
d["name"]
for d in app_client_conflicting_database_names.get("/-/databases.json").json
]
for db_name, path in (("foo", "/foo.json"), ("foo-bar", "/foo-bar.json")):
data = app_client_conflicting_database_names.get(path).json
assert db_name == data["database"]
def test_null_foreign_keys_are_not_expanded(app_client):
response = app_client.get(
"/fixtures/foreign_key_references.json?_shape=array&_labels=on"
)
assert [
{
"pk": "1",
"foreign_key_with_label": {"value": "1", "label": "hello"},
"foreign_key_with_no_label": {"value": "1", "label": "1"},
},
{"pk": "2", "foreign_key_with_label": None, "foreign_key_with_no_label": None,},
] == response.json
def test_inspect_file_used_for_count(app_client_immutable_and_inspect_file):
response = app_client_immutable_and_inspect_file.get("/fixtures/sortable.json")
assert response.json["filtered_table_rows_count"] == 100
|
py | 1a54ee1633b535bc7d4356e53cdbb28b365e53fd | """
Background Music Example
If Python and Arcade are installed, this example can be run from the command line with:
python -m arcade.examples.background_music
"""
import time
import arcade
SCREEN_WIDTH = 600
SCREEN_HEIGHT = 300
SCREEN_TITLE = "Starting Template Simple"
MUSIC_VOLUME = 0.5
class MyGame(arcade.Window):
"""Main application class."""
def __init__(self, width, height, title):
super().__init__(width, height, title)
arcade.set_background_color(arcade.color.WHITE)
# Variables used to manage our music. See setup() for giving them
# values.
self.music_list = []
self.current_song_index = 0
self.current_player = None
self.music = None
def advance_song(self):
"""Advance our pointer to the next song. This does NOT start the song."""
self.current_song_index += 1
if self.current_song_index >= len(self.music_list):
self.current_song_index = 0
print(f"Advancing song to {self.current_song_index}.")
def play_song(self):
"""Play the song."""
# Stop what is currently playing.
if self.music:
self.music.stop(self.current_player)
# Play the next song
print(f"Playing {self.music_list[self.current_song_index]}")
self.music = arcade.Sound(
self.music_list[self.current_song_index], streaming=True
)
self.current_player = self.music.play(MUSIC_VOLUME)
# This is a quick delay. If we don't do this, our elapsed time is 0.0
# and on_update will think the music is over and advance us to the next
# song before starting this one.
time.sleep(0.03)
def setup(self):
"""Set up the game here. Call this function to restart the game."""
# List of music
self.music_list = [
":resources:music/funkyrobot.mp3",
":resources:music/1918.mp3",
]
# Array index of what to play
self.current_song_index = 0
# Play the song
self.play_song()
def on_draw(self):
"""Render the screen."""
arcade.start_render()
position = self.music.get_stream_position(self.current_player)
length = self.music.get_length()
size = 20
margin = size * 0.5
# Print time elapsed and total
y = SCREEN_HEIGHT - (size + margin)
text = f"{int(position) // 60}:{int(position) % 60:02} of {int(length) // 60}:{int(length) % 60:02}"
arcade.draw_text(text, 0, y, arcade.csscolor.BLACK, size)
# Print current song
y -= size + margin
text = f"Currently playing: {self.music_list[self.current_song_index]}"
arcade.draw_text(text, 0, y, arcade.csscolor.BLACK, size)
def on_update(self, dt):
position = self.music.get_stream_position(self.current_player)
# The position pointer is reset to 0 right after we finish the song.
# This makes it very difficult to figure out if we just started playing
# or if we are done playing.
if position == 0.0:
self.advance_song()
self.play_song()
def main():
"""Main method"""
window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
window.setup()
arcade.run()
if __name__ == "__main__":
main()
|
py | 1a54ee7b357e75b34d92e36af5c0c52703c355e7 | from enum import Enum
class KlineInterval(str, Enum):
ONE_MINUTE = '1m'
THREE_MINUTES = '3m'
FIVE_MINUTES = '5m'
FIFTEEN_MINUTES = '15m'
THIRTY_MINUTES = '30m'
ONE_HOUR = '1h'
TWO_HOURS = '2h'
FOUR_HOURS = '4h'
SIX_HOURS = '6h'
EIGHT_HOURS = '8h'
TWELVE_HOURS = '12h'
ONE_DAY = '1d'
THREE_DAYS = '3d'
ONE_WEEK = '1w'
ONE_MONTH = '1M'
class OrderStatus(str, Enum):
ACK = 'Ack'
PARTIAL_FILL = 'PartialFill'
IOC_NO_FILL = 'IocNoFill'
FULLY_FILL = 'FullyFill'
CANCELED = 'Canceled'
EXPIRED = 'Expired'
FAILED_BLOCKING = 'FailedBlocking'
FAILED_MATCHING = 'FailedMatching'
class OrderSide(str, Enum):
BUY = 'buy'
SELL = 'sell'
class TimeInForce(str, Enum):
GOOD_TILL_EXPIRE = "GTE"
IMMEDIATE_OR_CANCEL = "IOC"
class TransactionSide(str, Enum):
RECEIVE = 'RECEIVE'
SEND = 'SEND'
class TransactionType(str, Enum):
NEW_ORDER = 'NEW_ORDER'
ISSUE_TOKEN = 'ISSUE_TOKEN'
BURN_TOKEN = 'BURN_TOKEN'
LIST_TOKEN = 'LIST_TOKEN'
CANCEL_ORDER = 'CANCEL_ORDER'
FREEZE_TOKEN = 'FREEZE_TOKEN'
UN_FREEZE_TOKEN = 'UN_FREEZE_TOKEN'
TRANSFER = 'TRANSFER'
PROPOSAL = 'PROPOSAL'
VOTE = 'VOTE'
class OrderType(str, Enum):
LIMIT = "LIMIT"
class PeerType(str, Enum):
NODE = 'node'
WEBSOCKET = 'ws'
class RpcBroadcastRequestType(int, Enum):
SYNC = 1
ASYNC = 2
COMMIT = 3
|
py | 1a54efb3c4639f987e40302650a7bb04e662294c | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The Project U-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import edalize
import os
work_root = 'build'
post_imp_file = os.path.realpath(os.path.join(work_root, 'post.tcl'))
os.makedirs(work_root, exist_ok=True)
synth_tool = 'vivado'
srcs = [
'lowrisc_constants_top_pkg_0/rtl/top_pkg.sv',
'lowrisc_dv_pins_if_0/pins_if.sv',
'lowrisc_prim_generic_clock_gating_0/rtl/prim_generic_clock_gating.sv',
'lowrisc_prim_generic_clock_mux2_0/rtl/prim_generic_clock_mux2.sv',
'lowrisc_prim_generic_flash_0/rtl/prim_generic_flash.sv',
'lowrisc_prim_generic_pad_wrapper_0/rtl/prim_generic_pad_wrapper.sv',
'lowrisc_prim_generic_ram_1p_0/rtl/prim_generic_ram_1p.sv',
'lowrisc_prim_generic_ram_2p_0/rtl/prim_generic_ram_2p.sv',
'lowrisc_prim_prim_pkg_0.1/rtl/prim_pkg.sv',
'lowrisc_prim_xilinx_clock_gating_0/rtl/prim_xilinx_clock_gating.sv',
'lowrisc_prim_xilinx_clock_mux2_0/rtl/prim_xilinx_clock_mux2.sv',
'lowrisc_prim_xilinx_pad_wrapper_0/rtl/prim_xilinx_pad_wrapper.sv',
'lowrisc_prim_xilinx_ram_2p_0/rtl/prim_xilinx_ram_2p.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_pkg.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_alu.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_compressed_decoder.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_controller.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_cs_registers.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_decoder.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_ex_block.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_fetch_fifo.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_id_stage.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_if_stage.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_load_store_unit.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_multdiv_fast.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_multdiv_slow.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_prefetch_buffer.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_pmp.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_register_file_ff.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_core.sv',
'lowrisc_ip_flash_ctrl_pkg_0.1/rtl/flash_ctrl_pkg.sv',
'lowrisc_prim_clock_gating_0/abstract/prim_clock_gating.sv',
'lowrisc_prim_clock_mux2_0/abstract/prim_clock_mux2.sv',
'lowrisc_prim_diff_decode_0/rtl/prim_diff_decode.sv',
'lowrisc_prim_pad_wrapper_0/abstract/prim_pad_wrapper.sv',
'lowrisc_prim_ram_1p_0/abstract/prim_ram_1p.sv',
'lowrisc_prim_ram_2p_0/abstract/prim_ram_2p.sv',
'lowrisc_tlul_headers_0.1/rtl/tlul_pkg.sv',
'lowrisc_prim_all_0.1/rtl/prim_clock_inverter.sv',
'lowrisc_prim_all_0.1/rtl/prim_alert_receiver.sv',
'lowrisc_prim_all_0.1/rtl/prim_alert_sender.sv',
'lowrisc_prim_all_0.1/rtl/prim_arbiter_ppc.sv',
'lowrisc_prim_all_0.1/rtl/prim_arbiter_tree.sv',
'lowrisc_prim_all_0.1/rtl/prim_esc_receiver.sv',
'lowrisc_prim_all_0.1/rtl/prim_esc_sender.sv',
'lowrisc_prim_all_0.1/rtl/prim_sram_arbiter.sv',
'lowrisc_prim_all_0.1/rtl/prim_fifo_async.sv',
'lowrisc_prim_all_0.1/rtl/prim_fifo_sync.sv',
'lowrisc_prim_all_0.1/rtl/prim_flop_2sync.sv',
'lowrisc_prim_all_0.1/rtl/prim_lfsr.sv',
'lowrisc_prim_all_0.1/rtl/prim_packer.sv',
'lowrisc_prim_all_0.1/rtl/prim_pulse_sync.sv',
'lowrisc_prim_all_0.1/rtl/prim_filter.sv',
'lowrisc_prim_all_0.1/rtl/prim_filter_ctr.sv',
'lowrisc_prim_all_0.1/rtl/prim_subreg.sv',
'lowrisc_prim_all_0.1/rtl/prim_subreg_ext.sv',
'lowrisc_prim_all_0.1/rtl/prim_intr_hw.sv',
'lowrisc_prim_all_0.1/rtl/prim_secded_39_32_enc.sv',
'lowrisc_prim_all_0.1/rtl/prim_secded_39_32_dec.sv',
'lowrisc_prim_all_0.1/rtl/prim_ram_2p_adv.sv',
'lowrisc_prim_all_0.1/rtl/prim_ram_2p_async_adv.sv',
'lowrisc_prim_flash_0/abstract/prim_flash.sv',
'lowrisc_top_earlgrey_alert_handler_reg_0.1/rtl/autogen/alert_handler_reg_pkg.sv',
'lowrisc_top_earlgrey_alert_handler_reg_0.1/rtl/autogen/alert_handler_reg_top.sv',
'lowrisc_top_earlgrey_pinmux_reg_0.1/rtl/autogen/pinmux_reg_pkg.sv',
'lowrisc_top_earlgrey_pinmux_reg_0.1/rtl/autogen/pinmux_reg_top.sv',
'lowrisc_ip_usb_fs_nb_pe_0.1/rtl/usb_consts_pkg.sv',
'lowrisc_ip_usb_fs_nb_pe_0.1/rtl/usb_fs_nb_in_pe.sv',
'lowrisc_ip_usb_fs_nb_pe_0.1/rtl/usb_fs_nb_out_pe.sv',
'lowrisc_ip_usb_fs_nb_pe_0.1/rtl/usb_fs_nb_pe.sv',
'lowrisc_ip_usb_fs_nb_pe_0.1/rtl/usb_fs_rx.sv',
'lowrisc_ip_usb_fs_nb_pe_0.1/rtl/usb_fs_tx.sv',
'lowrisc_ip_usb_fs_nb_pe_0.1/rtl/usb_fs_tx_mux.sv',
'lowrisc_prim_generic_rom_0/rtl/prim_generic_rom.sv',
'lowrisc_prim_xilinx_rom_0/rtl/prim_xilinx_rom.sv',
'lowrisc_tlul_common_0.1/rtl/tlul_fifo_sync.sv',
'lowrisc_tlul_common_0.1/rtl/tlul_fifo_async.sv',
'lowrisc_tlul_common_0.1/rtl/tlul_assert.sv',
'lowrisc_tlul_common_0.1/rtl/tlul_err.sv',
'lowrisc_tlul_common_0.1/rtl/tlul_assert_multiple.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/debug_rom/debug_rom.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/src/dm_pkg.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/src/dm_sba.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/src/dm_csrs.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/src/dm_mem.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/src/dmi_cdc.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/src/dmi_jtag.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/src/dmi_jtag_tap.sv',
'lowrisc_prim_rom_0/abstract/prim_rom.sv',
'lowrisc_tlul_adapter_reg_0.1/rtl/tlul_adapter_reg.sv',
'lowrisc_tlul_adapter_sram_0.1/rtl/tlul_adapter_sram.sv',
'lowrisc_tlul_socket_1n_0.1/rtl/tlul_err_resp.sv',
'lowrisc_tlul_socket_1n_0.1/rtl/tlul_socket_1n.sv',
'lowrisc_tlul_socket_m1_0.1/rtl/tlul_socket_m1.sv',
'lowrisc_tlul_sram2tlul_0.1/rtl/sram2tlul.sv',
'lowrisc_ip_aes_0.5/rtl/aes_pkg.sv',
'lowrisc_ip_aes_0.5/rtl/aes_reg_pkg.sv',
'lowrisc_ip_aes_0.5/rtl/aes_reg_top.sv',
'lowrisc_ip_aes_0.5/rtl/aes_core.sv',
'lowrisc_ip_aes_0.5/rtl/aes_control.sv',
'lowrisc_ip_aes_0.5/rtl/aes_cipher_core.sv',
'lowrisc_ip_aes_0.5/rtl/aes_cipher_control.sv',
'lowrisc_ip_aes_0.5/rtl/aes_sub_bytes.sv',
'lowrisc_ip_aes_0.5/rtl/aes_sbox.sv',
'lowrisc_ip_aes_0.5/rtl/aes_sbox_lut.sv',
'lowrisc_ip_aes_0.5/rtl/aes_sbox_canright.sv',
'lowrisc_ip_aes_0.5/rtl/aes_shift_rows.sv',
'lowrisc_ip_aes_0.5/rtl/aes_mix_columns.sv',
'lowrisc_ip_aes_0.5/rtl/aes_mix_single_column.sv',
'lowrisc_ip_aes_0.5/rtl/aes_key_expand.sv',
'lowrisc_ip_aes_0.5/rtl/aes.sv',
'lowrisc_ip_alert_handler_component_0.1/rtl/alert_pkg.sv',
'lowrisc_ip_alert_handler_component_0.1/rtl/alert_handler_reg_wrap.sv',
'lowrisc_ip_alert_handler_component_0.1/rtl/alert_handler_class.sv',
'lowrisc_ip_alert_handler_component_0.1/rtl/alert_handler_ping_timer.sv',
'lowrisc_ip_alert_handler_component_0.1/rtl/alert_handler_esc_timer.sv',
'lowrisc_ip_alert_handler_component_0.1/rtl/alert_handler_accu.sv',
'lowrisc_ip_alert_handler_component_0.1/rtl/alert_handler.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_ctrl_reg_pkg.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_ctrl_reg_top.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_ctrl.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_erase_ctrl.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_prog_ctrl.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_rd_ctrl.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_mp.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_phy.sv',
'lowrisc_ip_gpio_0.1/rtl/gpio_reg_pkg.sv',
'lowrisc_ip_gpio_0.1/rtl/gpio.sv',
'lowrisc_ip_gpio_0.1/rtl/gpio_reg_top.sv',
'lowrisc_ip_hmac_0.1/rtl/hmac_pkg.sv',
'lowrisc_ip_hmac_0.1/rtl/sha2.sv',
'lowrisc_ip_hmac_0.1/rtl/sha2_pad.sv',
'lowrisc_ip_hmac_0.1/rtl/hmac_reg_pkg.sv',
'lowrisc_ip_hmac_0.1/rtl/hmac_reg_top.sv',
'lowrisc_ip_hmac_0.1/rtl/hmac_core.sv',
'lowrisc_ip_hmac_0.1/rtl/hmac.sv',
'lowrisc_ip_nmi_gen_0.1/rtl/nmi_gen_reg_pkg.sv',
'lowrisc_ip_nmi_gen_0.1/rtl/nmi_gen_reg_top.sv',
'lowrisc_ip_nmi_gen_0.1/rtl/nmi_gen.sv',
'lowrisc_ip_pinmux_component_0.1/rtl/pinmux.sv',
'lowrisc_ip_rv_core_ibex_0.1/rtl/rv_core_ibex.sv',
'lowrisc_ip_rv_dm_0.1/rtl/rv_dm.sv',
'lowrisc_ip_rv_dm_0.1/rtl/tlul_adapter_host.sv',
'lowrisc_ip_rv_plic_component_0.1/rtl/rv_plic_gateway.sv',
'lowrisc_ip_rv_plic_component_0.1/rtl/rv_plic_target.sv',
'lowrisc_ip_rv_timer_0.1/rtl/rv_timer_reg_pkg.sv',
'lowrisc_ip_rv_timer_0.1/rtl/rv_timer_reg_top.sv',
'lowrisc_ip_rv_timer_0.1/rtl/timer_core.sv',
'lowrisc_ip_rv_timer_0.1/rtl/rv_timer.sv',
'lowrisc_ip_spi_device_0.1/rtl/spi_device_reg_pkg.sv',
'lowrisc_ip_spi_device_0.1/rtl/spi_device_reg_top.sv',
'lowrisc_ip_spi_device_0.1/rtl/spi_device_pkg.sv',
'lowrisc_ip_spi_device_0.1/rtl/spi_fwm_rxf_ctrl.sv',
'lowrisc_ip_spi_device_0.1/rtl/spi_fwm_txf_ctrl.sv',
'lowrisc_ip_spi_device_0.1/rtl/spi_fwmode.sv',
'lowrisc_ip_spi_device_0.1/rtl/spi_device.sv',
'lowrisc_ip_uart_0.1/rtl/uart_reg_pkg.sv',
'lowrisc_ip_uart_0.1/rtl/uart_reg_top.sv',
'lowrisc_ip_uart_0.1/rtl/uart_rx.sv',
'lowrisc_ip_uart_0.1/rtl/uart_tx.sv',
'lowrisc_ip_uart_0.1/rtl/uart_core.sv',
'lowrisc_ip_uart_0.1/rtl/uart.sv',
'lowrisc_ip_usbdev_0.1/rtl/usbdev_reg_pkg.sv',
'lowrisc_ip_usbdev_0.1/rtl/usbdev_reg_top.sv',
'lowrisc_ip_usbdev_0.1/rtl/usbdev_usbif.sv',
'lowrisc_ip_usbdev_0.1/rtl/usbdev_flop_2syncpulse.sv',
'lowrisc_ip_usbdev_0.1/rtl/usbdev_linkstate.sv',
'lowrisc_ip_usbdev_0.1/rtl/usbdev_iomux.sv',
'lowrisc_ip_usbdev_0.1/rtl/usbdev.sv',
'lowrisc_ip_xbar_main_0.1/tl_main_pkg.sv',
'lowrisc_ip_xbar_main_0.1/xbar_main.sv',
'lowrisc_ip_xbar_peri_0.1/tl_peri_pkg.sv',
'lowrisc_ip_xbar_peri_0.1/xbar_peri.sv',
'lowrisc_top_earlgrey_rv_plic_0.1/rtl/autogen/rv_plic_reg_pkg.sv',
'lowrisc_top_earlgrey_rv_plic_0.1/rtl/autogen/rv_plic_reg_top.sv',
'lowrisc_top_earlgrey_rv_plic_0.1/rtl/autogen/rv_plic.sv',
'lowrisc_systems_top_earlgrey_0.1/rtl/padctl.sv',
'lowrisc_systems_top_earlgrey_0.1/rtl/autogen/top_earlgrey.sv',
'lowrisc_systems_top_earlgrey_zcu104_0.1/rtl/clkgen_xilusp.sv',
'lowrisc_systems_top_earlgrey_zcu104_0.1/rtl/top_earlgrey_zcu104.sv',
]
with open(post_imp_file, 'w') as f:
f.write('write_checkpoint -force design.dcp')
files = [{
'name':
os.path.realpath(
'lowrisc_systems_top_earlgrey_zcu104_0.1/data/pins_zcu104.xdc'),
'file_type':
'xdc'
},
{
'name':
os.path.realpath('lowrisc_prim_assert_0.1/rtl/prim_assert.sv'),
'file_type':
'systemVerilogSource',
'is_include_file':
'true'
}]
parameters = {
'ROM_INIT_FILE': {
'datatype': 'str',
'paramtype': 'vlogdefine'
},
'PRIM_DEFAULT_IMPL': {
'datatype': 'str',
'paramtype': 'vlogdefine'
},
}
for src in srcs:
files.append({
'name': os.path.realpath(src),
'file_type': 'systemVerilogSource'
})
tool = 'vivado'
incdirs = [os.path.realpath('lowrisc_prim_assert_0.1/rtl')]
edam = {
'files': files,
'name': 'design',
'toplevel': 'top_earlgrey_zcu104',
'parameters': parameters,
'tool_options': {
'vivado': {
'part': os.environ['URAY_PART'],
'post_imp': post_imp_file,
'synth': synth_tool
}
}
}
backend = edalize.get_edatool(tool)(edam=edam, work_root=work_root)
args = [
'--ROM_INIT_FILE={}'.format(
os.path.realpath('boot_rom_fpga_nexysvideo.vmem')),
'--PRIM_DEFAULT_IMPL=prim_pkg::ImplXilinx'
]
backend.configure(args)
backend.build()
|
py | 1a54efd018dc112c661bd2f1734f6594bf92be02 | import dataclasses
@dataclasses.dataclass
class A1:
a: int
b: dataclasses.InitVar[str]
def __post_init__(self, b: str):
print(f"b: {b}")
@dataclasses.dataclass
class B1(A1):
c: dataclasses.InitVar[int]
def __post_init__(self, b: str, c: int):
super(B1, self).__post_init__(b)
print(f"c: {c}") |
py | 1a54f125eba19a3271ce90e79457e06b81c1575d | # Copyright 2014-2020 Chris Cummins <[email protected]>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility code for working with sqlalchemy."""
import contextlib
import os
import pathlib
import queue
import sqlite3
import sys
import threading
import time
import typing
from typing import Callable
from typing import List
from typing import Optional
import sqlalchemy as sql
from absl import flags as absl_flags
from sqlalchemy import func
from sqlalchemy import orm
from sqlalchemy.dialects import mysql
from sqlalchemy.ext import declarative
from labm8.py import humanize
from labm8.py import labdate
from labm8.py import pbutil
from labm8.py import progress
from labm8.py import text
from labm8.py.internal import labm8_logging as logging
FLAGS = absl_flags.FLAGS
absl_flags.DEFINE_boolean(
"sqlutil_echo",
False,
"If True, the Engine will log all statements as well as a repr() of their "
"parameter lists to the engines logger, which defaults to sys.stdout.",
)
absl_flags.DEFINE_boolean(
"sqlutil_pool_pre_ping",
True,
"Enable pessimistic pre-ping to check that database connections are "
"alive. This adds some overhead, but reduces the risk of "
'"server has gone away" errors. See:'
"<https://docs.sqlalchemy.org/en/13/core/pooling.html#disconnect-handling-pessimistic>",
)
absl_flags.DEFINE_integer(
"mysql_engine_pool_size",
5,
"The number of connections to keep open inside the connection pool. A "
"--mysql_engine_pool_size of 0 indicates no limit",
)
absl_flags.DEFINE_integer(
"mysql_engine_max_overflow",
10,
"The number of connections to allow in connection pool “overflow”, that "
"is connections that can be opened above and beyond the "
"--mysql_engine_pool_size setting",
)
absl_flags.DEFINE_boolean(
"mysql_assume_utf8_charset",
True,
"Default to adding the '?charset=utf8' suffix to MySQL database URLs.",
)
absl_flags.DEFINE_boolean(
"sqlite_enable_foreign_keys",
True,
"Enable foreign key support for SQLite. This enforces foreign key "
"constraints, and enables cascaded update/delete statements. See: "
"https://docs.sqlalchemy.org/en/13/dialects/sqlite.html#foreign-key-support",
)
# The Query type is returned by Session.query(). This is a convenience for type
# annotations.
Query = orm.query.Query
class DatabaseNotFound(FileNotFoundError):
"""An error that is raised if the requested database cannot be found."""
def __init__(self, url: str):
self._url = url
@property
def url(self):
return self._url
def __repr__(self) -> str:
return f"Database not found: '{self.url}'"
def __str__(self) -> str:
return repr(self)
def Base(*args, **kwargs) -> sql.ext.declarative.DeclarativeMeta:
"""Construct a base class for declarative class definitions."""
return sql.ext.declarative.declarative_base(*args, **kwargs)
def GetOrAdd(
session: sql.orm.session.Session,
model,
defaults: typing.Dict[str, object] = None,
**kwargs,
):
"""Instantiate a mapped database object.
If the object is not in the database,
add it. Note that no change is written to disk until commit() is called on the
session.
Args:
session: The database session.
model: The database table class.
defaults: Default values for mapped objects.
kwargs: The values for the table row.
Returns:
An instance of the model class, with the values specified.
"""
instance = session.query(model).filter_by(**kwargs).first()
if not instance:
params = {
k: v
for k, v in kwargs.items()
if not isinstance(v, sql.sql.expression.ClauseElement)
}
params.update(defaults or {})
instance = model(**params)
session.add(instance)
logging.Log(
logging.GetCallingModuleName(),
5,
"New record: %s(%s)",
model.__name__,
params,
)
return instance
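# --- Editor's illustrative sketch; not part of the original labm8 module. ---
# A minimal usage example for GetOrAdd() against an in-memory SQLite database.
# The `KeyValue` model and its column names are hypothetical and exist only to
# show the call pattern; nothing here runs at import time.
def _example_get_or_add() -> None:
  base = Base()
  class KeyValue(base):
    __tablename__ = "key_values"
    key = sql.Column(sql.String(64), primary_key=True)
    value = sql.Column(sql.String(64))
  engine = CreateEngine("sqlite://")
  base.metadata.create_all(engine)
  session = orm.sessionmaker(bind=engine)()
  # The first call adds a pending row; after commit, the second call finds the
  # row and returns the same mapped instance from the session's identity map.
  a = GetOrAdd(session, KeyValue, defaults={"value": "bar"}, key="foo")
  session.commit()
  b = GetOrAdd(session, KeyValue, key="foo")
  assert a is b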
def Get(
session: sql.orm.session.Session,
model,
defaults: typing.Dict[str, object] = None,
**kwargs,
):
"""Determine if a database object exists.
Args:
session: The database session.
model: The database table class.
defaults: Default values for mapped objects.
kwargs: The values for the table row.
Returns:
An instance of the model class with the values specified, or None if the
object is not in the database.
"""
del defaults
return session.query(model).filter_by(**kwargs).first()
def CreateEngine(url: str, must_exist: bool = False) -> sql.engine.Engine:
"""Create an sqlalchemy database engine.
This is a convenience wrapper for creating an sqlalchemy engine, that also
creates the database if required, and checks that the database exists. This
means that it is less flexible than SqlAlchemy's create_engine() - only three
combinations of dialects and drivers are supported: sqlite, mysql, and
postgresql.
See https://docs.sqlalchemy.org/en/latest/core/engines.html for details.
Additionally, this implements a custom 'file://' handler, which reads a URL
from a local file, and returns a connection to the database addressed by the
URL. Use this if you would like to keep sensitive information such as a MySQL
database password out of your .bash_history.
Examples:
Create in-memory SQLite database:
>>> engine = CreateEngine('sqlite://')
Connect to an SQLite database at relative.db:
>>> engine = CreateEngine('sqlite:///relative.db')
Connect to an SQLite database at /absolute/path/to/db:
>>> engine = CreateEngine('sqlite:////absolute/path/to/db')
Connect to MySQL database:
>>> engine = CreateEngine(
'mysql://bob:password@localhost:1234/database?charset=utf8')
Connect to PostgreSQL database:
>>> engine.CreateEngine(
'postgresql://bob:password@localhost:1234/database')
Connect to a URL specified in the file /tmp/url.txt:
>>> engine.CreateEngine('file:///tmp/url.txt')
Connect to a URL specified in the file /tmp/url.txt, with the suffix
'/database?charset=utf8':
>>> engine.CreateEngine('file:///tmp/url.txt?/database?charset=utf8')
Args:
url: The URL of the database to connect to.
must_exist: If True, raise DatabaseNotFound if it doesn't exist. Else,
database is created if it doesn't exist.
Returns:
An SQLalchemy Engine instance.
Raises:
DatabaseNotFound: If the database does not exist and must_exist is set.
ValueError: If the datastore backend is not supported.
"""
engine_args = {}
# Read and expand a `file://` prefixed URL.
url = ResolveUrl(url)
if url.startswith("mysql://"):
# Support for MySQL dialect.
# We create a throwaway engine that we use to check if the requested
# database exists.
engine = sql.create_engine("/".join(url.split("/")[:-1]))
database = url.split("/")[-1].split("?")[0]
query = engine.execute(
sql.text(
"SELECT SCHEMA_NAME FROM "
"INFORMATION_SCHEMA.SCHEMATA WHERE "
"SCHEMA_NAME = :database",
),
database=database,
)
# Engine-specific options.
engine_args["pool_size"] = FLAGS.mysql_engine_pool_size
engine_args["max_overflow"] = FLAGS.mysql_engine_max_overflow
if not query.first():
if must_exist:
raise DatabaseNotFound(url)
else:
# We can't use sql.text() escaping here because it uses single quotes
# for escaping. MySQL only accepts backticks for quoting database
# names.
engine.execute(f"CREATE DATABASE `{database}`")
engine.dispose()
elif url.startswith("sqlite://"):
# Support for SQLite dialect.
# This project (phd) deliberately disallows relative paths due to Bazel
# sandboxing.
if url != "sqlite://" and not url.startswith("sqlite:////"):
raise ValueError("Relative path to SQLite database is not allowed")
if url == "sqlite://":
if must_exist:
raise ValueError(
"must_exist=True not valid for in-memory SQLite database",
)
else:
path = pathlib.Path(url[len("sqlite:///") :])
if must_exist:
if not path.is_file():
raise DatabaseNotFound(url)
else:
# Make the parent directory for SQLite database if creating a new
# database.
path.parent.mkdir(parents=True, exist_ok=True)
elif url.startswith("postgresql://"):
# Support for PostgreSQL dialect.
engine = sql.create_engine("/".join(url.split("/")[:-1] + ["postgres"]))
conn = engine.connect()
database = url.split("/")[-1]
query = conn.execute(
sql.text("SELECT 1 FROM pg_database WHERE datname = :database"),
database=database,
)
if not query.first():
if must_exist:
raise DatabaseNotFound(url)
else:
# PostgreSQL does not let you create databases within a transaction, so
# manually complete the transaction before creating the database.
conn.execute(sql.text("COMMIT"))
# PostgreSQL does not allow single quoting of database names.
conn.execute(f"CREATE DATABASE {database}")
conn.close()
engine.dispose()
else:
raise ValueError(f"Unsupported database URL='{url}'")
# Create the engine.
engine = sql.create_engine(
url,
encoding="utf-8",
echo=FLAGS.sqlutil_echo,
pool_pre_ping=FLAGS.sqlutil_pool_pre_ping,
**engine_args,
)
# Create and immediately close a connection. This is because SQLAlchemy engine
# is lazily instantiated, so for connections such as SQLite, this line
# actually creates the file.
engine.connect().close()
return engine
@sql.event.listens_for(sql.engine.Engine, "connect")
def EnableSqliteForeignKeysCallback(dbapi_connection, connection_record):
"""Enable foreign key constraints for SQLite databases.
See --sqlite_enable_foreign_keys for details.
"""
del connection_record
# This callback listens for *all* database connections, not just SQLite. Check
# the type before trying to run an SQLite-specific pragma.
if FLAGS.sqlite_enable_foreign_keys and isinstance(
dbapi_connection, sqlite3.Connection
):
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
cursor.close()
def ResolveUrl(url: str, use_flags: bool = True):
"""Resolve the URL of a database.
The following modifications are supported:
* If the url begins with 'file://', the URL is substituted with the
contents of the file.
* If --mysql_assume_utf8_charset is set, then '?charset=utf8' suffix is
appended to URLs which begin with mysql://.
* Shell variables are expanded.
Args:
url: The URL to expand, e.g. `file://path/to/file.txt?arg'
use_flags: Determine whether behaviour is dictated by the FLAGS variables.
Set this to False only when resolving database URLs before flags parsing,
e.g. in enumerating test fixtures.
Returns:
The URL as interpreted by reading any URL file.
Raises:
ValueError: If the file path is invalid.
FileNotFoundError: IF the file path does not exist.
"""
# Substitute shell variables.
url = os.path.expandvars(url)
if url.startswith("file://"):
# Split the URL into the file path, and the optional suffix.
components = url.split("?")
path, suffix = components[0], "?".join(components[1:])
# Strip the file:// prefix from the path.
path = pathlib.Path(path[len("file://") :])
if not path.is_absolute():
raise ValueError("Relative path to file:// is not allowed")
if not path.is_file():
raise FileNotFoundError(f"File '{path}' not found")
# Read the contents of the file, ignoring lines starting with '#'.
with open(path) as f:
url = "\n".join(
x for x in f.read().split("\n") if not x.lstrip().startswith("#")
).strip()
# Append the suffix.
url += suffix
if (
use_flags and url.startswith("mysql://") and FLAGS.mysql_assume_utf8_charset
):
url += "?charset=utf8"
return url
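# --- Editor's illustrative sketch; not part of the original labm8 module. ---
# How ResolveUrl() expands a 'file://' URL. The path below is hypothetical and
# the helper is never invoked at import time; it only shows the call pattern.
def _example_resolve_url() -> str:
  # Suppose /tmp/mysql_url.txt contains the single line
  # 'mysql://bob:password@localhost:1234'. ResolveUrl() reads that file,
  # appends the '/database' suffix given after the '?', and, because
  # --mysql_assume_utf8_charset defaults to True, adds '?charset=utf8',
  # yielding 'mysql://bob:password@localhost:1234/database?charset=utf8'.
  return ResolveUrl("file:///tmp/mysql_url.txt?/database")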
def ColumnNames(model) -> typing.List[str]:
"""Return the names of all columns in a mapped object.
Args:
model: A mapped class.
Returns:
A list of string column names in the order that they are declared.
"""
try:
inst = sql.inspect(model)
return [c_attr.key for c_attr in inst.mapper.column_attrs]
except sql.exc.NoInspectionAvailable as e:
raise TypeError(str(e))
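# --- Editor's illustrative sketch; not part of the original labm8 module. ---
# ColumnNames() returns the mapped columns of a class (or instance) in
# declaration order. The `Widget` model below is hypothetical.
def _example_column_names() -> typing.List[str]:
  base = Base()
  class Widget(base):
    __tablename__ = "widgets"
    id = sql.Column(sql.Integer, primary_key=True)
    name = sql.Column(sql.String(32))
  return ColumnNames(Widget)  # -> ["id", "name"]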
class Session(orm.session.Session):
"""A subclass of the default SQLAlchemy Session with added functionality.
An instance of this class is returned by Database.Session().
"""
def GetOrAdd(
self, model, defaults: typing.Dict[str, object] = None, **kwargs
):
"""Instantiate a mapped database object.
If the object is not in the database, add it. Note that no change is written
to disk until commit() is called on the session.
Args:
model: The database table class.
defaults: Default values for mapped objects.
kwargs: The values for the table row.
Returns:
An instance of the model class, with the values specified.
"""
return GetOrAdd(self, model, defaults, **kwargs)
class Database(object):
"""A base class for implementing databases."""
SessionType = Session
def __init__(self, url: str, declarative_base, must_exist: bool = False):
"""Instantiate a database object.
Example:
>>> db = Database('sqlite:////tmp/foo.db',
sqlalchemy.ext.declarative.declarative_base())
Args:
url: The URL of the database to connect to.
declarative_base: The SQLAlchemy declarative base instance.
must_exist: If True, raise DatabaseNotFound if it doesn't exist. Else,
database is created if it doesn't exist.
Raises:
DatabaseNotFound: If the database does not exist and must_exist is set.
ValueError: If the datastore backend is not supported.
"""
self._url = url
self.engine = CreateEngine(url, must_exist=must_exist)
declarative_base.metadata.create_all(self.engine)
declarative_base.metadata.bind = self.engine
# Bind the Engine to a session maker, which instantiates our own Session
# class, which is a subclass of the default SQLAlchemy Session with added
# functionality.
self.MakeSession = orm.sessionmaker(bind=self.engine, class_=Session)
def Close(self) -> None:
"""Close the connection to the database.
Use this to free up the connection to a database, while keeping the database
instance around. After calling this method, attempting to run operations on
this database will raise an error (like a sqlalchemy.exc.OperationalError).
Usage of this method is generally discouraged - connections are
automatically closed up when a database instance is garbage collected, so
there are rarely cases for leaving a database instance around with the
connection closed. Use at your peril!
"""
self.engine.dispose()
def Drop(self, are_you_sure_about_this_flag: bool = False):
"""Drop the database, irreverisbly destroying it.
Be careful with this! After calling this method an a Database instance, no
further operations can be made on it, and any Sessions should be discarded.
Args:
are_you_sure_about_this_flag: You should be sure.
Raises:
ValueError: In case you're not 100% sure.
"""
if not are_you_sure_about_this_flag:
raise ValueError("Let's take a minute to think things over")
if self.url.startswith("mysql://"):
engine = sql.create_engine("/".join(self.url.split("/")[:-1]))
database = self.url.split("/")[-1].split("?")[0]
logging.Log(logging.GetCallingModuleName(), 1, "database %s", database)
engine.execute(f"DROP DATABASE IF EXISTS `{database}`")
elif self.url == "sqlite://":
# In-memory databases have nothing to drop.
pass
elif self.url.startswith("sqlite:///"):
path = pathlib.Path(self.url[len("sqlite:///") :])
assert path.is_file()
path.unlink()
else:
raise NotImplementedError(
f"Unsupported operation DROP for database: '{self.url}'",
)
@property
def url(self) -> str:
"""Return the URL of the database."""
return self._url
@contextlib.contextmanager
def Session(
self, commit: bool = False, session: Optional[Session] = None
) -> Session:
"""Provide a transactional scope around a session.
The optional session argument may be used for cases where you want to
optionally re-use an existing session, rather than always creating a new
session, e.g.:
class MyDatabase(sqlutil.Database):
def DoAThing(self, session=None):
with self.Session(session=session, commit=True):
# go nuts ...
Args:
commit: If true, commit session at the end of scope.
session: An existing session object to re-use.
Returns:
A database session.
"""
session = session or self.MakeSession()
try:
yield session
if commit:
session.commit()
except:
session.rollback()
raise
finally:
session.close()
@property
def Random(self):
"""Get the backend-specific random function.
This can be used to select a random row from a table, e.g.
session.query(Table).order_by(db.Random()).first()
"""
if self.url.startswith("mysql"):
return func.rand
else:
return func.random # for PostgreSQL, SQLite
def __repr__(self) -> str:
return self.url
class TablenameFromClassNameMixin(object):
"""A class mixin which derives __tablename__ from the class name.
Add this mixin to a mapped table class to automatically set the
__tablename__ property of a class to the lowercase name of the Python class.
"""
@declarative.declared_attr
def __tablename__(self):
return self.__name__.lower()
class TablenameFromCamelCapsClassNameMixin(object):
"""A class mixin which derives __tablename__ from the class name.
Add this mixin to a mapped table class to automatically set the
__tablename__ property of a class to the name of the Python class with camel
caps converted to underscores, e.g.
class FooBar -> table "foo_bar".
"""
@declarative.declared_attr
def __tablename__(self):
return text.CamelCapsToUnderscoreSeparated(self.__name__)
class PluralTablenameFromCamelCapsClassNameMixin(object):
"""A class mixin which derives __tablename__ from the class name.
Add this mixin to a mapped table class to automatically set the
__tablename__ property of a class to the pluralized name of the Python class
with camel caps converted to underscores, e.g.
class FooBar -> table "foo_bars".
"""
@declarative.declared_attr
def __tablename__(self):
pluralised = humanize.Plural(2, self.__name__)
pluralised = " ".join(pluralised.split()[1:])
return text.CamelCapsToUnderscoreSeparated(pluralised)
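# Illustrative sketch only (hypothetical class, not part of this module): with
# a declarative `Base`, the pluralising mixin maps the class below to a table
# named "content_files":
#
#   class ContentFile(PluralTablenameFromCamelCapsClassNameMixin, Base):
#     id = sql.Column(sql.Integer, primary_key=True)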
class ProtoBackedMixin(object):
"""A database table backed by protocol buffers.
This class provides the abstract interface for sqlalchemy table classes which
support serialization to and from protocol buffers.
This is only an interface - inheriting classes must still inherit from
sqlalchemy.ext.declarative.declarative_base().
"""
proto_t = None
def SetProto(self, proto: pbutil.ProtocolBuffer) -> None:
"""Set the fields of a protocol buffer with the values from the instance.
Args:
proto: A protocol buffer.
"""
raise NotImplementedError(
f"{type(self).__name__}.SetProto() not implemented",
)
def ToProto(self) -> pbutil.ProtocolBuffer:
"""Serialize the instance to protocol buffer.
Returns:
A protocol buffer.
"""
proto = self.proto_t()
self.SetProto(proto)
return proto
@classmethod
def FromProto(
cls, proto: pbutil.ProtocolBuffer,
) -> typing.Dict[str, typing.Any]:
"""Return a dictionary of instance constructor args from proto.
Examples:
Construct a table instance from proto:
>>> table = Table(**Table.FromProto(proto))
Construct a table instance and add to session:
>>> session.GetOrAdd(Table, **Table.FromProto(proto))
Args:
proto: A protocol buffer.
Returns:
A dictionary of constructor arguments.
"""
raise NotImplementedError(
f"{type(self).__name__}.FromProto() not implemented",
)
@classmethod
def FromFile(cls, path: pathlib.Path) -> typing.Dict[str, typing.Any]:
"""Return a dictionary of instance constructor args from proto file.
Examples:
Construct a table instance from proto file:
>>> table = Table(**Table.FromFile(path))
Construct a table instance and add to session:
>>> session.GetOrAdd(Table, **Table.FromFile(path))
Args:
path: Path to a proto file.
Returns:
A dictionary of constructor arguments.
"""
proto = pbutil.FromFile(path, cls.proto_t())
return cls.FromProto(proto)
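# Illustrative sketch only: a subclass is expected to set `proto_t` to its
# protocol buffer class and implement SetProto() and FromProto(); the base
# class then provides ToProto() and FromFile() for free. A hypothetical
# `MyTable` subclass would be used like:
#
#   session.GetOrAdd(MyTable, **MyTable.FromProto(proto))
#   proto_out = my_table_instance.ToProto()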
class OffsetLimitQueryResultsBatch(typing.NamedTuple):
"""The results of an offset-limit batched query."""
# The current batch number.
batch_num: int
# Offset into the results set.
offset: int
# The upper offset bound of this batch (offset + batch_size).
limit: int
# The total number of rows in the query if compute_max_rows=True, else None.
max_rows: int
# The results of the query.
rows: typing.List[typing.Any]
def OffsetLimitBatchedQuery(
query: Query,
batch_size: int = 1000,
start_at: int = 0,
compute_max_rows: bool = False,
) -> typing.Iterator[OffsetLimitQueryResultsBatch]:
"""Split and return the rows resulting from a query in to batches.
This iteratively runs the query `SELECT * FROM * OFFSET i LIMIT batch_size;`
with `i` initialized to `start_at` and increasing by `batch_size` per
iteration. Iteration terminates when the query returns no rows.
This function is useful for returning row sets from enormous tables, where
loading the full query results in to memory would take prohibitive time or
resources.
Args:
query: The query to run.
batch_size: The number of rows to return per batch.
start_at: The initial offset into the table.
compute_max_rows: If True, run an initial count() query to compute the total number of rows before iterating.
Returns:
A generator of OffsetLimitQueryResultsBatch tuples, where each tuple
contains between 1 <= x <= `batch_size` rows.
"""
max_rows = None
if compute_max_rows:
max_rows = query.count()
batch_num = 0
i = start_at
while True:
batch_num += 1
batch = query.offset(i).limit(batch_size).all()
if batch:
yield OffsetLimitQueryResultsBatch(
batch_num=batch_num,
offset=i,
limit=i + batch_size,
max_rows=max_rows,
rows=batch,
)
i += len(batch)
else:
break
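# Illustrative sketch only: stream a large table in batches. `Result` is a
# hypothetical mapped class and `db` an existing Database instance.
def _ExampleBatchedScan(db: Database, Result) -> int:
  """Count rows without materialising the whole result set in memory."""
  n = 0
  with db.Session() as session:
    query = session.query(Result)
    for batch in OffsetLimitBatchedQuery(query, batch_size=512):
      # Each batch holds between 1 and 512 mapped objects.
      n += len(batch.rows)
  return n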
class ColumnTypes(object):
"""Abstract class containing methods for generating column types."""
def __init__(self):
raise TypeError("abstract class")
@staticmethod
def BinaryArray(length: int):
"""Return a fixed size binary array column type.
Args:
length: The length of the column.
Returns:
A column type.
"""
return sql.Binary(length).with_variant(mysql.BINARY(length), "mysql")
@staticmethod
def LargeBinary():
"""Return a fixed size binary array column type.
Returns:
A column type.
"""
return sql.LargeBinary().with_variant(sql.LargeBinary(2 ** 31), "mysql")
@staticmethod
def UnboundedUnicodeText():
"""Return an unbounded unicode text column type.
This isn't truly unbounded, but 2^31 chars should be enough!
Returns:
A column type.
"""
return sql.UnicodeText().with_variant(sql.UnicodeText(2 ** 31), "mysql")
@staticmethod
def IndexableString(length: int = None):
"""Return a string that is short enough that it can be used as an index.
Returns:
A column type.
"""
# MySQL InnoDB tables use a default index key prefix length limit of 767.
# https://dev.mysql.com/doc/refman/5.6/en/innodb-restrictions.html
MAX_LENGTH = 767
if length and length > MAX_LENGTH:
raise ValueError(
f"IndexableString requested length {length} is greater "
f"than maximum allowed {MAX_LENGTH}",
)
return sql.String(MAX_LENGTH)
@staticmethod
def MillisecondDatetime():
"""Return a datetime type with millisecond precision.
Returns:
A column type.
"""
return sql.DateTime().with_variant(mysql.DATETIME(fsp=3), "mysql")
class ColumnFactory(object):
"""Abstract class containing methods for generating columns."""
@staticmethod
def MillisecondDatetime(
nullable: bool = False, default=labdate.GetUtcMillisecondsNow,
):
"""Return a datetime column with millisecond precision.
Returns:
A column which defaults to UTC now.
"""
return sql.Column(
sql.DateTime().with_variant(mysql.DATETIME(fsp=3), "mysql",),
nullable=nullable,
default=default,
)
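# Illustrative sketch only: typical use inside a hypothetical mapped class,
# giving a millisecond-precision timestamp column that defaults to UTC now:
#
#   date_added = ColumnFactory.MillisecondDatetime()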
def ResilientAddManyAndCommit(db: Database, mapped: typing.Iterable[Base]):
"""Attempt to commit all mapped objects and return those that fail.
This method creates a session and commits the given mapped objects.
In case of error, this method will recurse up to O(log(n)) times, committing
as many objects that can be as possible.
Args:
db: The database to add the objects to.
mapped: A sequence of objects to commit.
Returns:
Any items in `mapped` which could not be committed, if any. Relative order
of items is preserved.
"""
failures = []
if not mapped:
return failures
mapped = list(mapped)
try:
with db.Session(commit=True) as session:
session.add_all(mapped)
except sql.exc.SQLAlchemyError as e:
logging.Log(
logging.GetCallingModuleName(),
1,
"Caught error while committing %d mapped objects: %s",
len(mapped),
e,
)
# Divide and conquer. If we're committing only a single object, then a
# failure to commit it means that we can do nothing other than return it.
# Else, divide the mapped objects in half and attempt to commit as many of
# them as possible.
if len(mapped) == 1:
return mapped
else:
mid = int(len(mapped) / 2)
left = mapped[:mid]
right = mapped[mid:]
failures += ResilientAddManyAndCommit(db, left)
failures += ResilientAddManyAndCommit(db, right)
return failures
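# Illustrative sketch only: commit a batch of mapped objects and report how
# many could not be written, assuming an existing Database instance.
def _ExampleResilientCommit(db: Database, mapped_objects) -> int:
  """Return the number of objects that failed to commit."""
  failures = ResilientAddManyAndCommit(db, mapped_objects)
  return len(failures)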
def QueryToString(query) -> str:
"""Compile the query to inline literals in place of '?' placeholders.
See: https://stackoverflow.com/a/23835766
"""
return str(query.statement.compile(compile_kwargs={"literal_binds": True}))
class BufferedDatabaseWriter(threading.Thread):
"""A buffered writer for adding objects to a database.
Use this class for cases when you are producing lots of mapped objects that
you would like to commit to a database, but don't require them to be committed
immediately. By buffering objects and committing them in batches, this class
minimises the number of SQL statements that are executed, and is faster than
creating and committing a session for every object.
This object spawns a separate thread for asynchronously performing database
writes. Use AddOne() and AddMany() methods to add objects to the write buffer.
Note that because this is a multithreaded implementation, in-memory SQLite
databases are not supported.
The user is responsible for calling Close() to flush the contents of the
buffer and terminate the thread. Alternatively, use this class as a context
manager to automatically flush the buffer and terminate the thread:
with BufferedDatabaseWriter(db, max_buffer_length=128) as writer:
for chunk in chunks_to_process:
objs = ProcessChunk(chunk)
writer.AddMany(objs)
"""
def __init__(
self,
db: Database,
max_buffer_size: Optional[int] = None,
max_buffer_length: Optional[int] = None,
max_seconds_since_flush: Optional[float] = None,
log_level: int = 2,
ctx: progress.ProgressContext = progress.NullContext,
):
"""Constructor.
Args:
db: The database to write to.
max_buffer_size: The maximum size of the buffer before flushing, in bytes.
The buffer size is the sum of the elements in the write buffer. The size
of elements is determined using sys.getsizeof(), and has all the caveats
of this method.
max_buffer_length: The maximum number of items in the write buffer before
flushing.
max_seconds_since_flush: The maximum number of elapsed seconds between
flushes.
ctx: A progress context used for logging and profiling.
log_level: The logging level for logging output.
"""
super(BufferedDatabaseWriter, self).__init__()
self.db = db
self.ctx = ctx
self.log_level = log_level
self.max_seconds_since_flush = max_seconds_since_flush
self.max_buffer_size = max_buffer_size
self.max_buffer_length = max_buffer_length
# Counters.
self.flush_count = 0
self.error_count = 0
self._buffer = []
self.buffer_size = 0
self._last_flush = time.time()
# Limit the size of the queue so that calls to AddOne() or AddMany() will
# block if the calling code is too far ahead of the writer.
queue_size = self.max_buffer_length * 2 if self.max_buffer_length else 1000
self._queue = queue.Queue(maxsize=queue_size)
self.start()
def __enter__(self) -> "Buff":
"""Enter a scoped writer context closes at the end."""
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Exit a scoped writer context closes at the end."""
del exc_type
del exc_val
del exc_tb
self.Close()
def AddOne(self, mapped, size: Optional[int] = None) -> None:
"""Add a mapped object.
Args:
mapped: The mapped object to write to the database.
size: The object sizes to use to update the total buffer size. If not
provided, sys.getsizeof() is used to determine the size.
"""
size = size or sys.getsizeof(mapped)
self._queue.put((mapped, size))
def AddMany(self, mappeds, sizes: Optional[List[int]] = None) -> None:
"""Add many mapped objects.
Args:
mappeds: The mapped objects to write to the database.
sizes: A list of mapped object sizes to use to calculate the buffer size.
If not provided, sys.getsizeof() is used to determine the size.
"""
sizes = sizes or [sys.getsizeof(item) for item in mappeds]
for mapped, size in zip(mappeds, sizes):
self._queue.put((mapped, size))
def AddLambdaOp(self, callback: Callable[[Database.SessionType], None]):
self._queue.put(BufferedDatabaseWriter.LambdaOp(callback))
def Flush(self) -> None:
"""Flush the buffer.
This method blocks until the flush has completed.
In normal use, you can rely on the automated flushing mechanisms to flush
the write buffer, rather than calling this by hand.
"""
self._queue.put(BufferedDatabaseWriter.FlushMarker())
self._queue.join()
def Close(self):
"""Close the writer thread.
This method blocks until the buffer has been flushed and the thread
terminates.
"""
if not self.is_alive():
raise TypeError("Close() called on dead BufferedDatabaseWriter")
self._queue.put(BufferedDatabaseWriter.CloseMarker())
self._queue.join()
self.join()
@property
def buffer_length(self) -> int:
"""Get the current length of the buffer, in range [0, max_buffer_length]."""
return len(self._buffer)
@property
def seconds_since_last_flush(self) -> float:
"""Get the number of seconds since the buffer was last flushed."""
return time.time() - self._last_flush
##############################################################################
# Private methods.
##############################################################################
class CloseMarker(object):
"""An object to append to _queue to close the thread."""
pass
class FlushMarker(object):
"""An object to append to _queue to flush the buffer."""
pass
class LambdaOp(object):
def __init__(self, callback):
self.callback = callback
def __call__(self, session: Database.SessionType):
self.callback(session)
def run(self):
"""The thread loop."""
while True:
# Block until there is something on the queue. Use max_seconds_since_flush
# as a timeout to ensure that flushes still occur when the writer is not
# being used.
try:
item = self._queue.get(timeout=self.max_seconds_since_flush)
except queue.Empty:
self._Flush()
continue
if isinstance(item, BufferedDatabaseWriter.CloseMarker):
# End of queue. Break out of the loop.
break
elif isinstance(item, BufferedDatabaseWriter.FlushMarker):
# Force a flush.
self._Flush()
elif isinstance(item, BufferedDatabaseWriter.LambdaOp):
# Buffer the lambda op; it is executed at the next flush.
self._buffer.append(item)
self._MaybeFlush()
else:
# Add the object to the buffer.
mapped, size = item
self._buffer.append(mapped)
self.buffer_size += size
self._MaybeFlush()
# Register that the item has been processed. This is used by join() to
# signal to stop blocking.
self._queue.task_done()
# Flush the remainder of the buffer, then register that the end-of-queue
# marker has been processed.
self._Flush()
self._queue.task_done()
def _MaybeFlush(self) -> None:
if (
(self.max_buffer_size and self.buffer_size >= self.max_buffer_size)
or (
self.max_buffer_length and self.buffer_length >= self.max_buffer_length
)
or (
self.max_seconds_since_flush
and self.seconds_since_last_flush >= self.max_seconds_since_flush
)
):
self._Flush()
def _AddMapped(self, mapped) -> None:
"""Add and commit a list of mapped objects."""
if not mapped:
return
failures = ResilientAddManyAndCommit(self.db, mapped)
if failures:
self.ctx.Error("Logger failed to commit %d objects", len(failures))
self.error_count += len(failures)
def _Flush(self):
"""Flush the buffer."""
if not self._buffer:
return
with self.ctx.Profile(
self.log_level,
f"Committed {self.buffer_length} rows "
f"({humanize.BinaryPrefix(self.buffer_size, 'B')}) to {self.db.url}",
), self.db.Session() as session:
# Iterate through the buffer and handle any lambda ops.
start_i, end_i = 0, 0
for end_i, item in enumerate(self._buffer):
if isinstance(item, BufferedDatabaseWriter.LambdaOp):
# If we have a lambda op, we flush the contents of the current buffer,
# then execute the op and continue.
self._AddMapped(self._buffer[start_i:end_i])
self._buffer[end_i](session)
session.commit()
start_i = end_i + 1
# Add any remaining mapped objects from the buffer.
self._AddMapped(self._buffer[start_i:])
self._buffer = []
self._last_flush = time.time()
self.buffer_size = 0
self.flush_count += 1
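# Illustrative sketch only: wiring Database and BufferedDatabaseWriter
# together for a hypothetical mapped class `telemetry_class` and an iterable
# of row dicts. Uses only the constructor arguments documented above.
def _ExampleBufferedWrite(declarative_base, telemetry_class, rows) -> None:
  db = Database("sqlite:////tmp/example.db", declarative_base)
  with BufferedDatabaseWriter(db, max_buffer_length=128) as writer:
    for row in rows:
      writer.AddOne(telemetry_class(**row))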
|
py | 1a54f14c516bc8780b3a8347237c2378deee66ec | """
In this example we modify the mesh of a shape
by moving the points along the normals to the surface
and along the radius of a sphere centered at the center of mass.
At each step we redefine the actor so that the normals are
recalculated for the underlying polydata.
"""
from __future__ import division, print_function
from vtkplotter import *
settings.computeNormals = True # on object creation by default
vp = Plotter(axes=0, verbose=0, bg="w")
s = vp.load(datadir+"290.vtk", c="red")
c = s.centerOfMass()
vp += [Point(c), Text(__doc__, c="k")]
Niter = 4
for t in range(Niter):
print("iteration", t)
coords = s.coordinates()
normals = s.normals()
aves = s.averageSize() * 1.5
for i in range(s.N()):
n = normals[i]
p = coords[i]
q = versor(p - c) * aves + c # versor = unit vector
dp = mag(q - p)
alongn = n * dp
alongr = q - p # bias normal
newp = p + (alongn + alongr) / 2 / Niter
s.setPoint(i, newp)
# refresh actor, so polydata normals are recalculated
s = s.clone()
vp += s.alpha(0.1).color("gold").wireframe(True) #add into Plotter
vp.show()
|
py | 1a54f184efc0d035924403a02aabbd4426e2a21f | import typing
from datetime import datetime
from ParadoxTrading.Indicator.Bar.BarIndicatorAbstract import BarIndicatorAbstract
from ParadoxTrading.Utils import DataStruct
class OpenBar(BarIndicatorAbstract):
def __init__(
self, _use_key: str, _idx_key: str = 'time', _ret_key: str = 'open'
):
super().__init__()
self.use_key = _use_key
self.idx_key = _idx_key
self.ret_key = _ret_key
self.data = DataStruct(
[self.idx_key, self.ret_key],
self.idx_key
)
def _addOne(
self, _data_struct: DataStruct,
_idx: typing.Union[str, datetime] = None
):
self.data.addDict({
self.idx_key: _idx,
self.ret_key: _data_struct[self.use_key][0]
})
|
py | 1a54f208975ac665f6da041dc787bcd0b6a75676 | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
import netaddr
from webob import exc
from nova.api.openstack.compute.contrib import hypervisors as hypervisors_v2
from nova.api.openstack.compute.plugins.v3 import hypervisors \
as hypervisors_v21
from nova.api.openstack import extensions
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
TEST_HYPERS = [
dict(id=1,
service_id=1,
host="compute1",
vcpus=4,
memory_mb=10 * 1024,
local_gb=250,
vcpus_used=2,
memory_mb_used=5 * 1024,
local_gb_used=125,
hypervisor_type="xen",
hypervisor_version=3,
hypervisor_hostname="hyper1",
free_ram_mb=5 * 1024,
free_disk_gb=125,
current_workload=2,
running_vms=2,
cpu_info='cpu_info',
disk_available_least=100,
host_ip=netaddr.IPAddress('1.1.1.1')),
dict(id=2,
service_id=2,
host="compute2",
vcpus=4,
memory_mb=10 * 1024,
local_gb=250,
vcpus_used=2,
memory_mb_used=5 * 1024,
local_gb_used=125,
hypervisor_type="xen",
hypervisor_version=3,
hypervisor_hostname="hyper2",
free_ram_mb=5 * 1024,
free_disk_gb=125,
current_workload=2,
running_vms=2,
cpu_info='cpu_info',
disk_available_least=100,
host_ip=netaddr.IPAddress('2.2.2.2'))]
TEST_SERVICES = [
objects.Service(id=1,
host="compute1",
binary="nova-compute",
topic="compute_topic",
report_count=5,
disabled=False,
disabled_reason=None,
availability_zone="nova"),
objects.Service(id=2,
host="compute2",
binary="nova-compute",
topic="compute_topic",
report_count=5,
disabled=False,
disabled_reason=None,
availability_zone="nova"),
]
TEST_HYPERS_OBJ = [objects.ComputeNode(**hyper_dct)
for hyper_dct in TEST_HYPERS]
TEST_HYPERS[0].update({'service': TEST_SERVICES[0]})
TEST_HYPERS[1].update({'service': TEST_SERVICES[1]})
TEST_SERVERS = [dict(name="inst1", uuid="uuid1", host="compute1"),
dict(name="inst2", uuid="uuid2", host="compute2"),
dict(name="inst3", uuid="uuid3", host="compute1"),
dict(name="inst4", uuid="uuid4", host="compute2")]
def fake_compute_node_get_all(context):
return TEST_HYPERS_OBJ
def fake_compute_node_search_by_hypervisor(context, hypervisor_re):
return TEST_HYPERS_OBJ
def fake_compute_node_get(context, compute_id):
for hyper in TEST_HYPERS_OBJ:
if hyper.id == int(compute_id):
return hyper
raise exception.ComputeHostNotFound(host=compute_id)
@classmethod
def fake_service_get_by_host_and_topic(cls, context, host, topic):
for service in TEST_SERVICES:
if service.host == host:
return service
def fake_compute_node_statistics(context):
result = dict(
count=0,
vcpus=0,
memory_mb=0,
local_gb=0,
vcpus_used=0,
memory_mb_used=0,
local_gb_used=0,
free_ram_mb=0,
free_disk_gb=0,
current_workload=0,
running_vms=0,
disk_available_least=0,
)
for hyper in TEST_HYPERS_OBJ:
for key in result:
if key == 'count':
result[key] += 1
else:
result[key] += hyper[key]
return result
def fake_instance_get_all_by_host(context, host):
results = []
for inst in TEST_SERVERS:
if inst['host'] == host:
results.append(inst)
return results
class HypervisorsTestV21(test.NoDBTestCase):
DETAIL_HYPERS_DICTS = copy.deepcopy(TEST_HYPERS)
del DETAIL_HYPERS_DICTS[0]['service_id']
del DETAIL_HYPERS_DICTS[1]['service_id']
del DETAIL_HYPERS_DICTS[0]['host']
del DETAIL_HYPERS_DICTS[1]['host']
DETAIL_HYPERS_DICTS[0].update({'state': 'up',
'status': 'enabled',
'service': dict(id=1, host='compute1',
disabled_reason=None)})
DETAIL_HYPERS_DICTS[1].update({'state': 'up',
'status': 'enabled',
'service': dict(id=2, host='compute2',
disabled_reason=None)})
INDEX_HYPER_DICTS = [
dict(id=1, hypervisor_hostname="hyper1",
state='up', status='enabled'),
dict(id=2, hypervisor_hostname="hyper2",
state='up', status='enabled')]
NO_SERVER_HYPER_DICTS = copy.deepcopy(INDEX_HYPER_DICTS)
NO_SERVER_HYPER_DICTS[0].update({'servers': []})
NO_SERVER_HYPER_DICTS[1].update({'servers': []})
def _get_request(self, use_admin_context):
return fakes.HTTPRequest.blank('', use_admin_context=use_admin_context)
def _set_up_controller(self):
self.controller = hypervisors_v21.HypervisorsController()
self.controller.servicegroup_api.service_is_up = mock.MagicMock(
return_value=True)
def setUp(self):
super(HypervisorsTestV21, self).setUp()
self._set_up_controller()
self.stubs.Set(self.controller.host_api, 'compute_node_get_all',
fake_compute_node_get_all)
self.stubs.Set(objects.Service, 'get_by_host_and_topic',
fake_service_get_by_host_and_topic)
self.stubs.Set(self.controller.host_api,
'compute_node_search_by_hypervisor',
fake_compute_node_search_by_hypervisor)
self.stubs.Set(self.controller.host_api, 'compute_node_get',
fake_compute_node_get)
self.stubs.Set(db, 'compute_node_statistics',
fake_compute_node_statistics)
self.stubs.Set(db, 'instance_get_all_by_host',
fake_instance_get_all_by_host)
def test_view_hypervisor_nodetail_noservers(self):
result = self.controller._view_hypervisor(
TEST_HYPERS_OBJ[0], TEST_SERVICES[0], False)
self.assertEqual(result, self.INDEX_HYPER_DICTS[0])
def test_view_hypervisor_detail_noservers(self):
result = self.controller._view_hypervisor(
TEST_HYPERS_OBJ[0], TEST_SERVICES[0], True)
self.assertEqual(result, self.DETAIL_HYPERS_DICTS[0])
def test_view_hypervisor_servers(self):
result = self.controller._view_hypervisor(TEST_HYPERS_OBJ[0],
TEST_SERVICES[0],
False, TEST_SERVERS)
expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS[0])
expected_dict.update({'servers': [
dict(name="inst1", uuid="uuid1"),
dict(name="inst2", uuid="uuid2"),
dict(name="inst3", uuid="uuid3"),
dict(name="inst4", uuid="uuid4")]})
self.assertEqual(result, expected_dict)
def test_index(self):
req = self._get_request(True)
result = self.controller.index(req)
self.assertEqual(result, dict(hypervisors=self.INDEX_HYPER_DICTS))
def test_index_non_admin(self):
req = self._get_request(False)
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.index, req)
def test_detail(self):
req = self._get_request(True)
result = self.controller.detail(req)
self.assertEqual(result, dict(hypervisors=self.DETAIL_HYPERS_DICTS))
def test_detail_non_admin(self):
req = self._get_request(False)
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.detail, req)
def test_show_noid(self):
req = self._get_request(True)
self.assertRaises(exc.HTTPNotFound, self.controller.show, req, '3')
def test_show_non_integer_id(self):
req = self._get_request(True)
self.assertRaises(exc.HTTPNotFound, self.controller.show, req, 'abc')
def test_show_withid(self):
req = self._get_request(True)
result = self.controller.show(req, '1')
self.assertEqual(result, dict(hypervisor=self.DETAIL_HYPERS_DICTS[0]))
def test_show_non_admin(self):
req = self._get_request(False)
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.show, req, '1')
def test_uptime_noid(self):
req = self._get_request(True)
self.assertRaises(exc.HTTPNotFound, self.controller.uptime, req, '3')
def test_uptime_notimplemented(self):
def fake_get_host_uptime(context, hyp):
raise exc.HTTPNotImplemented()
self.stubs.Set(self.controller.host_api, 'get_host_uptime',
fake_get_host_uptime)
req = self._get_request(True)
self.assertRaises(exc.HTTPNotImplemented,
self.controller.uptime, req, '1')
def test_uptime_implemented(self):
def fake_get_host_uptime(context, hyp):
return "fake uptime"
self.stubs.Set(self.controller.host_api, 'get_host_uptime',
fake_get_host_uptime)
req = self._get_request(True)
result = self.controller.uptime(req, '1')
expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS[0])
expected_dict.update({'uptime': "fake uptime"})
self.assertEqual(result, dict(hypervisor=expected_dict))
def test_uptime_non_integer_id(self):
req = self._get_request(True)
self.assertRaises(exc.HTTPNotFound, self.controller.uptime, req, 'abc')
def test_uptime_non_admin(self):
req = self._get_request(False)
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.uptime, req, '1')
def test_search(self):
req = self._get_request(True)
result = self.controller.search(req, 'hyper')
self.assertEqual(result, dict(hypervisors=self.INDEX_HYPER_DICTS))
def test_search_non_admin(self):
req = self._get_request(False)
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.search, req, '1')
def test_search_non_exist(self):
def fake_compute_node_search_by_hypervisor_return_empty(context,
hypervisor_re):
return []
self.stubs.Set(self.controller.host_api,
'compute_node_search_by_hypervisor',
fake_compute_node_search_by_hypervisor_return_empty)
req = self._get_request(True)
self.assertRaises(exc.HTTPNotFound, self.controller.search, req, 'a')
def test_servers(self):
req = self._get_request(True)
result = self.controller.servers(req, 'hyper')
expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS)
expected_dict[0].update({'servers': [
dict(name="inst1", uuid="uuid1"),
dict(name="inst3", uuid="uuid3")]})
expected_dict[1].update({'servers': [
dict(name="inst2", uuid="uuid2"),
dict(name="inst4", uuid="uuid4")]})
self.assertEqual(result, dict(hypervisors=expected_dict))
def test_servers_non_id(self):
def fake_compute_node_search_by_hypervisor_return_empty(context,
hypervisor_re):
return []
self.stubs.Set(self.controller.host_api,
'compute_node_search_by_hypervisor',
fake_compute_node_search_by_hypervisor_return_empty)
req = self._get_request(True)
self.assertRaises(exc.HTTPNotFound,
self.controller.servers,
req, '115')
def test_servers_non_admin(self):
req = self._get_request(False)
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.servers, req, '1')
def test_servers_with_non_integer_hypervisor_id(self):
def fake_compute_node_search_by_hypervisor_return_empty(context,
hypervisor_re):
return []
self.stubs.Set(self.controller.host_api,
'compute_node_search_by_hypervisor',
fake_compute_node_search_by_hypervisor_return_empty)
req = self._get_request(True)
self.assertRaises(exc.HTTPNotFound,
self.controller.servers, req, 'abc')
def test_servers_with_no_server(self):
def fake_instance_get_all_by_host_return_empty(context, hypervisor_re):
return []
self.stubs.Set(db, 'instance_get_all_by_host',
fake_instance_get_all_by_host_return_empty)
req = self._get_request(True)
result = self.controller.servers(req, '1')
self.assertEqual(result, dict(hypervisors=self.NO_SERVER_HYPER_DICTS))
def test_statistics(self):
req = self._get_request(True)
result = self.controller.statistics(req)
self.assertEqual(result, dict(hypervisor_statistics=dict(
count=2,
vcpus=8,
memory_mb=20 * 1024,
local_gb=500,
vcpus_used=4,
memory_mb_used=10 * 1024,
local_gb_used=250,
free_ram_mb=10 * 1024,
free_disk_gb=250,
current_workload=4,
running_vms=4,
disk_available_least=200)))
def test_statistics_non_admin(self):
req = self._get_request(False)
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.statistics, req)
class HypervisorsTestV2(HypervisorsTestV21):
DETAIL_HYPERS_DICTS = copy.deepcopy(
HypervisorsTestV21.DETAIL_HYPERS_DICTS)
del DETAIL_HYPERS_DICTS[0]['state']
del DETAIL_HYPERS_DICTS[1]['state']
del DETAIL_HYPERS_DICTS[0]['status']
del DETAIL_HYPERS_DICTS[1]['status']
del DETAIL_HYPERS_DICTS[0]['service']['disabled_reason']
del DETAIL_HYPERS_DICTS[1]['service']['disabled_reason']
del DETAIL_HYPERS_DICTS[0]['host_ip']
del DETAIL_HYPERS_DICTS[1]['host_ip']
INDEX_HYPER_DICTS = copy.deepcopy(HypervisorsTestV21.INDEX_HYPER_DICTS)
del INDEX_HYPER_DICTS[0]['state']
del INDEX_HYPER_DICTS[1]['state']
del INDEX_HYPER_DICTS[0]['status']
del INDEX_HYPER_DICTS[1]['status']
NO_SERVER_HYPER_DICTS = copy.deepcopy(
HypervisorsTestV21.NO_SERVER_HYPER_DICTS)
del NO_SERVER_HYPER_DICTS[0]['state']
del NO_SERVER_HYPER_DICTS[1]['state']
del NO_SERVER_HYPER_DICTS[0]['status']
del NO_SERVER_HYPER_DICTS[1]['status']
del NO_SERVER_HYPER_DICTS[0]['servers']
del NO_SERVER_HYPER_DICTS[1]['servers']
def _set_up_controller(self):
self.context = context.get_admin_context()
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
self.controller = hypervisors_v2.HypervisorsController(self.ext_mgr)
|
py | 1a54f242106581fb376bcf0856b6e36fa1843d63 | import settings as S
from common import connect_url, printable
from scrapers.i3investor.insider.format_insiders import format_table_entitlement, format_dividend
from utils.dateutils import change2KlseDateFmt, getToday
I3_ENTITLEMENT_URL = S.I3_KLSE_URL + '/entitlement/'
I3_DIVIDEND_URL = I3_ENTITLEMENT_URL + "dividend/latest.jsp"
I3_ENTITLEMENT_OTHERS_URL = I3_ENTITLEMENT_URL + "others/latest.jsp"
def crawl_entitlement(trading_date=getToday("%d-%b-%Y"), formatted_output=False):
url = I3_DIVIDEND_URL
latest_dividends = scrape_entitlement(connect_url(url), url, trading_date, formatted_output)
if formatted_output and len(latest_dividends) > 0:
format_table_entitlement("Latest Dividends", latest_dividends)
url = I3_ENTITLEMENT_OTHERS_URL
latest_others = scrape_entitlement(connect_url(url), url, trading_date, formatted_output)
if formatted_output and len(latest_others) > 0:
format_table_entitlement("Latest Bonus, Share Split & Consolidation", latest_others)
return latest_dividends, latest_others
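# Example (illustrative): fetch today's dividend and other corporate-action
# announcements and pretty-print both tables:
#   dividends, others = crawl_entitlement(formatted_output=True)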
def scrape_entitlement(soup, url, trading_date, formatted_output):
if soup is None:
print ('Insider ERR: no result for <' + url + '>')
return None
table = soup.find('table', {'class': 'nc'})
if table is None:
if S.DBG_ALL:
print ('INFO: No insider data is available for <' + url + '>')
return None
entitlements = {}
others = "others" in url
# Iterate over the table rows; some rows contain no data cells.
for tr in table.findAll('tr'):
td = tr.findAll('td')
if S.DBG_INSIDER:
print("DBG:")
for x in td:
print repr(x)
# u'\u2019' is the last char in DATO' which can't be encoded to ascii
# insider = [x.text.replace(u'\u2019', '').strip().encode("ascii") for x in td]
insider = [printable(x.text.replace(u'\u2019', '').encode("ascii")).strip() for x in td]
if len(insider) >= 7:
if len(insider) == 7:
announce_date, stock, open_price, current_price, dividend, ex_date = \
unpack_dividend_td(*insider)
view = S.I3_KLSE_URL + td[6].find('a').get('href').encode("ascii")
else:
announce_date, stock, subject, open_price, current_price, ratio, ex_date = \
unpack_others_td(*insider)
view = S.I3_KLSE_URL + td[7].find('a').get('href').encode("ascii")
if S.DBG_ALL or S.DBG_INSIDER:
print "view: {}".format(view)
ann_date = change2KlseDateFmt(announce_date, "%d-%b-%Y")
trd_date = change2KlseDateFmt(trading_date, "%d-%b-%Y")
if S.DBG_QR:
print("DBG:dates:{0}:{1}".format(ann_date, trd_date))
if ann_date >= trd_date:
if len(insider) == 7:
entitlements[stock] = format_dividend(formatted_output, others,
announce_date, stock, "", open_price,
current_price, dividend, ex_date, view)
else:
entitlements[stock] = format_dividend(formatted_output, others,
announce_date, stock, subject, open_price,
current_price, ratio, ex_date, view)
else:
break
return entitlements
def unpack_dividend_td(announce_date, stock, open_price, current_price, dividend, ex_date, view):
if S.DBG_INSIDER:
print "DBG:{0},{1},{2},{3},{4},{5}".format(
announce_date, stock, open_price, current_price, dividend, ex_date)
return announce_date, stock, open_price, current_price, dividend, ex_date
def unpack_others_td(announce_date, stock, subject, open_price, current_price, ratio, ex_date, view):
if S.DBG_INSIDER:
print "DBG:{0},{1},{2},{3},{4},{5},{6}".format(
announce_date, stock, subject, open_price, current_price, ratio, ex_date)
return announce_date, stock, subject, open_price, current_price, ratio, ex_date
|
py | 1a54f300b099571708f181b12d8cea49ee8403b0 | # coding: utf-8
# pew in unshortener-venv python ~/wm-dist-tmp/Unshortener/unshortener/unshortener.py
import requests
from datatools.url import *
from urllib.request import urlopen
from systemtools.basics import *
from systemtools.location import *
from systemtools.logger import *
import requests.auth
from datastructuretools.hashmap import *
from hjwebbrowser.httpbrowser import *
from hjwebbrowser.browser import *
from hjwebbrowser.utils import *
from threading import Thread
try:
from systemtools.hayj import *
except: pass
import random
from unshortener import config as unsConfig
class Unshortener():
"""
See the README
"""
def __init__ \
(
self,
logger=None,
verbose=True,
serializableDictParams=\
{
"limit": 10000000,
"name": "unshortenedurls",
"cacheCheckRatio": 0.0,
"mongoIndex": "url",
},
httpBrowserParams=\
{
"maxRetryWithoutProxy": 0,
"maxRetryIfTimeout": 1,
"maxRetryIf407": 1,
},
user=None, password=None, host=None,
useMongodb=None,
hostname=None,
shortenersDomainsFilePath=None,
retryFailedRatio=0.5,
useProxy=True,
randomProxyFunct=None,
timeout=25,
maxRetry=2,
nextTriesTimeoutRatio=0.3,
readOnly=False,
proxy=None,
):
self.useMongodb = useMongodb
if self.useMongodb is None:
self.useMongodb = unsConfig.useMongodb
# We store some params:
self.retryFailedRatio = retryFailedRatio
self.verbose = verbose
self.logger = logger
self.timeout = timeout
self.maxRetries = maxRetry
self.nextTriesTimeoutRatio = nextTriesTimeoutRatio
self.readOnly = readOnly
self.proxy = proxy
# We create the url parser:
self.urlParser = URLParser()
# We get the default randomProxyFunct:
self.useProxy = useProxy
self.randomProxyFunct = randomProxyFunct
if self.randomProxyFunct is None:
try:
self.randomProxyFunct = getRandomProxy
except: pass
if self.randomProxyFunct is None:
self.useProxy = False
# We init the mongo collection through SerializableDict:
self.serializableDictParams = serializableDictParams
if hostname is None: hostname = unsConfig.hostname
if host is None: host = unsConfig.host
if user is None: user = unsConfig.user
if password is None: password = unsConfig.password
if user == "hayj":
try:
(user, password, host) = getMongoAuth(user=user, hostname=hostname)
except: pass
self.serializableDictParams["user"] = user
self.serializableDictParams["password"] = password
self.serializableDictParams["host"] = host
self.serializableDictParams["logger"] = self.logger
self.serializableDictParams["verbose"] = self.verbose
self.serializableDictParams["useMongodb"] = self.useMongodb
self.data = SerializableDict(**self.serializableDictParams)
# We get shorteners domains:
self.shortenersDomainsFilePath = shortenersDomainsFilePath
if self.shortenersDomainsFilePath is None:
self.shortenersDomainsFilePath = getDataDir() + "/Misc/crawling/shorteners.txt"
self.shortenersDomains = None
self.initShortenersDomains()
# We create the http browser:
self.httpBrowserParams = httpBrowserParams
self.httpBrowser = HTTPBrowser(logger=self.logger,
verbose=self.verbose,
**self.httpBrowserParams)
def initShortenersDomains(self):
if self.shortenersDomains is None:
if not isFile(self.shortenersDomainsFilePath):
raise Exception("File " + str(self.shortenersDomainsFilePath) + " not found.")
shorteners = fileToStrList(self.shortenersDomainsFilePath, removeDuplicates=True)
newShorteners = []
for current in shorteners:
current = current.lower()
newShorteners.append(current)
shorteners = newShorteners
self.shortenersDomains = set()
for current in shorteners:
newCurrent = self.urlParser.getDomain(current)
self.shortenersDomains.add(newCurrent)
self.shortenersDomains = list(self.shortenersDomains)
# We filter all by presence of a point:
newShortenersDomains= []
for current in self.shortenersDomains:
if "." in current:
newShortenersDomains.append(current)
self.shortenersDomains = newShortenersDomains
def reduceIrrelevantUrls(self, isRelevantUrlFunct):
"""
If some resolved ("last") urls are not relevant enough to justify keeping
their html content, you can delete that content by calling this method.
You have to give a relevance function in params.
This method can take a long time and will update all rows, so you will lose
the old/new read/write ordering.
"""
for theHash, current in self.data.items():
if isRelevantUrlFunct(current["lastUrl"]):
if dictContains(current, "relevant") and not current["relevant"]:
logError("You previously set this row as irrelevant but now you set it as relevant, so you lost the html data, you can re-set the html data using hjwebbrowser.httpbrowser.HTTPBrowser", self)
logError(reduceDictStr(current), self)
self.data.updateRow(theHash, "relevant", True)
else:
self.data.updateRow(theHash, "html", None)
self.data.updateRow(theHash, "relevant", False)
def getUnshortenersDomains(self):
return self.shortenersDomains
def close(self):
self.data.close()
def isShortened(self, *args, **kwargs):
return self.isShortener(*args, **kwargs)
def isShortener(self, url):
"""
Use this method to test whether a url comes from a url-shortener service
"""
smartDomain = self.urlParser.getDomain(url)
return smartDomain in self.shortenersDomains
def has(self, *args, **kwargs):
return self.hasKey(*args, **kwargs)
def isAlreadyUnshortened(self, *args, **kwargs):
return self.hasKey(*args, **kwargs)
def hasKey(self, url):
"""
This method tests whether a url was already unshortened before
"""
url = self.urlParser.normalize(url)
return self.data.hasKey(url)
def unshort\
(
self,
*args,
**kwargs
):
"""
This method calls request() but returns only the last (unshortened) url
instead of the full result dict
"""
result = self.request(*args, **kwargs)
if result is None:
return None
else:
if dictContains(result, "lastUrl"):
return result["lastUrl"]
else:
return None
def add(self, result, onlyHttpBrowser=True):
# We check readOnly:
if self.readOnly:
logError("The unshortener is set as read only!", self)
return False
# We check None:
if result is None or not isinstance(result, dict):
logError("No data found to add in unshortener!", self)
return False
resultStr = lts(reduceDictStr(result))
# We check keys:
for key in \
[
"lastUrl", "browser",
"lastUrlDomain", "historyCount", "html",
"title", "status",
]:
if key not in result:
logError(key + " is not in:\n" + resultStr, self)
return False
# We check keys not None:
for key in ["url", "domain"]:
if not dictContains(result, key):
logError(key + " is not in:\n" + resultStr, self)
return False
# We check the browser:
if onlyHttpBrowser and result["browser"] != "http":
logError("The browser must be an http browser!", self)
return False
# We delete and add some elements:
if "crawlingElement" in result:
del result["crawlingElement"]
if "relevant" not in result:
result["relevant"] = True
# We check the status:
if result["httpStatus"] == 200 or result["httpStatus"] == 404:
# We add the data:
self.data[result["url"]] = result
return True
else:
logError("Cant't add this data to unshortener because of the http status:\n"\
+ resultStr, self)
return False
return False
def request\
(
self,
url,
force=False,
retriesCount=0,
):
"""
This method requests the given url.
You can read the last (unshortened) url in the "lastUrl" field of the returned dict.
If the request failed, this method returns None.
With force=True the url is requested even if it is not a known shortener.
"""
# We set the timeout:
timeout = self.timeout
if retriesCount >= 1:
timeout = int(self.nextTriesTimeoutRatio * timeout)
# We parse the url:
url = self.urlParser.normalize(url)
smartDomain = self.urlParser.getDomain(url)
# We return None if we don't have to request it:
thisIsAShortener = smartDomain in self.shortenersDomains
if not force and not thisIsAShortener:
return None
# We check if we already have the url:
if self.data.hasKey(url):
# log(url + " was in the Unshortener database!", self)
return self.data.get(url)
# If we read only, we don't request the url:
elif self.readOnly:
# log(url + " is not in the database and the unshortener was set as read only!", self)
return None
# Else we can request it:
else:
# We get a random proxy:
proxy = None
if self.useProxy:
proxy = self.proxy
if proxy is None:
proxy = self.randomProxyFunct()
# We set the proxy and the timeout:
self.httpBrowser.setProxy(proxy)
self.httpBrowser.setTimeout(timeout)
# We request the url:
result = self.httpBrowser.get(url)
# We add some params to the result:
result["url"] = url
# result["isShortener"] = thisIsAShortener
result["relevant"] = True
# And if the request succeded:
# if result["status"] == REQUEST_STATUS.duplicate or \
# result["status"] == REQUEST_STATUS.success or \
# result["status"] == REQUEST_STATUS.error404 or \
# result["status"] == REQUEST_STATUS.timeoutWithContent:
if result["httpStatus"] == 200 or \
result["httpStatus"] == 404:
# We add the row:
self.data[url] = result
# We log it:
log("Unshort succedded: " + url, self)
log(getRequestInfos(result), self)
# And finally we return the result:
return result
# Else we retry:
else:
# We log the error:
log("Unshort failed: " + url, self)
log(getRequestInfos(result), self)
# log(listToStr(reduceDictStr(result, replaceNewLine=True)), self)
# If we can retry:
if retriesCount < self.maxRetries:
# We recursively call the method:
log("We retry to unshort: " + url, self)
return self.request(url,
force=force,
retriesCount=retriesCount+1)
# If we failed, we just return None:
else:
return None
def getRequestInfos(result):
return str(result["proxy"]) + " " + str(result["status"].name) + " (" + str(result["httpStatus"]) + ")"
def test1():
uns = Unshortener(host="localhost")
url = "https://api.ipify.org/?format=json"
# url = "http://httpbin.org/redirect/3"
printLTS(uns.unshort(url, force=True))
def test2():
uns = Unshortener(host="localhost")
printLTS(uns.getUnshortenersDomains())
def test3():
def getShares(crawlOrScrap):
if dictContains(crawlOrScrap, "scrap"):
scrap = crawlOrScrap["scrap"]
else:
scrap = crawlOrScrap
if dictContains(scrap, "tweets"):
tweets = scrap["tweets"]
for tweet in tweets:
if dictContains(tweet, "shares"):
for share in tweet["shares"]:
yield share
uns = Unshortener(host="localhost", useProxy=False)
(user, password, host) = getStudentMongoAuth()
collection = MongoCollection("twitter", "usercrawl",
user=user, password=password, host=host)
i = 0
for current in collection.find():
urls = list(getShares(current))
for url in urls:
url = url["url"]
if getRandomFloat() > 0.8 and (uns.isShortener(url) or getRandomFloat() > 0.95):
print(url)
print("isShortener: " + str(uns.isShortener(url)))
print(uns.unshort(url, force=True))
print()
print()
print()
print()
# input()
i += 1
if i > 100:
exit()
def test4():
urls = \
[
"http://ow.ly/DIFx30hfmsE",
"http://bit.ly/2jBKQoh",
]
uns = Unshortener(host="localhost")
print()
print()
print()
print()
for url in urls:
print(url)
print("isShortener: " + str(uns.isShortener(url)))
print(uns.unshort(url, force=True))
print()
print()
print()
print()
def testAlexis():
uns = Unshortener\
(
shortenersDomainsFilePath="/tmp",
useProxy=False,
randomProxyFunct=None,
proxy=None,
serializableDictParams=\
{
"limit": 10000000,
"useMongodb": False,
"name": "unshortenedurls",
"cacheCheckRatio": 0.0,
"mongoIndex": "url",
"serializeEachNAction": 1,
}
)
print(uns.unshort("https://bit.ly/2Hor6PN"))
if __name__ == '__main__':
# test1()
# test2()
# test3()
# test4()
testAlexis()
|
py | 1a54f37b0977da2cfaa17f58ef59cc395f83a5e0 | import hashlib
import json
from time import time
from uuid import uuid4
from flask import Flask, jsonify, request
import sys
class Blockchain(object):
def __init__(self):
self.chain = []
self.current_transactions = []
self.nodes = set()
self.new_block(previous_hash=1, proof=99)
def new_block(self, proof, previous_hash=None):
"""
Create a new Block in the Blockchain
:param proof: <int> The proof given by the Proof of Work algorithm
:param previous_hash: (Optional) <str> Hash of previous Block
:return: <dict> New Block
"""
block = {
'index': len(self.chain) + 1,
'timestamp': time(),
'transactions': self.current_transactions,
'proof': proof,
'previous_hash': previous_hash or self.hash(self.chain[-1]),
}
# Reset the current list of transactions
self.current_transactions = []
self.chain.append(block)
return block
def new_transaction(self, sender, recipient, amount):
"""
Creates a new transaction to go into the next mined Block
:param sender: <str> Address of the Sender
:param recipient: <str> Address of the Recipient
:param amount: <int> Amount
:return: <int> The index of the Block that will hold this transaction
"""
self.current_transactions.append({
'sender': sender,
'recipient': recipient,
'amount': amount,
})
return self.last_block['index'] + 1
@staticmethod
def hash(block):
"""
Creates a SHA-256 hash of a Block
:param block": <dict> Block
"return": <str>
"""
# We must make sure that the Dictionary is Ordered,
# or we'll have inconsistent hashes
block_string = json.dumps(block, sort_keys=True).encode()
return hashlib.sha256(block_string).hexdigest()
@property
def last_block(self):
return self.chain[-1]
@staticmethod
def valid_proof(last_proof, proof):
"""
Validates the Proof: Does hash(last_proof, proof) contain 6
leading zeroes?
"""
guess = f'{last_proof}{proof}'.encode()
guess_hash = hashlib.sha256(guess).hexdigest()
return guess_hash[:6] == "000000"
def valid_chain(self, chain):
"""
Determine if a given blockchain is valid
:param chain: <list> A blockchain
:return: <bool> True if valid, False if not
"""
last_block = chain[0]
current_index = 1
while current_index < len(chain):
block = chain[current_index]
print(f'{last_block}')
print(f'{block}')
print("\n-------------------\n")
# Check that the hash of the block is correct
if block['previous_hash'] != self.hash(last_block):
return False
# Check that the Proof of Work is correct
if not self.valid_proof(last_block['proof'], block['proof']):
return False
last_block = block
current_index += 1
return True
# Instantiate our Node
app = Flask(__name__)
# Generate a globally unique address for this node
node_identifier = str(uuid4()).replace('-', '')
# Instantiate the Blockchain
blockchain = Blockchain()
@app.route('/mine', methods=['POST']) # changed from GET to POST
def mine():
values = request.get_json()
print("VALUES", values)
# Check that the required fields are in the POST'ed data
required = ['proof']
if not all(k in values for k in required):
return 'Missing Values', 400
# use valid_proof to validate the client's proof
validated = blockchain.valid_proof(blockchain.last_block['proof'], values['proof'])
if validated:
# create transaction for the reward
blockchain.new_transaction(
sender='0',
recipient=node_identifier,
amount=1,
)
# add block to chain
previous_hash = blockchain.hash(blockchain.last_block)
block = blockchain.new_block(values['proof'], previous_hash)
# send a success message
return jsonify(message='New Block Forged'), 200
# else send a message that it's not validated
return jsonify(message='Error, proof not validated or proof has changed'), 200
@app.route('/transactions/new', methods=['POST'])
def new_transaction():
values = request.get_json()
# Check that the required fields are in the POST'ed data
required = ['sender', 'recipient', 'amount']
if not all(k in values for k in required):
return 'Missing Values', 400
# Create a new Transaction
index = blockchain.new_transaction(values['sender'],
values['recipient'],
values['amount'])
response = {'message': f'Transaction will be added to Block {index}'}
return jsonify(response), 201
@app.route('/chain', methods=['GET'])
def full_chain():
response = {
'chain': blockchain.chain,
'length': len(blockchain.chain),
}
return jsonify(response), 200
@app.route('/last_proof', methods=['GET'])
def last_proof():
# get the last index of blockchain.chain
response = {
'proof': blockchain.last_block['proof']
}
return jsonify(response), 200
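# Example (illustrative) miner flow against this API, assuming port 5000:
#   1. GET  /last_proof                  -> {"proof": <last_proof>}
#   2. brute-force p until sha256("<last_proof><p>") starts with "000000"
#   3. POST /mine with JSON {"proof": p} -> reward transaction + new block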
# Note, when demoing, start with this, then change to the below
# if __name__ == '__main__':
# app.run(host='0.0.0.0', port=5000)
if __name__ == '__main__':
if len(sys.argv) > 1:
port = int(sys.argv[1])
else:
port = 5000
app.run(host='0.0.0.0', port=port)
|
py | 1a54f3b45ac335915dfaf8061cc485c47150396b | from Products.CMFCore.utils import getToolByName
from Products.Five.browser import BrowserView
from isaw.bibitems.browser.view import BibItemView
class PublicationView(BibItemView):
"""view class"""
@property
def authors(self):
members = self._get_members(self.context.authors)
return ', '.join(members)
@property
def contributors(self):
members = self._get_members(self.context.contributors)
return ', '.join(members)
@property
def editors(self):
members = self._get_members(self.context.editors)
return ', '.join(members)
def _get_members(self, member_list):
mt = getToolByName(self.context, 'portal_membership')
members = []
for author in member_list:
author = author.strip()
if not author:
continue
info = mt.getMemberInfo(author)
if info:
members.append('<a href="%s">%s</a>' % (info.get('home_page'),
info.get('fullname', author)))
else:
members.append(author)
return members
@property
def images(self):
return self.context.objectValues()
class PublicationImagesView(BrowserView):
""" images overlay """
@property
def images(self):
return self.context.objectValues()
class PublicationListingView(BrowserView):
"""view class"""
batch_size = 0
page = 1
def __init__(self, request, context):
super(PublicationListingView, self).__init__(request, context)
self.page = int(self.request.get('page', 1))
def _query(self, query=None, exclude=None, b_start=None, b_size=None):
if b_size is None:
b_size = self.batch_size
if b_start is None:
b_start = (getattr(self, 'page', 1) - 1) * b_size
if query is None:
query = {'portal_type': 'isaw.policy.publication'}
if exclude is not None:
uuid = getattr(exclude, 'UID')
if callable(uuid):
uuid = uuid()
if uuid:
query['UID'] = {'not': uuid}
if self.context.portal_type == 'Folder':
self.request['b_start'] = b_start
self.request['b_size'] = b_size
query['b_start'] = b_start
query['b_size'] = b_size
items = self.context.getFolderContents(contentFilter=query,
batch=True, b_size=b_size)
elif self.context.portal_type == 'Topic':
if b_start and not self.request.get('b_start'):
self.request['b_start'] = b_start
items = self.context.queryCatalog(self.request, True, b_size,
**query)
elif self.context.portal_type == 'Collection':
items = self.context.results(True, b_start, b_size,
custom_query=query)
else:
items = []
return items
def listings(self, b_start=None, b_size=None):
"""get a page of listings"""
return self._query(b_start=b_start, b_size=b_size)
|
py | 1a54f3ce01118a676c31b5051ce92c585e2870d7 | """
Copyright (c) 2016-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
import asyncio
import unittest
import unittest.mock
from orc8r.protos.common_pb2 import Void
from orc8r.protos.service303_pb2 import ServiceInfo
from magma.common.service_registry import ServiceRegistry
from magma.magmad.service_poller import ServicePoller
class MockFuture(object):
def __init__(self, is_error):
self._is_error = is_error
def exception(self):
if self._is_error:
return self.MockException()
return None
def result(self):
return ServiceInfo()
class MockException(object):
def details(self):
return ''
def code(self):
return 0
class ServicePollerTests(unittest.TestCase):
"""
Tests for the ServicePoller
"""
def setUp(self):
ServiceRegistry.add_service('test1', '0.0.0.0', 0)
ServiceRegistry.add_service('test2', '0.0.0.0', 0)
config = {
'magma_services': ['test1', 'test2'],
'non_service303_services': ['test2']
}
self._loop = asyncio.new_event_loop()
self._service_poller = ServicePoller(self._loop, config)
@unittest.mock.patch('magma.magmad.service_poller.Service303Stub')
@unittest.mock.patch('magma.configuration.service_configs')
def test_poll(self, _service_configs_mock, service303_mock):
"""
Test if the query to Service303 succeeds.
"""
# Mock out GetServiceInfo.future
mock = unittest.mock.Mock()
mock.GetServiceInfo.future.side_effect = [unittest.mock.Mock()]
service303_mock.side_effect = [mock]
self._service_poller.start()
mock.GetServiceInfo.future.assert_called_once_with(
Void(), self._service_poller.GET_STATUS_TIMEOUT)
# pylint: disable=protected-access
self._service_poller._get_service_info_done('test1', MockFuture(False))
@unittest.mock.patch('magma.magmad.service_poller.Service303Stub')
@unittest.mock.patch('magma.configuration.service_configs')
def test_poll_exception(self, _service_configs_mock, service303_mock):
"""
Test if the query to Service303 fails and handled gracefully.
"""
# Mock out GetServiceInfo.future
mock = unittest.mock.Mock()
mock.GetServiceInfo.future.side_effect = [unittest.mock.Mock()]
service303_mock.side_effect = [mock]
self._service_poller.start()
mock.GetServiceInfo.future.assert_called_once_with(
Void(), self._service_poller.GET_STATUS_TIMEOUT)
# pylint: disable=protected-access
self._service_poller._get_service_info_done('test1', MockFuture(True))
if __name__ == "__main__":
unittest.main()
|
py | 1a54f402f8831ff2c48e7eefb4ca2ca51490b332 | import posixpath
from weakref import ref as weakref
from lektor.environment import PRIMARY_ALT
from lektor.utils import is_path_child_of
from lektor.utils import join_path
class SourceObject(object):
source_classification = "generic"
    # For purposes of public usage, pretend this class comes from the
    # ``db`` module rather than from its actual implementation module.
__module__ = "db"
def __init__(self, pad):
self._pad = weakref(pad)
@property
def alt(self):
"""Returns the effective alt of this source object (unresolved)."""
return PRIMARY_ALT
@property
def source_filename(self):
"""The primary source filename of this source object."""
is_hidden = False
is_discoverable = True
@property
def is_visible(self):
"""The negated version of :attr:`is_hidden`."""
return not self.is_hidden
@property
def is_undiscoverable(self):
"""The negated version of :attr:`is_discoverable`."""
return not self.is_discoverable
def iter_source_filenames(self):
fn = self.source_filename
if fn is not None:
yield self.source_filename
def iter_virtual_sources(self):
return []
@property
def url_path(self):
"""The URL path of this source object if available."""
raise NotImplementedError()
@property
def path(self):
"""Return the full path to the source object. Not every source
object actually has a path but source objects without paths need
to subclass `VirtualSourceObject`.
"""
return None
@property
def pad(self):
"""The associated pad of this source object."""
rv = self._pad()
if rv is not None:
return rv
raise AttributeError("The pad went away")
def resolve_url_path(self, url_path):
"""Given a URL path as list this resolves the most appropriate
direct child and returns the list of remaining items. If no
match can be found, the result is `None`.
"""
if not url_path:
return self
return None
def is_child_of(self, path, strict=False):
"""Checks if the current object is a child of the passed object
or path.
"""
if isinstance(path, SourceObject):
path = path.path
if self.path is None or path is None:
return False
return is_path_child_of(self.path, path, strict=strict)
def url_to(self, path, alt=None, absolute=None, external=None, base_url=None):
"""Calculates the URL from the current source object to the given
other source object. Alternatively a path can also be provided
instead of a source object. If the path starts with a leading
bang (``!``) then no resolving is performed.
If a `base_url` is provided then it's used instead of the URL of
the record itself.
"""
if alt is None:
alt = getattr(path, "alt", None)
if alt is None:
alt = self.alt
resolve = True
path = getattr(path, "url_path", path)
if path[:1] == "!":
resolve = False
path = path[1:]
if resolve:
if not path.startswith("/"):
if self.path is None:
raise RuntimeError(
"Cannot use relative URL generation "
"from sources that do not have a "
"path. The source object without "
"a path is %r" % self
)
path = join_path(self.path, path)
source = self.pad.get(path, alt=alt)
if source is not None:
path = source.url_path
else:
path = posixpath.join(self.url_path, path)
if absolute:
return path
if base_url is None:
base_url = self.url_path
return self.pad.make_url(path, base_url, absolute, external)
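    # A minimal usage sketch (hypothetical pad and paths, kept as a comment so
    # this module's behaviour is unchanged):
    #
    #   page = pad.get("/blog/first-post")        # assumed project path
    #   page.url_to("/about")                      # resolved through the pad
    #   page.url_to("!../raw/file.txt")            # leading "!" skips resolving
    #   page.url_to("/about", absolute=True)       # returns the absolute URL path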
class VirtualSourceObject(SourceObject):
"""Virtual source objects live below a parent record but do not
originate from the source tree with a separate file.
"""
def __init__(self, record):
SourceObject.__init__(self, record.pad)
self.record = record
@property
def path(self):
raise NotImplementedError()
def get_mtime(self, path_cache):
return None
def get_checksum(self, path_cache):
return None
@property
def parent(self):
return self.record
@property
def alt(self):
return self.record.alt
@property
def source_filename(self):
return self.record.source_filename
def iter_virtual_sources(self):
yield self
|
py | 1a54f49df479261409a496e2cc98574504f17177 | # -*- coding: utf-8 -*-
name = u'simplejson'
version = '3.15.0'
description = \
"""
Simple, fast, extensible JSON encoder/decoder for Python
"""
variants = []
requires = ['boost' ]
def commands():
import os
libs_path = os.path.join(getenv("PYTHON_LIBS_PATH"), "simplejson", "%s" % version)
env.PYTHONPATH.append(os.path.join(libs_path, "lib").replace("/", os.sep))
|
py | 1a54f4a9c66d480eddab45123823863ac363e5c8 | import requests
import petl
from parsons.etl.table import Table
from parsons.utilities import check_env
URI = 'https://api.targetsmart.com/'
class TargetSmartConnector(object):
def __init__(self, api_key):
self.uri = URI
self.api_key = check_env.check('TS_API_KEY', api_key)
self.headers = {'x-api-key': self.api_key}
def request(self, url, args=None, raw=False):
r = requests.get(url, headers=self.headers, params=args)
# This allows me to deal with data that needs to be munged.
if raw:
return r.json()
return Table(r.json()['output'])
class Person(object):
def __init__(self):
return None
def data_enhance(self, search_id, search_id_type='voterbase', state=None):
"""
        Searches for a record based on an ID, phone number, or email address.
`Args:`
search_id: str
The primary key or email address or phone number
search_id_type: str
One of ``voterbase``, ``exacttrack``, ``abilitec_consumer_link``, ``phone``,
``email``, ``smartvan``, ``votebuilder``, ``voter``, ``household``.
state: str
Two character state code. Required if ``search_id_type`` of ``smartvan``,
``votebuilder`` or ``voter``.
`Returns`
Parsons Table
See :ref:`parsons-table` for output options.
"""
if search_id_type in ['smartvan', 'votebuilder', 'voter'] and state is None:
raise KeyError("Search ID type '{}' requires state kwarg".format(search_id_type))
if search_id_type not in ('voterbase', 'exacttrack', 'abilitec_consumer_link', 'phone',
'email', 'smartvan', 'votebuilder', 'voter', 'household'):
raise ValueError('Search_id_type is not valid')
url = self.connection.uri + 'person/data-enhance'
args = {'search_id': search_id,
'search_id_type': search_id_type,
'state': state
}
return self.connection.request(url, args=args)
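    # A hedged usage sketch (assumes a valid TS_API_KEY and made-up search ids;
    # kept as a comment so importing this module stays side-effect free):
    #
    #   ts = TargetSmartAPI(api_key="...")                         # or set TS_API_KEY
    #   tbl = ts.data_enhance("OH-123456", search_id_type="voterbase")
    #   tbl = ts.data_enhance("A123", search_id_type="smartvan", state="OH")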
def radius_search(self, first_name, last_name, middle_name=None, name_suffix=None,
latitude=None, longitude=None, address=None, radius_size=10,
radius_unit='miles', max_results=10, gender='a', age_min=None, age_max=None,
composite_score_min=1, composite_score_max=100, last_name_exact=True,
last_name_is_prefix=False, last_name_prefix_length=10):
"""
Search for a person based on a specified radius
`Args`:
first_name: str
One or more alpha characters
last_name: str
One or more alpha characters
middle_name: str
One or more alpha characters
name_suffix: str
One or more alpha characters
latitude: float
Floating point number (e.g. 33.738987255507)
longitude: float
Floating point number (e.g. -116.40833849559)
address: str
Any geocode-able address
address_type: str
``reg`` for registration (default) or ``tsmart`` for TargetSmart
radius_unit: str
One of ``meters``, ``feet``, ``miles`` (default), or ``kilometers``.
max_results: int
Default of ``10``. An integer in range [0 - 100]
gender: str
Default of ``a``. One of ``m``, ``f``, ``u``, ``a``.
age_min: int
A positive integer
age_max: int
A positive integer
composite_score_min: int
An integer in range [1 - 100]. Filter out results with composite score
less than this value.
composite_score_max: int
An integer in range [1 - 100]. Filter out results with composite score
greater than this value.
last_name_exact: boolean
By default, the full last name is used for finding matches if the length of the
last name is not longer than 10 characters. As an example, “anders” is less likely
to match to “anderson” with this enabled. Disable this option if you are using
either ``last_name_is_prefix`` or ``last_name_prefix_length``.
last_name_is_prefix: boolean
By default, the full last name is used for finding matches. Enable this parameter
if your search last name is truncated. This can be common for some client
applications that for various reasons do not have full last names. Use this
parameter along with ``last_name_prefix_length`` to configure the length of the last
name prefix. This parameter is ignored if ``last_name_exact`` is enabled.
last_name_prefix_length: int
By default, up to the first 10 characters of the search last name are used for
finding relative matches. This value must be between 3 and 10. This parameter is
ignored if last_name_exact is enabled.
`Returns`
Parsons Table
See :ref:`parsons-table` for output options.
"""
if (latitude is None or longitude is None) and address is None:
raise ValueError('Lat/Long or Address required')
        # Convert booleans to strings for the query string; reassigning the
        # loop variable would leave the values sent in the request untouched.
        last_name_exact = str(last_name_exact)
        last_name_is_prefix = str(last_name_is_prefix)
url = self.connection.uri + 'person/radius-search'
args = {'first_name': first_name,
'last_name': last_name,
'middle_name': middle_name,
'name_suffix': name_suffix,
'latitude': latitude,
'longitude': longitude,
'address': address,
'radius_size': radius_size,
'radius_unit': radius_unit,
'max_results': max_results,
'gender': gender,
'age_min': age_min,
'age_max': age_max,
'composite_score_min': composite_score_min,
'composite_score_max': composite_score_max,
'last_name_exact': last_name_exact,
'last_name_is_prefix': last_name_is_prefix,
'last_name_prefix_length': last_name_prefix_length
}
r = self.connection.request(url, args=args, raw=True)
return Table([itm for itm in r['output']]).unpack_dict('data_fields', prepend=False)
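    # Sketch of a radius search (illustrative values only; either a lat/long
    # pair or an address is required):
    #
    #   ts = TargetSmartAPI(api_key="...")
    #   tbl = ts.radius_search("Jane", "Doe",
    #                          address="123 Main St, Columbus, OH",
    #                          radius_size=5, radius_unit="miles")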
def phone(self, table):
"""
        Match based on a list of phone numbers. The table
        can contain up to 500 phone numbers to match.
`Args:`
table: parsons table
See :ref:`parsons-table`. One row per phone number,
up to 500 phone numbers.
`Returns:`
See :ref:`parsons-table` for output options.
"""
url = self.connection.uri + 'person/phone-search'
args = {'phones': list(petl.values(table.table, 0))}
return Table(self.connection.request(url, args=args, raw=True)['result'])
class Service(object):
def __init__(self):
return None
def district(self, search_type='zip', address=None, zip5=None, zip4=None, state=None,
latitude=None, longitude=None):
"""
Return district information based on a geographic point. The method allows you to
search based on the following:
.. list-table::
:widths: 30 30 30
:header-rows: 1
* - Search Type
- Search Type Name
- Required kwarg(s)
* - Zip Code
- ``zip``
- ``zip5``, ``zip4``
* - Address
- ``address``
- ``address``
* - Point
- point
- ``latitude``, ``longitude``
`Args`:
search_type: str
The type of district search to perform. One of ``zip``, ``address``
or ``point``.
address: str
                An unparsed full address
zip5: str
The USPS Zip5 code
zip4: str
The USPS Zip4 code
state: str
The two character state code
latitude: float or str
Valid latitude floating point
            longitude: float or str
Valid longitude floating point
`Returns`:
Parsons Table
See :ref:`parsons-table` for output options.
"""
if search_type == 'zip' and None in [zip5, zip4]:
raise ValueError("Search type 'zip' requires 'zip5' and 'zip4' arguments")
elif search_type == 'point' and None in [latitude, longitude]:
raise ValueError("Search type 'point' requires 'latitude' and 'longitude' arguments")
elif search_type == 'address' and None in [address]:
raise ValueError("Search type 'address' requires 'address' argument")
elif search_type not in ['zip', 'point', 'address']:
raise KeyError("Invalid 'search_type' provided. ")
else:
pass
url = self.connection.uri + 'service/district'
args = {'search_type': search_type,
'address': address,
'zip5': zip5,
'zip4': zip4,
'state': state,
'latitude': latitude,
'longitude': longitude
}
return Table([self.connection.request(url, args=args, raw=True)['match_data']])
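    # Illustrative district lookups for each search type (all values are
    # placeholders, shown only as a comment):
    #
    #   ts = TargetSmartAPI(api_key="...")
    #   ts.district(search_type="zip", zip5="43001", zip4="1234")
    #   ts.district(search_type="address", address="123 Main St, Columbus, OH")
    #   ts.district(search_type="point", latitude=39.96, longitude=-83.00)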
class Voter(object):
def __init__(self, connection):
self.connection = connection
def voter_registration_check(self, first_name=None, last_name=None,
state=None, street_number=None,
street_name=None, city=None, zip_code=None,
age=None, dob=None, phone=None, email=None,
unparsed_full_address=None):
"""
Searches for a registered individual, returns matches.
A search must include the at minimum first name, last name and state.
`Args:`
first_name: str
Required; One or more alpha characters. Trailing wildcard allowed
last_name: str
Required; One or more alpha characters. Trailing wildcard allowed
state: str
Required; Two character state code (e.g. ``NY``)
street_number: str
Optional; One or more alpha characters. Trailing wildcard allowed
street_name: str
Optional; One or more alpha characters. Trailing wildcard allowed
city: str
Optional; The person's home city
zip_code: str
Optional; Numeric characters. Trailing wildcard allowed
            age: int
Optional; One or more integers. Trailing wildcard allowed
            dob: str
Numeric characters in YYYYMMDD format. Trailing wildcard allowed
            phone: str
Integer followed by 0 or more * or integers
email: str
Alphanumeric character followed by 0 or more * or legal characters
(alphanumeric, @, -, .)
unparsed_full_address: str
One or more alphanumeric characters. No wildcards.
`Returns`
Parsons Table
See :ref:`parsons-table` for output options.
"""
url = self.connection.uri + 'voter/voter-registration-check'
if None in [first_name, last_name, state]:
raise ValueError("""Function must include at least first_name,
last_name, and state.""")
args = {'first_name': first_name,
'last_name': last_name,
'state': state,
'street_number': street_number,
'street_name': street_name,
'city': city,
'zip_code': zip_code,
'age': age,
'dob': dob,
'phone': phone,
'email': email,
'unparsed_full_address': unparsed_full_address
}
return self.connection.request(url, args=args, raw=True)
class TargetSmartAPI(Voter, Person, Service):
def __init__(self, api_key=None):
self.connection = TargetSmartConnector(api_key=api_key)
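# End-to-end sketch of the composed client (hypothetical key and person;
# TargetSmartAPI mixes Voter, Person and Service over a single connector):
#
#   ts = TargetSmartAPI(api_key="my-key")          # or set the TS_API_KEY env var
#   matches = ts.voter_registration_check(first_name="Jane",
#                                          last_name="Doe",
#                                          state="OH")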
|
py | 1a54f4c1c384a4e146364acb59e3d0fd75779fc1 | import datetime
import glob
import gzip
import json
import os
import pickle
import random
import time
from hashlib import sha256
import cv2
import numpy as np
from tqdm import tqdm
from pcs.augmentations import PCSDefaultAugmentor
from pcs.utils import (
convert_min_area_rect,
grayscale_to_float,
grayscale_to_uint
)
class CocoExporter:
def __init__(self, output_dir="", dataset_name=""):
self.output_dir = output_dir
self.dataset_name = dataset_name
if not os.path.isdir(self.output_dir):
os.mkdir(self.output_dir)
self.dataset_dir = os.path.join(self.output_dir, self.dataset_name)
if not os.path.isdir(self.dataset_dir):
os.mkdir(self.dataset_dir)
self.number_images = 0
self.number_annotations = 0
date = str(datetime.date.today())
self.coco_dataset = {
"info": {
"description": "Protein crystals in suspension (PCS) dataset for automated crystal detection",
"url": "",
"version": "1.0",
"year": 2021,
"contributor": "Daniel Bischoff, Sebastian Franz",
"date_created": date,
},
"licenses": [
{
"url": "https://opensource.org/licenses/MIT",
"id": 1,
"name": "MIT License",
},
],
"categories": [
{"supercategory": "Crystal", "id": 1, "name": "Crystal"},
],
"images": [],
"annotations": [],
}
self.images_template = {
"license": 1,
"file_name": "",
"width": -1,
"height": -1,
"date_captured": date,
"id": 0,
}
self.annotations_template = {
"segmentation": [
[]
],
"area": 0,
"iscrowd": 0,
"image_id": 0,
"bbox": [0, 0, 0, 0],
"category_id": 1,
"id": 0,
}
def add_image(self, image_path, height, width):
self.number_images += 1
image_id = self.number_images
image_name = f"{str(image_id).zfill(10)}"
_, ext = os.path.splitext(image_path)
image_dict = self.images_template.copy()
image_dict["file_name"] = image_name + ext
image_dict["width"] = width
image_dict["height"] = height
image_dict["id"] = image_id
self.coco_dataset["images"].append(image_dict)
return image_dict
def add_annotation(self, image_id=1, segmentation=None, bbox=None, area=0):
self.number_annotations += 1
annotation_id = self.number_annotations
if segmentation is None:
segmentation = [[]]
if bbox is None:
bbox = []
# Annotation
annotation_dict = self.annotations_template.copy()
annotation_dict["segmentation"] = segmentation
annotation_dict["bbox"] = bbox
annotation_dict["image_id"] = image_id
annotation_dict["id"] = annotation_id
annotation_dict["area"] = area
self.coco_dataset["annotations"].append(annotation_dict)
return annotation_id
def write(self):
dataset_annotations_file = os.path.join(
self.output_dir,
self.dataset_name + ".json"
)
with open(dataset_annotations_file, "w") as f:
json.dump(self.coco_dataset, f, indent=None)
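    # Rough usage sketch (paths and geometry are placeholders; kept as a
    # comment to avoid side effects on import):
    #
    #   exporter = CocoExporter(output_dir="/tmp/out", dataset_name="pcs_train")
    #   img = exporter.add_image("/data/img_0001.png", height=1024, width=1024)
    #   exporter.add_annotation(image_id=img["id"],
    #                           segmentation=[[10, 10, 50, 10, 50, 50]],
    #                           bbox=[10, 10, 40, 40], area=1600)
    #   exporter.write()   # writes /tmp/out/pcs_train.json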
class Indexer:
def __init__(self, root_dirs, labels={"train": 80, "validation": 20}):
if isinstance(root_dirs, str):
root_dirs = [root_dirs]
for root_dir in root_dirs:
assert os.path.isdir(root_dir), f"Not a directory: {root_dir}"
self.root_dirs = root_dirs
sum_weights = sum(labels.values())
self.labels = {
label: weight / sum_weights
for label, weight in labels.items()
}
assert sum(self.labels.values()) == 1
def _index_iopairs(self, reindex):
iopairs = {}
for root_dir in self.root_dirs:
glob_str = os.path.join(root_dir, "**", "*.png")
inputs = glob.glob(glob_str, recursive=True)
for image_file in tqdm(
inputs,
desc=f"Indexing io pairs from directory {root_dir}",
total=len(inputs)
):
index_file = image_file + ".idx.json"
annotation_file = image_file + ".json"
if not reindex and os.path.exists(index_file):
with open(index_file, "r") as f:
d_iopair = json.load(f)
else:
d_iopair = {}
d_iopair["valid"] = os.path.exists(annotation_file)
image_data = cv2.imread(image_file, cv2.IMREAD_GRAYSCALE)
d_iopair["height"], d_iopair["width"] = image_data.shape
d_iopair["key"] = sha256(image_data.data.tobytes()).hexdigest()
if d_iopair["key"] in iopairs:
d_iopair["valid"] = False
print(f"warning: invalidating {image_file} (duplicate)")
                    # validate annotations
                    annotations = {}
                    try:
                        with open(annotation_file, "r") as f:
                            annotations = json.load(f)
                    except json.JSONDecodeError:
                        d_iopair["valid"] = False
                        print(f"warning: invalidating {image_file} (JSON decode error)")
                    if "segmentation" not in annotations:
                        d_iopair["valid"] = False
                        print(f"warning: invalidating {image_file} (missing segmentation field)")
                    else:
                        # shape check: expect (num_instances, num_vertices, 3)
                        arr = np.array(annotations["segmentation"])
                        if len(arr.shape) < 3 or arr.shape[2] != 3:
                            d_iopair["valid"] = False
                            print(f"warning: invalidating {image_file} (wrong segmentation shape)")
                        else:
                            # coordinate check: all vertices must lie within the image bounds
                            y_min = arr[:, :, 1].min()
                            y_max = arr[:, :, 1].max()
                            if y_min < 0 or y_max >= d_iopair["height"]:
                                d_iopair["valid"] = False
                                print(f"warning: invalidating {image_file} (coordinate out of image bounds)")
                            x_min = arr[:, :, 0].min()
                            x_max = arr[:, :, 0].max()
                            if x_min < 0 or x_max >= d_iopair["width"]:
                                d_iopair["valid"] = False
                                print(f"warning: invalidating {image_file} (coordinate out of image bounds)")
d_iopair["label"] = ""
with open(index_file, "w") as f:
json.dump(d_iopair, f)
iopairs[d_iopair["key"]] = (
image_file,
annotation_file,
index_file,
d_iopair["height"],
d_iopair["width"],
d_iopair["label"],
d_iopair["valid"]
)
return iopairs
def _load_only(self):
iopairs = {}
for root_dir in self.root_dirs:
glob_str = os.path.join(root_dir, "**", "*.png")
inputs = glob.glob(glob_str, recursive=True)
for image_file in inputs:
index_file = image_file + ".idx.json"
annotation_file = image_file + ".json"
with open(index_file, "r") as f:
d_iopair = json.load(f)
iopairs[d_iopair["key"]] = (
image_file,
annotation_file,
index_file,
d_iopair["height"],
d_iopair["width"],
d_iopair["label"],
d_iopair["valid"]
)
return iopairs
def _resample_iopairs(self, iopairs):
keys = list(iopairs.keys())
random.shuffle(keys)
offset = 0
for label, fraction in self.labels.items():
size = int(round(fraction * len(iopairs)))
label_keys = keys[offset:offset+size]
offset += size
for key in label_keys:
_, _, index_file, height, width, _, valid = iopairs[key]
d_iopair = {
"key": key,
"height": height,
"width": width,
"label": label,
"valid": valid
}
with open(index_file, "w") as f:
json.dump(d_iopair, f)
def load_iopairs(self, reindex=False):
iopairs = self._index_iopairs(reindex)
filtered_iopairs = {key: iopair for key, iopair in iopairs.items() if iopair[6]}
self._resample_iopairs(filtered_iopairs)
updated_iopairs = self._load_only()
label_count = {label: 0 for label, _ in self.labels.items()}
for _, iopair in updated_iopairs.items():
label_count[iopair[5]] += 1
print("after indexing:")
for root_dir in self.root_dirs:
print(f"\t{root_dir}")
for label, count in label_count.items():
print(f"\t{label}: {count} ({round(100 * self.labels[label], 2)}%)")
return updated_iopairs
def to_coco(self, output_dir, iopairs=None, box_mode="xywha", reindex=False, flip_y=True, digits=2):
assert box_mode in ("xywha", "coco")
exporters = {
label: CocoExporter(
output_dir=output_dir,
dataset_name=f"pcs_{label}")
for label, _ in self.labels.items()
}
label_dirs = {
label: os.path.join(output_dir, f"pcs_{label}")
for label, _ in self.labels.items()
}
if iopairs is None:
valid_labeled_iopairs = self.load_iopairs(reindex=reindex)
else:
valid_labeled_iopairs = iopairs
for _, iopair in tqdm(
valid_labeled_iopairs.items(),
desc=f"Exporting dataset to coco format",
total=len(valid_labeled_iopairs),
):
image_file, annotation_file, _, height, width, label, _ = iopair
exporter = exporters[label]
# Adding image to dataset while ensuring that only grayscale images are stored
image_dict = exporter.add_image(image_file, height, width)
image_store_path = os.path.join(label_dirs[label], image_dict["file_name"])
image = cv2.imread(image_file, cv2.IMREAD_GRAYSCALE)
cv2.imwrite(image_store_path, image)
# Adding annotations to image
with open(annotation_file, "r") as f:
annotations = json.load(f)
segmentations = annotations["segmentation"]
for verts in segmentations:
                # each crystal vertex carries x, y, z device coordinates, scaled and shifted to fit the image dimensions
verts = np.array(verts)
assert verts.shape[-1] == 3
# depth information is currently not used.
# the array is copied during np.delete which prevents a CV2 error.
verts = np.delete(verts, 2, axis=1)
if flip_y:
verts[:, 1] = (image_dict["height"] - 1) - verts[:, 1]
# let CV2 figure out the correct ordering of the vertices
hull = cv2.convexHull(np.float32(verts))
# rounding to make the resulting JSON files smaller.
area = round(cv2.contourArea(hull), digits)
segmentation = [
[round(v, digits) for v in hull.flatten().tolist()]
]
if box_mode == "coco":
x0 = verts[:, 0].min()
y0 = verts[:, 1].min()
w = verts[:, 0].max() - x0
h = verts[:, 1].max() - y0
bbox = [round(v, digits) for v in [x0, y0, w, h]]
elif box_mode == "xywha":
min_area_rect = cv2.minAreaRect(hull)
bbox = convert_min_area_rect(min_area_rect)
bbox = [round(v, digits) for v in bbox]
exporter.add_annotation(
image_id=image_dict["id"],
segmentation=segmentation,
bbox=bbox,
area=area
)
for _, exporter in exporters.items():
exporter.write()
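    # Sketch of the indexing/export pipeline (directory names are assumptions):
    #
    #   indexer = Indexer(["/data/raw_pcs"], labels={"train": 80, "validation": 20})
    #   iopairs = indexer.load_iopairs(reindex=True)
    #   indexer.to_coco("/data/coco_export", iopairs=iopairs, box_mode="xywha")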
class PCSDataset:
def __init__(self, dataset_file, image_dir=None, use_augmentations=False, intermediates=False):
assert os.path.exists(dataset_file)
self.dataset_file = dataset_file
assert self.dataset_file.lower().endswith(".json") or self.dataset_file.lower().endswith(".gzip")
self.compressed = True if self.dataset_file.lower().endswith(".gzip") else False
if not image_dir:
dirname = os.path.dirname(self.dataset_file)
basename = os.path.basename(self.dataset_file)
name, _ = os.path.splitext(basename)
if self.compressed and name.endswith(".json"): # support for .json.gzip
name = name[:-len(".json")]
image_dir = os.path.join(dirname, name)
assert os.path.isdir(image_dir), f"Image directory not found: {image_dir}"
self.image_dir = image_dir
self.stats_file = self.image_dir + "_stats.json"
self.aug_stats_file = self.image_dir + "_aug_stats.json"
if self.compressed:
print("Reading compressed PCSCocoDataset:", dataset_file, "...")
start = time.time()
with gzip.open(dataset_file, "r") as file:
self.data = json.loads(file.read().decode('utf-8'))
end = time.time()
print("finished reading in", f"{round(end-start, 3)} seconds")
else:
print("Reading PCSCocoDataset:", dataset_file, "...")
start = time.time()
with open(dataset_file, "r") as file:
self.data = json.load(file)
end = time.time()
print("finished reading in", f"{round(end-start, 3)} seconds")
self.image_annotations = dict()
for annotation in self.data["annotations"]:
image_id = annotation["image_id"]
            if image_id not in self.image_annotations:
self.image_annotations[image_id] = list()
self.image_annotations[image_id].append(annotation)
self.augmentations_active = use_augmentations
self.intermediates = intermediates
def use_augmentations(self, flag=True, intermediates=False):
self.augmentations_active = flag
self.intermediates = intermediates
def write_statistics(self, num_images=20000, digits=2):
dataset_statistics = {
"pixel_mean": -1,
"pixel_std": -1,
"num_images": -1,
"num_annotations": -1,
"augmentations_active": self.augmentations_active,
"images": {
"image_id": [],
"height": [],
"width": [],
"instances_mean_area": [],
"instances_mean_ratio": [],
"annotation_ids": []
},
"annotations": {
"image_id": [],
"annotation_id": [],
"x": [],
"y": [],
"width": [],
"height": [],
"angle": [],
"area": [],
"ratio": []
}
}
dataset_statistics["num_images"] = len(self)
num_annotations = 0
image_stats_num_images = min(len(self), num_images)
        image_stats_indices = set(random.sample(range(len(self)), image_stats_num_images))
image_flat_store = []
image_stats = dataset_statistics["images"]
annotation_stats = dataset_statistics["annotations"]
def rndf(x):
return round(float(x), digits)
for index, img_data in enumerate(tqdm(
self, total=len(self), desc="calculate image stats"
)):
if self.augmentations_active:
image = img_data["aug_img"]
else:
image = img_data["img"]
if index in image_stats_indices:
image_flat_store.append(image.flatten().astype(np.float64))
image_stats["image_id"].append(img_data["meta"]["img_dict"]["id"])
image_shape = image.shape
image_stats["height"].append(image_shape[0])
image_stats["width"].append(image_shape[1])
image_instance_areas = []
image_instance_ratios = []
image_stats["annotation_ids"] = img_data["anno_ids"]
if self.augmentations_active:
segms = img_data["aug_segms"]
bboxs = img_data["aug_rbboxs"]
else:
segms = img_data["segms"]
bboxs = img_data["rbboxs"]
for segmentation, rotated_box in zip(segms, bboxs):
num_annotations += 1
annotation_stats["image_id"].append(
img_data["meta"]["img_dict"]["id"]
)
x_ctr, y_ctr, width, height, angle = rotated_box
annotation_stats["x"].append(rndf(x_ctr))
annotation_stats["y"].append(rndf(y_ctr))
annotation_stats["width"].append(rndf(width))
annotation_stats["height"].append(rndf(height))
annotation_stats["angle"].append(rndf(angle))
ratio = width / (height + 1e-4)
image_instance_ratios.append(rndf(ratio))
annotation_stats["ratio"].append(rndf(ratio))
area = cv2.contourArea(np.float32(segmentation))
image_instance_areas.append(rndf(area))
annotation_stats["area"].append(rndf(area))
image_stats["instances_mean_area"].append(
rndf(np.mean(image_instance_areas))
)
image_stats["instances_mean_ratio"].append(
rndf(np.mean(image_instance_ratios))
)
image_flat_store = np.concatenate(image_flat_store)
dataset_statistics["pixel_mean"] = rndf(np.mean(image_flat_store))
dataset_statistics["pixel_std"] = rndf(np.std(image_flat_store))
dataset_statistics["num_annotations"] = num_annotations
output_file = self.aug_stats_file if self.augmentations_active else self.stats_file
with open(output_file, "w") as f:
json.dump(dataset_statistics, f)
def write_augmented_dataset(self, output_dataset, digits=2):
dirname = os.path.dirname(self.dataset_file)
coco_exporter = CocoExporter(
output_dir=dirname, dataset_name=output_dataset
)
self.use_augmentations()
def rndf(x):
return round(float(x), digits)
for img_data in tqdm(
self, total=len(self), desc="augmenting dataset"
):
img_path = img_data["meta"]["img_path"]
height, width = img_data["img"].shape
image_dict = coco_exporter.add_image(
img_path, int(height), int(width)
)
cv2.imwrite(
os.path.join(
dirname,
output_dataset,
img_data["meta"]["img_dict"]["file_name"]
),
grayscale_to_uint(img_data["aug_img"])
)
image_id = image_dict["id"]
for segmentation, rotated_box in zip(img_data["aug_segms"], img_data["aug_rbboxs"]):
area = rndf(cv2.contourArea(np.float32(segmentation)))
segmentation = segmentation.flatten().tolist()
segmentation = [rndf(v) for v in segmentation]
if isinstance(rotated_box, np.ndarray):
rotated_box = rotated_box.flatten().tolist()
rotated_box = [rndf(v) for v in rotated_box]
coco_exporter.add_annotation(
image_id=image_id,
segmentation=[segmentation],
bbox=rotated_box,
area=area
)
coco_exporter.write()
def write_trimmed_dataset(self, output_dataset, digits=2, num=20, augmented=False):
dirname = os.path.dirname(self.dataset_file)
coco_exporter = CocoExporter(
output_dir=dirname, dataset_name=output_dataset
)
if augmented:
self.use_augmentations()
def rndf(x):
return round(float(x), digits)
for idx, img_data in enumerate(tqdm(
self, total=num, desc="trimming dataset"
)):
if idx == num:
break
img_path = img_data["meta"]["img_path"]
height, width = img_data["img"].shape
image_dict = coco_exporter.add_image(
img_path, int(height), int(width)
)
cv2.imwrite(
os.path.join(
dirname,
output_dataset,
img_data["meta"]["img_dict"]["file_name"]
),
grayscale_to_uint(img_data["aug_img" if augmented else "img"])
)
image_id = image_dict["id"]
for segmentation, rotated_box in zip(img_data["aug_segms" if augmented else "segms"], img_data["aug_rbboxs" if augmented else "rbboxs"]):
area = rndf(cv2.contourArea(np.float32(segmentation)))
segmentation = segmentation.flatten().tolist()
segmentation = [rndf(v) for v in segmentation]
if isinstance(rotated_box, np.ndarray):
rotated_box = rotated_box.flatten().tolist()
rotated_box = [rndf(v) for v in rotated_box]
coco_exporter.add_annotation(
image_id=image_id,
segmentation=[segmentation],
bbox=rotated_box,
area=area
)
coco_exporter.write()
def write_pickled_dataset(self, output_dataset):
dirname = os.path.dirname(self.dataset_file)
outpath = os.path.join(dirname, output_dataset)
assert outpath.endswith(".pkl")
data = []
for idx, img_data in enumerate(tqdm(
self, total=len(self), desc="writing segmented dataset"
)):
_, _, _, segmentations, rotated_boxes = img_data["meta"]["img_dict"], img_data["img"], None, img_data["segms"], img_data["rbboxs"]
segmentations=[np.float32(segm) for segm in segmentations],
rotated_boxes=np.float32(rotated_boxes)
data.append((segmentations, rotated_boxes))
with open(outpath, "wb") as f:
pickle.dump(data, f)
def load_aug_stats(self):
with open(self.aug_stats_file, "r") as f:
aug_stats = json.load(f)
return aug_stats
def load_stats(self):
with open(self.stats_file, "r") as f:
aug_stats = json.load(f)
return aug_stats
@staticmethod
def get_segmentations(image_annotations):
return [
np.array(annotation["segmentation"], dtype=np.float32).flatten().reshape(-1, 2)
for annotation in image_annotations
]
@staticmethod
def get_rotated_boxes(image_annotations, segmentations):
# use bbox field if angle information is present, otherwise infer from segmentations
assert len(image_annotations) > 0
has_angle = len(image_annotations[0]["bbox"]) == 5
if has_angle:
return [
np.array(annotation["bbox"], dtype=np.float32).flatten()
for annotation in image_annotations
]
else:
min_area_rects = [
cv2.minAreaRect(segmentation)
for segmentation in segmentations
]
return [
np.array(convert_min_area_rect(min_area_rect), dtype=np.float32)
for min_area_rect in min_area_rects
]
@staticmethod
def get_annotation_ids(image_annotations):
return [annotation["id"] for annotation in image_annotations]
def get_meta(self, idx):
image_dict = self.data["images"][idx]
image_annotations = self.image_annotations[image_dict["id"]]
image_path = os.path.join(self.image_dir, image_dict["file_name"])
assert os.path.exists(image_path)
return dict(
img_dict=image_dict,
img_path=image_path,
img_annos=image_annotations
)
def __getitem__(self, idx):
meta = self.get_meta(idx)
image = grayscale_to_float(
cv2.imread(meta["img_path"], cv2.IMREAD_GRAYSCALE)
)
segmentations = PCSDataset.get_segmentations(meta["img_annos"])
rotated_boxes = PCSDataset.get_rotated_boxes(meta["img_annos"], segmentations)
annotation_ids = PCSDataset.get_annotation_ids(meta["img_annos"])
return dict(
meta=meta,
img=image,
anno_ids=annotation_ids,
segms=segmentations,
rbboxs=rotated_boxes
)
def __iter__(self):
if self.augmentations_active:
return PCSDatasetAugmentedIterator(self)
else:
return PCSDatasetIterator(self)
def __len__(self):
return len(self.data["images"])
class PCSDatasetIterator:
def __init__(self, pcs_coco_dataset):
self.dataset = pcs_coco_dataset
self.num_images = len(self.dataset.data["images"])
self.index = 0
def __next__(self):
if self.index < self.num_images:
img_data = self.dataset[self.index]
self.index += 1
return img_data
else:
raise StopIteration
class PCSDatasetAugmentedIterator:
def __init__(self, dataset):
self.dataset = dataset
self.dataset_iter = PCSDatasetIterator(
self.dataset
)
self.augmentor = PCSDefaultAugmentor()
def __next__(self):
img_data = next(
self.dataset_iter
)
aug_result = self.augmentor(
img_data["img"].copy(),
[x.copy() for x in img_data["segms"]],
[x.copy() for x in img_data["rbboxs"]]
)
img_data.update(aug_result)
return img_data
def __iter__(self):
return self |
py | 1a54f50fb928bb6cad04f3b4207d6f0f72fdab5b | """
Copyright 2020, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE REGENTS OF THE UNIVERSITY OF CALIFORNIA ''AS
IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OF THE UNIVERSITY OF CALIFORNIA OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of The Regents of the University of California.
"""
import logging
import os
import sys
import scapy.utils
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP
import cocotb_test.simulator
import cocotb
from cocotb.log import SimLog
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge, FallingEdge, Timer
from cocotbext.axi import AxiStreamBus
from cocotbext.eth import XgmiiSource, XgmiiSink
from cocotbext.pcie.core import RootComplex
from cocotbext.pcie.xilinx.us import UltraScalePlusPcieDevice
try:
import mqnic
except ImportError:
# attempt import from current directory
sys.path.insert(0, os.path.join(os.path.dirname(__file__)))
try:
import mqnic
finally:
del sys.path[0]
class TB(object):
def __init__(self, dut):
self.dut = dut
self.BAR0_APERTURE = int(os.getenv("PARAM_BAR0_APERTURE"))
self.log = SimLog("cocotb.tb")
self.log.setLevel(logging.DEBUG)
# PCIe
self.rc = RootComplex()
self.rc.max_payload_size = 0x1 # 256 bytes
self.rc.max_read_request_size = 0x2 # 512 bytes
self.dev = UltraScalePlusPcieDevice(
# configuration options
pcie_generation=3,
pcie_link_width=16,
user_clk_frequency=250e6,
alignment="dword",
cq_cc_straddle=False,
rq_rc_straddle=False,
rc_4tlp_straddle=False,
enable_pf1=False,
enable_client_tag=True,
enable_extended_tag=True,
enable_parity=False,
enable_rx_msg_interface=False,
enable_sriov=False,
enable_extended_configuration=False,
enable_pf0_msi=True,
enable_pf1_msi=False,
# signals
# Clock and Reset Interface
user_clk=dut.clk_250mhz,
user_reset=dut.rst_250mhz,
# user_lnk_up
# sys_clk
# sys_clk_gt
# sys_reset
# phy_rdy_out
# Requester reQuest Interface
rq_bus=AxiStreamBus.from_prefix(dut, "m_axis_rq"),
pcie_rq_seq_num0=dut.s_axis_rq_seq_num_0,
pcie_rq_seq_num_vld0=dut.s_axis_rq_seq_num_valid_0,
pcie_rq_seq_num1=dut.s_axis_rq_seq_num_1,
pcie_rq_seq_num_vld1=dut.s_axis_rq_seq_num_valid_1,
# pcie_rq_tag0
# pcie_rq_tag1
# pcie_rq_tag_av
# pcie_rq_tag_vld0
# pcie_rq_tag_vld1
# Requester Completion Interface
rc_bus=AxiStreamBus.from_prefix(dut, "s_axis_rc"),
# Completer reQuest Interface
cq_bus=AxiStreamBus.from_prefix(dut, "s_axis_cq"),
# pcie_cq_np_req
# pcie_cq_np_req_count
# Completer Completion Interface
cc_bus=AxiStreamBus.from_prefix(dut, "m_axis_cc"),
# Transmit Flow Control Interface
# pcie_tfc_nph_av=dut.pcie_tfc_nph_av,
# pcie_tfc_npd_av=dut.pcie_tfc_npd_av,
# Configuration Management Interface
cfg_mgmt_addr=dut.cfg_mgmt_addr,
cfg_mgmt_function_number=dut.cfg_mgmt_function_number,
cfg_mgmt_write=dut.cfg_mgmt_write,
cfg_mgmt_write_data=dut.cfg_mgmt_write_data,
cfg_mgmt_byte_enable=dut.cfg_mgmt_byte_enable,
cfg_mgmt_read=dut.cfg_mgmt_read,
cfg_mgmt_read_data=dut.cfg_mgmt_read_data,
cfg_mgmt_read_write_done=dut.cfg_mgmt_read_write_done,
# cfg_mgmt_debug_access
# Configuration Status Interface
# cfg_phy_link_down
# cfg_phy_link_status
# cfg_negotiated_width
# cfg_current_speed
cfg_max_payload=dut.cfg_max_payload,
cfg_max_read_req=dut.cfg_max_read_req,
# cfg_function_status
# cfg_vf_status
# cfg_function_power_state
# cfg_vf_power_state
# cfg_link_power_state
# cfg_err_cor_out
# cfg_err_nonfatal_out
# cfg_err_fatal_out
# cfg_local_error_out
# cfg_local_error_valid
# cfg_rx_pm_state
# cfg_tx_pm_state
# cfg_ltssm_state
# cfg_rcb_status
# cfg_obff_enable
# cfg_pl_status_change
# cfg_tph_requester_enable
# cfg_tph_st_mode
# cfg_vf_tph_requester_enable
# cfg_vf_tph_st_mode
# Configuration Received Message Interface
# cfg_msg_received
# cfg_msg_received_data
# cfg_msg_received_type
# Configuration Transmit Message Interface
# cfg_msg_transmit
# cfg_msg_transmit_type
# cfg_msg_transmit_data
# cfg_msg_transmit_done
# Configuration Flow Control Interface
cfg_fc_ph=dut.cfg_fc_ph,
cfg_fc_pd=dut.cfg_fc_pd,
cfg_fc_nph=dut.cfg_fc_nph,
cfg_fc_npd=dut.cfg_fc_npd,
cfg_fc_cplh=dut.cfg_fc_cplh,
cfg_fc_cpld=dut.cfg_fc_cpld,
cfg_fc_sel=dut.cfg_fc_sel,
# Configuration Control Interface
# cfg_hot_reset_in
# cfg_hot_reset_out
# cfg_config_space_enable
# cfg_dsn
# cfg_bus_number
# cfg_ds_port_number
# cfg_ds_bus_number
# cfg_ds_device_number
# cfg_ds_function_number
# cfg_power_state_change_ack
# cfg_power_state_change_interrupt
cfg_err_cor_in=dut.status_error_cor,
cfg_err_uncor_in=dut.status_error_uncor,
# cfg_flr_in_process
# cfg_flr_done
# cfg_vf_flr_in_process
# cfg_vf_flr_func_num
# cfg_vf_flr_done
# cfg_pm_aspm_l1_entry_reject
# cfg_pm_aspm_tx_l0s_entry_disable
# cfg_req_pm_transition_l23_ready
# cfg_link_training_enable
# Configuration Interrupt Controller Interface
# cfg_interrupt_int
# cfg_interrupt_sent
# cfg_interrupt_pending
cfg_interrupt_msi_enable=dut.cfg_interrupt_msi_enable,
cfg_interrupt_msi_mmenable=dut.cfg_interrupt_msi_mmenable,
cfg_interrupt_msi_mask_update=dut.cfg_interrupt_msi_mask_update,
cfg_interrupt_msi_data=dut.cfg_interrupt_msi_data,
# cfg_interrupt_msi_select=dut.cfg_interrupt_msi_select,
cfg_interrupt_msi_int=dut.cfg_interrupt_msi_int,
cfg_interrupt_msi_pending_status=dut.cfg_interrupt_msi_pending_status,
cfg_interrupt_msi_pending_status_data_enable=dut.cfg_interrupt_msi_pending_status_data_enable,
# cfg_interrupt_msi_pending_status_function_num=dut.cfg_interrupt_msi_pending_status_function_num,
cfg_interrupt_msi_sent=dut.cfg_interrupt_msi_sent,
cfg_interrupt_msi_fail=dut.cfg_interrupt_msi_fail,
# cfg_interrupt_msix_enable
# cfg_interrupt_msix_mask
# cfg_interrupt_msix_vf_enable
# cfg_interrupt_msix_vf_mask
# cfg_interrupt_msix_address
# cfg_interrupt_msix_data
# cfg_interrupt_msix_int
# cfg_interrupt_msix_vec_pending
# cfg_interrupt_msix_vec_pending_status
cfg_interrupt_msi_attr=dut.cfg_interrupt_msi_attr,
cfg_interrupt_msi_tph_present=dut.cfg_interrupt_msi_tph_present,
cfg_interrupt_msi_tph_type=dut.cfg_interrupt_msi_tph_type,
# cfg_interrupt_msi_tph_st_tag=dut.cfg_interrupt_msi_tph_st_tag,
# cfg_interrupt_msi_function_number=dut.cfg_interrupt_msi_function_number,
# Configuration Extend Interface
# cfg_ext_read_received
# cfg_ext_write_received
# cfg_ext_register_number
# cfg_ext_function_number
# cfg_ext_write_data
# cfg_ext_write_byte_enable
# cfg_ext_read_data
# cfg_ext_read_data_valid
)
# self.dev.log.setLevel(logging.DEBUG)
self.rc.make_port().connect(self.dev)
self.driver = mqnic.Driver(self.rc)
self.dev.functions[0].msi_multiple_message_capable = 5
self.dev.functions[0].configure_bar(0, 2**self.BAR0_APERTURE, ext=True, prefetch=True)
# Ethernet
cocotb.fork(Clock(dut.qsfp_0_rx_clk_0, 6.4, units="ns").start())
self.qsfp_0_0_source = XgmiiSource(dut.qsfp_0_rxd_0, dut.qsfp_0_rxc_0, dut.qsfp_0_rx_clk_0, dut.qsfp_0_rx_rst_0)
cocotb.fork(Clock(dut.qsfp_0_tx_clk_0, 6.4, units="ns").start())
self.qsfp_0_0_sink = XgmiiSink(dut.qsfp_0_txd_0, dut.qsfp_0_txc_0, dut.qsfp_0_tx_clk_0, dut.qsfp_0_tx_rst_0)
cocotb.fork(Clock(dut.qsfp_0_rx_clk_1, 6.4, units="ns").start())
self.qsfp_0_1_source = XgmiiSource(dut.qsfp_0_rxd_1, dut.qsfp_0_rxc_1, dut.qsfp_0_rx_clk_1, dut.qsfp_0_rx_rst_1)
cocotb.fork(Clock(dut.qsfp_0_tx_clk_1, 6.4, units="ns").start())
self.qsfp_0_1_sink = XgmiiSink(dut.qsfp_0_txd_1, dut.qsfp_0_txc_1, dut.qsfp_0_tx_clk_1, dut.qsfp_0_tx_rst_1)
cocotb.fork(Clock(dut.qsfp_0_rx_clk_2, 6.4, units="ns").start())
self.qsfp_0_2_source = XgmiiSource(dut.qsfp_0_rxd_2, dut.qsfp_0_rxc_2, dut.qsfp_0_rx_clk_2, dut.qsfp_0_rx_rst_2)
cocotb.fork(Clock(dut.qsfp_0_tx_clk_2, 6.4, units="ns").start())
self.qsfp_0_2_sink = XgmiiSink(dut.qsfp_0_txd_2, dut.qsfp_0_txc_2, dut.qsfp_0_tx_clk_2, dut.qsfp_0_tx_rst_2)
cocotb.fork(Clock(dut.qsfp_0_rx_clk_3, 6.4, units="ns").start())
self.qsfp_0_3_source = XgmiiSource(dut.qsfp_0_rxd_3, dut.qsfp_0_rxc_3, dut.qsfp_0_rx_clk_3, dut.qsfp_0_rx_rst_3)
cocotb.fork(Clock(dut.qsfp_0_tx_clk_3, 6.4, units="ns").start())
self.qsfp_0_3_sink = XgmiiSink(dut.qsfp_0_txd_3, dut.qsfp_0_txc_3, dut.qsfp_0_tx_clk_3, dut.qsfp_0_tx_rst_3)
cocotb.fork(Clock(dut.qsfp_1_rx_clk_0, 6.4, units="ns").start())
self.qsfp_1_0_source = XgmiiSource(dut.qsfp_1_rxd_0, dut.qsfp_1_rxc_0, dut.qsfp_1_rx_clk_0, dut.qsfp_1_rx_rst_0)
cocotb.fork(Clock(dut.qsfp_1_tx_clk_0, 6.4, units="ns").start())
self.qsfp_1_0_sink = XgmiiSink(dut.qsfp_1_txd_0, dut.qsfp_1_txc_0, dut.qsfp_1_tx_clk_0, dut.qsfp_1_tx_rst_0)
cocotb.fork(Clock(dut.qsfp_1_rx_clk_1, 6.4, units="ns").start())
self.qsfp_1_1_source = XgmiiSource(dut.qsfp_1_rxd_1, dut.qsfp_1_rxc_1, dut.qsfp_1_rx_clk_1, dut.qsfp_1_rx_rst_1)
cocotb.fork(Clock(dut.qsfp_1_tx_clk_1, 6.4, units="ns").start())
self.qsfp_1_1_sink = XgmiiSink(dut.qsfp_1_txd_1, dut.qsfp_1_txc_1, dut.qsfp_1_tx_clk_1, dut.qsfp_1_tx_rst_1)
cocotb.fork(Clock(dut.qsfp_1_rx_clk_2, 6.4, units="ns").start())
self.qsfp_1_2_source = XgmiiSource(dut.qsfp_1_rxd_2, dut.qsfp_1_rxc_2, dut.qsfp_1_rx_clk_2, dut.qsfp_1_rx_rst_2)
cocotb.fork(Clock(dut.qsfp_1_tx_clk_2, 6.4, units="ns").start())
self.qsfp_1_2_sink = XgmiiSink(dut.qsfp_1_txd_2, dut.qsfp_1_txc_2, dut.qsfp_1_tx_clk_2, dut.qsfp_1_tx_rst_2)
cocotb.fork(Clock(dut.qsfp_1_rx_clk_3, 6.4, units="ns").start())
self.qsfp_1_3_source = XgmiiSource(dut.qsfp_1_rxd_3, dut.qsfp_1_rxc_3, dut.qsfp_1_rx_clk_3, dut.qsfp_1_rx_rst_3)
cocotb.fork(Clock(dut.qsfp_1_tx_clk_3, 6.4, units="ns").start())
self.qsfp_1_3_sink = XgmiiSink(dut.qsfp_1_txd_3, dut.qsfp_1_txc_3, dut.qsfp_1_tx_clk_3, dut.qsfp_1_tx_rst_3)
dut.qsfp_0_i2c_scl_i.setimmediatevalue(1)
dut.qsfp_0_i2c_sda_i.setimmediatevalue(1)
dut.qsfp_0_intr_n.setimmediatevalue(1)
dut.qsfp_0_mod_prsnt_n.setimmediatevalue(0)
dut.qsfp_0_rx_error_count_0.setimmediatevalue(0)
dut.qsfp_0_rx_error_count_1.setimmediatevalue(0)
dut.qsfp_0_rx_error_count_2.setimmediatevalue(0)
dut.qsfp_0_rx_error_count_3.setimmediatevalue(0)
dut.qsfp_1_i2c_scl_i.setimmediatevalue(1)
dut.qsfp_1_i2c_sda_i.setimmediatevalue(1)
dut.qsfp_1_intr_n.setimmediatevalue(1)
dut.qsfp_1_mod_prsnt_n.setimmediatevalue(0)
dut.qsfp_1_rx_error_count_0.setimmediatevalue(0)
dut.qsfp_1_rx_error_count_1.setimmediatevalue(0)
dut.qsfp_1_rx_error_count_2.setimmediatevalue(0)
dut.qsfp_1_rx_error_count_3.setimmediatevalue(0)
dut.qspi_dq_i.setimmediatevalue(0)
dut.pps_in.setimmediatevalue(0)
dut.bmc_miso.setimmediatevalue(0)
dut.bmc_int.setimmediatevalue(0)
self.loopback_enable = False
cocotb.fork(self._run_loopback())
async def init(self):
self.dut.qsfp_0_rx_rst_0.setimmediatevalue(0)
self.dut.qsfp_0_tx_rst_0.setimmediatevalue(0)
self.dut.qsfp_0_rx_rst_1.setimmediatevalue(0)
self.dut.qsfp_0_tx_rst_1.setimmediatevalue(0)
self.dut.qsfp_0_rx_rst_2.setimmediatevalue(0)
self.dut.qsfp_0_tx_rst_2.setimmediatevalue(0)
self.dut.qsfp_0_rx_rst_3.setimmediatevalue(0)
self.dut.qsfp_0_tx_rst_3.setimmediatevalue(0)
self.dut.qsfp_1_rx_rst_0.setimmediatevalue(0)
self.dut.qsfp_1_tx_rst_0.setimmediatevalue(0)
self.dut.qsfp_1_rx_rst_1.setimmediatevalue(0)
self.dut.qsfp_1_tx_rst_1.setimmediatevalue(0)
self.dut.qsfp_1_rx_rst_2.setimmediatevalue(0)
self.dut.qsfp_1_tx_rst_2.setimmediatevalue(0)
self.dut.qsfp_1_rx_rst_3.setimmediatevalue(0)
self.dut.qsfp_1_tx_rst_3.setimmediatevalue(0)
await RisingEdge(self.dut.clk_250mhz)
await RisingEdge(self.dut.clk_250mhz)
self.dut.qsfp_0_rx_rst_0.setimmediatevalue(1)
self.dut.qsfp_0_tx_rst_0.setimmediatevalue(1)
self.dut.qsfp_0_rx_rst_1.setimmediatevalue(1)
self.dut.qsfp_0_tx_rst_1.setimmediatevalue(1)
self.dut.qsfp_0_rx_rst_2.setimmediatevalue(1)
self.dut.qsfp_0_tx_rst_2.setimmediatevalue(1)
self.dut.qsfp_0_rx_rst_3.setimmediatevalue(1)
self.dut.qsfp_0_tx_rst_3.setimmediatevalue(1)
self.dut.qsfp_1_rx_rst_0.setimmediatevalue(1)
self.dut.qsfp_1_tx_rst_0.setimmediatevalue(1)
self.dut.qsfp_1_rx_rst_1.setimmediatevalue(1)
self.dut.qsfp_1_tx_rst_1.setimmediatevalue(1)
self.dut.qsfp_1_rx_rst_2.setimmediatevalue(1)
self.dut.qsfp_1_tx_rst_2.setimmediatevalue(1)
self.dut.qsfp_1_rx_rst_3.setimmediatevalue(1)
self.dut.qsfp_1_tx_rst_3.setimmediatevalue(1)
await FallingEdge(self.dut.rst_250mhz)
await Timer(100, 'ns')
await RisingEdge(self.dut.clk_250mhz)
await RisingEdge(self.dut.clk_250mhz)
self.dut.qsfp_0_rx_rst_0.setimmediatevalue(0)
self.dut.qsfp_0_tx_rst_0.setimmediatevalue(0)
self.dut.qsfp_0_rx_rst_1.setimmediatevalue(0)
self.dut.qsfp_0_tx_rst_1.setimmediatevalue(0)
self.dut.qsfp_0_rx_rst_2.setimmediatevalue(0)
self.dut.qsfp_0_tx_rst_2.setimmediatevalue(0)
self.dut.qsfp_0_rx_rst_3.setimmediatevalue(0)
self.dut.qsfp_0_tx_rst_3.setimmediatevalue(0)
self.dut.qsfp_1_rx_rst_0.setimmediatevalue(0)
self.dut.qsfp_1_tx_rst_0.setimmediatevalue(0)
self.dut.qsfp_1_rx_rst_1.setimmediatevalue(0)
self.dut.qsfp_1_tx_rst_1.setimmediatevalue(0)
self.dut.qsfp_1_rx_rst_2.setimmediatevalue(0)
self.dut.qsfp_1_tx_rst_2.setimmediatevalue(0)
self.dut.qsfp_1_rx_rst_3.setimmediatevalue(0)
self.dut.qsfp_1_tx_rst_3.setimmediatevalue(0)
await self.rc.enumerate(enable_bus_mastering=True, configure_msi=True)
async def _run_loopback(self):
while True:
await RisingEdge(self.dut.clk_250mhz)
if self.loopback_enable:
if not self.qsfp_0_0_sink.empty():
await self.qsfp_0_0_source.send(await self.qsfp_0_0_sink.recv())
if not self.qsfp_0_1_sink.empty():
await self.qsfp_0_1_source.send(await self.qsfp_0_1_sink.recv())
if not self.qsfp_0_2_sink.empty():
await self.qsfp_0_2_source.send(await self.qsfp_0_2_sink.recv())
if not self.qsfp_0_3_sink.empty():
await self.qsfp_0_3_source.send(await self.qsfp_0_3_sink.recv())
if not self.qsfp_1_0_sink.empty():
await self.qsfp_1_0_source.send(await self.qsfp_1_0_sink.recv())
if not self.qsfp_1_1_sink.empty():
await self.qsfp_1_1_source.send(await self.qsfp_1_1_sink.recv())
if not self.qsfp_1_2_sink.empty():
await self.qsfp_1_2_source.send(await self.qsfp_1_2_sink.recv())
if not self.qsfp_1_3_sink.empty():
await self.qsfp_1_3_source.send(await self.qsfp_1_3_sink.recv())
@cocotb.test()
async def run_test_nic(dut):
tb = TB(dut)
await tb.init()
tb.log.info("Init driver")
await tb.driver.init_dev(tb.dev.functions[0].pcie_id)
await tb.driver.interfaces[0].open()
# await driver.interfaces[1].open()
# enable queues
tb.log.info("Enable queues")
await tb.rc.mem_write_dword(tb.driver.interfaces[0].ports[0].hw_addr+mqnic.MQNIC_PORT_REG_SCHED_ENABLE, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
await tb.rc.mem_write_dword(tb.driver.interfaces[0].ports[0].schedulers[0].hw_addr+4*k, 0x00000003)
# wait for all writes to complete
await tb.rc.mem_read(tb.driver.hw_addr, 4)
tb.log.info("Init complete")
tb.log.info("Send and receive single packet")
data = bytearray([x % 256 for x in range(1024)])
await tb.driver.interfaces[0].start_xmit(data, 0)
pkt = await tb.qsfp_0_0_sink.recv()
tb.log.info("Packet: %s", pkt)
await tb.qsfp_0_0_source.send(pkt)
pkt = await tb.driver.interfaces[0].recv()
tb.log.info("Packet: %s", pkt)
assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
# await tb.driver.interfaces[1].start_xmit(data, 0)
# pkt = await tb.qsfp_1_0_sink.recv()
# tb.log.info("Packet: %s", pkt)
# await tb.qsfp_1_0_source.send(pkt)
# pkt = await tb.driver.interfaces[1].recv()
# tb.log.info("Packet: %s", pkt)
# assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
tb.log.info("RX and TX checksum tests")
payload = bytes([x % 256 for x in range(256)])
eth = Ether(src='5A:51:52:53:54:55', dst='DA:D1:D2:D3:D4:D5')
ip = IP(src='192.168.1.100', dst='192.168.1.101')
udp = UDP(sport=1, dport=2)
test_pkt = eth / ip / udp / payload
test_pkt2 = test_pkt.copy()
test_pkt2[UDP].chksum = scapy.utils.checksum(bytes(test_pkt2[UDP]))
await tb.driver.interfaces[0].start_xmit(test_pkt2.build(), 0, 34, 6)
pkt = await tb.qsfp_0_0_sink.recv()
tb.log.info("Packet: %s", pkt)
await tb.qsfp_0_0_source.send(pkt)
pkt = await tb.driver.interfaces[0].recv()
tb.log.info("Packet: %s", pkt)
assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
assert Ether(pkt.data).build() == test_pkt.build()
tb.log.info("Multiple small packets")
count = 64
pkts = [bytearray([(x+k) % 256 for x in range(60)]) for k in range(count)]
tb.loopback_enable = True
for p in pkts:
await tb.driver.interfaces[0].start_xmit(p, 0)
for k in range(count):
pkt = await tb.driver.interfaces[0].recv()
tb.log.info("Packet: %s", pkt)
assert pkt.data == pkts[k]
assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
tb.loopback_enable = False
tb.log.info("Multiple large packets")
count = 64
pkts = [bytearray([(x+k) % 256 for x in range(1514)]) for k in range(count)]
tb.loopback_enable = True
for p in pkts:
await tb.driver.interfaces[0].start_xmit(p, 0)
for k in range(count):
pkt = await tb.driver.interfaces[0].recv()
tb.log.info("Packet: %s", pkt)
assert pkt.data == pkts[k]
assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
tb.loopback_enable = False
await RisingEdge(dut.clk_250mhz)
await RisingEdge(dut.clk_250mhz)
# cocotb-test
tests_dir = os.path.dirname(__file__)
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))
lib_dir = os.path.abspath(os.path.join(rtl_dir, '..', 'lib'))
axi_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'axi', 'rtl'))
axis_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'axis', 'rtl'))
eth_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'eth', 'rtl'))
pcie_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'pcie', 'rtl'))
def test_fpga_core(request):
dut = "fpga_core"
module = os.path.splitext(os.path.basename(__file__))[0]
toplevel = dut
verilog_sources = [
os.path.join(rtl_dir, f"{dut}.v"),
os.path.join(rtl_dir, "bmc_spi.v"),
os.path.join(rtl_dir, "common", "mqnic_interface.v"),
os.path.join(rtl_dir, "common", "mqnic_port.v"),
os.path.join(rtl_dir, "common", "cpl_write.v"),
os.path.join(rtl_dir, "common", "cpl_op_mux.v"),
os.path.join(rtl_dir, "common", "desc_fetch.v"),
os.path.join(rtl_dir, "common", "desc_op_mux.v"),
os.path.join(rtl_dir, "common", "queue_manager.v"),
os.path.join(rtl_dir, "common", "cpl_queue_manager.v"),
os.path.join(rtl_dir, "common", "tx_engine.v"),
os.path.join(rtl_dir, "common", "rx_engine.v"),
os.path.join(rtl_dir, "common", "tx_checksum.v"),
os.path.join(rtl_dir, "common", "rx_hash.v"),
os.path.join(rtl_dir, "common", "rx_checksum.v"),
os.path.join(rtl_dir, "common", "tx_scheduler_rr.v"),
os.path.join(rtl_dir, "common", "event_mux.v"),
os.path.join(rtl_dir, "common", "tdma_scheduler.v"),
os.path.join(rtl_dir, "common", "tdma_ber.v"),
os.path.join(rtl_dir, "common", "tdma_ber_ch.v"),
os.path.join(eth_rtl_dir, "eth_mac_10g_fifo.v"),
os.path.join(eth_rtl_dir, "eth_mac_10g.v"),
os.path.join(eth_rtl_dir, "axis_xgmii_rx_64.v"),
os.path.join(eth_rtl_dir, "axis_xgmii_tx_64.v"),
os.path.join(eth_rtl_dir, "lfsr.v"),
os.path.join(eth_rtl_dir, "ptp_clock.v"),
os.path.join(eth_rtl_dir, "ptp_clock_cdc.v"),
os.path.join(eth_rtl_dir, "ptp_perout.v"),
os.path.join(eth_rtl_dir, "ptp_ts_extract.v"),
os.path.join(axi_rtl_dir, "axil_interconnect.v"),
os.path.join(axi_rtl_dir, "arbiter.v"),
os.path.join(axi_rtl_dir, "priority_encoder.v"),
os.path.join(axis_rtl_dir, "axis_adapter.v"),
os.path.join(axis_rtl_dir, "axis_arb_mux.v"),
os.path.join(axis_rtl_dir, "axis_async_fifo.v"),
os.path.join(axis_rtl_dir, "axis_async_fifo_adapter.v"),
os.path.join(axis_rtl_dir, "axis_fifo.v"),
os.path.join(axis_rtl_dir, "axis_register.v"),
os.path.join(pcie_rtl_dir, "pcie_us_axil_master.v"),
os.path.join(pcie_rtl_dir, "dma_if_pcie_us.v"),
os.path.join(pcie_rtl_dir, "dma_if_pcie_us_rd.v"),
os.path.join(pcie_rtl_dir, "dma_if_pcie_us_wr.v"),
os.path.join(pcie_rtl_dir, "dma_if_mux.v"),
os.path.join(pcie_rtl_dir, "dma_if_mux_rd.v"),
os.path.join(pcie_rtl_dir, "dma_if_mux_wr.v"),
os.path.join(pcie_rtl_dir, "dma_psdpram.v"),
os.path.join(pcie_rtl_dir, "dma_client_axis_sink.v"),
os.path.join(pcie_rtl_dir, "dma_client_axis_source.v"),
os.path.join(pcie_rtl_dir, "pcie_us_cfg.v"),
os.path.join(pcie_rtl_dir, "pcie_us_msi.v"),
os.path.join(pcie_rtl_dir, "pulse_merge.v"),
]
parameters = {}
parameters['AXIS_PCIE_DATA_WIDTH'] = 512
parameters['AXIS_PCIE_KEEP_WIDTH'] = parameters['AXIS_PCIE_DATA_WIDTH'] // 32
parameters['AXIS_PCIE_RQ_USER_WIDTH'] = 62 if parameters['AXIS_PCIE_DATA_WIDTH'] < 512 else 137
parameters['AXIS_PCIE_RC_USER_WIDTH'] = 75 if parameters['AXIS_PCIE_DATA_WIDTH'] < 512 else 161
parameters['AXIS_PCIE_CQ_USER_WIDTH'] = 88 if parameters['AXIS_PCIE_DATA_WIDTH'] < 512 else 183
parameters['AXIS_PCIE_CC_USER_WIDTH'] = 33 if parameters['AXIS_PCIE_DATA_WIDTH'] < 512 else 81
parameters['RQ_SEQ_NUM_WIDTH'] = 6
parameters['BAR0_APERTURE'] = 24
extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}
sim_build = os.path.join(tests_dir, "sim_build",
request.node.name.replace('[', '-').replace(']', ''))
cocotb_test.simulator.run(
python_search=[tests_dir],
verilog_sources=verilog_sources,
toplevel=toplevel,
module=module,
parameters=parameters,
sim_build=sim_build,
extra_env=extra_env,
)
|
py | 1a54f5b9d8942191ab09e2ce61d20676d208ba79 | # -*- coding: utf-8 -*-
#
# ResNet152 model definition is based on TensorFlow Slim implementation.
#
# Author: Gencer Sumbul, http://www.user.tu-berlin.de/gencersumbul/
# Email: [email protected]
# Date: 23 Dec 2019
# Version: 1.0.1
import tensorflow as tf
from nets.resnet_utils import resnet_arg_scope
from nets.resnet_v1 import resnet_v1_152
from models.main_model import Model
class DNN_model(Model):
def create_network(self):
with tf.contrib.slim.arg_scope(resnet_arg_scope()):
logits, end_points = resnet_v1_152(
self.img,
num_classes = self.nb_class,
is_training = self.is_training,
global_pool=True,
spatial_squeeze=True
)
self.logits = logits
self.probabilities = tf.nn.sigmoid(self.logits)
self.predictions = tf.cast(self.probabilities >= self.prediction_threshold, tf.float32)
|
py | 1a54f66be3769df5f31de6fc30de915eb08fb94b | # Copyright (c) 2006-2008 Andreas Kloeckner
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import _tagpy
def _genreList():
result = []
i = 0
next = _tagpy.id3v1_genre(i)
while next:
result.append(next)
i += 1
next = _tagpy.id3v1_genre(i)
return result
_GenreList = _genreList()
_GenreMap = dict([(v, k) for k, v in enumerate(_GenreList)])
genre = _tagpy.id3v1_genre
def genreIndex(genre):
return _GenreMap[genre]
def genreList():
return _GenreList
def genreMap():
return _GenreMap
|
py | 1a54f67bb821d9a74da724eb11f12341db0fdfe7 | """colabelsite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import url
from codechallenge.views import HandleSlackEvents
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^codechallenge/', HandleSlackEvents.as_view())
]
|
py | 1a54f6905d622a5b0566fe9f4c045c4b1e0a8bca | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class QuotesAppConfig(AppConfig):
name = 'Quotes_app'
|
py | 1a54f7e305ab74b452a4445ab08c7ee546fb1d37 | #!/usr/bin/env python
import platform
from sys import exit
from time import sleep
from argparse import ArgumentParser
from distutils.version import LooseVersion
from pyVim import connect
from pyVmomi import vim
def get_args():
"""
Get CLI arguments.
"""
parser = ArgumentParser(description='Arguments for talking to vCenter')
parser.add_argument('-s', '--host', required=True, action='store', help='vSphere service to connect to.')
parser.add_argument('-o', '--port', type=int, default=443, action='store', help='Port to connect on.')
parser.add_argument('-u', '--user', required=True, action='store', help='Username to use.')
parser.add_argument('-p', '--password', required=True, action='store', help='Password to use.')
parser.add_argument('--dv_pg_name', required=True, action='store', help='Name of the port-group')
parser.add_argument('--num_ports', required=True, action='store', help='Name of ports to be added to port-group')
parser.add_argument('--dvs_name', required=True, action='store', help='Name of the dv-switch to add portgroup to')
args = parser.parse_args()
return args
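# Illustrative invocation (an addition for clarity, not part of the original script);
# the script name, host, credentials and object names below are hypothetical.
EXAMPLE_USAGE = ("python update_dv_portgroup.py -s vcenter.example.com -u admin -p secret "
                 "--dvs_name dvs0 --dv_pg_name pg-trunk --num_ports 128")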
def get_dvs_pg_obj(si_content, vimtype, portgroup_name, dvs_name):
obj = None
container = si_content.viewManager.CreateContainerView(si_content.rootFolder, vimtype, True)
for c in container.view:
if c.name == portgroup_name:
if c.config.distributedVirtualSwitch.name == dvs_name:
obj = c
break
return obj
def get_obj(si_content, vimtype, name):
obj = None
container = si_content.viewManager.CreateContainerView(si_content.rootFolder, vimtype, True)
for c in container.view:
if c.name == name:
obj = c
break
return obj
def wait_for_task(task, actionName='job', hideResult=False):
    while task.info.state in (vim.TaskInfo.State.running, vim.TaskInfo.State.queued):
sleep(2)
if task.info.state == vim.TaskInfo.State.success:
if task.info.result is not None and not hideResult:
out = '%s completed successfully, result: %s' % (actionName, task.info.result)
print out
else:
out = '%s completed successfully.' % actionName
print out
elif task.info.state == vim.TaskInfo.State.error:
out = 'Error - %s did not complete successfully: %s' % (actionName, task.info.error)
raise ValueError(out)
return task.info.result
def update_dv_pg(args, dv_pg):
dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
dv_pg_spec.name = args.dv_pg_name
dv_pg_spec.numPorts = int(args.num_ports)
dv_pg_spec.configVersion = dv_pg.config.configVersion
dv_pg_spec.type = vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
dv_pg_spec.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
dv_pg_spec.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.TrunkVlanSpec()
dv_pg_spec.defaultPortConfig.vlan.vlanId = [vim.NumericRange(start=1, end=4094)]
dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=True)
dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=True)
dv_pg_spec.defaultPortConfig.vlan.inherited = False
dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=False)
dv_pg_spec.defaultPortConfig.securityPolicy.inherited = False
task = dv_pg.ReconfigureDVPortgroup_Task(dv_pg_spec)
wait_for_task(task)
print "Successfully modified DV Port Group %s" %args.dv_pg_name
def main():
args = get_args()
try:
ssl = __import__("ssl")
context = ssl._create_unverified_context()
si = connect.SmartConnect(host=args.host,
user=args.user,
pwd=args.password,
port=args.port,
sslContext=context)
except Exception as e:
si = connect.SmartConnect(host=args.host,
user=args.user,
pwd=args.password,
port=args.port)
except:
print "Unable to connect to %s" % args.host
exit(1)
si_content = si.RetrieveContent()
# check if PG exists else return error
dv_switch = get_obj(si_content, [vim.DistributedVirtualSwitch], args.dvs_name)
if not dv_switch:
print "dv switch %s not pressent" %(args.dvs_name)
exit(1)
dv_pg = get_dvs_pg_obj(si_content, [vim.dvs.DistributedVirtualPortgroup], args.dv_pg_name, args.dvs_name)
if not dv_pg:
print "port-group %s not present in dvs %s" %(args.dv_pg_name, args.dvs_name)
exit(1)
update_dv_pg(args, dv_pg)
connect.Disconnect(si)
if __name__ == "__main__":
exit(main())
|
py | 1a54f81a1de90f2283337d7c0e069aadb5a38da6 | """ Run all tests for openshift-tools repository """
# This script expects a single environment variable to be defined:
# PULL_REQUEST - JSON representation of the pull request to be tested
#
# The data expected in PULL_REQUEST is defined in the github api here:
# https://developer.github.com/v3/pulls/#get-a-single-pull-request
# The same data is provided in the webhook, which is defined here:
# https://developer.github.com/v3/activity/events/types/#pullrequestevent
#
# The script will parse the provided pull request JSON and define a list of environment variables for
# consumption by the validation scripts. Then, each *.py file in ./validators/ (minus specified exclusions)
# will be run. The list of variables defined is below:
# Github stuff
# PRV_PULL_ID ID of the pull request
# PRV_PULL_URL URL of the pull request
#
# Base info
# PRV_BASE_SHA SHA of the target being merged into
# PRV_BASE_REF ref (usually branch name) of the base
# PRV_BASE_LABEL Base label
# PRV_BASE_NAME Full name of the base 'namespace/reponame'
#
# Remote (or "head") info
# PRV_REMOTE_SHA SHA of the branch being merged
# PRV_REMOTE_REF ref (usually branch name) of the remote
# PRV_REMOTE_LABEL Remote label
# PRV_REMOTE_NAME Full name of the remote 'namespace/reponame'
# PRV_CURRENT_SHA The SHA of the merge commit
#
# Other info
# PRV_CHANGED_FILES List of files changed in the pull request
# PRV_COMMITS List of commits in the pull request
#
# TODO:
# - Handle failures better. Just exiting is not a good option, as it will likely leave the PR
# commit status in pending forever. We might be able to better handle this in the webhook now
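# Illustrative sketch (an addition, not part of the original script) of how a
# hypothetical validator could consume the PRV_* variables documented above;
# the helper name and the .py filter are made up for the example.
def _example_read_prv_env():
    import os
    changed = os.environ.get("PRV_CHANGED_FILES", "").split(",")
    base_ref = os.environ.get("PRV_BASE_REF", "")
    return base_ref, [name for name in changed if name.endswith(".py")]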
import os
import json
import subprocess
import sys
import fnmatch
import github_helpers
EXCLUDES = [
"common.py",
".pylintrc"
]
# The path set up in the Dockerfile
WORK_DIR = "/validator/"
# The absolute path to openshift-tools repo
OPENSHIFT_TOOLS_PATH = WORK_DIR + "openshift-tools/"
# The absolute path to the testing validator scripts
VALIDATOR_PATH = OPENSHIFT_TOOLS_PATH + "jenkins/test/validators/"
# Script location of unit tests
UNIT_TEST_SCRIPT = OPENSHIFT_TOOLS_PATH + "jenkins/test/run_unit_tests.sh"
# The absolute path to the ops-rpm repo
OPS_RPM_PATH = WORK_DIR + "ops-rpm/"
# The string to accept in PR comments to initiate testing by a whitelisted user
TEST_STRING = "[test]"
def run_cli_cmd(cmd, exit_on_fail=True, log_cmd=True):
'''Run a command and return its output'''
# Don't log the command if log_cmd=False to avoid exposing secrets in commands
if log_cmd:
print "> " + " ".join(cmd)
proc = subprocess.Popen(cmd, bufsize=-1, stderr=subprocess.PIPE, stdout=subprocess.PIPE,
shell=False)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
# Don't log the command if log_cmd=False to avoid exposing secrets in commands
if log_cmd:
print "Unable to run " + " ".join(cmd) + " due to error: " + stderr
else:
print "Error running system command: " + stderr
if exit_on_fail:
sys.exit(proc.returncode)
else:
return False, stdout
else:
return True, stdout
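# Hedged usage sketch (added for illustration, helper name is ours): with
# exit_on_fail=False the function above returns a (success, stdout) pair
# instead of exiting, so callers unpack it.
def _example_git_head():
    ok, out = run_cli_cmd(['/usr/bin/git', 'rev-parse', 'HEAD'], exit_on_fail=False)
    return out.strip() if ok else None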
def assign_env(pull_request):
'''Assign environment variables based on pull_request json data and other env variables'''
# Github environment variables
# Note that the PR title and body are not set or included in the pull_request json
# This is to avoid issues with unexpected characters being passed through the jenkins plugin,
# to openshift, and through json parsing. There are too many unknowns for it to be predictable.
os.environ["PRV_PULL_ID"] = pull_request["number"]
os.environ["PRV_URL"] = pull_request["url"]
# Base environment variables
base = pull_request["base"]
os.environ["PRV_BASE_SHA"] = base["sha"]
os.environ["PRV_BASE_REF"] = base["ref"]
os.environ["PRV_BASE_LABEL"] = base["label"]
os.environ["PRV_BASE_NAME"] = base["repo"]["full_name"]
# Remote environment variables
head = pull_request["head"]
os.environ["PRV_REMOTE_SHA"] = head["sha"]
os.environ["PRV_REMOTE_REF"] = head["ref"]
os.environ["PRV_REMOTE_LABEL"] = head["label"]
os.environ["PRV_REMOTE_NAME"] = head["repo"]["full_name"]
# Other helpful environment variables
baserepo = base["repo"]["full_name"]
prnum = pull_request["number"]
# List of changed files
changed_files = github_helpers.get_changed_files(baserepo, prnum)
os.environ["PRV_CHANGED_FILES"] = ",".join(changed_files)
# List of commits
commits = github_helpers.get_commits(baserepo, prnum)
os.environ["PRV_COMMITS"] = ",".join(commits)
def merge_changes(pull_request):
""" Merge changes into current repository """
pull_id = pull_request["number"]
run_cli_cmd(['/usr/bin/git', 'fetch', "--tags", "origin", "+refs/head/*:refs/remotes/origin/*",
"+refs/pull/*:refs/remotes/origin/pr/*"])
_, output = run_cli_cmd(['/usr/bin/git', 'rev-parse',
'refs/remotes/origin/pr/'+pull_id+'/merge^{commit}'])
current_rev = output.rstrip()
run_cli_cmd(['/usr/bin/git', 'config', 'core.sparsecheckout'], exit_on_fail=False)
run_cli_cmd(['/usr/bin/git', 'checkout', '-f', current_rev])
os.environ["PRV_CURRENT_SHA"] = current_rev
def run_validators():
""" Run all test validators """
    # First, add the validator directory to the python path to allow
# modules to be loaded by pylint
# We also add the jenkins/test directory so that github_helpers can be properly loaded.
pypath = os.getenv("PYTHONPATH", "")
tools_test_path = OPENSHIFT_TOOLS_PATH + "jenkins/test/"
if pypath != "":
os.environ["PYTHONPATH"] = VALIDATOR_PATH + os.pathsep + tools_test_path + os.pathsep + pypath
else:
os.environ["PYTHONPATH"] = VALIDATOR_PATH + os.pathsep + tools_test_path
failure_occured = False
validators = [validator for validator in os.listdir(VALIDATOR_PATH) if
os.path.isfile(os.path.join(VALIDATOR_PATH, validator))]
for validator in validators:
skip = False
for exclude in EXCLUDES:
if validator == exclude:
skip = True
if skip:
continue
validator_abs = os.path.join(VALIDATOR_PATH, validator)
executer = ""
_, ext = os.path.splitext(validator)
if ext == ".py":
executer = "/usr/bin/python"
elif ext == ".sh":
executer = "/bin/sh"
        # If the ext is not recognized, try to just run the file
print "Executing validator: " + executer + " " + validator_abs
success, output = run_cli_cmd([executer, validator_abs], exit_on_fail=False)
print output
if not success:
print validator + " failed!"
failure_occured = True
if failure_occured:
return False
return True
# Check both the user and org whitelist for the user in this pull request
def pre_test_check(pull_request):
""" Get and check the user whitelist for testing from mounted secret volume """
# Get user from pull request
user = ""
if "user" in pull_request:
user = pull_request["user"]["login"]
else:
print "Pull request data does not include pull request user or issue comment user data"
sys.exit(1)
# Get secret information from env variable
secret_dir = os.getenv("WHITELIST_SECRET_DIR")
if secret_dir == "":
print "ERROR: $WHITELIST_SECRET_DIR undefined. This variable should exist and" + \
" should point to the mounted volume containing the admin whitelist"
sys.exit(2)
# Extract whitelist from secret volume
user_whitelist_file = open(os.path.join("/", secret_dir, "users"), "r")
user_whitelist = user_whitelist_file.read()
user_whitelist_file.close()
if user_whitelist == "" or user not in user_whitelist.split(","):
if not check_org_whitelist(user, secret_dir):
print "WARN: User " + user + " not in admin or org whitelist."
# exit success here so that the jenkins job is marked as a success,
            # since no actual error occurred; the expected outcome has happened
sys.exit(0)
# Get the members of each organization in the organization whitelist for the user. If
# the user is a member of any of these organizations, return True
def check_org_whitelist(user, secret_dir):
""" Determine whether user is a member of any org in the org whitelist """
org_whitelist_file = open(os.path.join("/", secret_dir, "orgs"), "r")
org_whitelist = org_whitelist_file.read()
org_whitelist_file.close()
for org in org_whitelist.split(","):
if github_helpers.org_includes(user, org):
return True
return False
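# Illustrative addition (hypothetical contents): the mounted secret volume read by
# pre_test_check() and check_org_whitelist() is expected to hold two comma-separated files.
EXAMPLE_WHITELIST_LAYOUT = {
    "users": "alice,bob",        # $WHITELIST_SECRET_DIR/users
    "orgs": "openshift,my-org",  # $WHITELIST_SECRET_DIR/orgs
}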
def build_test_tools_rpms():
""" Build and install the openshift-tools rpms """
# We only need to build the openshift-tools rpms:
# openshift-tools/scripts/openshift-tools-scripts.spec
# openshift-tools/ansible/openshift-tools-ansible.spec
# openshift-tools/openshift_tools/python-openshift-tools.spec
# openshift-tools/web/openshift-tools-web.spec
# To build them: cd /validator/ops-rpm; /bin/sh /validator/ops-rpm/ops-rpm test-build-git openshift-tools
# This will unfortunately create a /tmp/titobuild.[A-Za-z0-9]{10} directory for each rpm
print "Building openshift-tools test rpms..."
cwd = os.getcwd()
# Clone the ops-rpm repo
# The ops-rpm repo is private and requires authentication
username, token = github_helpers.get_github_credentials()
opsrpm_url = "https://" + username + ":" + token + "@github.com/openshift/ops-rpm"
clone_opsrpm_cmd = ["/usr/bin/git", "clone", opsrpm_url, OPS_RPM_PATH]
success, output = run_cli_cmd(clone_opsrpm_cmd, False, False)
if not success:
print "Unable to clone the ops-rpm repo, builds cannot continue: " + output
sys.exit(1)
# Change to the ops-rpm directory
cd_cmd = ["cd", OPS_RPM_PATH]
success, output = run_cli_cmd(cd_cmd, False)
if not success:
print "Unable to change to the ops-rpm directory: " + output
sys.exit(1)
# Do the build
build_cmd = ["/bin/sh", os.path.join(OPS_RPM_PATH, "ops-rpm"), "test-build-git", "openshift-tools"]
success, output = run_cli_cmd(build_cmd, False)
if not success:
print "Unable to build test rpms: " + output
sys.exit(1)
else:
print "Successfully built openshift-tools test rpms!"
# Change back to previous directory
cd_cmd = ["cd", cwd]
# We don't really care if this fails, most things we do are from an absolute path
run_cli_cmd(cd_cmd, False)
# The directories ops-rpm creates look like this:
# /tmp/titobuild.CzQ1l4W8LM:
# noarch
# openshift-tools-ansible-0.0.35-1.git.2.74afd1e.el7.centos.src.rpm
# openshift-tools-ansible-git-2.aa60bc1.tar.gz
# /tmp/titobuild.CzQ1l4W8LM/noarch:
# openshift-tools-ansible-filter-plugins-0.0.35-1.git.2.74afd1e.el7.centos.noarch.rpm
# openshift-tools-ansible-inventory-0.0.35-1.git.2.74afd1e.el7.centos.noarch.rpm
# openshift-tools-ansible-inventory-aws-0.0.35-1.git.2.74afd1e.el7.centos.noarch.rpm
# openshift-tools-ansible-inventory-gce-0.0.35-1.git.2.74afd1e.el7.centos.noarch.rpm
# openshift-tools-ansible-zabbix-0.0.35-1.git.2.74afd1e.el7.centos.noarch.rpm
# We want to install all of the *.noarch.rpm files in the tree.
rpms = []
for root, _, files in os.walk('/tmp/'):
# This really assumes that no other *.noarch.rpm files are in /tmp/, we might want to narrow it down
for filename in files:
if fnmatch.fnmatch(filename, "*.noarch.rpm"):
file_abs = os.path.abspath(os.path.join(root, filename))
rpms.append(file_abs)
# If we didn't find any rpms, then there must have been some problems building.
if len(rpms) == 0:
print "No rpms found in /tmp/ after test build of openshift-tools"
sys.exit(1)
# Install the rpms, in one big yum install command
yum_install_cmd = ["yum", "localinstall", " ".join(rpms)]
success, output = run_cli_cmd(yum_install_cmd, False)
return success, output
def run_unit_tests():
""" Run unit tests against installed tools rpms """
# At the time of this writing, no unit tests exist.
    # A unit test script will be run so that unit tests can easily be added later
print "Running unit tests..."
success, output = run_cli_cmd(["/bin/sh", UNIT_TEST_SCRIPT], False)
return success, output
def main():
""" Get the pull request data, merge changes, assign env, and run validators """
# Get the pull request json from the defined env variable
pull_request_json = os.getenv("PULL_REQUEST", "")
if pull_request_json == "":
print 'No JSON data provided in $PULL_REQUEST environment variable'
sys.exit(1)
try:
pull_request = json.loads(pull_request_json, parse_int=str, parse_float=str)
except ValueError as error:
print "Unable to load JSON data from $PULL_REQUEST environment variable:"
print error
sys.exit(1)
# Run several checks to ensure tests should be run for this pull request
pre_test_check(pull_request)
# These variables will be used at the end of testing to submit status updates
remote_sha = pull_request["head"]["sha"]
pull_id = pull_request["number"]
repo = pull_request["base"]["repo"]["full_name"]
# Merge changes from pull request
merge_changes(pull_request)
# Assign env variables for validators
assign_env(pull_request)
# Run validators
validators_success = run_validators()
# Determine and post result of tests
if not validators_success:
github_helpers.submit_pr_comment("Validation tests failed!", pull_id, repo)
sys.exit(1)
print "Validation tests passed!"
# Build test rpms
build_success, output = build_test_tools_rpms()
if not build_success:
print "Rpm test builds failed, output:"
print output
github_helpers.submit_pr_comment("Validation tests passed, rpm test builds failed!", pull_id, repo)
sys.exit(1)
print "Test rpms built!"
# Run unit tests
unittest_success, output = run_unit_tests()
if not unittest_success:
print "Unit tests failed, output:"
print output
github_helpers.submit_pr_comment("Validation tests passed, test rpms built, unit tests failed!", pull_id, repo)
sys.exit(1)
print "Unit tests passed!"
# If we are here, then everything succeeded!
github_helpers.submit_pr_comment("All tests passed!", pull_id, repo)
github_helpers.submit_pr_status_update("success", "All tests passed",
remote_sha, repo)
if __name__ == '__main__':
main()
|
py | 1a54f8954dce23958552fe7ce442dd29f90a83cb | # Copyright 2020 Tier IV, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from ament_index_python.packages import get_package_share_directory
import launch
from launch.actions import DeclareLaunchArgument
from launch.actions import OpaqueFunction
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import ComposableNodeContainer
from launch_ros.descriptions import ComposableNode
import yaml
def launch_setup(context, *args, **kwargs):
with open(LaunchConfiguration("cpu_monitor_config_file").perform(context), "r") as f:
cpu_monitor_config = yaml.safe_load(f)["/**"]["ros__parameters"]
cpu_monitor = ComposableNode(
package="system_monitor",
plugin="CPUMonitor",
name="cpu_monitor",
parameters=[
cpu_monitor_config,
],
)
with open(LaunchConfiguration("hdd_monitor_config_file").perform(context), "r") as f:
hdd_monitor_config = yaml.safe_load(f)["/**"]["ros__parameters"]
hdd_monitor = ComposableNode(
package="system_monitor",
plugin="HDDMonitor",
name="hdd_monitor",
parameters=[
hdd_monitor_config,
],
)
with open(LaunchConfiguration("mem_monitor_config_file").perform(context), "r") as f:
mem_monitor_config = yaml.safe_load(f)["/**"]["ros__parameters"]
mem_monitor = ComposableNode(
package="system_monitor",
plugin="MemMonitor",
name="mem_monitor",
parameters=[
mem_monitor_config,
],
)
with open(LaunchConfiguration("net_monitor_config_file").perform(context), "r") as f:
net_monitor_config = yaml.safe_load(f)["/**"]["ros__parameters"]
net_monitor = ComposableNode(
package="system_monitor",
plugin="NetMonitor",
name="net_monitor",
parameters=[
net_monitor_config,
],
)
with open(LaunchConfiguration("ntp_monitor_config_file").perform(context), "r") as f:
ntp_monitor_config = yaml.safe_load(f)["/**"]["ros__parameters"]
ntp_monitor = ComposableNode(
package="system_monitor",
plugin="NTPMonitor",
name="ntp_monitor",
parameters=[
ntp_monitor_config,
],
)
with open(LaunchConfiguration("process_monitor_config_file").perform(context), "r") as f:
process_monitor_config = yaml.safe_load(f)["/**"]["ros__parameters"]
process_monitor = ComposableNode(
package="system_monitor",
plugin="ProcessMonitor",
name="process_monitor",
parameters=[
process_monitor_config,
],
)
with open(LaunchConfiguration("gpu_monitor_config_file").perform(context), "r") as f:
gpu_monitor_config = yaml.safe_load(f)["/**"]["ros__parameters"]
gpu_monitor = ComposableNode(
package="system_monitor",
plugin="GPUMonitor",
name="gpu_monitor",
parameters=[
gpu_monitor_config,
],
)
# set container to run all required components in the same process
container = ComposableNodeContainer(
name="system_monitor_container",
namespace="system_monitor",
package="rclcpp_components",
executable="component_container_mt",
composable_node_descriptions=[
cpu_monitor,
hdd_monitor,
mem_monitor,
net_monitor,
ntp_monitor,
process_monitor,
gpu_monitor,
],
output="screen",
)
return [container]
def generate_launch_description():
system_monitor_path = os.path.join(
get_package_share_directory("system_launch"), "config", "system_monitor"
)
return launch.LaunchDescription(
[
DeclareLaunchArgument(
"cpu_monitor_config_file",
default_value=os.path.join(system_monitor_path, "cpu_monitor.param.yaml"),
),
DeclareLaunchArgument(
"hdd_monitor_config_file",
default_value=os.path.join(system_monitor_path, "hdd_monitor.param.yaml"),
),
DeclareLaunchArgument(
"mem_monitor_config_file",
default_value=os.path.join(system_monitor_path, "mem_monitor.param.yaml"),
),
DeclareLaunchArgument(
"net_monitor_config_file",
default_value=os.path.join(system_monitor_path, "net_monitor.param.yaml"),
),
DeclareLaunchArgument(
"ntp_monitor_config_file",
default_value=os.path.join(system_monitor_path, "ntp_monitor.param.yaml"),
),
DeclareLaunchArgument(
"process_monitor_config_file",
default_value=os.path.join(system_monitor_path, "process_monitor.param.yaml"),
),
DeclareLaunchArgument(
"gpu_monitor_config_file",
default_value=os.path.join(system_monitor_path, "gpu_monitor.param.yaml"),
),
OpaqueFunction(function=launch_setup),
]
)
|
py | 1a54f8e472dbb17f691cba8c58197ecc4f90c230 | import importlib
from igraph import plot
from circulo import metrics
'''
Senate Community Example
'''
# get the graph from the data-holding module
senate_mod = importlib.import_module("data.congress_voting.run")
G = senate_mod.get_graph()
ground_truth = senate_mod.get_ground_truth(G)
clustering = G.community_leading_eigenvector(weights="weight")
cover = clustering.as_cover()
cover.print_metrics()
#cover.compute_metrics(weights="weight")
#for k,v in cover.metrics.items():
# print("{} {}".format(k,v))
#score = metrics.f1(clustering, ground_truth)
#result = VertexCoverMetric.run_analysis(clustering.as_cover())
#result.report()
#result.to_json()
#metrics.run_analysis(clustering, ground_truth, "metric.txt")
#print(score)
|
py | 1a54f8fc6c10afcb8d8eb48c2b895e402334097e | # Copyright 2019, by the California Institute of Technology.
# ALL RIGHTS RESERVED. United States Government Sponsorship acknowledged.
# Any commercial use must be negotiated with the Office of Technology
# Transfer at the California Institute of Technology.
#
# This software may be subject to U.S. export control laws. By accepting
# this software, the user agrees to comply with all applicable U.S. export
# laws and regulations. User has the responsibility to obtain export
# licenses, or other export authority as may be required before exporting
# such information to foreign countries or providing access to foreign
# persons.
"""
=========
subset.py
=========
Functions related to subsetting a NetCDF file.
"""
import datetime
import functools
import json
import operator
import os
import geopandas as gpd
import importlib_metadata
import julian
import numpy as np
import xarray as xr
import netCDF4 as nc
import pandas as pd
from shapely.geometry import Point
from shapely.ops import transform
from podaac.subsetter import xarray_enhancements as xre
GROUP_DELIM = '__'
SERVICE_NAME = 'l2ss-py'
def apply_scale_offset(scale, offset, value):
"""Apply scale and offset to the given value"""
return (value + offset) / scale
def remove_scale_offset(value, scale, offset):
"""Remove scale and offset from the given value"""
return (value * scale) - offset
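# Added illustration (the helper name and sample values are ours):
# apply_scale_offset and remove_scale_offset invert each other, so a value
# packed with one can be recovered with the other.
def _scale_offset_roundtrip_example(value=20.0, scale=0.01, offset=5.0):
    packed = apply_scale_offset(scale, offset, value)
    return remove_scale_offset(packed, scale, offset)  # returns `value` unchanged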
def convert_bound(bound, coord_max, coord_var):
"""
    This function will return a converted bound which matches the
range of the given input file.
Parameters
----------
bound : np.array
1-dimensional 2-element numpy array which represent the lower
and upper bounding box on this coordinate, respectively.
coord_max : integer
The max value which is possible given this coordinate. For
example, the max for longitude is 360.
coord_var : xarray.DataArray
The xarray variable for some coordinate.
Returns
-------
np.array
        1-dimensional 2-element numpy array which represents the lower
and upper bounding box on this coordinate and has been converted
based on the valid bounds coordinate range of the dataset.
Notes
-----
Assumption that 0 is always on the prime meridian/equator.
"""
scale = coord_var.attrs.get('scale_factor', 1.0)
offset = coord_var.attrs.get('add_offset', 0.0)
valid_min = coord_var.attrs.get('valid_min', None)
if valid_min is None or valid_min > 0:
# If coord var doesn't contain valid min, attempt to find
# manually. Note: Given the perfect storm, this could still fail
# to find the actual bounds.
# Filter out _FillValue from data before calculating min and max
fill_value = coord_var.attrs.get('_FillValue', None)
var_values = coord_var.values
if fill_value:
var_values = np.where(var_values != fill_value, var_values, np.nan)
var_min = np.nanmin(var_values)
var_max = np.nanmax(var_values)
if 0 <= var_min <= var_max <= (coord_max / scale):
valid_min = 0
# If the file coords are 0 --> max
if valid_min == 0:
bound = (bound + coord_max) % coord_max
# If the right/top bound is 0, set to max.
if bound[1] == 0:
bound[1] = coord_max
# If edges are the same, assume it wraps and return all
if bound[0] == bound[1]:
bound = np.array([0, coord_max])
# If the file longitude is -coord_max/2 --> coord_max/2
if valid_min != 0:
# If edges are the same, assume it wraps and return all
if bound[0] == bound[1]:
bound = np.array([-(coord_max / 2), coord_max / 2])
# Calculate scale and offset so the bounds match the coord data
return apply_scale_offset(scale, offset, bound)
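# Added illustration (the helper name and sample values are ours): a requested
# [-10, 10] degree longitude bound wraps to [350, 10] on a 0..360 longitude axis.
def _convert_bound_example():
    lon = xr.DataArray(np.array([0.0, 120.0, 359.0]), attrs={'valid_min': 0})
    return convert_bound(np.array([-10.0, 10.0]), 360, lon)  # -> array([350., 10.])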
def convert_bbox(bbox, dataset, lat_var_name, lon_var_name):
"""
This function will return a converted bbox which matches the range
of the given input file. This will convert both the latitude and
longitude range. For example, an input dataset can have a valid
longitude range of -180 --> 180 or of 0 --> 360.
Parameters
----------
bbox : np.array
The bounding box
dataset : xarray.Dataset
The dataset which is being subset.
lat_var_name : str
Name of the lat variable in the given dataset
lon_var_name : str
Name of the lon variable in the given dataset
Returns
-------
bbox : np.array
The new bbox which matches latitude and longitude ranges of the
input file.
Notes
-----
Assumption that the provided bounding box is always between
-180 --> 180 for longitude and -90, 90 for latitude.
"""
return np.array([convert_bound(bbox[0], 360, dataset[lon_var_name]),
convert_bound(bbox[1], 180, dataset[lat_var_name])])
def set_json_history(dataset, cut, file_to_subset, bbox=None, shapefile=None,
origin_source=None):
"""
Set the 'json_history' metadata header of the granule to reflect the
current version of the subsetter, as well as the parameters used
    to call the subsetter. This will append a JSON entry to the 'history_json'
    metadata attribute describing this subset operation.
Parameters
----------
dataset : xarray.Dataset
The dataset to change the header of
bbox : np.ndarray
The requested bounding box
file_to_subset : string
The filepath of the file which was used to subset
cut : boolean
True to cut the scanline
shapefile : str
Name of the shapefile to include in the version history
"""
params = f'cut={cut}'
if bbox is not None:
params = f'bbox={bbox.tolist()} {params}'
elif shapefile is not None:
params = f'shapefile={shapefile} {params}'
history_json = dataset.attrs.get('history_json', [])
if history_json:
history_json = json.loads(history_json)
if origin_source:
derived_from = origin_source
else:
derived_from = os.path.basename(file_to_subset)
new_history_json = {
"date_time": datetime.datetime.now(tz=datetime.timezone.utc).isoformat(),
"derived_from": derived_from,
"program": SERVICE_NAME,
"version": importlib_metadata.distribution(SERVICE_NAME).version,
"parameters": params,
"program_ref": "https://cmr.earthdata.nasa.gov:443/search/concepts/S1962070864-POCLOUD",
"$schema": "https://harmony.earthdata.nasa.gov/schemas/history/0.1.0/history-v0.1.0.json"
}
history_json.append(new_history_json)
dataset.attrs['history_json'] = json.dumps(history_json)
def set_version_history(dataset, cut, bbox=None, shapefile=None):
"""
Set the 'history' metadata header of the granule to reflect the
current version of the subsetter, as well as the parameters used
to call the subsetter. This will append a line to the history of
the following format:
TIMESTAMP podaac.subsetter VERSION (PARAMS)
Parameters
----------
dataset : xarray.Dataset
The dataset to change the header of
bbox : np.ndarray
The requested bounding box
cut : boolean
True to cut the scanline
shapefile : str
Name of the shapefile to include in the version history
"""
version = importlib_metadata.distribution(SERVICE_NAME).version
history = dataset.attrs.get('history', "")
timestamp = datetime.datetime.utcnow()
params = f'cut={cut}'
if bbox is not None:
params = f'bbox={bbox.tolist()} {params}'
elif shapefile is not None:
params = f'shapefile={shapefile} {params}'
history += f"\n{timestamp} {SERVICE_NAME} v{version} ({params})"
dataset.attrs['history'] = history.strip()
def calculate_chunks(dataset):
"""
For the given dataset, calculate if the size on any dimension is
worth chunking. Any dimension larger than 4000 will be chunked. This
is done to ensure that the variable can fit in memory.
Parameters
----------
dataset : xarray.Dataset
The dataset to calculate chunks for.
Returns
-------
dict
The chunk dictionary, where the key is the dimension and the
value is 4000.
"""
chunk_dict = {dim: 4000 for dim in dataset.dims
if dataset.dims[dim] > 4000
and len(dataset.dims) > 1}
return chunk_dict
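# Added illustration (hypothetical dimension sizes): only dimensions larger than
# 4000 are chunked, so a 9000 x 10 grid yields {'ny': 4000}.
def _calculate_chunks_example():
    demo = xr.Dataset({'var': (('ny', 'nx'), np.zeros((9000, 10)))})
    return calculate_chunks(demo)  # -> {'ny': 4000}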
def find_matching_coords(dataset, match_list):
"""
As a backup for finding a coordinate var, look at the 'coordinates'
metadata attribute of all data vars in the granule. Return any
coordinate vars that have name matches with the provided
'match_list'
Parameters
----------
dataset : xr.Dataset
Dataset to search data variable coordinate metadata attribute
match_list : list (str)
List of possible matches to search for. For example,
['lat', 'latitude'] would search for variables in the
'coordinates' metadata attribute containing either 'lat'
or 'latitude'
Returns
-------
list (str)
List of matching coordinate variables names
"""
coord_attrs = [
var.attrs['coordinates'] for var_name, var in dataset.data_vars.items()
if 'coordinates' in var.attrs
]
coord_attrs = list(set(coord_attrs))
match_coord_vars = []
for coord_attr in coord_attrs:
coords = coord_attr.split(' ')
match_vars = [
coord for coord in coords
if any(coord_cand in coord for coord_cand in match_list)
]
if match_vars and match_vars[0] in dataset:
# Check if the var actually exists in the dataset
match_coord_vars.append(match_vars[0])
return match_coord_vars
def get_coord_variable_names(dataset):
"""
Given a dataset, determine the coordinate variable from a list
of options
Parameters
----------
dataset: xr.Dataset
The dataset to find the coordinate variables for
Returns
-------
tuple, str
Tuple of strings, where the first element is the lat coordinate
name and the second element is the lon coordinate name
"""
possible_lat_coord_names = ['lat', 'latitude', 'y']
possible_lon_coord_names = ['lon', 'longitude', 'x']
def var_is_coord(var_name, possible_coord_names):
var_name = var_name.strip(GROUP_DELIM).split(GROUP_DELIM)[-1]
return var_name in possible_coord_names
lat_coord_names = list(filter(
lambda var_name: var_is_coord(var_name, possible_lat_coord_names), dataset.variables))
lon_coord_names = list(filter(
lambda var_name: var_is_coord(var_name, possible_lon_coord_names), dataset.variables))
if len(lat_coord_names) < 1 or len(lon_coord_names) < 1:
lat_coord_names = find_matching_coords(dataset, possible_lat_coord_names)
lon_coord_names = find_matching_coords(dataset, possible_lon_coord_names)
if len(lat_coord_names) < 1 or len(lon_coord_names) < 1:
raise ValueError('Could not determine coordinate variables')
return lat_coord_names, lon_coord_names
def is_360(lon_var, scale, offset):
"""
Determine if given dataset is a '360' dataset or not.
Parameters
----------
lon_var : xr.DataArray
The lon variable from the xarray Dataset
scale : float
Used to remove scale and offset for easier calculation
offset : float
Used to remove scale and offset for easier calculation
Returns
-------
bool
True if dataset is 360, False if not. Defaults to False.
"""
valid_min = lon_var.attrs.get('valid_min', None)
if valid_min is None or valid_min > 0:
var_min = remove_scale_offset(np.amin(lon_var.values), scale, offset)
var_max = remove_scale_offset(np.amax(lon_var.values), scale, offset)
if var_min < 0:
return False
if var_max > 180:
return True
if valid_min == 0:
return True
if valid_min < 0:
return False
return False
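# Added illustration (sample values are ours): a longitude coordinate declaring
# valid_min=0 is treated as a 0..360 ("360") dataset.
def _is_360_example():
    lon = xr.DataArray(np.array([0.0, 180.0, 359.9]), attrs={'valid_min': 0})
    return is_360(lon, 1.0, 0.0)  # -> True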
def get_spatial_bounds(dataset, lat_var_names, lon_var_names):
"""
Get the spatial bounds for this dataset. These values are masked
and scaled.
Parameters
----------
dataset : xr.Dataset
Dataset to retrieve spatial bounds for
lat_var_name : str
Name of the lat variable
lon_var_name : str
Name of the lon variable
Returns
-------
np.array
[[lon min, lon max], [lat min, lat max]]
"""
lat_var_name = lat_var_names[0] if len(lat_var_names) == 1 else [
lat_name for lat_name in lat_var_names if lat_name in dataset.data_vars.keys()
][0]
lon_var_name = lon_var_names[0] if len(lon_var_names) == 1 else [
lon_name for lon_name in lon_var_names if lon_name in dataset.data_vars.keys()
][0]
# Get scale from coordinate variable metadata attributes
lat_scale = dataset[lat_var_name].attrs.get('scale_factor', 1.0)
lon_scale = dataset[lon_var_name].attrs.get('scale_factor', 1.0)
lat_offset = dataset[lat_var_name].attrs.get('add_offset', 0.0)
lon_offset = dataset[lon_var_name].attrs.get('add_offset', 0.0)
lon_valid_min = dataset[lon_var_name].attrs.get('valid_min', None)
lat_fill_value = dataset[lat_var_name].attrs.get('_FillValue', None)
lon_fill_value = dataset[lon_var_name].attrs.get('_FillValue', None)
# Apply mask and scale to min/max coordinate variables to get
# spatial bounds
# Remove fill value. Might cause errors when getting min and max
lats = dataset[lat_var_name].values.flatten()
lons = dataset[lon_var_name].values.flatten()
if lat_fill_value:
lats = list(filter(lambda a: not a == lat_fill_value, lats))
if lon_fill_value:
lons = list(filter(lambda a: not a == lon_fill_value, lons))
if len(lats) == 0 or len(lons) == 0:
return None
min_lat = remove_scale_offset(np.nanmin(lats), lat_scale, lat_offset)
max_lat = remove_scale_offset(np.nanmax(lats), lat_scale, lat_offset)
min_lon = remove_scale_offset(np.nanmin(lons), lon_scale, lon_offset)
max_lon = remove_scale_offset(np.nanmax(lons), lon_scale, lon_offset)
min_lat = round(min_lat, 1)
max_lat = round(max_lat, 1)
min_lon = round(min_lon, 1)
max_lon = round(max_lon, 1)
# Convert longitude to [-180,180] format
if lon_valid_min == 0 or 0 <= min_lon <= max_lon <= 360:
if min_lon > 180:
min_lon -= 360
if max_lon > 180:
max_lon -= 360
if min_lon == max_lon:
min_lon = -180
max_lon = 180
return np.array([[min_lon, max_lon], [min_lat, max_lat]])
def get_time_variable_name(dataset, lat_var):
"""
Try to determine the name of the 'time' variable. This is done as
follows:
- The variable name contains 'time'
- The variable dimensions match the dimensions of the given lat var
Parameters
----------
dataset : xr.Dataset:
xarray dataset to find time variable from
lat_var : xr.Variable
Lat variable for this dataset
Returns
-------
str
The name of the variable
Raises
------
ValueError
If the time variable could not be determined
"""
time_vars = find_matching_coords(dataset, ['time'])
if time_vars:
# There should only be one time var match (this is called once
# per lat var)
return time_vars[0]
for var_name in list(dataset.dims.keys()):
if "time" in var_name and dataset[var_name].squeeze().dims == lat_var.squeeze().dims:
return var_name
for var_name in list(dataset.data_vars.keys()):
if "time" in var_name and dataset[var_name].squeeze().dims == lat_var.squeeze().dims:
return var_name
raise ValueError('Unable to determine time variable')
def get_time_epoch_var(dataset, time_var_name):
"""
Get the name of the epoch time var. This is only needed in the case
where there is a single time var (of size 1) that contains the time
epoch used by the actual time var.
Parameters
----------
dataset : xr.Dataset
Dataset that contains time var
time_var_name : str
The name of the actual time var (with matching dims to the
coord vars)
Returns
-------
str
The name of the epoch time variable
"""
time_var = dataset[time_var_name]
if 'comment' in time_var.attrs:
epoch_var_name = time_var.attrs['comment'].split('plus')[0].strip()
elif 'time' in dataset.variables.keys() and time_var_name != 'time':
epoch_var_name = 'time'
else:
raise ValueError('Unable to determine time variables')
return epoch_var_name
def is_time_mjd(dataset, time_var_name):
"""
Check to see if the time format is a time delta from a modified julian date.
Parameters
----------
dataset : xr.Dataset
Dataset that contains time var
time_var_name : str
The name of the actual time var (with matching dims to the
coord vars)
Returns
-------
boolean
is time delta format in modified julian date
"""
time_var = dataset[time_var_name]
if 'comment' in time_var.attrs:
if 'Modified Julian Day' in time_var.attrs['comment']:
return True
return False
def translate_timestamp(str_timestamp):
"""
Translate timestamp to datetime object
Parameters
----------
str_timestamp : str
Timestamp string. ISO or RFC
Returns
-------
datetime
Constructed Datetime object
"""
allowed_ts_formats = [
'%Y-%m-%dT%H:%M:%SZ',
'%Y-%m-%dT%H:%M:%S%Z',
'%Y-%m-%dT%H:%M:%S.%fZ',
'%Y-%m-%dT%H:%M:%S.%f%Z'
]
for timestamp_format in allowed_ts_formats:
try:
return datetime.datetime.strptime(str_timestamp, timestamp_format)
except ValueError:
pass
return datetime.datetime.fromisoformat(str_timestamp)
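# Added illustration (timestamps are arbitrary): both 'Z'-suffixed and bare ISO
# forms are accepted, falling back to datetime.fromisoformat when needed.
def _translate_timestamp_example():
    return (translate_timestamp('2020-01-01T00:00:00Z'),
            translate_timestamp('2020-01-01T12:34:56'))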
def datetime_from_mjd(dataset, time_var_name):
"""
Translate the modified julian date from the long name in the time attribute.
Parameters
----------
dataset : xr.Dataset
Dataset that contains time var
time_var_name : str
The name of the actual time var (with matching dims to the
coord vars)
Returns
-------
datetime
the datetime of the modified julian date
"""
time_var = dataset[time_var_name]
if 'long_name' in time_var.attrs:
mdj_string = time_var.attrs['long_name']
mjd = mdj_string[mdj_string.find("(")+1:mdj_string.find(")")].split("= ")[1]
try:
mjd_float = float(mjd)
except ValueError:
return None
mjd_datetime = julian.from_jd(mjd_float, fmt='mjd')
return mjd_datetime
return None
def build_temporal_cond(min_time, max_time, dataset, time_var_name):
"""
Build the temporal condition used in the xarray 'where' call which
drops data not in the given bounds. If the data in the time var is
of type 'datetime', assume this is a normal case where the time var
uses the epoch from the 'units' metadata attribute to get epoch. If
the data in the time var is of type 'timedelta', the epoch var is
needed to calculate the datetime.
Parameters
----------
min_time : str
ISO timestamp representing the lower temporal bound
max_time : str
ISO timestamp representing the upper temporal bound
dataset : xr.Dataset
Dataset to build the condition off of
time_var_name : str
Name of the time variable
Returns
-------
np.array or boolean
If temporally subsetted, returns a boolean ND-array the shape
of which matches the dimensions of the coordinate vars. 'True'
is essentially a noop.
"""
def build_cond(str_timestamp, compare):
timestamp = translate_timestamp(str_timestamp)
if np.issubdtype(dataset[time_var_name].dtype, np.dtype(np.datetime64)):
timestamp = pd.to_datetime(timestamp)
if np.issubdtype(dataset[time_var_name].dtype, np.dtype(np.timedelta64)):
if is_time_mjd(dataset, time_var_name):
                # time delta is based on a modified julian date epoch
mjd_datetime = datetime_from_mjd(dataset, time_var_name)
if mjd_datetime is None:
raise ValueError('Unable to get datetime from dataset to calculate time delta')
# timedelta between timestamp and mjd
timestamp = np.datetime64(timestamp) - np.datetime64(mjd_datetime)
else:
epoch_time_var_name = get_time_epoch_var(dataset, time_var_name)
epoch_datetime = dataset[epoch_time_var_name].values[0]
timestamp = np.datetime64(timestamp) - epoch_datetime
return compare(dataset[time_var_name], timestamp)
temporal_conds = []
if min_time:
comparison_op = operator.ge
temporal_conds.append(build_cond(min_time, comparison_op))
if max_time:
comparison_op = operator.le
temporal_conds.append(build_cond(max_time, comparison_op))
temporal_cond = True
if min_time or max_time:
temporal_cond = functools.reduce(lambda cond_a, cond_b: cond_a & cond_b, temporal_conds)
return temporal_cond
def subset_with_bbox(dataset, lat_var_names, lon_var_names, time_var_names, bbox=None, cut=True,
min_time=None, max_time=None):
"""
Subset an xarray Dataset using a spatial bounding box.
Parameters
----------
dataset : xr.Dataset
Dataset to subset
lat_var_names : list
Name of the latitude variables in the given dataset
lon_var_names : list
Name of the longitude variables in the given dataset
time_var_names : list
Name of the time variables in the given dataset
bbox : np.array
Spatial bounding box to subset Dataset with.
cut : bool
True if scanline should be cut.
min_time : str
ISO timestamp of min temporal bound
max_time : str
ISO timestamp of max temporal bound
Returns
-------
np.array
Spatial bounds of Dataset after subset operation
"""
lon_bounds, lat_bounds = convert_bbox(bbox, dataset, lat_var_names[0], lon_var_names[0])
# condition should be 'or' instead of 'and' when bbox lon_min > lon_max
oper = operator.and_
if lon_bounds[0] > lon_bounds[1]:
oper = operator.or_
datasets = []
for lat_var_name, lon_var_name, time_var_name in zip(
lat_var_names, lon_var_names, time_var_names
):
if GROUP_DELIM in lat_var_name:
var_prefix = GROUP_DELIM.join(lat_var_name.strip(GROUP_DELIM).split(GROUP_DELIM)[:-1])
group_vars = [
var for var in dataset.data_vars.keys()
if var.startswith(f'{GROUP_DELIM}{var_prefix}')
]
else:
group_vars = list(dataset.keys())
group_dataset = dataset[group_vars]
# Calculate temporal conditions
temporal_cond = build_temporal_cond(min_time, max_time, group_dataset, time_var_name)
group_dataset = xre.where(
group_dataset,
oper(
(group_dataset[lon_var_name] >= lon_bounds[0]),
(group_dataset[lon_var_name] <= lon_bounds[1])
) &
(group_dataset[lat_var_name] >= lat_bounds[0]) &
(group_dataset[lat_var_name] <= lat_bounds[1]) &
temporal_cond,
cut
)
datasets.append(group_dataset)
return datasets
def subset_with_shapefile(dataset, lat_var_name, lon_var_name, shapefile, cut):
"""
Subset an xarray Dataset using a shapefile
Parameters
----------
dataset : xr.Dataset
Dataset to subset
lat_var_name : str
Name of the latitude variable in the given dataset
lon_var_name : str
Name of the longitude variable in the given dataset
shapefile : np.array
Absolute path to the shapefile used to subset the given dataset
cut : bool
True if scanline should be cut.
Returns
-------
np.array
Spatial bounds of Dataset after shapefile subset operation
"""
shapefile_df = gpd.read_file(shapefile)
lat_scale = dataset[lat_var_name].attrs.get('scale_factor', 1.0)
lon_scale = dataset[lon_var_name].attrs.get('scale_factor', 1.0)
lat_offset = dataset[lat_var_name].attrs.get('add_offset', 0.0)
lon_offset = dataset[lon_var_name].attrs.get('add_offset', 0.0)
# If data is '360', convert shapefile to '360' as well. There is an
# assumption that the shapefile is -180,180.
if is_360(dataset[lon_var_name], lon_scale, lon_offset):
# Transform
def convert_180_to_360(lon, lat):
return tuple(map(lambda value: value + 360 if value < 0 else value, lon)), lat
geometries = [transform(convert_180_to_360, geometry) for geometry in
shapefile_df.geometry]
shapefile_df.geometry = geometries
# Mask and scale shapefile
def scale(lon, lat):
lon = tuple(map(functools.partial(apply_scale_offset, lon_scale, lon_offset), lon))
lat = tuple(map(functools.partial(apply_scale_offset, lat_scale, lat_offset), lat))
return lon, lat
geometries = [transform(scale, geometry) for geometry in shapefile_df.geometry]
shapefile_df.geometry = geometries
def in_shape(lon, lat):
point = Point(lon, lat)
point_in_shapefile = shapefile_df.contains(point)
return point_in_shapefile.array[0]
in_shape_vec = np.vectorize(in_shape)
boolean_mask = xr.apply_ufunc(in_shape_vec, dataset[lon_var_name], dataset[lat_var_name])
return xre.where(dataset, boolean_mask, cut)
def transform_grouped_dataset(nc_dataset, file_to_subset):
"""
Transform a netCDF4 Dataset that has groups to an xarray compatible
dataset. xarray does not work with groups, so this transformation
will flatten the variables in the dataset and use the group path as
the new variable name. For example, data_01 > km > sst would become
'data_01__km__sst', where GROUP_DELIM is __.
This same pattern is applied to dimensions, which are located under
the appropriate group. They are renamed and placed in the root
group.
Parameters
----------
nc_dataset : nc.Dataset
netCDF4 Dataset that contains groups
Returns
-------
nc.Dataset
netCDF4 Dataset that does not contain groups and that has been
flattened.
"""
# Close the existing read-only dataset and reopen in append mode
nc_dataset.close()
nc_dataset = nc.Dataset(file_to_subset, 'r+')
dimensions = {}
def walk(group_node, path):
for key, item in group_node.items():
group_path = f'{path}{GROUP_DELIM}{key}'
# If there are variables in this group, copy to root group
# and then delete from current group
if item.variables:
# Copy variables to root group with new name
for var_name, var in item.variables.items():
var_group_name = f'{group_path}{GROUP_DELIM}{var_name}'
nc_dataset.variables[var_group_name] = var
# Delete variables
var_names = list(item.variables.keys())
for var_name in var_names:
del item.variables[var_name]
if item.dimensions:
dims = list(item.dimensions.keys())
for dim_name in dims:
new_dim_name = f'{group_path.replace("/", GROUP_DELIM)}{GROUP_DELIM}{dim_name}'
item.dimensions[new_dim_name] = item.dimensions[dim_name]
dimensions[new_dim_name] = item.dimensions[dim_name]
item.renameDimension(dim_name, new_dim_name)
# If there are subgroups in this group, call this function
# again on that group.
if item.groups:
walk(item.groups, group_path)
# Delete non-root groups
group_names = list(group_node.keys())
for group_name in group_names:
del group_node[group_name]
for var_name in list(nc_dataset.variables.keys()):
new_var_name = f'{GROUP_DELIM}{var_name}'
nc_dataset.variables[new_var_name] = nc_dataset.variables[var_name]
del nc_dataset.variables[var_name]
walk(nc_dataset.groups, '')
# Update the dimensions of the dataset in the root group
nc_dataset.dimensions.update(dimensions)
return nc_dataset
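# Added illustration of the flattened naming scheme described in the docstring
# above; the group path and variable name here are hypothetical.
def _flattened_name_example(group_path='data_01/km', var_name='sst'):
    return f'{GROUP_DELIM}{group_path.replace("/", GROUP_DELIM)}{GROUP_DELIM}{var_name}'
    # -> '__data_01__km__sst'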
def recombine_grouped_datasets(datasets, output_file):
"""
Given a list of xarray datasets, combine those datasets into a
single netCDF4 Dataset and write to the disk. Each dataset has been
transformed using its group path and needs to be un-transformed and
placed in the appropriate group.
Parameters
----------
datasets : list (xr.Dataset)
List of xarray datasets to be combined
output_file : str
Name of the output file to write the resulting NetCDF file to.
"""
def get_nested_group(dataset, group_path):
nested_group = dataset
for group in group_path.strip(GROUP_DELIM).split(GROUP_DELIM)[:-1]:
nested_group = nested_group.groups[group]
return nested_group
base_dataset = nc.Dataset(output_file, mode='w')
for dataset in datasets:
group_lst = []
for var_name in dataset.variables.keys(): # need logic if there is data in the top level not in a group
group_lst.append('/'.join(var_name.split(GROUP_DELIM)[:-1]))
group_lst = ['/' if group == '' else group for group in group_lst]
groups = set(group_lst)
for group in groups:
base_dataset.createGroup(group)
for dim_name in list(dataset.dims.keys()):
new_dim_name = dim_name.split(GROUP_DELIM)[-1]
dim_group = get_nested_group(base_dataset, dim_name)
dim_group.createDimension(new_dim_name, dataset.dims[dim_name])
# Rename variables
for var_name in list(dataset.variables.keys()):
new_var_name = var_name.split(GROUP_DELIM)[-1]
var_group = get_nested_group(base_dataset, var_name)
var_dims = list(var_group.dimensions.keys())
variable = dataset.variables[var_name]
if not var_dims:
var_group_parent = var_group
# This group doesn't contain dimensions. Look at parent group to find dimensions.
while not var_dims:
var_group_parent = var_group_parent.parent
var_dims = list(var_group_parent.dimensions.keys())
if np.issubdtype(
dataset.variables[var_name].dtype, np.dtype(np.datetime64)
) or np.issubdtype(
dataset.variables[var_name].dtype, np.dtype(np.timedelta64)
):
# Use xarray datetime encoder
cf_dt_coder = xr.coding.times.CFDatetimeCoder()
encoded_var = cf_dt_coder.encode(dataset.variables[var_name])
variable = encoded_var
var_group.createVariable(new_var_name, variable.dtype, var_dims)
# Copy attributes
var_attrs = variable.attrs
var_group.variables[new_var_name].setncatts(var_attrs)
# Copy data
var_group.variables[new_var_name].set_auto_maskandscale(False)
var_group.variables[new_var_name][:] = variable.data
# Remove group vars from base dataset
for var_name in list(base_dataset.variables.keys()):
if GROUP_DELIM in var_name:
del base_dataset.variables[var_name]
# Remove group dims from base dataset
for dim_name in list(base_dataset.dimensions.keys()):
if GROUP_DELIM in dim_name:
del base_dataset.dimensions[dim_name]
# Copy global attributes
base_dataset.setncatts(datasets[0].attrs)
# Write and close
base_dataset.close()
def subset(file_to_subset, bbox, output_file, variables=None, # pylint: disable=too-many-branches
cut=True, shapefile=None, min_time=None, max_time=None, origin_source=None):
"""
Subset a given NetCDF file given a bounding box
Parameters
----------
file_to_subset : string
The location of the file which will be subset
output_file : string
The file path for the output of the subsetting operation.
bbox : np.ndarray
The chosen bounding box. This is a tuple of tuples formatted
as such: ((west, east), (south, north)). The assumption is that
the valid range is ((-180, 180), (-90, 90)). This will be
transformed as appropriate if the actual longitude range is
0-360.
shapefile : str
Name of local shapefile used to subset given file.
variables : list, str, optional
List of variables to include in the resulting data file.
NOTE: This will remove ALL variables which are not included
in this list, including coordinate variables!
cut : boolean
True if the scanline should be cut, False if the scanline should
not be cut. Defaults to True.
min_time : str
ISO timestamp representing the lower bound of the temporal
subset to be performed. If this value is not provided, the
granule will not be subset temporally on the lower bound.
max_time : str
ISO timestamp representing the upper bound of the temporal
subset to be performed. If this value is not provided, the
granule will not be subset temporally on the upper bound.
"""
# Open dataset with netCDF4 first, so we can get group info
nc_dataset = nc.Dataset(file_to_subset, mode='r')
has_groups = bool(nc_dataset.groups)
# If dataset has groups, transform to work with xarray
if has_groups:
nc_dataset = transform_grouped_dataset(nc_dataset, file_to_subset)
args = {
'decode_coords': False,
'mask_and_scale': False,
'decode_times': False
}
if min_time or max_time:
args['decode_times'] = True
with xr.open_dataset(
xr.backends.NetCDF4DataStore(nc_dataset),
**args
) as dataset:
lat_var_names, lon_var_names = get_coord_variable_names(dataset)
time_var_names = [
get_time_variable_name(
dataset, dataset[lat_var_name]
) for lat_var_name in lat_var_names
]
chunks_dict = calculate_chunks(dataset)
if chunks_dict:
dataset = dataset.chunk(chunks_dict)
if variables:
# Drop variables that aren't explicitly requested, except lat_var_name and
# lon_var_name which are needed for subsetting
variables = [variable.upper() for variable in variables]
vars_to_drop = [
var_name for var_name, var in dataset.data_vars.items()
if var_name.upper() not in variables
and var_name not in lat_var_names
and var_name not in lon_var_names
and var_name not in time_var_names
]
dataset = dataset.drop_vars(vars_to_drop)
if bbox is not None:
datasets = subset_with_bbox(
dataset=dataset,
lat_var_names=lat_var_names,
lon_var_names=lon_var_names,
time_var_names=time_var_names,
bbox=bbox,
cut=cut,
min_time=min_time,
max_time=max_time
)
elif shapefile:
datasets = [
subset_with_shapefile(dataset, lat_var_names[0], lon_var_names[0], shapefile, cut)
]
else:
raise ValueError('Either bbox or shapefile must be provided')
spatial_bounds = []
for dataset in datasets:
set_version_history(dataset, cut, bbox, shapefile)
set_json_history(dataset, cut, file_to_subset, bbox, shapefile, origin_source)
if has_groups:
spatial_bounds.append(get_spatial_bounds(
dataset=dataset,
lat_var_names=lat_var_names,
lon_var_names=lon_var_names
))
else:
encoding = {}
compression = dict(zlib=True, complevel=5, _FillValue=None)
if (min_time or max_time) and any(dataset.dims.values()):
encoding = {
var_name: {
'units': nc_dataset.variables[var_name].__dict__['units'],
'zlib': True,
"complevel": 5,
"_FillValue": None
} for var_name in time_var_names
if 'units' in nc_dataset.variables[var_name].__dict__
}
for var in dataset.data_vars:
if var not in encoding:
encoding[var] = compression
dataset.load().to_netcdf(output_file, 'w', encoding=encoding)
if has_groups:
recombine_grouped_datasets(datasets, output_file)
return np.array([[
min(lon[0][0][0] for lon in zip(spatial_bounds)),
max(lon[0][0][1] for lon in zip(spatial_bounds))
], [
min(lat[0][1][0] for lat in zip(spatial_bounds)),
max(lat[0][1][1] for lat in zip(spatial_bounds))
]])
return get_spatial_bounds(
dataset=dataset,
lat_var_names=lat_var_names,
lon_var_names=lon_var_names
)
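# Added usage sketch (helper name is ours); the file names and bounding box
# below are hypothetical.
def _subset_usage_example():
    bbox = np.array([[-20.0, 20.0], [-30.0, 30.0]])  # ((west, east), (south, north))
    return subset('input_granule.nc', bbox, 'subsetted_granule.nc', cut=True)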
|
py | 1a54fac380013b0dc616a2c6ef4fa2ce50ddf4a8 | import time
import logging
from datetime import datetime
import pytest
from tests.common.fixtures.ptfhost_utils import change_mac_addresses # lgtm[py/unused-import]
from tests.common.fixtures.ptfhost_utils import remove_ip_addresses # lgtm[py/unused-import]
from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # lgtm[py/unused-import]
from tests.common.fixtures.ptfhost_utils import set_ptf_port_mapping_mode # lgtm[py/unused-import]
from tests.common.fixtures.ptfhost_utils import ptf_test_port_map
from tests.ptf_runner import ptf_runner
from tests.common.dualtor.mux_simulator_control import mux_server_url
from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_rand_selected_tor
from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_random_side
from tests.common.utilities import is_ipv4_address
from tests.common.fixtures.fib_utils import fib_info_files_per_function
from tests.common.utilities import wait
logger = logging.getLogger(__name__)
pytestmark = [
pytest.mark.topology('any')
]
# Usually src-mac, dst-mac, vlan-id are optional hash keys. Not all platforms support these optional hash keys, so they are not enabled by default.
# The 'ingress-port' key is not used in hash by design. We are doing negative test for 'ingress-port'.
# When 'ingress-port' is included in HASH_KEYS, the PTF test will try to inject same packet to different ingress ports
# and expect that they are forwarded from same egress port.
# HASH_KEYS = ['src-ip', 'dst-ip', 'src-port', 'dst-port', 'ingress-port', 'src-mac', 'dst-mac', 'ip-proto', 'vlan-id']
HASH_KEYS = ['src-ip', 'dst-ip', 'src-port', 'dst-port', 'ingress-port', 'ip-proto']
SRC_IP_RANGE = ['8.0.0.0', '8.255.255.255']
DST_IP_RANGE = ['9.0.0.0', '9.255.255.255']
SRC_IPV6_RANGE = ['20D0:A800:0:00::', '20D0:FFFF:0:00::FFFF']
DST_IPV6_RANGE = ['20D0:A800:0:01::', '20D0:FFFF:0:01::FFFF']
VLANIDS = range(1032, 1279)
VLANIP = '192.168.{}.1/24'
PTF_QLEN = 2000
DEFAULT_MUX_SERVER_PORT = 8080
PTF_TEST_PORT_MAP = '/root/ptf_test_port_map.json'
@pytest.fixture(scope='module')
def router_macs(duthosts):
return [duthost.facts['router_mac'] for duthost in duthosts]
@pytest.fixture(scope="module")
def ignore_ttl(duthosts):
    # on multi asic devices, the packet can have a different ttl based on how the packet is routed
    # within the device. So set this flag to mask the ttl in the ptf test
for duthost in duthosts:
if duthost.sonichost.is_multi_asic:
return True
return False
@pytest.fixture(scope="module")
def single_fib_for_duts(tbinfo):
# For a T2 topology, we are generating a single fib file across all asics, but have multiple frontend nodes (DUTS).
if tbinfo['topo']['type'] == "t2":
return True
return False
@pytest.mark.parametrize("ipv4, ipv6, mtu", [pytest.param(True, True, 1514)])
def test_basic_fib(duthosts, ptfhost, ipv4, ipv6, mtu,
toggle_all_simulator_ports_to_random_side,
fib_info_files_per_function,
tbinfo, mux_server_url, router_macs,
ignore_ttl, single_fib_for_duts):
if 'dualtor' in tbinfo['topo']['name']:
wait(30, 'Wait some time for mux active/standby state to be stable after toggled mux state')
timestamp = datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
    # do not test load balancing for vs platform as kernel 4.9
    # can only do load balancing based on L3
if duthosts[0].facts['asic_type'] in ["vs"]:
test_balancing = False
else:
test_balancing = True
logging.info("run ptf test")
log_file = "/tmp/fib_test.FibTest.ipv4.{}.ipv6.{}.{}.log".format(ipv4, ipv6, timestamp)
logging.info("PTF log file: %s" % log_file)
ptf_runner(ptfhost,
"ptftests",
"fib_test.FibTest",
platform_dir="ptftests",
params={"fib_info_files": fib_info_files_per_function[:3], # Test at most 3 DUTs
"ptf_test_port_map": ptf_test_port_map(ptfhost, tbinfo, duthosts, mux_server_url),
"router_macs": router_macs,
"ipv4": ipv4,
"ipv6": ipv6,
"testbed_mtu": mtu,
"test_balancing": test_balancing,
"ignore_ttl": ignore_ttl,
"single_fib_for_duts": single_fib_for_duts},
log_file=log_file,
qlen=PTF_QLEN,
socket_recv_size=16384)
def get_vlan_untag_ports(duthosts, duts_running_config_facts):
"""Get vlan untagged ports.
Args:
duthosts (DutHosts): Instance of DutHosts for interacting with DUT hosts.
duts_running_config_facts (dict): Running config facts of all DUT hosts.
Returns:
        dict: Mapping of DUT hostname to the list of VLAN untagged port names on that DUT.
"""
vlan_untag_ports = {}
for duthost in duthosts:
if duthost.is_multi_asic:
continue
ports = []
for asic_cfg_facts in duts_running_config_facts[duthost.hostname]:
vlans = asic_cfg_facts.get('VLAN_INTERFACE', {}).keys()
for vlan in vlans:
vlan_member_info = asic_cfg_facts.get('VLAN_MEMBER', {}).get(vlan, {})
if vlan_member_info:
for port_name, tag_mode in vlan_member_info.items():
if tag_mode['tagging_mode'] == 'untagged':
ports.append(port_name)
vlan_untag_ports[duthost.hostname] = ports
return vlan_untag_ports
@pytest.fixture(scope="module")
def hash_keys(duthost):
hash_keys = HASH_KEYS[:] # Copy from global var to avoid side effects of multiple iterations
if 'dst-mac' in hash_keys:
hash_keys.remove('dst-mac')
    # do not test load balancing on L4 ports on vs platform as kernel 4.9
    # can only do load balancing based on L3
if duthost.facts['asic_type'] in ["vs"]:
if 'src-port' in hash_keys:
hash_keys.remove('src-port')
if 'dst-port' in hash_keys:
hash_keys.remove('dst-port')
if duthost.facts['asic_type'] in ["mellanox"]:
if 'ip-proto' in hash_keys:
hash_keys.remove('ip-proto')
if duthost.facts['asic_type'] in ["barefoot"]:
if 'ingress-port' in hash_keys:
hash_keys.remove('ingress-port')
    # remove ingress-port and ip-proto from hash_keys as they are not supported by Marvell SAI
if duthost.facts['platform'] in ['armhf-nokia_ixs7215_52x-r0']:
if 'ip-proto' in hash_keys:
hash_keys.remove('ip-proto')
if 'ingress-port' in hash_keys:
hash_keys.remove('ingress-port')
    # remove the ingress port for multi asic platforms.
    # In a multi asic platform each asic has a different hash seed,
    # so the same packet arriving on different asics
    # could egress out of different ports,
    # and the hash_test condition for hash_key == ingress-port would fail
if duthost.sonichost.is_multi_asic:
hash_keys.remove('ingress-port')
return hash_keys
def configure_vlan(duthost, ports):
for vlan in VLANIDS:
duthost.shell('config vlan add {}'.format(vlan))
for port in ports:
duthost.shell('config vlan member add {} {}'.format(vlan, port))
duthost.shell('config interface ip add Vlan{} '.format(vlan) + VLANIP.format(vlan%256))
time.sleep(5)
def unconfigure_vlan(duthost, ports):
for vlan in VLANIDS:
for port in ports:
duthost.shell('config vlan member del {} {}'.format(vlan, port))
duthost.shell('config interface ip remove Vlan{} '.format(vlan) + VLANIP.format(vlan%256))
duthost.shell('config vlan del {}'.format(vlan))
time.sleep(5)
@pytest.fixture
def setup_vlan(tbinfo, duthosts, duts_running_config_facts, hash_keys):
vlan_untag_ports = get_vlan_untag_ports(duthosts, duts_running_config_facts)
need_to_clean_vlan = False
# add some vlan for hash_key vlan-id test
if tbinfo['topo']['type'] == 't0' and 'dualtor' not in tbinfo['topo']['name'] and 'vlan-id' in hash_keys:
for duthost in duthosts:
configure_vlan(duthost, vlan_untag_ports[duthost.hostname])
need_to_clean_vlan = True
yield
# remove added vlan
if need_to_clean_vlan:
for duthost in duthosts:
unconfigure_vlan(duthost, vlan_untag_ports[duthost.hostname])
@pytest.fixture(params=["ipv4", "ipv6"])
def ipver(request):
return request.param
@pytest.fixture
def add_default_route_to_dut(duts_running_config_facts, duthosts, tbinfo):
"""
Add a default route to the device for storage backend testbed.
    This is to ensure that the IO packets can be successfully forwarded.
"""
if "backend" in tbinfo["topo"]["name"]:
logging.info("Add default route on the DUT.")
try:
for duthost in duthosts:
cfg_facts = duts_running_config_facts[duthost.hostname]
for asic_index, asic_cfg_facts in enumerate(cfg_facts):
asic = duthost.asic_instance(asic_index)
bgp_neighbors = asic_cfg_facts["BGP_NEIGHBOR"]
ipv4_cmd_parts = ["ip route add default"]
ipv6_cmd_parts = ["ip -6 route add default"]
for neighbor in bgp_neighbors.keys():
if is_ipv4_address(neighbor):
ipv4_cmd_parts.append("nexthop via %s" % neighbor)
else:
ipv6_cmd_parts.append("nexthop via %s" % neighbor)
ipv4_cmd_parts.sort()
ipv6_cmd_parts.sort()
# limit to 4 nexthop entries
ipv4_cmd = " ".join(ipv4_cmd_parts[:5])
ipv6_cmd = " ".join(ipv6_cmd_parts[:5])
asic.shell(ipv4_cmd)
asic.shell(ipv6_cmd)
yield
finally:
logging.info("Remove default route on the DUT.")
for duthost in duthosts:
for asic in duthost.asics:
if asic.is_it_backend():
continue
asic.shell("ip route del default", module_ignore_errors=True)
asic.shell("ip -6 route del default", module_ignore_errors=True)
else:
yield
def test_hash(add_default_route_to_dut, duthosts, fib_info_files_per_function, setup_vlan, hash_keys, ptfhost, ipver,
toggle_all_simulator_ports_to_rand_selected_tor,
tbinfo, mux_server_url, router_macs,
ignore_ttl, single_fib_for_duts):
if 'dualtor' in tbinfo['topo']['name']:
wait(30, 'Wait some time for mux active/standby state to be stable after toggled mux state')
timestamp = datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
log_file = "/tmp/hash_test.HashTest.{}.{}.log".format(ipver, timestamp)
logging.info("PTF log file: %s" % log_file)
if ipver == "ipv4":
src_ip_range = SRC_IP_RANGE
dst_ip_range = DST_IP_RANGE
else:
src_ip_range = SRC_IPV6_RANGE
dst_ip_range = DST_IPV6_RANGE
ptf_runner(ptfhost,
"ptftests",
"hash_test.HashTest",
platform_dir="ptftests",
params={"fib_info_files": fib_info_files_per_function[:3], # Test at most 3 DUTs
"ptf_test_port_map": ptf_test_port_map(ptfhost, tbinfo, duthosts, mux_server_url),
"hash_keys": hash_keys,
"src_ip_range": ",".join(src_ip_range),
"dst_ip_range": ",".join(dst_ip_range),
"router_macs": router_macs,
"vlan_ids": VLANIDS,
"ignore_ttl":ignore_ttl,
"single_fib_for_duts": single_fib_for_duts
},
log_file=log_file,
qlen=PTF_QLEN,
socket_recv_size=16384)
|
py | 1a54fbdb7abcb85d05437c5fcde58fffbb80804e | import ssl
from typing import Dict, Tuple, Type, Union
import attr
import httpx
@attr.s(auto_attribs=True)
class Client:
"""A class for keeping track of data related to the API"""
base_url: str
cookies: Dict[str, str] = attr.ib(factory=dict, kw_only=True)
headers: Dict[str, str] = attr.ib(factory=dict, kw_only=True)
timeout: float = attr.ib(300.0, kw_only=True)
verify_ssl: Union[str, bool, ssl.SSLContext] = attr.ib(True, kw_only=True)
def get_headers(self) -> Dict[str, str]:
"""Get headers to be used in all endpoints"""
return {**self.headers}
def with_headers(self, headers: Dict[str, str]) -> "Client":
"""Get a new client matching this one with additional headers"""
return attr.evolve(self, headers={**self.headers, **headers})
def get_cookies(self) -> Dict[str, str]:
return {**self.cookies}
def with_cookies(self, cookies: Dict[str, str]) -> "Client":
"""Get a new client matching this one with additional cookies"""
return attr.evolve(self, cookies={**self.cookies, **cookies})
def get_timeout(self) -> float:
return self.timeout
def with_timeout(self, timeout: float) -> "Client":
"""Get a new client matching this one with a new timeout (in seconds)"""
return attr.evolve(self, timeout=timeout)
@attr.s(auto_attribs=True)
class AuthenticatedClient(Client):
"""A Client which has been authenticated for use on secured endpoints"""
auth: Union[None, Tuple[str, str], Type[httpx.Auth]] = attr.ib(None, kw_only=True)
def get_auth(self) -> Union[None, Tuple[str, str], Type[httpx.Auth]]:
return self.auth
def with_auth(self, auth: Union[None, Tuple[str, str], Type[httpx.Auth]]) -> "Client":
"""Get a new client matching this one with a new auth method"""
return attr.evolve(self, auth=auth)
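# Illustrative usage sketch (not part of the generated module): the with_* helpers
# return evolved copies instead of mutating the client in place. The base URL,
# header, and credentials below are placeholders.
if __name__ == "__main__":
    client = Client(base_url="https://api.example.com")
    json_client = client.with_headers({"Accept": "application/json"}).with_timeout(30.0)
    authed = AuthenticatedClient(base_url=client.base_url, auth=("user", "pass"))
    print(json_client.get_headers())  # {'Accept': 'application/json'}
    print(authed.get_auth())          # ('user', 'pass')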
|
py | 1a54fc1e559b2fdd9969fe1e9f0be578e1180b9c | # Given a binary tree
# struct TreeLinkNode {
# TreeLinkNode *left;
# TreeLinkNode *right;
# TreeLinkNode *next;
# }
# Populate each next pointer to point to its next right node. If there is no next right node, the next pointer should be set to NULL.
# Initially, all next pointers are set to NULL.
# Note:
# You may only use constant extra space.
# Recursive approach is fine, implicit stack space does not count as extra space for this problem.
# Example:
# Given the following binary tree,
# 1
# / \
# 2 3
# / \ \
# 4 5 7
# After calling your function, the tree should look like:
# 1 -> NULL
# / \
# 2 -> 3 -> NULL
# / \ \
# 4-> 5 -> 7 -> NULL
# Definition for binary tree with next pointer.
# class TreeLinkNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# self.next = None
class Solution:
# @param root, a tree link node
# @return nothing
def connect(self, root):
# from collections import deque
if root:
queue = [root]
while queue:
tem = []
for i in range(len(queue)):
if queue[i].left: tem.append(queue[i].left)
if queue[i].right: tem.append(queue[i].right)
for i in range(len(queue)-1):
queue[i].next = queue[i+1]
queue[len(queue) - 1].next = None
queue = tem
# Time: O(n)
# Space: O(n) worst case (the level queue can hold about half of the nodes),
#        so this BFS does not meet the constant-extra-space note above
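# Alternative sketch (illustrative, not part of the original submission): the
# constant-extra-space follow-up can be met by walking each level through the
# next pointers already built on the level above, so no queue is needed.
# Assumes the same TreeLinkNode interface as the solution above.
class SolutionConstantSpace:
    def connect(self, root):
        level_head = root
        while level_head:
            next_head = None   # leftmost child on the level below
            prev = None        # last child linked on the level below
            node = level_head
            while node:        # traverse the current level via next pointers
                for child in (node.left, node.right):
                    if child:
                        if prev:
                            prev.next = child
                        else:
                            next_head = child
                        prev = child
                node = node.next
            level_head = next_head
# Time: O(n), Space: O(1) extra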
# Difficulty: medium |
py | 1a54fc87eee5e5293822753e283f15249ae3f19a | from .base import BaseConcern, AsyncConcern, ConcernResponse
|