ext stringclasses 9 values | sha stringlengths 40 40 | content stringlengths 3 1.04M |
---|---|---|
py | 1a3ca1654a817e8f472eb95d4d980b6cfc9b3a51 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-04-13 13:12
from __future__ import unicode_literals
import django.contrib.auth.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.ASCIIUsernameValidator()], verbose_name='username'),
),
]
|
py | 1a3ca4c89f656b6cfce555f851004371363e39ed | import platform
import torch
#
from utils.dataset import train_data, test_data
from utils.model import SimpleLinear, SimpleCNN
from train import train
from test import test
if __name__ == '__main__':
# == Setting ==
device = torch.device('cpu')
# == Model ==
model = SimpleCNN()
model = model.to(device)
# == optimizer ==
criterion = torch.nn.CrossEntropyLoss().to(device)
optimizer = torch.optim.Adam(model.parameters())
# == Main Loop ==
max_acc = 0
max_epoch = 1
# first epoch
# test(model, test_data, device=device)
for epoch in range(1, max_epoch + 1):
train(model, train_data, epoch, criterion, optimizer, device=device)
acc = test(model, test_data, device=device)
if acc > max_acc:
max_acc = acc
torch.save(model.state_dict(), 'checkpoints/best_model.pt')
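# Note: torch.save() does not create missing directories; the 'checkpoints/' folder is assumed to exist beforehand.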
print('==========Max Acc: {}=========='.format(max_acc))
|
py | 1a3ca526170d2f25c157a9b8dc945370770ef251 | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 25 13:08:16 2020
@author: haolinl
"""
import copy
import os
import time
import numpy as np
import random
import scipy.io # For extracting data from .mat file
class inputFileGenerator(object):
"""
Generate input file for Abaqus.
Unit system:
Length: m
Force: N
Pressure: Pa
"""
def __init__(self, data_file_name, write_path, material_type, fix_indices_list, node_variable_name, elem_variable_name, user_prescribed_force_field=[]):
"""
Initialize parameters.
Parameters:
----------
data_file_name: String.
The file path of information of node, element, etc.
write_path: String.
The path to write the inp file.
material_type: String.
The type of material.
Used to indicate whether to consider material nonlinearity.
fix_indices_list: List of ints.
The node indices to be fixed.
node_variable_name: String.
The variable name of the nodes matrix in the data file.
elem_variable_name: String.
The variable name of the elements matrix in the data file.
user_prescribed_force_field (optional): List of floats.
The user-prescribed vector of laplacian force field.
Size: nSurfI x 3.
Default: [].
"""
# Data & Variables.
self.data_file_name = data_file_name
self.data_mat = scipy.io.loadmat(self.data_file_name)
self._surface_mat = self.data_mat["FaceI"]
self._surface_nodes = self.data_mat["idxSurfI"]
self._surface_nodes_num = self.data_mat["nSurfI"][0,0]
self._outer_surface_regionNum = 1 # Int. The region number of outer surface.
self._outer_surface_nodes_list = self._extractOuterSurfaceNodes(self.data_mat["faces"], self._outer_surface_regionNum) # List of sorted ints. The indices of outer surface nodes. Indexed from 1.
self._outer_surface_nodes_num = len(self._outer_surface_nodes_list)
self._triangle_nodes_list = []
self._coupled_list = []
self._node_variable_name = node_variable_name
self._elem_variable_name = elem_variable_name
self._inputFile_lines_total = []
self.writePath = write_path
self._modulus = 1e7 # Young's modulus. Unit: Pa. Default: 1e7.
self._poisson_ratio = 0.48 # Poisson's ratio. Linear elastic default: 0.3; neo-Hookean default: 0.48.
self._isCoupleOn = False # Boolean. True: use coupling constraint; False: do not use coupling constraint. Must not turn on if applying Laplacian smoothing.
self._coupling_type = "Kinematic" # String. "Kinematic" / "Distributing".
self._coupling_neighbor_layers = 1 # How deep does the neighborhood searching go. Default: 1.
self._isLaplacianSmoothingOn = True # Boolean. True: use laplacian smoothing. False: do not use laplacian smoothing.
self._laplacian_variable_name = "laplacianMatrixI3"
self._massMatrix_variable_name = "massMatrixI3"
self._laplacian_iter_num = 20 # Default: 3.
self._smoothing_rate = 0.1 # Default: 0.1 (Previous: 1e-4).
self.loads_num = 3 # For initial testing.
self._load_sampling_style = "gaussian" # String. Indicating the type of random sampling for force components. "uniform" / "gaussian".
self._load_scale = (0.0, 10.0) # Absolute range of the force for uniform sampling. Case and BC specific. (min, max). Unit: N.
self._gaussian_params = (12.0, 2.4) # Mean and deviation of the force for Gaussian sampling. Case and BC specific. (mean, deviation). Unit: N.
self._load_params_tuple = None
self._initial_force_component_vector = [] # List of floats. Default: []. Example: [5., 5., 5.].
self.autoIncrementNum = 5000 # Int. The maximum increment number of the AutoSolver.
self.initIncrem = 0.001 # Float. The initial length of the increment (for fixed-step, this is also the length per increm).
self.minIncrem = 1e-20 # Float. The minimum increment length for the AutoSolver (useless for the StaticSolver).
self.maxIncrem = 1.0 # Float. The maximum increment length for the AutoSolver (useless for the StaticSolver).
self.totalTime = 1.0 # Float. The total time for one simulation step.
self.frameNum = 1 # Int. The number of frames intending to extract from the nodal file.
# ================== Load sampling variables ================== #
if self._isCoupleOn: self._couple_region_num = self.loads_num
else: self._couple_region_num = 0
if self._load_sampling_style == "gaussian": self._load_params_tuple = self._gaussian_params
elif self._load_sampling_style == "uniform": self._load_params_tuple = self._load_scale
else:
self._load_sampling_style = "uniform"
self._load_params_tuple = self._load_scale
# ============================================================= #
# Header.
self._header = ["*Heading"]
# Part definition.
self._part_name = "part-1"
self._material_name = "tissue"
self._part_initial = ["*Part, name={}".format(self._part_name)] # Total list of Part definition.
self._node = ["*Node"]
self._elem = ["*Element, type=C3D10"] # Nonlinear tetrahedron. http://web.mit.edu/calculix_v2.7/CalculiX/ccx_2.7/doc/ccx/node33.html#tennode.
self._nset_all = []
self._elset_all = []
self._section = ["*Solid Section, elset=allElems, material={}".format(self._material_name),
","]
self._part_end = ["*End Part"]
self._new_node_list = []
self._new_node_dict = {}
self._node_num = None
self._orig_node_num = None
self._elem_num = None
self._part = self.generatePart()
# Load settings.
self._loads_nset_name_list = []
self._rf_name_list = []
self._rf_nset_name_list = []
self._rf_nsets = []
self._load_nsets = [] # Nset definition of loads.
self._load = self.generateLoadSetting()
# Assembly definition.
self._assembly_name = "assembly-1"
self._instance_name = "instance-1"
self._assembly_initial = ["*Assembly, name={}".format(self._assembly_name)] # Total list of Assembly definition.
self._instance = ["*Instance, name={}, part={}".format(self._instance_name, self._part_name),
"*End Instance"]
self._ref_nodes_list = []
self._fix_nset_name = "fix"
self._fix_indices_list = fix_indices_list
self._fix_nset = self.generateNset(self._fix_indices_list, self._fix_nset_name, self._instance_name) # Nset definition of fix BC.
self._loads_posi_indices_list = self._generateLoadPositions(self.loads_num, self._fix_indices_list) # Generate load positions. Randomly. For fixed mode: style="fix", input_posi_indices_list=[415, 470, 107].
self._laplacian_initial_loads_posi = None # List. Containing the original position of concentrated forces.
self._laplacian_force_field = None # 2D Array of floats. Size: nSurfI * 3. The force field on the outer surface.
self._user_prescribed_force_field = user_prescribed_force_field # List of floats. Size: nSurfI * 3. The prescribed force field on the outer surface. Default: [].
self._surface_list = []
self._coupling_list = []
self._nset_boundary = [] # All nsets definitions in assembly. Boundary conditions
self._assembly_end = ["*End Assembly"]
self._assembly = self.generateAssembly()
# Material.
self.material_type = material_type # String. Indicate material type. "linear"/"neo_hookean_fitting"/"neo_hookean_solid".
self._material_def_file_name = "" # Default: "". If there is a stress-strain data definition file, specify its name here (it is used only when non-empty).
self._material = self.generateMaterial(self.material_type)
# Boundary condition.
self._boundary_initial = ["*Boundary"]
self._boundary = self.generateBoundaryCondition_fixAll()
# Step settings.
self.freq = int(self.autoIncrementNum / self.frameNum) # Int. The data frame extraction frequency (also refers to the number of increments. Extract one frame per "self.freq" increments). Especially for StaticSolver case.
self._step = ["*Step, name=step-1, nlgeom=YES, inc={}".format(self.autoIncrementNum),
"*Static",
"{}, {}, {}, {}".format(self.initIncrem, self.totalTime,
self.minIncrem, self.maxIncrem)] # Auto solver.
self._step_end = ["*End Step"]
# Rest settings.
self._restart = ["*Restart, write, frequency=0"]
self._output = ["*Output, field, variable=PRESELECT",
"*Output, history, variable=PRESELECT"]
self._fil = ["*FILE FORMAT, ASCII",
"*node file, frequency={}".format(self.freq),
"U, COORD",
"*El file, frequency={}".format(self.freq),
"S, COORD"]
self._resSettings = self._restart + self._output + self._fil
def readFile(self, read_path):
"""
Read files from specific path.
Parameters:
----------
read_path: String.
Path of the original inp file.
Return:
----------
lines: List of strings.
The list of lines from the file.
"""
with open(read_path, "rt") as f: lines = f.read().splitlines()
return lines
def writeFile(self, write_status):
"""
Write 'self.write_lines' into a new inp file.
Parameters:
----------
write_status: String.
"Normal" / "Fast".
"Normal": generate all definitions;
"Fast": generate nodes and elements definition only.
"""
if write_status == "Normal":
self._inputFile_lines_total = (self._header + self._part + self._assembly +
self._material + self._boundary + self._step +
self._load + self._resSettings + self._step_end)
content = '\n'.join(self._inputFile_lines_total)
with open(self.writePath, 'w') as f: f.write(content)
elif write_status == "Fast":
self._inputFile_lines_total = self._header + self._part
content = '\n'.join(self._inputFile_lines_total)
with open(self.writePath, 'w') as f: f.write(content)
else:
self.writeFile("Normal")
def generatePart(self):
"""
Generate part definition.
Returns:
----------
The list collection of all sub-definition lists, including:
part_initial: header part of "Part definition".
node: Node definition.
elem: Element definition.
elset_all: The elset containing all elements. For material definition specifically.
section: Section definition.
part_end: The endline of "Part definition".
"""
self.generateNodes(self.data_mat[self._node_variable_name], self._node)
self.generateElements(self.data_mat[self._elem_variable_name], self._elem)
self.nonlinearization()
# Generate all element elset.
allElem_list, allElem_list_name = [], "allElems"
for i in range(len(self._elem[1:])): allElem_list.append(str(i+1))
self._elset_all = self.generateElset(allElem_list, allElem_list_name)
# Generate Section.
self._section = self.generateSection(allElem_list_name, self._material_name)
# Collection.
return (self._part_initial + self._node + self._elem + self._elset_all +
self._section + self._part_end)
def generateNodes(self, node_mat, target_node_list, specified_indices_list=[]):
"""
Generate nodes information.
Parameters:
----------
node_mat: 2D Array of ints.
The matrix containing the coordinates of the nodes to-be-defined under "*Node".
target_node_list: List of strings.
The definition of node list.
specified_indices_list (optional): List of ints.
List the indices of the input node list, following the exact order of the node_mat.
Default: [].
"""
for i in range(node_mat.shape[0]):
if specified_indices_list == []: node_list_temp = ["{}".format(i+1)]
else: node_list_temp = ["{}".format(specified_indices_list[i])]
node_list_temp += [str(coord) for coord in list(node_mat[i,:])]
target_node_list.append(', '.join(node_list_temp))
def _extractOuterSurfaceNodes(self, faces_def_matrix, outer_surface_regionNum):
"""
Extract the nodes on the outer surface of the geometry (for force application in next step)
Parameters:
----------
faces_def_matrix: 2D Array of ints.
The definition of all faces, including the information of surface region number.
outer_surface_regionNum: Int.
The region number of outer surface of the geometry.
Returns:
----------
outer_surface_nodes_list: List of ints.
The indices of nodes on the outer surface. Indexed from 1. Sorted.
"""
outer_surface_nodes_list = []
for i in range(faces_def_matrix.shape[0]):
if faces_def_matrix[i,0] == outer_surface_regionNum: # The region number of outer surface.
outer_surface_nodes_list += [int(ind) for ind in faces_def_matrix[i,1:]] # Indexed from 1.
outer_surface_nodes_list = list(set(outer_surface_nodes_list))
outer_surface_nodes_list.sort()
return outer_surface_nodes_list
def generateElements(self, elem_mat, target_elem_list, specified_indices_list=[]):
"""
Generate elements information.
Parameters:
----------
elem_mat: 2D Array of ints.
The matrix containing the indices of each element to-be-defined under "*Element".
target_elem_list: List of strings.
The definition of element list.
specified_indices_list (optional): List of ints.
List the indices of the input element list, following the exact order of the elem_mat.
Default: [].
"""
for i in range(elem_mat.shape[0]):
if specified_indices_list == []: elem_list_temp = ["{}".format(i+1)]
else: elem_list_temp = ["{}".format(specified_indices_list[i])]
elem_line_temp = [str(ind) for ind in list(elem_mat[i,:])]
# Make sure the order of nodes for tetrahedron definition is counter-clockwise, otherwise resulting in negative volume.
ind_temp = elem_line_temp[1]
elem_line_temp[1] = elem_line_temp[2]
elem_line_temp[2] = ind_temp
elem_list_temp += elem_line_temp
target_elem_list.append(', '.join(elem_list_temp))
def generateNset(self, node_list, nset_name, instance_name=None):
"""
Generate node set information.
Parameters:
----------
node_list: List of ints.
The list of nodes to be contained in the node list.
nset_name: String.
The name of the to-be-defined node list.
instance_name (optional): String.
The name of specified instance.
Only use in assembly definition.
Default: None. (Part cases)
Returns:
----------
nset: List of strings.
The definition of a specific nset.
"""
if instance_name is None: nset = ["*Nset, nset={}".format(nset_name)]
else: nset = ["*Nset, nset={}, instance={}".format(nset_name, instance_name)]
nset_line_temp, nset_string_temp = [], None
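# Write at most 10 indices per data line, staying within Abaqus' per-line entry limit for nset definitions.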
for i, ind in enumerate(node_list):
nset_line_temp.append(str(ind))
if (i+1) % 10 == 0:
nset_string_temp = ', '.join(nset_line_temp)
nset.append(copy.deepcopy(nset_string_temp))
nset_line_temp, nset_string_temp = [], None
nset_string_temp = ', '.join(nset_line_temp)
nset.append(copy.deepcopy(nset_string_temp))
return nset
def generateElset(self, elem_list, elset_name, instance_name=None):
"""
Generate element set information.
Parameters:
----------
elem_list: List of ints.
The list of elements to be contained in the element list.
elset_name: String.
The name of the to-be-defined element list.
instance_name (optional): String.
The name of specified instance.
Only use in assembly definition.
Default: None. (Part cases)
Returns:
----------
elset: List of strings.
The definition of a specific elset.
"""
if instance_name is None: elset = ["*Elset, elset={}".format(elset_name)]
else: elset = ["*Elset, elset={}, instance={}".format(elset_name, instance_name)]
elset_line_temp, elset_string_temp = [], None
for i, ind in enumerate(elem_list):
elset_line_temp.append(str(ind))
if (i+1) % 10 == 0:
elset_string_temp = ', '.join(elset_line_temp)
elset.append(copy.deepcopy(elset_string_temp))
elset_line_temp, elset_string_temp = [], None
elset_string_temp = ', '.join(elset_line_temp)
elset.append(copy.deepcopy(elset_string_temp))
return elset
def generateSection(self, elset_name, material_name):
"""
Generate section information.
Parameters:
----------
elset_name: String.
The name of the elset to be assigned a section.
material_name: String.
The name of defined material.
Returns:
----------
section: List of strings.
The definition of section.
"""
section = ["*Solid Section, elset={}, material={}".format(elset_name, material_name),
","]
return section
def generateMaterial(self, material_type):
"""
Generate lines for material definition.
Parameters:
----------
material_type: String.
Indicate what type of material is used.
Returns:
----------
material_lines: List of lines.
The lines of material definition.
"""
material_lines = ["*Material, name={}".format(self._material_name)]
if material_type == "neo_hookean_fitting":
stress_strain_lines = self._generateNeoHookeanFitting(self._modulus, (-0.3, 0.3), file_name=self._material_def_file_name)
material_lines += ["*Hyperelastic, neo hooke, test data input, poisson={}".format(self._poisson_ratio),
"*Uniaxial Test Data"]
material_lines += stress_strain_lines
elif material_type == "neo_hookean_solid":
c10 = self._modulus / (4 * (1 + self._poisson_ratio))
d1 = 6 * (1 - 2 * self._poisson_ratio) / self._modulus
material_lines += ["*Hyperelastic, neo hooke",
"{}, {}".format(c10, d1)]
elif material_type == "linear":
material_lines += ["*Elastic",
"{}, {}".format(self._modulus, self._poisson_ratio)]
else: material_lines = self.generateMaterial("linear")
return material_lines
def _generateNeoHookeanFitting(self, modulus, strain_range, file_name=""):
"""
Import/Generate stress strain data for neo-Hookean material fitting.
Parameters:
----------
modulus: Float.
The elastic modulus of material.
strain_range: Tuple of floats.
Range for strain interpolation.
file_name (optional): String.
The name of stress strain data definition file.
Default: "".
Returns:
----------
stress_strain_lines: List of strings.
The lines of stress strain data.
"""
if file_name != "": return self.readFile(file_name)
else:
"""
Assumptions of neo-Hookean formulation:
Incompressible (Poisson's ratio = ~0.5, small deformation).
Undergoing uniaxial loading.
Formulation: sigma = 2*C*(stretch - 1/(stretch^2)).
E = 6*C.
"""
strain_data = np.linspace(strain_range[0], strain_range[1], 100)
stretch_data = strain_data + 1.0
stress_data = (self._modulus / 3.0) * (stretch_data - 1.0 / stretch_data**2) # Formulation.
stress_strain_lines = []
for i in range(len(stress_data)):
stress_strain_lines.append("%.6f, %.6f" % (stress_data[i], strain_data[i]))
return stress_strain_lines
def _generateLoadPositions(self, loads_num, fix_indices_list, style="random", input_posi_indices_list=[]):
"""
Randomly generate positions of the load.
Parameters:
----------
loads_num: Int.
Number of loads.
fix_indices_list: List of ints.
Indices of fixed nodes.
style (optional): String.
Indicate how to generate initial load positions.
"random" / "fix":
"random": Randomly generate load positions.
"fix": Use the user input of initial load position indices.
Default: "random".
input_posi_indices_list (optional): List of ints.
User input of initial load positions indices list.
Indexed from 1.
Default: [].
Returns:
----------
loads_posi_indices_list: List of ints.
Picked indices for load application positions.
"""
if style == "random":
loads_posi_indices_list = []
for i in range(loads_num):
while(True):
load_posi_index_temp = random.choice(self._outer_surface_nodes_list) # Randomly choose an outer surface node to apply the load F(x, y, z). Indexed from 1.
if load_posi_index_temp not in fix_indices_list: break # The randomly generated index cannot be one of the fixed nodes.
loads_posi_indices_list.append(load_posi_index_temp)
return loads_posi_indices_list
elif style == "fix": return input_posi_indices_list
else: return self._generateLoadPositions(loads_num, fix_indices_list)
def _generateLoadValues(self, output_dimension, load_scale, sampling_style="uniform"):
"""
Randomly generate force values for load component definition.
Using function: numpy.random.rand().
Parameters:
----------
output_dimension: Tuple of ints.
The shape of output random array.
Size: 2*1. (dim1, dim2).
load_scale: Tuple of floats.
Size: 2*1. (min_load, max_load) / (mean, deviation).
sampling_style (optional): String.
Indicating the type of sampling.
"uniform": uniform distribution.
"gaussian": Gaussian distribution.
Default: "uniform".
Returns:
----------
load_result: Array of floats.
Size: output_dimension.
"""
if sampling_style == "uniform":
load_result = (np.random.rand(output_dimension[0], output_dimension[1]) * 2 - 1) * abs(load_scale[1] - load_scale[0])
load_result = load_result.reshape(-1,1)
for index, load_value_temp in enumerate(load_result):
if load_value_temp < 0: load_result[index] -= load_scale[0]
else: load_result[index] += load_scale[0]
load_result = load_result.reshape(output_dimension[0], output_dimension[1])
elif sampling_style == "gaussian":
mean, deviation = load_scale[0], load_scale[1]
load_result = np.random.normal(mean, deviation, size=output_dimension)
load_result = load_result.reshape(-1,1)
for index, load_value_temp in enumerate(load_result):
if np.random.rand() <= 0.5: load_result[index] *= -1
load_result = load_result.reshape(output_dimension[0], output_dimension[1])
else: load_result = self._generateLoadValues(output_dimension, load_scale)
return load_result
def generateAssembly(self):
"""
Generate assembly definition.
Returns:
----------
The list collection of all sub-definition lists, including:
assembly_initial: Header of the assembly definition.
instance: The instance definition.
nset_boundary: The definition of BC related node set.
assembly_end: The endline of assembly definition.
"""
# Generate "self.loads_num" nsets, each of which has 1 node.
if self._isCoupleOn:
for i, load_posi_index_temp in enumerate(self._loads_posi_indices_list):
ref_name_temp = "rf-{}".format(i+1)
ref_nset_name_temp = "rf-{}-nset".format(i+1)
self._rf_name_list.append(ref_name_temp)
self._rf_nset_name_list.append(ref_nset_name_temp)
# Generate assembly node definitions for reference points.
ref_node_list_temp = ["*Node"]
ref_pt_coord_list_temp = [float(item) for item in self._node[load_posi_index_temp].split(',')[1:]]
self.generateNodes(np.array(ref_pt_coord_list_temp).astype(float).reshape(1,-1), ref_node_list_temp,
specified_indices_list=[i+1])
self._ref_nodes_list += copy.deepcopy(ref_node_list_temp)
rf_nset_list_temp = self._findCouplingNodes(load_posi_index_temp, self._coupling_neighbor_layers)
# Generate reference point node sets.
self._load_nsets += self.generateNset([i+1], ref_name_temp)
# Generate coupling constraint node sets.
self._rf_nsets += self.generateNset(rf_nset_list_temp, ref_nset_name_temp,
self._instance_name)
self.generateCoupling()
else:
if self._isLaplacianSmoothingOn:
force_vector_temp = np.zeros(shape=(3*self._surface_nodes_num, 1))
self._laplacian_initial_loads_posi = copy.deepcopy(self._loads_posi_indices_list)
if self._initial_force_component_vector == []:
for load_posi_index_temp in self._loads_posi_indices_list:
force_vector_temp[(load_posi_index_temp-1)*3:load_posi_index_temp*3,:] = self._generateLoadValues((3,1), self._load_params_tuple,
sampling_style=self._load_sampling_style)
else:
for load_posi_index_temp in self._loads_posi_indices_list:
force_vector_temp[(load_posi_index_temp-1)*3:load_posi_index_temp*3,:] = np.array(self._initial_force_component_vector).astype(float).reshape(3,1)
laplacian_matrix, mass_matrix = self.data_mat[self._laplacian_variable_name], self.data_mat[self._massMatrix_variable_name]
force_vector_new = self._laplacianSmoothing(force_vector_temp, laplacian_matrix, mass_matrix, iter_num=self._laplacian_iter_num,
smoothing_rate=self._smoothing_rate, laplacian_force_field=self._user_prescribed_force_field) # Size: (nSurfI x 3)*1. Fix force value: initial_BC_state="fix" (not recommended).
self._laplacian_force_field = force_vector_new.reshape(-1,3)
self._loads_posi_indices_list = copy.deepcopy([(list(force_vector_new).index(item)//3)+1 for item in list(force_vector_new) if item != 0]) # Indexed from 1.
self._loads_posi_indices_list = list(set(self._loads_posi_indices_list))
self._loads_posi_indices_list.sort()
for i, load_posi_index_temp in enumerate(self._loads_posi_indices_list):
load_nset_name_temp = "Load-{}".format(i+1)
self._loads_nset_name_list.append(load_nset_name_temp)
self._load_nsets += self.generateNset([load_posi_index_temp], load_nset_name_temp, self._instance_name)
self._load_nsets += self.generateNset(self._laplacian_initial_loads_posi, "Orig_loads_posi", self._instance_name)
self._load = self.generateLoadSetting(force_list=list(force_vector_new.reshape(-1,1)))
else:
for i, load_posi_index_temp in enumerate(self._loads_posi_indices_list):
load_nset_name_temp = "Load-{}".format(i+1)
self._loads_nset_name_list.append(load_nset_name_temp)
self._load_nsets += self.generateNset([load_posi_index_temp], load_nset_name_temp, self._instance_name)
# Concatenate assembly subparts.
self._nset_boundary = self._nset_boundary + self._load_nsets + self._rf_nsets + self._fix_nset + self._surface_list + self._coupling_list
return (self._assembly_initial + self._instance + self._ref_nodes_list + self._nset_boundary + self._assembly_end)
def generateCoupling(self):
"""
Generate coupling constraints for concentrated forces application.
"""
for index, rf_name in enumerate(self._rf_nset_name_list):
self._surface_list += ["*Surface, type=NODE, name={}_CNS_, internal".format(rf_name),
"{}, 1.".format(rf_name)]
self._coupling_list += ["*Coupling, constraint name={}, ref node={}, surface={}_CNS_".format(self._rf_name_list[index],
self._rf_name_list[index],
rf_name),
"*{}".format(self._coupling_type)]
def _findCouplingNodes(self, rf_node_ind, neighbor_layers):
"""
Find the immediate neighbors of each specified node index.
Parameters:
----------
rf_node_ind: Int.
The index of target node.
Returns:
----------
rf_nset_list: List of ints (duplicated items removed).
"rf_node_ind"'s corresponding immediate neighbor nodes set.
"""
rf_nset_list, new_nodes_list, searched_nodes_list = [rf_node_ind], [rf_node_ind], []
for j in range(neighbor_layers):
for ind_temp in new_nodes_list:
for i in range(len(self._triangle_nodes_list)):
if ind_temp in self._triangle_nodes_list[i]:
rf_nset_list += copy.deepcopy(self._triangle_nodes_list[i])
else: continue
searched_nodes_list += copy.deepcopy(new_nodes_list)
rf_nset_list = list(set(copy.deepcopy(rf_nset_list)))
new_nodes_list = [ind for ind in rf_nset_list if ind not in searched_nodes_list]
# Avoid assigning same nodes to different coupled node sets.
for ind in list(rf_nset_list): # Iterate over a copy: removing items from the list being iterated would skip elements.
if ind in self._coupled_list: rf_nset_list.remove(ind)
else: self._coupled_list.append(ind)
return rf_nset_list
def generateBoundaryCondition_fixAll(self):
"""
Generate fix boundary condition.
Returns:
----------
The list collection of all sub-definition lists, including:
boundary_initial: Header of boundary condition definition.
BC_list_temp: The detailed BC definition of boundary conditions.
"""
BC_list_temp = []
for i in range(6): # 6: 6 DOFs (disp. + rot.); 3: 3 DOFs (disp.).
BC_list_temp.append("{}, {}, {}".format(self._fix_nset_name, i+1, i+1))
return (self._boundary_initial + BC_list_temp)
def generateLoadSetting(self, force_list=[]):
"""
Generate load information.
Returns:
----------
load_list: List of strings.
Definition of concentrated forces.
force_list (optional): List of forces (floats).
Size: loads_num * 3.
Default: [].
"""
load_list = []
if force_list == []:
force_list = list(self._generateLoadValues((self.loads_num*3, 1), self._load_params_tuple, sampling_style=self._load_sampling_style))
force_list = np.array(force_list).astype(float).reshape(-1,3) # 2D Array of floats. Size: self._loads_num * 3.
if self._isCoupleOn:
for j, rf_name in enumerate(self._rf_name_list): # Length: self._loads_num
load_temp = ["*Cload, op=NEW"]
for i in range(force_list.shape[1]): # 3: Three directions.
load_temp.append("{}, {}, {}".format(rf_name, i+1, force_list[j,i]))
load_list += copy.deepcopy(load_temp)
else:
for j, load_name in enumerate(self._loads_nset_name_list): # Length: length of self._loads_nset_name_list.
load_temp = ["*Cload"]
for i in range(force_list.shape[1]): # 3: Three directions.
load_temp.append("{}, {}, {}".format(load_name, i+1, force_list[self._loads_posi_indices_list[j]-1,i]))
load_list += copy.deepcopy(load_temp)
return load_list
def _laplacianMatrixShrink(self, laplacian_matrix, surface_nodes_list, faces_def_matrix, outer_surface_regionNum):
"""
Assign zeros to the DOFs without force value applied.
Parameters:
----------
laplacian_matrix: 2D Array of floats.
The surface's Laplacian for force smoothing.
Size: nSurfI*3 x nSurfI*3.
surface_nodes_list: List of ints.
All indices of nodes on all surfaces.
faces_def_matrix: 2D Array of ints.
The definition of all faces, including the information of surface region number.
outer_surface_regionNum: Int.
The region number of outer surface of the geometry.
Returns:
----------
laplacian_matrix: 2D Array of floats.
Laplacian with zeros assigned to the nodes not on the outer surfaces.
Size: nSurfI*3 x nSurfI*3.
"""
surface_nodes_list = [ind for ind in surface_nodes_list]
outer_surface_nodes_list = self._extractOuterSurfaceNodes(faces_def_matrix, outer_surface_regionNum)
other_surface_nodes_list = [ind for ind in surface_nodes_list if ind not in outer_surface_nodes_list]
other_surface_nodes_list.sort()
for ind in other_surface_nodes_list:
laplacian_matrix[surface_nodes_list.index(ind)*3:(surface_nodes_list.index(ind)+1)*3,:] = 0.0
laplacian_matrix[:,surface_nodes_list.index(ind)*3:(surface_nodes_list.index(ind)+1)*3] = 0.0
return laplacian_matrix
def _laplacianSmoothing(self, force_vector, laplacian_matrix, mass_matrix, iter_num=3, smoothing_rate=1e-4, initial_BC_state="", laplacian_force_field=[]):
"""
Implement laplacian smoothing based on pre-calculated Laplacian matrix.
Formulation: Forward Euler.
F_(n+1) = (I + lambda*massMatrix*Laplacian) * F_n
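Here lambda corresponds to 'smoothing_rate'; the mass-matrix term is dropped in the default implementation below.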
Parameters:
----------
force_vector: 1D Array of floats.
With concentrated force values applied at the specified nodes.
Size: (self._surface_nodes_num x 3) * 1.
laplacian_matrix: 2D Array of floats.
Size: (self._surface_nodes_num x 3) * (self._surface_nodes_num x 3).
mass_matrix: 2D Array of floats.
Diagonal matrix.
Size: (self._surface_nodes_num x 3) * (self._surface_nodes_num x 3).
iter_num (optional): Int.
The number of smoothing iterations.
Default: 3.
smoothing_rate (optional): float.
The coefficient that controls the step size of smoothing.
Default: 1e-4.
initial_BC_state (optional): String.
Indicating whether to "fix" or "decay" the original concentrated force value.
Default: "". Indicating smoothing including the original forces.
laplacian_force_field (optional): List of floats.
The user-prescribed vector of laplacian force field.
Size: self._surface_nodes_num x 3.
Default: [].
Returns:
----------
force_vector_new: 1D Array of floats.
The laplacian-smoothed force vector.
Size: (self._surface_nodes_num x 3) * 1.
"""
if laplacian_force_field == []:
force_vector_new = copy.deepcopy(force_vector)
for i in range(iter_num):
force_vector_new += smoothing_rate * (laplacian_matrix @ force_vector_new) # Without mass matrix.
# force_vector_new += smoothing_rate * (mass_matrix @ laplacian_matrix @ force_vector_new) # With mass matrix (NOT recommended).
if initial_BC_state == "fix":
for j, value in enumerate(force_vector):
if value != 0:
force_vector_new[j] = value
else: force_vector_new = np.array(laplacian_force_field).astype(float).reshape(len(laplacian_force_field),1)
return force_vector_new
def _computeMidPoint(self, ind_1, ind_2):
"""
Compute the mid-point of the edge.
Parameters:
----------
ind_1: Int.
The first index of the node pair. Indexed from 1.
ind_2: Int.
The second index of the node pair. Indexed from 1.
Returns:
----------
ind_mid: Int.
The index of the self._node. Index from 1.
"""
key_string_temp_1, key_string_temp_2 = "{}_{}".format(ind_1, ind_2), "{}_{}".format(ind_2, ind_1)
if key_string_temp_1 in self._new_node_dict.keys(): return self._new_node_dict[key_string_temp_1]
elif key_string_temp_2 in self._new_node_dict.keys(): return self._new_node_dict[key_string_temp_2]
else:
coord_temp_1 = np.array(self._node[ind_1].split(',')[1:]).astype(float).reshape(1,-1)
coord_temp_2 = np.array(self._node[ind_2].split(',')[1:]).astype(float).reshape(1,-1)
coord_temp_mid = (coord_temp_1 + coord_temp_2) / 2.0
coord_mid_list = [str(item) for item in list(coord_temp_mid[0])]
self._node_num = len(self._node)
new_node_def_list_temp = copy.deepcopy([str(self._node_num)])
new_node_def_list_temp += copy.deepcopy(coord_mid_list)
self._node.append(', '.join(new_node_def_list_temp))
self._new_node_list.append(', '.join(new_node_def_list_temp))
self._new_node_dict[key_string_temp_1] = self._node_num
self._new_node_dict[key_string_temp_2] = self._node_num
return self._node_num
def insertNode(self):
"""
Insert one node (at the mid-point) of each edge.
Create C3D10 element structure.
"""
for index, elem_def_string in enumerate(self._elem[1:]):
elem_node_list_temp = [int(ind) for ind in elem_def_string.split(',')[1:]]
# Obtain the mid-point index in order. Assume tetrahedral element (C3D4).
mid_pt_ind_5 = self._computeMidPoint(elem_node_list_temp[0], elem_node_list_temp[1])
mid_pt_ind_6 = self._computeMidPoint(elem_node_list_temp[1], elem_node_list_temp[2])
mid_pt_ind_7 = self._computeMidPoint(elem_node_list_temp[0], elem_node_list_temp[2])
mid_pt_ind_8 = self._computeMidPoint(elem_node_list_temp[0], elem_node_list_temp[3])
mid_pt_ind_9 = self._computeMidPoint(elem_node_list_temp[1], elem_node_list_temp[3])
mid_pt_ind_10 = self._computeMidPoint(elem_node_list_temp[2], elem_node_list_temp[3])
elem_new_def_list_temp = [str(mid_pt_ind_5),
str(mid_pt_ind_6),
str(mid_pt_ind_7),
str(mid_pt_ind_8),
str(mid_pt_ind_9),
str(mid_pt_ind_10)]
# Redefine the new C3D10 element in order.
elem_def_list_temp = copy.deepcopy(elem_def_string.split(',')) + copy.deepcopy(elem_new_def_list_temp)
elem_def_string_temp = ', '.join(elem_def_list_temp)
self._elem[index+1] = copy.deepcopy(elem_def_string_temp)
def _triangleNodesCollection(self):
"""
Collect all the nodes on each triangle (surface).
Need to be implemented after "self.insertNode()".
"""
for i in range(self._surface_mat.shape[0]):
tri_temp = self._surface_mat[i,:]
# Assuming all triangles on the surface of geometry.
middle_pts_list_temp = [self._computeMidPoint(tri_temp[0], tri_temp[1]),
self._computeMidPoint(tri_temp[0], tri_temp[2]),
self._computeMidPoint(tri_temp[1], tri_temp[2])]
triangle_nodes_list_temp = list(copy.deepcopy(tri_temp)) + copy.deepcopy(middle_pts_list_temp)
self._triangle_nodes_list.append(copy.deepcopy(triangle_nodes_list_temp)) # List of lists of ints.
def nonlinearization(self):
"""
Nonlinearize the linear tetrahedral (CST) element to quadratic tetrahedral element.
"""
self._elem_num = len(self._elem) - 1
self._orig_node_num = len(self._node) - 1
self.insertNode()
self._triangleNodesCollection()
self._node_num = len(self._node) - 1
def saveLog(file_name_list, elapsed_time_list, write_status, data_file_name,
sample_num, fix_indices_list, loads_num, load_sampling_type, load_param_tuple,
material_type, modulus, poisson_ratio, isCoupleOn, isLaplacianSmoothingOn,
coupling_type="", coupling_neighbor_layer_num=1,
laplacian_iter_num=5, laplacian_smoothing_rate=1e-4, write_path="nonlinear_case_generation.log"):
"""
Save the nonlinear cases generation results into .log file.
Parameters:
----------
file_name_list: List of strings.
Names of generated files.
elapsed_time_list: List of floats.
Elapsed time of generation for each input file.
In exact order.
write_status: String.
Indicating the type of input file generation.
"Normal" / "Fast":
"Normal": generate all definitions;
"Fast": generate nodes and elements definition only.
data_file_name: String.
The name of modeling data file.
Format: .mat
sample_num: Int.
Number of generated input files.
fix_indices_list: List of ints.
Indices of fixed points.
Indexed from 1.
loads_num: Int.
The number of concentrated forces.
load_sampling_type: String.
The distribution type for force sampling.
"uniform" / "gaussian":
"uniform": uniform distribution with specified (min, max) range.
"gaussian": gaussian distribution with specified (mean, dev) parameters.
load_param_tuple: tuple of floats.
Parameters of load sampling.
load_sampling_type specific.
material_type: String.
The type of material.
"linear" / "neo_hookean_solid" / "neo_hookean_fitting":
"linear": linear elastic material.
"neo_hookean_solid": neo-Hookean solid following the stain energy formulation.
"neo_hookean_fitting": neo-Hookean solid following the strass-strain curved fitted from user-input strss-strain data.
modulus: Float.
Elastic modulus of the material.
poisson_ratio: Float.
Poisson's ratio of the material.
isCoupleOn: Boolean indicator.
True: using coupling constraint for local force distribution.
False: not using coupling constraint.
isLaplacianSmoothingOn: Boolean indicator.
True: using Laplacian-Beltrami operator matrix to smooth the force distribution.
False: not using Laplacian smoothing.
coupling_type (optional): String.
The type of coupling constraint.
Default: "".
coupling_neighbor_layer_num (optional): Int.
The number of neighbor layers over which the local force is distributed.
Default: 1.
laplacian_iter_num (optional): Int.
The number of iterations for Laplacian smoothing.
Default: 5.
laplacian_smoothing_rate (optional): Float.
The rate of Laplacian smoothing.
Default: 1e-4.
write_path (optional): String.
The path of to-be-written file.
Default: "nonlinear_case_generation.log".
"""
if isCoupleOn: isCoupleOn_status = "On"
else: isCoupleOn_status = "Off"
if isLaplacianSmoothingOn: isLaplacianSmoothingOn_status = "On"
else: isLaplacianSmoothingOn_status = "Off"
content = ["Data_file_name: {}".format(data_file_name),
"Sample_num = {}".format(sample_num),
"Fixed_indices_list (indexed from 1): {}".format(fix_indices_list),
"Material type: {}".format(material_type),
"Elastic modulus = {} Pa".format(modulus),
"Poisson's ratio = {}".format(poisson_ratio),
"Loads_num = {}".format(loads_num)]
if load_sampling_type == "uniform":
content += ["Load sampling type: {}".format(load_sampling_type),
"Load sampling range (min, max): {} N".format(load_param_tuple)]
elif load_sampling_type == "gaussian":
content += ["Load sampling type: {}".format(load_sampling_type),
"Load sampling parameters (mean, dev): {} N".format(load_param_tuple)]
else:
load_sampling_type = "uniform"
content += ["Load sampling type: {}".format(load_sampling_type),
"Load sampling range (min, max): {} N".format(load_param_tuple)]
content += ["Coupling constraint status: {}".format(isCoupleOn_status),
"Laplacian smoothing status: {}".format(isLaplacianSmoothingOn_status)]
if isCoupleOn:
content += ["Coupling type: {}".format(coupling_type),
"Coupling neighbor layer numbers: {}".format(coupling_neighbor_layer_num)]
if isLaplacianSmoothingOn:
content += ["Laplacian smoothing iteration numbers = {}".format(laplacian_iter_num),
"Laplacian smoothing rate = {}".format(laplacian_smoothing_rate)]
content += ["----------------------------------------------------------",
"Input file\t\tExport status\tGeneration status\tElapsed time/s"]
elapsed_time_total = 0
for i, file_name in enumerate(file_name_list):
data_string_temp = "{}\t\t{}\t\tCompleted\t".format(file_name, write_status) + "\t%.8f" % (elapsed_time_list[i])
content.append(data_string_temp)
elapsed_time_total += elapsed_time_list[i]
content += ["----------------------------------------------------------",
"Total elapsed time: {} s".format(elapsed_time_total)]
content = '\n'.join(content)
with open(write_path, 'w') as f: f.write(content)
def main():
abaqus_default_directory = "C:/temp" # Default working directory of Abaqus.
inp_folder = "inp_files"
sample_nums = 2000 # Default: 2000.
data_file_path = "data_kidney.mat"
node_variable_name, elem_variable_name = "NodeI", "EleI"
results_folder_path_stress, results_folder_path_coor = "stress", "coor"
material_type = "neo_hookean_solid" # "linear" / "neo_hookean_fitting" / "neo_hookean_solid".
fix_indices_list = [2, 453, 745] # Specify the node to fix. At least 3. Indexed from 1.
write_status = "Normal" # String. "Normal" / "Fast". "Normal": generate all definitions; "Fast": generate nodes and elements definition only.
# ================================== Force interpolation related variables ================================== #
force_field_mat_name = "force_field_data.mat"
force_interpolation_folder = "inp_interpolation"
isPrescribedForceOn = False # Boolean indicator. True: use prescribed force field; False: no specified force field. Default: False.
force_type = "random" # String. The type of prescribed force field. "interpolated": interpolated force fields; "random": weighted-summed force fields.
eigen_num_force, force_scalar = 100, 2.0 # Int, Float. The number of force-field eigenmodes, and the scalar controlling the force magnitude -> deformation magnitude of the tumor in the nonlinear solver. Unit: N.
# =========================================================================================================== #
if isPrescribedForceOn:
"""
The pipeline of generating interpolated force fields:
1. Run "nonlinearCasesCreation.py" with 'isPrescribedForceOn = False' firstly.
2. Run "forceInterpolation.py" in the same directory.
3. Set 'isPrescribedForceOn = True', set 'force_type = "interpolated"', then run "nonlinearCasesCreation.py" again.
Get input files with "*_interpolated.inp" in the folder 'force_interpolation_folder'.
4. Set 'isPrescribedForceOn = True', set 'force_type = "random"', then run "nonlinearCasesCreation.py" again.
Get input files with "*_random.inp" in the folder 'force_interpolation_folder'.
"""
force_fields = (scipy.io.loadmat(force_field_mat_name)["force_field_interpolated"] if force_type == "interpolated" else
scipy.io.loadmat(force_field_mat_name)["force_field_random"]) # Size: nSurfI*3 x sampleNum. Concatenated as xyzxyz...
sample_nums = force_fields.shape[1]
# Generate input file for Abaqus.
file_name_list, elapsed_time_list, force_field_matrix = [], [], None
for i in range(sample_nums):
start_time = time.time()
if isPrescribedForceOn:
if not os.path.isdir(force_interpolation_folder): os.mkdir(force_interpolation_folder)
file_name_temp = ("{}_interpolated.inp".format(str(i+20001)) if force_type == "interpolated" else
"{}_random.inp".format(str(i+20001)))
write_path = os.path.join(force_interpolation_folder, file_name_temp)
force_field_prescribed_list = list(force_fields[:,i])
inputFile_temp = inputFileGenerator(data_file_path, write_path, material_type,
fix_indices_list, node_variable_name, elem_variable_name,
user_prescribed_force_field=force_field_prescribed_list)
else:
if not os.path.isdir(inp_folder): os.mkdir(inp_folder)
file_name_temp = "{}.inp".format(str(i+20001))
write_path = os.path.join(inp_folder, file_name_temp)
inputFile_temp = inputFileGenerator(data_file_path, write_path, material_type,
fix_indices_list, node_variable_name, elem_variable_name)
inputFile_temp.writeFile(write_status)
end_time = time.time()
elapsed_time = end_time - start_time
file_name_list.append(file_name_temp)
elapsed_time_list.append(elapsed_time)
if i == 0: force_field_matrix = inputFile_temp._laplacian_force_field.reshape(-1,1)
else: force_field_matrix = np.hstack((force_field_matrix, inputFile_temp._laplacian_force_field.reshape(-1,1)))
# ============================ For force visualization only (sample_nums = 1) ============================ #
# print(inputFile_temp._laplacian_initial_loads_posi)
# force_field = {"force_field": inputFile_temp._laplacian_force_field}
# scipy.io.savemat("force_field.mat", force_field)
# ======================================================================================================== #
print("Input_file: ", file_name_temp, "| Status:", write_status, "| Generation: Completed | Time: %.4f s" % (elapsed_time))
saveLog(file_name_list, elapsed_time_list, write_status, data_file_path, sample_nums,
fix_indices_list, inputFile_temp.loads_num, inputFile_temp._load_sampling_style, inputFile_temp._load_params_tuple,
material_type, inputFile_temp._modulus, inputFile_temp._poisson_ratio,
inputFile_temp._isCoupleOn, inputFile_temp._isLaplacianSmoothingOn,
coupling_type=inputFile_temp._coupling_type, coupling_neighbor_layer_num=inputFile_temp._coupling_neighbor_layers,
laplacian_iter_num=inputFile_temp._laplacian_iter_num, laplacian_smoothing_rate=inputFile_temp._smoothing_rate,
write_path="nonlinear_case_generation.log")
if not isPrescribedForceOn: weight_matrix = (2.0 * np.random.rand(eigen_num_force, 3*sample_nums) - 1.0) # Distinct random weights corresponding to each smoothed concentrated force field.
else: weight_matrix = scipy.io.loadmat(force_field_mat_name)["weight_matrix"] # Distinct random force field for each smoothed concentrated force field.
mdict = {"fix_indices_list": fix_indices_list,
"orig_data_file_name": data_file_path,
"orig_config_var_name": node_variable_name,
"inp_folder": inp_folder if not isPrescribedForceOn else force_interpolation_folder, # The folder containing input files.
"current_directory": os.getcwd(),
"results_folder_path_stress": results_folder_path_stress,
"results_folder_path_coor": results_folder_path_coor,
"original_node_number": inputFile_temp._orig_node_num,
"couple_region_num": inputFile_temp._couple_region_num,
"force_field_matrix": force_field_matrix, # The force field matrix of all generated samples. Size: nSurfI*3 x sampleNum_total.
"weight_matrix": weight_matrix, "force_scalar_coeff": force_scalar, # The randomly generated matrix for force fields' reconstruction. Size: eigen_num x (3*sample_num).
"eigen_number_force": eigen_num_force, # Int. The eigenmode number of force field reconstruction. (Used only in force field interpolation)
"alpha_indexing_vector": np.zeros(shape=(sample_nums, 1)) if not isPrescribedForceOn else scipy.io.loadmat(force_field_mat_name)["alpha_indexing_vector"]
}
scipy.io.savemat("training_parameters_transfer.mat", mdict)
# np.save(os.path.join(abaqus_default_directory, "training_parameters_transfer.npy"), mdict, fix_imports=True)
# np.savez(os.path.join(abaqus_default_directory, "training_parameters_transfer.npz"),
# fix_indices_list=fix_indices_list,
# orig_data_file_name=data_file_path,
# orig_config_var_name=node_variable_name,
# inp_folder=inp_folder,
# current_directory=os.getcwd(),
# results_folder_path_stress=results_folder_path_stress,
# results_folder_path_coor=results_folder_path_coor)
if __name__ == "__main__":
main()
|
py | 1a3ca54aecba809c406bef99f85292dbdc6e4752 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetConnectionTypeResult',
'AwaitableGetConnectionTypeResult',
'get_connection_type',
]
@pulumi.output_type
class GetConnectionTypeResult:
"""
Definition of the connection type.
"""
def __init__(__self__, creation_time=None, description=None, field_definitions=None, id=None, is_global=None, last_modified_time=None, name=None, type=None):
if creation_time and not isinstance(creation_time, str):
raise TypeError("Expected argument 'creation_time' to be a str")
pulumi.set(__self__, "creation_time", creation_time)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if field_definitions and not isinstance(field_definitions, dict):
raise TypeError("Expected argument 'field_definitions' to be a dict")
pulumi.set(__self__, "field_definitions", field_definitions)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if is_global and not isinstance(is_global, bool):
raise TypeError("Expected argument 'is_global' to be a bool")
pulumi.set(__self__, "is_global", is_global)
if last_modified_time and not isinstance(last_modified_time, str):
raise TypeError("Expected argument 'last_modified_time' to be a str")
pulumi.set(__self__, "last_modified_time", last_modified_time)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="creationTime")
def creation_time(self) -> str:
"""
Gets the creation time.
"""
return pulumi.get(self, "creation_time")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
Gets or sets the description.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="fieldDefinitions")
def field_definitions(self) -> Mapping[str, 'outputs.FieldDefinitionResponse']:
"""
Gets the field definitions of the connection type.
"""
return pulumi.get(self, "field_definitions")
@property
@pulumi.getter
def id(self) -> str:
"""
Gets the id of the resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="isGlobal")
def is_global(self) -> Optional[bool]:
"""
Gets or sets a Boolean value to indicate if the connection type is global.
"""
return pulumi.get(self, "is_global")
@property
@pulumi.getter(name="lastModifiedTime")
def last_modified_time(self) -> Optional[str]:
"""
Gets or sets the last modified time.
"""
return pulumi.get(self, "last_modified_time")
@property
@pulumi.getter
def name(self) -> str:
"""
Gets the name of the connection type.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type
"""
return pulumi.get(self, "type")
class AwaitableGetConnectionTypeResult(GetConnectionTypeResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetConnectionTypeResult(
creation_time=self.creation_time,
description=self.description,
field_definitions=self.field_definitions,
id=self.id,
is_global=self.is_global,
last_modified_time=self.last_modified_time,
name=self.name,
type=self.type)
def get_connection_type(automation_account_name: Optional[str] = None,
connection_type_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetConnectionTypeResult:
"""
Definition of the connection type.
:param str automation_account_name: The name of the automation account.
:param str connection_type_name: The name of connection type.
:param str resource_group_name: Name of an Azure Resource group.
"""
__args__ = dict()
__args__['automationAccountName'] = automation_account_name
__args__['connectionTypeName'] = connection_type_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:automation/v20190601:getConnectionType', __args__, opts=opts, typ=GetConnectionTypeResult).value
return AwaitableGetConnectionTypeResult(
creation_time=__ret__.creation_time,
description=__ret__.description,
field_definitions=__ret__.field_definitions,
id=__ret__.id,
is_global=__ret__.is_global,
last_modified_time=__ret__.last_modified_time,
name=__ret__.name,
type=__ret__.type)
|
py | 1a3ca5d1955c0cd0fa528756f820594348c0766b | # Copyright (c) WiPhy Development Team
# This library is released under the MIT License, see LICENSE.txt
import os
import unittest
import numpy as np
import wiphy.util.general as me
import wiphy.code.modulator as mod
import wiphy.code.im as im
import wiphy.code.duc as duc
class Test(unittest.TestCase):
def test_getGrayIndixes(self):
self.assertEqual(me.getGrayIndixes(2), [0, 1, 3, 2])
self.assertEqual(me.getGrayIndixes(4), [0, 1, 3, 2, 6, 7, 5, 4, 12, 13, 15, 14, 10, 11, 9, 8])
def test_frodiff(self):
fro = me.frodiff(np.array([1, 1j, 0, 0]), np.array([1, 0, 1j, 0]))
self.assertAlmostEqual(fro, 2.0, msg="The Frobenius norm calculation is wrong")
fro = me.frodiff(me.randn_c(int(1e6)), me.randn_c(int(1e6)))
self.assertAlmostEqual(fro / 2e6, 1.0, places=2)
def test_matmulb(self):
H = np.array([[1, 1.j], [-1.j, -1]])
codes = im.generateIMCodes("opt", 2, 1, 2, "PSK", 1, 1)
ret = me.matmulb(H, codes)
np.testing.assert_almost_equal(ret, np.matmul(H, codes))
def test_getEuclideanDistances(self):
codes = mod.generatePSKSymbols(4).reshape(4, 1, 1)
ret = me.asnumpy(me.getEuclideanDistances(np.array(codes)))
np.testing.assert_almost_equal(ret, [2., 2., 4., 4., 2., 2.])
#
codes = im.generateIMCodes("opt", 2, 1, 2, "PSK", 1, 1)
ret = me.asnumpy(me.getEuclideanDistances(np.array(codes)))
np.testing.assert_almost_equal(ret, [2.])
#
codes = im.generateIMCodes("opt", 4, 2, 4, "PSK", 1, 1)
ret = me.asnumpy(me.getEuclideanDistances(np.array(codes)))
np.testing.assert_almost_equal(ret, [1., 1., 2., 2., 1., 1.])
#
codes = duc.generateDUCCodes(2, 2)
ret = me.asnumpy(me.getEuclideanDistances(np.array(codes)))
np.testing.assert_almost_equal(ret, [16.])
def test_getMinimumEuclideanDistance(self):
codes = mod.generatePSKSymbols(4).reshape(4, 1, 1)
med = me.getMinimumEuclideanDistance(np.array(codes))
self.assertAlmostEqual(med, 2.0)
codes = mod.generateStarQAMSymbols(16).reshape(16, 1, 1)
med = me.getMinimumEuclideanDistance(np.array(codes))
self.assertAlmostEqual(med, 0.2343145750507619)
codes = im.generateIMCodes("opt", 4, 2, 4, "PSK", 4, 1)
med = me.getMinimumEuclideanDistance(np.array(codes))
self.assertAlmostEqual(med, 1.0)
codes = im.generateIMCodes("opt", 8, 4, 64, "PSK", 2, 1)
med = me.getMinimumEuclideanDistance(np.array(codes))
self.assertAlmostEqual(med, 0.5)
def test_getDFTMatrix(self):
W = me.getDFTMatrix(4)
np.testing.assert_almost_equal(W.dot(W.conj().T), np.eye(4, dtype=complex), decimal=3)
W = me.getDFTMatrix(8)
np.testing.assert_almost_equal(W.dot(W.conj().T), np.eye(8, dtype=complex), decimal=3)
W = me.getDFTMatrix(16)
np.testing.assert_almost_equal(W.dot(W.conj().T), np.eye(16, dtype=complex), decimal=3)
def test_inv_dB(self):
self.assertAlmostEqual(me.inv_dB(0.0), 1.0, msg="The implementation of inv_dB may be wrong.")
def test_randn(self):
ret = me.randn(int(1e6))
meanPower = np.mean(np.power(np.abs(ret), 2))
self.assertAlmostEqual(meanPower, 1.0, places=2, msg="The mean power of randn differs from 1.0")
def test_randn_c(self):
ret = me.randn_c(int(1e6))
meanPower = np.mean(np.power(np.abs(ret), 2))
self.assertAlmostEqual(meanPower, 1.0, places=2, msg="The mean power of randn_c differs from 1.0")
def test_countErrorBits(self):
self.assertEqual(me.countErrorBits(1, 2), 2)
self.assertEqual(me.countErrorBits(1, 5), 1)
def test_getXORtoErrorBitsArray(self):
a = me.getXORtoErrorBitsArray(4)
np.testing.assert_almost_equal(a, np.array([0, 1, 1, 2, 1]))
a = me.getXORtoErrorBitsArray(16)
np.testing.assert_almost_equal(a, np.array([0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1]))
def test_getErrorBitsTable(self):
t = me.getErrorBitsTable(4)
np.testing.assert_almost_equal(t, np.array([[0, 1, 1, 2], [1, 0, 2, 1], [1, 2, 0, 1], [2, 1, 1, 0]]))
t = me.getErrorBitsTable(8)
np.testing.assert_almost_equal(t, np.array(
[[0, 1, 1, 2, 1, 2, 2, 3], [1, 0, 2, 1, 2, 1, 3, 2], [1, 2, 0, 1, 2, 3, 1, 2], [2, 1, 1, 0, 3, 2, 2, 1],
[1, 2, 2, 3, 0, 1, 1, 2], [2, 1, 3, 2, 1, 0, 2, 1], [2, 3, 1, 2, 1, 2, 0, 1], [3, 2, 2, 1, 2, 1, 1, 0]]))
def test_getRandomHermitianMatrix(self):
np.set_printoptions(linewidth=np.inf)
H = me.getRandomHermitianMatrix(4)
np.testing.assert_almost_equal(H, H.conj().T)
H = me.getRandomHermitianMatrix(8)
np.testing.assert_almost_equal(H, H.conj().T)
H = me.getRandomHermitianMatrix(16)
np.testing.assert_almost_equal(H, H.conj().T)
def test_convertIntToBinArray(self):
np.testing.assert_almost_equal(me.convertIntToBinArray(0, 1), [0])
np.testing.assert_almost_equal(me.convertIntToBinArray(2, 2), [1, 0])
np.testing.assert_almost_equal(me.convertIntToBinArray(3, 2), [1, 1])
np.testing.assert_almost_equal(me.convertIntToBinArray(4, 3), [1, 0, 0])
def test_CayleyTransform(self):
U = me.CayleyTransform(me.asnumpy(me.getRandomHermitianMatrix(4)))
np.testing.assert_almost_equal(me.asnumpy(U.dot(U.conj().T)), np.eye(4, dtype=complex))
U = me.CayleyTransform(me.asnumpy(me.getRandomHermitianMatrix(8)))
np.testing.assert_almost_equal(me.asnumpy(U.dot(U.conj().T)), np.eye(8, dtype=complex))
U = me.CayleyTransform(me.asnumpy(me.getRandomHermitianMatrix(16)))
np.testing.assert_almost_equal(me.asnumpy(U.dot(U.conj().T)), np.eye(16, dtype=complex))
def test_kurtosis(self):
self.assertAlmostEqual(me.kurtosis(me.randn(10000000)), 0.0, places=2)
if __name__ == '__main__':
unittest.main()
|
py | 1a3ca6f1b3791df107c0d9bf03efce635f336cc0 | import numpy as np
import matplotlib.pyplot as plt
from modelling.utilities import ProgressBar
class Solver:
def __init__(self, function, initial=np.array([0,0])):
self._function = function
self._initial = initial
self._solution = np.array([])
self._solutions = np.zeros((len(function),1))
self._time = np.array([])
self._label = "none"
self._fig, self._ax = plt.subplots()
self._functions = function
def __euler(self, start, end, h):
x0 = self._initial[0]
result_a = np.arange(x0, end, h)
time_a = np.arange(x0, end, h)
result_a[0] = self._initial[1]
for (i, previous_result) in enumerate(result_a[:-1]):
result_a[i+1] = previous_result + h*self._function(time_a[i], previous_result)
result_b = np.arange(x0, start-h, -h)
time_b = np.arange(x0, start-h, -h)
result_b[0] = self._initial[1]
for (i, previous_result) in enumerate(result_b[:-1]):
result_b[i+1] = previous_result + -h*self._function(time_b[i], previous_result)
result = np.concatenate((result_b[::-1], result_a[1:]))
time = np.concatenate((time_b[::-1], time_a[1:]))
self._solution = result
self._time = time
self._label = "Euler"
return np.stack([time, result])
def __heun(self, start, end, h):
x0 = self._initial[0]
result_a = np.arange(x0, end, h)
time_a = np.arange(x0, end, h)
result_a[0] = self._initial[1]
for (i, previous_result) in enumerate(result_a[:-1]):
y_i1 = previous_result + h*self._function(time_a[i], previous_result)
result_a[i+1] = previous_result + h/2*(self._function(time_a[i], previous_result) + self._function(time_a[i+1], y_i1))
result_b = np.arange(x0, start-h, -h)
time_b = np.arange(x0, start-h, -h)
result_b[0] = self._initial[1]
for (i, previous_result) in enumerate(result_b[:-1]):
y_i1 = previous_result - h*self._function(time_b[i], previous_result)
result_b[i+1] = previous_result - h/2*(self._function(time_b[i], previous_result) + self._function(time_b[i+1], y_i1))
result = np.concatenate((result_b[::-1], result_a[1:]))
time = np.concatenate((time_b[::-1], time_a[1:]))
self._solution = result
self._time = time
self._label = "Heun"
return np.stack([time, result])
def __rk4(self, start, end, h):
result = np.arange(start, end, h)
time = np.arange(start, end, h)
        result[0] = self._initial[1]  # initial value; self._initial[0] is the start time, as in __euler/__heun
for (i,_) in enumerate(time[1:],1):
k1 = h*self._function(time[i-1], result[i-1])
k2 = h*self._function(time[i-1] + h/2, result[i-1] + k1/2)
k3 = h*self._function(time[i-1] + h/2, result[i-1] + k2/2)
k4 = h*self._function(time[i-1] + h, result[i-1] + k3)
result[i] = result[i-1] + 1/6*(k1+2*k2+2*k3+k4)
self._solution = result
self._time = time
self._label = "Runge-Kutta 4"
return np.stack([time, result], axis=1)
def __rk_4_vec(self, start, end, h):
result = np.zeros([len(self._functions), int((end-start)/h)])
time = np.arange(start, end, h)
result[:,0] = self._initial[1:].T
k_values = np.zeros((len(self._functions), 4))
for (i,t) in enumerate(time[:-1],0):
for (k, func) in enumerate(self._functions):
k_values[k,0] = h*func(t, *result[:,i])
for (k, func) in enumerate(self._functions):
# print(result[:,i] + k_values[k,0]/2, k_values[:,0])
k_values[k,1] = h*func(t + h/2, *(result[:,i] + k_values[:,0]/2))
for (k, func) in enumerate(self._functions):
k_values[k,2] = h*func(t + h/2, *(result[:,i] + k_values[:,1]/2))
for (k, func) in enumerate(self._functions):
k_values[k,3] = h*func(t + h, *(result[:,i] + k_values[:,2]))
result[:,i+1] = result[:,i] + 1/6*(k_values[:,0]+2*k_values[:,1]+2*k_values[:,2]+k_values[:,3])
self._solutions = result
self._time = time
self._label = "Runge-Kutta 4"
return np.vstack([time, result])
def __rk45_vec(self, start, end, error=0.0001, h=0.5, limit=100):
progress = ProgressBar()
t = self._initial[0]
result = np.zeros((len(self._functions),1))
result[:, 0] = self._initial[1:].T
# print(result)
time = np.array([t])
while(t < end):
h, y5 = self.__rk45_step_vec(limit, h, t, result[:,-1], error)
t += h
# print(result.shape, y5.shape)
result = np.concatenate([result, y5], axis=1)
# print(result)
time = np.append(time, [t])
# print(t)
progress.step(h/(end-start)*100)
progress.show()
h = -h
t = self._initial[0]
while(start < t):
h, y5 = self.__rk45_step_vec(limit, h, t, result[:,0], error)
t += h
            result = np.concatenate([y5, result], axis=1)  # prepend the new column of states
time = np.append([t], time)
progress.step(-h/(end-start)*100)
progress.show()
self._solutions = result
self._time = time
self._label = "Runge-Kutta 45"
return np.vstack([time, result])
def __rk45(self, start, end, error=0.0001, h=0.5, limit=100):
t = self._initial[0]
result = np.array([self._initial[1]])
time = np.array([t])
while(t < end):
h, y5 = self.__rk45_step(limit, h, t, result[-1], error)
t += h
result = np.append(result, [y5])
time = np.append(time, [t])
h = -h
t = self._initial[0]
while(start < t):
h, y5 = self.__rk45_step(limit, h, t, result[0], error)
t += h
result = np.append([y5], result)
time = np.append([t], time)
self._solution = result
self._time = time
self._label = "Runge-Kutta 45"
return np.stack([time, result])
def __rk45_step_vec(self, limit, h, ti, yi, max_error, error_factor=2):
i = 0
k_values = np.zeros((len(self._functions), 6))
while (i < limit):
i += 1
for (k, func) in enumerate(self._functions):
k_values[k,0] = h*func(ti, *yi)
for (k, func) in enumerate(self._functions):
                k_values[k,1] = h*func(ti + h/4, *(yi + k_values[:,0]/4))  # RKF45: k2 uses y + k1/4
for (k, func) in enumerate(self._functions):
k_values[k,2] = h*func(ti + h*3/8, *(yi + k_values[:,0]*3/32 + k_values[:,1]*9/32))
for (k, func) in enumerate(self._functions):
k_values[k,3] = h*func(ti + h*12/13, *(yi + k_values[:,0]*1932/2197 - k_values[:,1]*7200/2197 + k_values[:,2]*7296/2197))
for (k, func) in enumerate(self._functions):
k_values[k,4] = h*func(ti + h, *(yi + k_values[:,0]*439/216 - k_values[:,1]*8 + k_values[:,2]*3680/513 - k_values[:,3]*845/4104))
for (k, func) in enumerate(self._functions):
                k_values[k,5] = h*func(ti + h/2, *(yi - k_values[:,0]*8/27 + k_values[:,1]*2 - k_values[:,2]*3544/2565 + k_values[:,3]*1859/4104 - k_values[:,4]*11/40))  # keep all k terms inside the state argument
y5 = yi + 16/135*k_values[:,0] + 6656/12825*k_values[:,2] + 28561/56430*k_values[:,3] - 9/50*k_values[:,4] + 2/55*k_values[:,5]
y4 = yi + 25/216*k_values[:,0] + 1408/2565*k_values[:,2] + 2197/4104*k_values[:,3] - 1/5*k_values[:,4]
error = np.amax(np.abs(y5-y4))
# print(error, max_error)
if (error > max_error):
h /= error_factor
elif (error < max_error/error_factor):
h *= error_factor
break
else:
break
return h, np.reshape(y5, (len(self._functions),1))
def __rk45_step(self, limit, h, ti, yi, error):
i = 0
while (i < limit):
i += 1
k1 = h*self._function(ti, yi)
k2 = h*self._function(ti+h/4, yi+k1/4)
k3 = h*self._function(ti+3*h/8, yi+3*k1/32+9*k2/32)
k4 = h*self._function(ti+12*h/13, yi+1932/2197*k1-7200/2197*k2+7296/2197*k3)
k5 = h*self._function(ti+h, yi+k1*439/216-k2*8+k3*3680/513-k4*845/4104)
k6 = h*self._function(ti+h/2, yi-8/27*k1+2*k2-3544/2565*k3+1859/4104*k4-11/40*k5)
y5 = yi + 16/135*k1+6656/12825*k3+28561/56430*k4-9/50*k5+2/55*k6
y4 = yi + 25/216*k1+1408/2565*k3+2197/4104*k4-1/5*k5
if (abs(y5-y4) > error):
h /= 2
else:
break
return h, y5
def run(self, method, start, end, h):
if (method == "euler"):
return self.__euler(start, end, h)
elif (method == "heun"):
return self.__heun(start, end, h)
elif (method == "rk4"):
return self.__rk_4_vec(start, end, h)
elif (method == "rk45"):
return self.__rk45_vec(start, end, h=h)
else:
return None
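    # Usage sketch (illustrative only; the Lotka-Volterra right-hand sides and the
    # step size below are assumptions, not taken from this module):
    #
    #   prey = lambda t, x, y: 1.0 * x - 0.5 * x * y
    #   predator = lambda t, x, y: 0.2 * x * y - 0.3 * y
    #   solver = Solver([prey, predator], initial=np.array([0, 10, 5]))
    #   solver.run("rk4", start=0, end=50, h=0.01)   # vectorised RK4 over both equations
    #   solver.plot(label=["prey", "predator"])
    #   solver.show()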
    def plot(self, label=None):
        if not label:
            # Fall back to the method name, repeated for every solution component.
            label = [self._label] * len(self._solutions)
        for (i, sol) in enumerate(self._solutions):
            self._ax.plot(self._time, sol, label=label[i])
def show(self):
self._ax.legend()
plt.title("Populations vs Time")
plt.legend()
plt.xlabel("Time")
plt.ylabel("Population")
plt.show() |
py | 1a3ca765582ddf9e19e514a479be08c70bec251a | # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
from __future__ import absolute_import
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
def test_empty_listarray():
a = ak.Array(
ak.layout.ListArray64(
ak.layout.Index64(np.array([], dtype=np.int64)),
ak.layout.Index64(np.array([], dtype=np.int64)),
ak.layout.NumpyArray(np.array([])),
)
)
assert ak.to_list(a * 3) == []
starts = ak.layout.Index64(np.array([], dtype=np.int64))
stops = ak.layout.Index64(np.array([3, 3, 5], dtype=np.int64))
content = ak.layout.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5]))
array = ak.Array(ak.layout.ListArray64(starts, stops, content))
array + array
|
py | 1a3ca7822903f995c0e9cc377b773c666b7c6118 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from sqlalchemy import Column, Integer, String, ForeignKey, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy import create_engine
from passlib.apps import custom_app_context as pwd_context
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from itsdangerous import BadSignature, SignatureExpired
from data_control import get_unique_str
from settings import *
Base = declarative_base() # initialisation the database
secret_key = get_unique_str(32) # create secret_key
# create session
engine = create_engine('sqlite:///catalog.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# TODO: User model
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
username = Column(String(32), index=True)
picture = Column(String(250), default='/img/no-img.png')
first_name = Column(String(25), default=None)
last_name = Column(String(25), default=None)
email = Column(String(40))
password_hash = Column(String(64))
status = Column(String(10), default='user')
def hash_password(self, password):
"""
hash password
:param password:
:return void:
"""
self.password_hash = pwd_context.encrypt(password)
def verify_password(self, password):
"""
Password verification
:param password:
:return bool:
"""
return pwd_context.verify(password, self.password_hash)
@property
def get_full_name(self):
"""
Return full name (first and last name)
:return string:
"""
return "%s %s" % (self.first_name, self.last_name)
def generate_auth_token(self, expiration=3600):
"""
Generate authentication token
:param expiration:
:return string: (token)
"""
s = Serializer(secret_key, expires_in=expiration)
return s.dumps({'uid': self.id})
@staticmethod
def verify_auth_token(token):
"""
Try to load token, success return user id false return None
:param token:
:return mix:
"""
s = Serializer(secret_key)
try:
data = s.loads(token)
except SignatureExpired:
# Valid Token, but expired
return None
except BadSignature:
# Invalid Token
return None
uid = data['uid']
return uid
@property
def serialize(self):
"""
Return user data
:return dict:
"""
return {
'id': self.id,
'username': self.username,
'picture': self.picture,
'first_name': self.first_name,
'last_name': self.last_name,
'email': self.email,
'status': self.status
}
# TODO: Image model
class Image(Base):
__tablename__ = 'image'
id = Column(Integer, primary_key=True)
product = Column(Integer, nullable=False)
url = Column(String(250))
@property
def serialize(self):
"""
Return user data
:return dict:
"""
return {
'id': self.id,
'url': self.url
}
# TODO: Category model
class Category(Base):
__tablename__ = 'category'
id = Column(Integer, primary_key=True)
name = Column(String(30))
@property
def serialize(self):
"""
Return user data
:return dict:
"""
return {
'id': self.id,
'name': self.name
}
# TODO: Catalog model
class Catalog(Base):
__tablename__ = 'catalog'
id = Column(Integer, primary_key=True)
model = Column(String(30))
title = Column(String(250))
description = Column(String(250))
category = Column(Integer, ForeignKey("category.id"), nullable=False)
price = Column(Integer, nullable=False)
author = Column(Integer, ForeignKey("user.id"), nullable=False)
def get_author(self):
"""
Return product`s author
:return object:
"""
return session.query(User).filter_by(id=self.author).one().serialize
def get_images(self):
"""
Prepare list of images for JSON
:return list:
"""
images = session.query(Image).filter_by(product=self.id).all()
return [img.serialize for img in images]
def get_category(self):
"""
Return category
:return object:
"""
category = session.query(Category).filter_by(id=self.category).first()
return category.serialize
@property
def serialize(self):
"""
Return user data
:return dict:
"""
return {
'id': self.id,
'model': self.model,
'title': self.title,
'description': self.description,
'brand': self.get_category(),
'price': self.price,
'images': self.get_images(),
'author': self.get_author(),
}
# TODO: Database actions
def user_exist(username):
"""
Check user exist
:param username:
:return bool:
"""
return session.query(User).filter_by(username=username).first() is not None
def email_exist(email):
"""
Check user exist
:param email:
:return bool:
"""
return session.query(User).filter_by(email=email).first() is not None
def get_user_by_email(email):
"""
Return user by email or None
:param email:
:return object:
"""
return session.query(User).filter_by(email=email).first() or None
def create_user(username, password, first_name, last_name, email,
picture=None):
"""
Create a new user
:param username:
:param password:
:param first_name:
:param last_name:
:param picture:
:param email:
:return object:
"""
user = User(username=username, first_name=first_name,
last_name=last_name, email=email, picture=picture)
user.hash_password(password)
session.add(user)
session.commit()
return user
def get_user_by_username(username):
"""
Return user by username
:param username:
:return object:
"""
return session.query(User).filter_by(username=username).first()
def get_user_by_id(uid):
"""
Return user by user id
:param uid:
:return return:
"""
return session.query(User).filter_by(id=uid).one()
def update_user_photo(photo, uid):
"""
Update user photo and remove old file
:param photo:
:param uid:
:return object:
"""
user = get_user_by_id(uid)
if user.picture != '/img/no-img.png' and os.path.isfile(user.picture):
os.remove('%s%s' % (BASE_DIR, user.picture))
user.picture = photo
session.commit()
return user
def update_user(usr):
"""
Update user and return new data
:param usr:
:return object:
"""
user = session.query(User).filter_by(id=usr['uid']).first()
user.username = usr['username']
user.first_name = usr['first_name']
user.last_name = usr['last_name']
user.email = usr['email']
session.commit()
return user
def remove_user(uid):
"""
Remove user by user id
:param uid:
:return void:
"""
user = session.query(User).filter_by(id=uid).first()
session.delete(user)
session.commit()
def check_category(name):
"""
Check category name
:param name:
:return bool:
"""
return session.query(Category).filter_by(name=name).first() is None
def create_category(name):
"""
Create a new category
:param name:
:return object:
"""
category = Category(name=name)
session.add(category)
session.commit()
return category
def get_categories():
"""
Return list of categories
:return object:
"""
return session.query(Category).all()
def remove_category(category_id):
"""
Remove category
:param category_id:
:return void:
"""
category = session.query(Category).filter_by(id=category_id).first()
session.delete(category)
session.commit()
def create_item(title, description, model, category, author, price):
"""
Create item in catalog
:param title:
:param description:
:param model:
:param category:
:param author:
:param price:
:return object:
"""
item = Catalog(title=title, model=model, description=description,
category=category, author=author, price=price)
session.add(item)
session.commit()
return item
def get_items(limit, offset=None):
"""
Return items from catalog with limit and offset
:param limit:
:param offset:
:return object:
"""
return session.query(Catalog).offset(offset).limit(limit)
def get_items_by_user(uid):
"""
Return list of items by user id
:param uid:
:return object:
"""
return session.query(Catalog).filter_by(author=uid).all() or []
def get_items_by_category(category_id, limit, offset=None):
"""
Return items from catalog by category with limit and offset
:param category_id:
:param limit:
:param offset:
:return object:
"""
return session.query(Catalog).filter_by(
category=category_id).offset(offset).limit(limit)
def get_item_by_id(item_id):
"""
Return item by id
:param item_id:
:return object:
"""
return session.query(Catalog).filter_by(id=item_id).first()
def update_item(item, item_id):
"""
Update item
:param item:
:param item_id:
:return void:
"""
current_item = session.query(Catalog).filter_by(id=item_id).first()
current_item.title = item['title']
current_item.model = item['model']
current_item.description = item['description']
current_item.category = item['brand']
current_item.author = item['author']
current_item.price = item['price']
session.commit()
return current_item
def delete_item(item_id):
"""
Remove item by id
:param item_id:
:return void:
"""
item = session.query(Catalog).filter_by(id=item_id).first()
session.delete(item)
session.commit()
def add_images(images, item_id):
"""
Add the images data into database for item
:param images: list
:param item_id: integer
:return object:
"""
objects = list()
# prepare saving data
for image in images:
objects.append(Image(product=item_id, url=image))
# saving objects
session.bulk_save_objects(objects)
session.commit()
# return list of images
return session.query(Image).filter_by(product=item_id).all()
def get_images_by_item_id(item_id):
"""
return list of images by item id
:param item_id:
:return object:
"""
return session.query(Image).filter_by(product=item_id).all() or []
def remove_images_by_item_id(item_id):
"""
Remove images
:param item_id:
:return:
"""
session.query(Image).filter_by(product=item_id).delete()
session.commit()
def category_exist(cat_id):
"""
Check if category exist
:param cat_id:
:return:
"""
return session.query(Category).filter_by(id=cat_id).first() is not None
def update_category(cat_id, name):
"""
Update category
:param cat_id:
:param name:
:return void:
"""
cat = session.query(Category).filter_by(id=cat_id).first()
cat.name = name
session.commit()
def delete_category(cat_id):
"""
Remove category from database
:param cat_id:
:return void:
"""
    session.query(Category).filter_by(id=cat_id).delete()
session.commit()
Base.metadata.create_all(engine)
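# Usage sketch (all literal values below are illustrative assumptions, not taken
# from this module):
#
#   user = create_user("alice", "s3cret", "Alice", "Smith", "alice@example.com")
#   brand = create_category("Fender")
#   item = create_item("Stratocaster", "Solid-body electric guitar", "ST-62",
#                      brand.id, user.id, 1200)
#   add_images(["/img/strat_front.png"], item.id)
#   print(item.serialize)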
|
py | 1a3ca9eeb2fa3abff5dad07ef5ee8fc220e1cb00 | """
Sponge Knowledge Base
Demo Plus
"""
from java.lang import System
from os import listdir
from os.path import isfile, join, isdir
class DrawAndUploadDoodle(Action):
def onConfigure(self):
self.withLabel("Draw and upload a doodle").withDescription("Shows a canvas to draw a doodle and uploads it to the server")
self.withArg(
BinaryType("image").withLabel("Doodle").withMimeType("image/png")
.withFeatures({"characteristic":"drawing", "width":300, "height":250, "background":"FFFFFF", "color":"000000", "strokeWidth":2})
)
self.withResult(StringType().withLabel("Status"))
self.withFeatures({"icon":"brush"})
def onCall(self, image):
if not sponge.getVariable("demo.readOnly", False):
filename = str(System.currentTimeMillis()) + ".png"
SpongeUtils.writeByteArrayToFile(image, sponge.getProperty("doodlesDir") + "/" + filename)
return "Uploaded as " + filename
else:
return "Uploading disabled in the read only mode"
class ListDoodles(Action):
def onConfigure(self):
self.withLabel("List doodles").withDescription("Returns a list of doodle filenames").withFeatures({"visible":False})
self.withNoArgs().withResult(ListType(StringType()).withLabel("Doodles"))
def onCall(self):
dir = sponge.getProperty("doodlesDir")
doodles = [f for f in listdir(dir) if isfile(join(dir, f)) and f.endswith(".png")] if isdir(dir) else []
return sorted(doodles, reverse=True)
class ViewDoodle(Action):
def onConfigure(self):
self.withLabel("View a doodle").withDescription("Views a doodle")
self.withArg(StringType("image").withLabel("Doodle name").withProvided(ProvidedMeta().withValue().withValueSet().withOverwrite()))
self.withResult(BinaryType().withAnnotated().withMimeType("image/png").withLabel("Doodle image"))
self.withFeature("icon", "drawing")
def onCall(self, name):
return AnnotatedValue(SpongeUtils.readFileToByteArray(sponge.getProperty("doodlesDir") + "/" + name)).withFeatures({"filename":"doodle_" + name})
def onProvideArgs(self, context):
if "image" in context.provide:
doodles = sponge.call("ListDoodles")
context.provided["image"] = ProvidedValue().withValue(doodles[0] if doodles else None).withValueSet(doodles)
def onStartup():
sponge.logger.info(str(sponge.call("ListDoodles")))
|
py | 1a3caa3127698557766d9e641a43febec91642d0 | """
Support for SSH access.
For more details about this platform, please refer to the documentation at
https://github.com/custom-components/switch.ssh
"""
import base64
import paramiko
import logging
import voluptuous as vol
from datetime import timedelta
import json
import asyncio
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
from homeassistant.components.switch import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME, CONF_HOST, CONF_USERNAME, CONF_PASSWORD,
CONF_VALUE_TEMPLATE, CONF_PORT,
STATE_UNKNOWN, CONF_UNIT_OF_MEASUREMENT)
__version__ = '0.2.2'
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'switch'
DEFAULT_NAME = 'SSH'
DEFAULT_SSH_PORT = 22
DEFAULT_INTERVAL = 30
CONF_KEY = 'key'
CONF_INTERVAL = 'interval'
CONF_COMMAND_ON = 'command_on'
CONF_COMMAND_OFF = 'command_off'
CONF_COMMAND_STATUS = 'command_status'
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=30)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_KEY): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_SSH_PORT): cv.port,
vol.Required(CONF_COMMAND_ON): cv.string,
vol.Required(CONF_COMMAND_OFF): cv.string,
vol.Required(CONF_COMMAND_STATUS): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
})
async def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
dev = []
dev.append(SSHSwitch(hass, config))
async_add_devices(dev, True)
class SSHSwitch(Entity):
def __init__(self, hass, config):
"""Initialize the scanner."""
self._name = config.get(CONF_NAME)
self._host = config.get(CONF_HOST)
self._username = config.get(CONF_USERNAME)
self._password = config.get(CONF_PASSWORD)
self._key = config.get(CONF_KEY)
self._interval = config.get(CONF_INTERVAL)
self._port = config.get(CONF_PORT)
self._command_on = config.get(CONF_COMMAND_ON)
self._command_off = config.get(CONF_COMMAND_OFF)
self._command_status = config.get(CONF_COMMAND_STATUS)
self._value_template = config.get(CONF_VALUE_TEMPLATE)
self._ssh = None
self._state = None
self._connected = False
self._attributes = {}
if self._value_template is not None:
self._value_template.hass = hass
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return 'mdi:folder-key-network'
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def is_on(self):
"""Return true if switch is on."""
return self._state == "on"
@property
def state_attributes(self):
"""Return the device state attributes."""
return self._attributes
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self, **kwargs):
from paramiko import ssh_exception
try:
if not self._connected:
self._connect()
"""Exit if still not connected by now."""
if not self._connected:
return None
stdin, stdout, stderr = self._ssh.exec_command(self._command_status)
value = ""
for line in stdout:
value = line.strip('\n')
if self._value_template is not None:
self._state = self._value_template.render_with_possible_json_value(
value, STATE_UNKNOWN)
else:
self._state = value
_LOGGER.debug(self._state)
except Exception as err:
_LOGGER.error("Update error: %s", str(err))
self._disconnect()
async def async_turn_on(self, **kwargs):
"""Instruct the switch to turn on."""
self._state = "on"
self._execute(self._command_on)
async def async_turn_off(self, **kwargs):
"""Instruct the switch to turn off."""
self._state = "off"
self._execute(self._command_off)
def _execute(self, command):
"""Execute remote command."""
from paramiko import ssh_exception
cmd = command.strip('\n')
try:
if not self._connected:
self._connect()
"""Exit if still not connected by now."""
if not self._connected:
_LOGGER.error("Unable to establish connection.")
return None
"""
Option 1:
"""
stdin, stdout, stderr = self._ssh.exec_command(cmd)
"""
Option 2:
chan = self._ssh.invoke_shell()
stdin = chan.makefile('wb')
stdout = chan.makefile('r')
stdin.write(cmd + '\n')
chan.close()
"""
for line in stdout:
_LOGGER.debug("Raw Line Response: %s", str(line))
"""Ignore any response"""
return None
except Exception as err:
_LOGGER.error("Unexpected SSH error: %s", str(err))
self._disconnect()
def _connect(self):
"""Connect to the SSH server."""
from paramiko import RSAKey, SSHClient, ssh_exception
from base64 import b64decode
try:
key = paramiko.RSAKey(data=base64.b64decode(self._key))
client = paramiko.SSHClient()
client.get_host_keys().add(self._host, 'ssh-rsa', key)
client.connect(self._host, username=self._username, password=self._password)
self._ssh = client
self._connected = True
except ssh_exception.BadHostKeyException as err:
_LOGGER.error("Host Key Mismatch: %s", str(err))
self._disconnect()
except:
_LOGGER.error("Connection refused. SSH enabled?")
self._disconnect()
def _disconnect(self):
"""Disconnect the current SSH connection."""
try:
self._ssh.close()
except Exception:
pass
finally:
self._ssh = None
self._connected = False
|
py | 1a3caae226ae5bc50a8f60aa9582083173ed3e6c | import datetime
from moto.organizations import utils
def test_make_random_org_id():
org_id = utils.make_random_org_id()
org_id.should.match(utils.ORG_ID_REGEX)
def test_make_random_root_id():
root_id = utils.make_random_root_id()
root_id.should.match(utils.ROOT_ID_REGEX)
def test_make_random_ou_id():
root_id = utils.make_random_root_id()
ou_id = utils.make_random_ou_id(root_id)
ou_id.should.match(utils.OU_ID_REGEX)
def test_make_random_account_id():
account_id = utils.make_random_account_id()
account_id.should.match(utils.ACCOUNT_ID_REGEX)
def test_make_random_create_account_status_id():
create_account_status_id = utils.make_random_create_account_status_id()
create_account_status_id.should.match(utils.CREATE_ACCOUNT_STATUS_ID_REGEX)
def test_make_random_policy_id():
policy_id = utils.make_random_policy_id()
policy_id.should.match(utils.POLICY_ID_REGEX)
def validate_organization(response):
org = response["Organization"]
sorted(org.keys()).should.equal(
[
"Arn",
"AvailablePolicyTypes",
"FeatureSet",
"Id",
"MasterAccountArn",
"MasterAccountEmail",
"MasterAccountId",
]
)
org["Id"].should.match(utils.ORG_ID_REGEX)
org["MasterAccountId"].should.equal(utils.MASTER_ACCOUNT_ID)
org["MasterAccountArn"].should.equal(
utils.MASTER_ACCOUNT_ARN_FORMAT.format(org["MasterAccountId"], org["Id"])
)
org["Arn"].should.equal(
utils.ORGANIZATION_ARN_FORMAT.format(org["MasterAccountId"], org["Id"])
)
org["MasterAccountEmail"].should.equal(utils.MASTER_ACCOUNT_EMAIL)
org["FeatureSet"].should.be.within(["ALL", "CONSOLIDATED_BILLING"])
org["AvailablePolicyTypes"].should.equal(
[{"Type": "SERVICE_CONTROL_POLICY", "Status": "ENABLED"}]
)
def validate_roots(org, response):
response.should.have.key("Roots").should.be.a(list)
response["Roots"].shouldnt.equal([])
root = response["Roots"][0]
root.should.have.key("Id").should.match(utils.ROOT_ID_REGEX)
root.should.have.key("Arn").should.equal(
utils.ROOT_ARN_FORMAT.format(org["MasterAccountId"], org["Id"], root["Id"])
)
root.should.have.key("Name").should.be.a(str)
root.should.have.key("PolicyTypes").should.equal([])
def validate_organizational_unit(org, response):
response.should.have.key("OrganizationalUnit").should.be.a(dict)
ou = response["OrganizationalUnit"]
ou.should.have.key("Id").should.match(utils.OU_ID_REGEX)
ou.should.have.key("Arn").should.equal(
utils.OU_ARN_FORMAT.format(org["MasterAccountId"], org["Id"], ou["Id"])
)
ou.should.have.key("Name").should.be.a(str)
def validate_account(org, account):
sorted(account.keys()).should.equal(
["Arn", "Email", "Id", "JoinedMethod", "JoinedTimestamp", "Name", "Status"]
)
account["Id"].should.match(utils.ACCOUNT_ID_REGEX)
account["Arn"].should.equal(
utils.ACCOUNT_ARN_FORMAT.format(
org["MasterAccountId"], org["Id"], account["Id"]
)
)
account["Email"].should.match(utils.EMAIL_REGEX)
account["JoinedMethod"].should.be.within(["INVITED", "CREATED"])
account["Status"].should.be.within(["ACTIVE", "SUSPENDED"])
account["Name"].should.be.a(str)
account["JoinedTimestamp"].should.be.a(datetime.datetime)
def validate_create_account_status(create_status):
sorted(create_status.keys()).should.equal(
[
"AccountId",
"AccountName",
"CompletedTimestamp",
"Id",
"RequestedTimestamp",
"State",
]
)
create_status["Id"].should.match(utils.CREATE_ACCOUNT_STATUS_ID_REGEX)
create_status["AccountId"].should.match(utils.ACCOUNT_ID_REGEX)
create_status["AccountName"].should.be.a(str)
create_status["State"].should.equal("SUCCEEDED")
create_status["RequestedTimestamp"].should.be.a(datetime.datetime)
create_status["CompletedTimestamp"].should.be.a(datetime.datetime)
def validate_policy_summary(org, summary):
summary.should.be.a(dict)
summary.should.have.key("Id").should.match(utils.POLICY_ID_REGEX)
summary.should.have.key("Arn").should.equal(
utils.SCP_ARN_FORMAT.format(org["MasterAccountId"], org["Id"], summary["Id"])
)
summary.should.have.key("Name").should.be.a(str)
summary.should.have.key("Description").should.be.a(str)
summary.should.have.key("Type").should.equal("SERVICE_CONTROL_POLICY")
summary.should.have.key("AwsManaged").should.be.a(bool)
def validate_service_control_policy(org, response):
response.should.have.key("PolicySummary").should.be.a(dict)
response.should.have.key("Content").should.be.a(str)
validate_policy_summary(org, response["PolicySummary"])
def validate_account_created(accounts_list, account_id):
account_created = False
for account in accounts_list:
if account_id == account["Id"]:
account_created = True
assert account_created
def validate_account_closed(accounts_list, account_id):
for account in accounts_list:
if account_id == account["Id"]:
assert False
|
py | 1a3cabb8c01a41285cc8ebe9b1466896e8de386b | # flake8: noqa
from . import dataclasses
from .class_validators import root_validator, validator
from .decorator import validate_arguments
from .env_settings import BaseSettings
from .error_wrappers import ValidationError
from .errors import *
from .fields import Field, Required, Schema
from .main import *
from .networks import *
from .parse import Protocol
from .tools import *
from .types import *
from .version import VERSION
# WARNING __all__ from .errors is not included here, it will be removed as an export here in v2
# please use "from pydantic.errors import ..." instead
__all__ = [
# dataclasses
'dataclasses',
# class_validators
'root_validator',
'validator',
# decorator
'validate_arguments',
# env_settings
'BaseSettings',
# error_wrappers
'ValidationError',
# fields
'Field',
'Required',
'Schema',
# main
'BaseConfig',
'BaseModel',
'Extra',
'compiled',
'create_model',
'validate_model',
# network
'AnyUrl',
'AnyHttpUrl',
'HttpUrl',
'stricturl',
'EmailStr',
'NameEmail',
'IPvAnyAddress',
'IPvAnyInterface',
'IPvAnyNetwork',
'PostgresDsn',
'RedisDsn',
'validate_email',
# parse
'Protocol',
# tools
'parse_file_as',
'parse_obj_as',
'parse_raw_as',
# types
'NoneStr',
'NoneBytes',
'StrBytes',
'NoneStrBytes',
'StrictStr',
'ConstrainedBytes',
'conbytes',
'ConstrainedList',
'conlist',
'ConstrainedSet',
'conset',
'ConstrainedStr',
'constr',
'PyObject',
'ConstrainedInt',
'conint',
'PositiveInt',
'NegativeInt',
'ConstrainedFloat',
'confloat',
'PositiveFloat',
'NegativeFloat',
'ConstrainedDecimal',
'condecimal',
'UUID1',
'UUID3',
'UUID4',
'UUID5',
'FilePath',
'DirectoryPath',
'Json',
'JsonWrapper',
'SecretStr',
'SecretBytes',
'StrictBool',
'StrictInt',
'StrictFloat',
'PaymentCardNumber',
'ByteSize',
# version
'VERSION',
]
|
py | 1a3cac2306ff2f5577c8d65d4f7df66ef74eb85e | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import datasets
from huggingface_hub.inference_api import InferenceApi
from .testing_utils import with_production_testing
class InferenceApiTest(unittest.TestCase):
def read(self, filename: str) -> bytes:
with open(filename, "rb") as f:
bpayload = f.read()
return bpayload
@with_production_testing
def test_simple_inference(self):
api = InferenceApi("bert-base-uncased")
inputs = "Hi, I think [MASK] is cool"
results = api(inputs)
self.assertIsInstance(results, list)
result = results[0]
self.assertIsInstance(result, dict)
self.assertTrue("sequence" in result)
self.assertTrue("score" in result)
@with_production_testing
def test_inference_with_params(self):
api = InferenceApi("typeform/distilbert-base-uncased-mnli")
inputs = "I bought a device but it is not working and I would like to get reimbursed!"
params = {"candidate_labels": ["refund", "legal", "faq"]}
result = api(inputs, params)
self.assertIsInstance(result, dict)
self.assertTrue("sequence" in result)
self.assertTrue("scores" in result)
@with_production_testing
def test_inference_with_dict_inputs(self):
api = InferenceApi("deepset/roberta-base-squad2")
inputs = {
"question": "What's my name?",
"context": "My name is Clara and I live in Berkeley.",
}
result = api(inputs)
self.assertIsInstance(result, dict)
self.assertTrue("score" in result)
self.assertTrue("answer" in result)
@with_production_testing
def test_inference_with_audio(self):
api = InferenceApi("facebook/wav2vec2-large-960h-lv60-self")
dataset = datasets.load_dataset(
"patrickvonplaten/librispeech_asr_dummy", "clean", split="validation"
)
data = self.read(dataset["file"][0])
result = api(data=data)
self.assertIsInstance(result, dict)
self.assertTrue("text" in result)
@with_production_testing
def test_inference_with_image(self):
api = InferenceApi("google/vit-base-patch16-224")
dataset = datasets.load_dataset("Narsil/image_dummy", "image", split="test")
data = self.read(dataset["file"][0])
result = api(data=data)
self.assertIsInstance(result, list)
for classification in result:
self.assertIsInstance(classification, dict)
self.assertTrue("score" in classification)
self.assertTrue("label" in classification)
@with_production_testing
def test_inference_overriding_task(self):
api = InferenceApi(
"sentence-transformers/paraphrase-albert-small-v2",
task="feature-extraction",
)
inputs = "This is an example again"
result = api(inputs)
self.assertIsInstance(result, list)
@with_production_testing
def test_inference_overriding_invalid_task(self):
with self.assertRaises(
ValueError, msg="Invalid task invalid-task. Make sure it's valid."
):
InferenceApi("bert-base-uncased", task="invalid-task")
@with_production_testing
def test_inference_missing_input(self):
api = InferenceApi("deepset/roberta-base-squad2")
result = api({"question": "What's my name?"})
self.assertIsInstance(result, dict)
self.assertTrue("error" in result)
self.assertTrue("warnings" in result)
self.assertTrue(len(result["warnings"]) > 0)
|
py | 1a3cacd9e106718cf9a041a30c3008079605753f | from typing import IO, Any, Generic, List, Optional, TextIO, TypeVar, Union, overload
from . import get_console
from .console import Console
from .text import Text, TextType
PromptType = TypeVar("PromptType")
DefaultType = TypeVar("DefaultType")
class PromptError(Exception):
"""Exception base class for prompt related errors."""
class InvalidResponse(PromptError):
"""Exception to indicate a response was invalid. Raise this within process_response() to indicate an error
and provide an error message.
Args:
message (Union[str, Text]): Error message.
"""
def __init__(self, message: TextType) -> None:
self.message = message
def __rich__(self) -> TextType:
return self.message
class PromptBase(Generic[PromptType]):
"""Ask the user for input until a valid response is received. This is the base class, see one of
the concrete classes for examples.
Args:
prompt (TextType, optional): Prompt text. Defaults to "".
console (Console, optional): A Console instance or None to use global console. Defaults to None.
password (bool, optional): Enable password input. Defaults to False.
choices (List[str], optional): A list of valid choices. Defaults to None.
show_default (bool, optional): Show default in prompt. Defaults to True.
show_choices (bool, optional): Show choices in prompt. Defaults to True.
"""
response_type: type = str
validate_error_message = "[prompt.invalid]Please enter a valid value"
illegal_choice_message = (
"[prompt.invalid.choice]Please select one of the available options"
)
prompt_suffix = ": "
choices: Optional[List[str]] = None
def __init__(
self,
prompt: TextType = "",
*,
console: Console = None,
password: bool = False,
choices: List[str] = None,
show_default: bool = True,
show_choices: bool = True,
) -> None:
self.console = console or get_console()
self.prompt = (
Text.from_markup(prompt, style="prompt")
if isinstance(prompt, str)
else prompt
)
self.password = password
if choices is not None:
self.choices = choices
self.show_default = show_default
self.show_choices = show_choices
@classmethod
@overload
def ask(
cls,
prompt: TextType = "",
*,
console: Console = None,
password: bool = False,
choices: List[str] = None,
show_default: bool = True,
show_choices: bool = True,
default: DefaultType,
stream: TextIO = None,
) -> Union[DefaultType, PromptType]:
...
@classmethod
@overload
def ask(
cls,
prompt: TextType = "",
*,
console: Console = None,
password: bool = False,
choices: List[str] = None,
show_default: bool = True,
show_choices: bool = True,
stream: TextIO = None,
) -> PromptType:
...
@classmethod
def ask(
cls,
prompt: TextType = "",
*,
console: Console = None,
password: bool = False,
choices: List[str] = None,
show_default: bool = True,
show_choices: bool = True,
default: Any = ...,
stream: TextIO = None,
) -> Any:
"""Shortcut to construct and run a prompt loop and return the result.
Example:
>>> filename = Prompt.ask("Enter a filename")
Args:
prompt (TextType, optional): Prompt text. Defaults to "".
console (Console, optional): A Console instance or None to use global console. Defaults to None.
password (bool, optional): Enable password input. Defaults to False.
choices (List[str], optional): A list of valid choices. Defaults to None.
show_default (bool, optional): Show default in prompt. Defaults to True.
show_choices (bool, optional): Show choices in prompt. Defaults to True.
stream (TextIO, optional): Optional text file open for reading to get input. Defaults to None.
"""
_prompt = cls(
prompt,
console=console,
password=password,
choices=choices,
show_default=show_default,
show_choices=show_choices,
)
return _prompt(default=default, stream=stream)
def render_default(self, default: DefaultType) -> Text:
"""Turn the supplied default in to a Text instance.
Args:
default (DefaultType): Default value.
Returns:
Text: Text containing rendering of default value.
"""
return Text(f"({default})", "prompt.default")
def make_prompt(self, default: DefaultType) -> Text:
"""Make prompt text.
Args:
default (DefaultType): Default value.
Returns:
Text: Text to display in prompt.
"""
prompt = self.prompt.copy()
prompt.end = ""
if self.show_choices and self.choices:
_choices = "/".join(self.choices)
choices = f"[{_choices}]"
prompt.append(" ")
prompt.append(choices, "prompt.choices")
if (
default != ...
and self.show_default
and isinstance(default, (str, self.response_type))
):
prompt.append(" ")
_default = self.render_default(default)
prompt.append(_default)
prompt.append(self.prompt_suffix)
return prompt
@classmethod
def get_input(
cls,
console: Console,
prompt: TextType,
password: bool,
stream: TextIO = None,
) -> str:
"""Get input from user.
Args:
console (Console): Console instance.
prompt (TextType): Prompt text.
password (bool): Enable password entry.
Returns:
str: String from user.
"""
return console.input(prompt, password=password, stream=stream)
def check_choice(self, value: str) -> bool:
"""Check value is in the list of valid choices.
Args:
value (str): Value entered by user.
Returns:
bool: True if choice was valid, otherwise False.
"""
assert self.choices is not None
return value.strip() in self.choices
def process_response(self, value: str) -> PromptType:
"""Process response from user, convert to prompt type.
Args:
value (str): String typed by user.
Raises:
InvalidResponse: If ``value`` is invalid.
Returns:
PromptType: The value to be returned from ask method.
"""
value = value.strip()
try:
return_value = self.response_type(value)
except ValueError:
raise InvalidResponse(self.validate_error_message)
if self.choices is not None and not self.check_choice(value):
raise InvalidResponse(self.illegal_choice_message)
return return_value
def on_validate_error(self, value: str, error: InvalidResponse) -> None:
"""Called to handle validation error.
Args:
value (str): String entered by user.
error (InvalidResponse): Exception instance the initiated the error.
"""
self.console.print(error)
def pre_prompt(self) -> None:
"""Hook to display something before the prompt."""
@overload
def __call__(self, *, stream: TextIO = None) -> PromptType:
...
@overload
def __call__(
self, *, default: DefaultType, stream: TextIO = None
) -> Union[PromptType, DefaultType]:
...
def __call__(self, *, default: Any = ..., stream: TextIO = None) -> Any:
"""Run the prompt loop.
Args:
default (Any, optional): Optional default value.
Returns:
PromptType: Processed value.
"""
while True:
self.pre_prompt()
prompt = self.make_prompt(default)
value = self.get_input(self.console, prompt, self.password, stream=stream)
if value == "" and default != ...:
return default
try:
return_value = self.process_response(value)
except InvalidResponse as error:
self.on_validate_error(value, error)
continue
else:
return return_value
class Prompt(PromptBase[str]):
"""A prompt that returns a str.
Example:
>>> name = Prompt.ask("Enter your name")
"""
response_type = str
class IntPrompt(PromptBase[int]):
"""A prompt that returns an integer.
Example:
        >>> burrito_count = IntPrompt.ask("How many burritos do you want to order")
"""
response_type = int
validate_error_message = "[prompt.invalid]Please enter a valid integer number"
class FloatPrompt(PromptBase[float]):
"""A prompt that returns a float.
Example:
>>> temperature = FloatPrompt.ask("Enter desired temperature")
"""
response_type = float
validate_error_message = "[prompt.invalid]Please enter a number"
class Confirm(PromptBase[bool]):
"""A yes / no confirmation prompt.
Example:
>>> if Confirm.ask("Continue"):
run_job()
"""
response_type = bool
validate_error_message = "[prompt.invalid]Please enter Y or N"
choices = ["y", "n"]
def render_default(self, default: DefaultType) -> Text:
"""Render the default as (y) or (n) rather than True/False."""
assert self.choices is not None
yes, no = self.choices
return Text(f"({yes})" if default else f"({no})", style="prompt.default")
def process_response(self, value: str) -> bool:
"""Convert choices to a bool."""
value = value.strip().lower()
if value not in self.choices:
raise InvalidResponse(self.validate_error_message)
assert self.choices is not None
return value == self.choices[0]
if __name__ == "__main__": # pragma: no cover
from rich import print
if Confirm.ask("Run [i]prompt[/i] tests?", default=True):
while True:
result = IntPrompt.ask(
":rocket: Enter a number between [b]1[/b] and [b]10[/b]", default=5
)
if result >= 1 and result <= 10:
break
print(":pile_of_poo: [prompt.invalid]Number must be between 1 and 10")
print(f"number={result}")
while True:
password = Prompt.ask(
"Please enter a password [cyan](must be at least 5 characters)",
password=True,
)
if len(password) >= 5:
break
print("[prompt.invalid]password too short")
print(f"password={password!r}")
fruit = Prompt.ask("Enter a fruit", choices=["apple", "orange", "pear"])
print(f"fruit={fruit!r}")
else:
print("[b]OK :loudly_crying_face:")
|
py | 1a3cad0da0809160db91ca01ff347fa0c4b343e8 | # -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import numpy as np
import os
import xml.etree.ElementTree as ET
from typing import List, Tuple, Union
from fvcore.common.file_io import PathManager
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.structures import BoxMode
__all__ = ["load_voc_instances", "register_pascal_voc"]
# fmt: off
CLASS_NAMES = (
"aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat",
"chair", "cow", "diningtable", "dog", "horse", "motorbike", "person",
"pottedplant", "sheep", "sofa", "train", "tvmonitor"
)
# fmt: on
def load_voc_instances(dirname: str, split: str, class_names: Union[List[str], Tuple[str, ...]]):
"""
Load Pascal VOC detection annotations to Detectron2 format.
Args:
dirname: Contain "Annotations", "ImageSets", "JPEGImages"
split (str): one of "train", "test", "val", "trainval"
class_names: list or tuple of class names
"""
with PathManager.open(os.path.join(dirname, "ImageSets", "Main", split + ".txt")) as f:
fileids = np.loadtxt(f, dtype=np.str)
# Needs to read many small annotation files. Makes sense at local
annotation_dirname = PathManager.get_local_path(os.path.join(dirname, "Annotations/"))
dicts = []
for fileid in fileids:
anno_file = os.path.join(annotation_dirname, fileid + ".xml")
jpeg_file = os.path.join(dirname, "JPEGImages", fileid + ".jpg")
with PathManager.open(anno_file) as f:
tree = ET.parse(f)
r = {
"file_name": jpeg_file,
"image_id": fileid,
"height": int(tree.findall("./size/height")[0].text),
"width": int(tree.findall("./size/width")[0].text),
}
instances = []
for obj in tree.findall("object"):
cls = obj.find("name").text
# We include "difficult" samples in training.
# Based on limited experiments, they don't hurt accuracy.
# difficult = int(obj.find("difficult").text)
# if difficult == 1:
# continue
bbox = obj.find("bndbox")
bbox = [float(bbox.find(x).text) for x in ["xmin", "ymin", "xmax", "ymax"]]
# Original annotations are integers in the range [1, W or H]
# Assuming they mean 1-based pixel indices (inclusive),
# a box with annotation (xmin=1, xmax=W) covers the whole image.
# In coordinate space this is represented by (xmin=0, xmax=W)
bbox[0] -= 1.0
bbox[1] -= 1.0
instances.append(
{"category_id": class_names.index(cls), "bbox": bbox, "bbox_mode": BoxMode.XYXY_ABS}
)
r["annotations"] = instances
dicts.append(r)
return dicts
def register_pascal_voc(name, dirname, split, year, class_names=CLASS_NAMES):
DatasetCatalog.register(name, lambda: load_voc_instances(dirname, split, class_names))
MetadataCatalog.get(name).set(
thing_classes=list(class_names), dirname=dirname, year=year, split=split
)
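# Usage sketch (the dataset name and directory are placeholders, not from this
# module):
#
#   register_pascal_voc("voc_2007_trainval", "datasets/VOC2007", "trainval", 2007)
#   dicts = DatasetCatalog.get("voc_2007_trainval")
#   print(len(dicts), dicts[0]["annotations"][0]["bbox"])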
|
py | 1a3cad8bf7029fc78f819395351eceb3bdc6991a | from .file import read_file_upper
class Solver:
"""
Solver for a wordsearch puzzle.
Variables:
directions {list} -- Two-digit permutations of [-1, 0, 1], excluding [0, 0].
"""
directions = [
[ 0, -1],
[-1, 0],
[ 0, 1],
[ 1, 0],
[-1, -1],
[-1, 1],
[ 1, -1],
[ 1, 1]
]
def __init__(self, puzzle):
self.puzzle = puzzle
def find_candidates(self, char):
candidates = []
for row_key, row in enumerate(self.puzzle.grid):
for col_key, cell in enumerate(row):
if char == cell:
candidates.append([row_key, col_key])
return candidates
def find_word(self, word):
word_chars = list(word.replace(' ', '').strip().upper())
for candidate in self.find_candidates(word_chars[0]):
for direction in self.directions:
coords = []
for char_key, char in enumerate(word_chars):
row_key = candidate[0] + (direction[0] * char_key)
col_key = candidate[1] + (direction[1] * char_key)
if self.puzzle.coord_matches(row_key, col_key, char):
coords.append([row_key, col_key])
else:
break
if len(coords) == len(word_chars):
return coords
return []
def find_words(self, words):
coords = []
for word in words:
coords = coords + self.find_word(word)
return coords
def find_from_wordlist(self, wordlist):
words = read_file_upper(wordlist).splitlines()
return self.find_words(words)
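# Usage sketch (the puzzle object is an assumption: it is expected to expose a
# `grid` attribute and a `coord_matches(row, col, char)` method, as used above):
#
#   solver = Solver(puzzle)
#   coords = solver.find_word("python")
#   all_coords = solver.find_from_wordlist("words.txt")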
|
py | 1a3cae025ac1bd57bb0f3d7a86fd3077c0107b03 | import logging
import typing
import numpy as np
from scipy import optimize
from smac.configspace import ConfigurationSpace
from smac.epm.base_gp import BaseModel
from smac.epm.gp_base_prior import Prior
from smac.utils.constants import VERY_SMALL_NUMBER
from skopt.learning.gaussian_process.kernels import Kernel
from skopt.learning.gaussian_process import GaussianProcessRegressor
logger = logging.getLogger(__name__)
class GaussianProcess(BaseModel):
"""
Gaussian process model.
    The GP hyperparameters are obtained by optimizing the marginal log likelihood.
This code is based on the implementation of RoBO:
Klein, A. and Falkner, S. and Mansur, N. and Hutter, F.
RoBO: A Flexible and Robust Bayesian Optimization Framework in Python
In: NIPS 2017 Bayesian Optimization Workshop
Parameters
----------
types : List[int]
Specifies the number of categorical values of an input dimension where
the i-th entry corresponds to the i-th input dimension. Let's say we
have 2 dimension where the first dimension consists of 3 different
categorical choices and the second dimension is continuous than we
have to pass [3, 0]. Note that we count starting from 0.
bounds : List[Tuple[float, float]]
        bounds of input dimensions: (lower, upper) for continuous dims; (n_cat, np.nan) for categorical dims
seed : int
Model seed.
kernel : george kernel object
Specifies the kernel that is used for all Gaussian Process
prior : prior object
Defines a prior for the hyperparameters of the GP. Make sure that
it implements the Prior interface.
normalize_y : bool
Zero mean unit variance normalization of the output values
n_opt_restart : int
Number of restarts for GP hyperparameter optimization
instance_features : np.ndarray (I, K)
Contains the K dimensional instance features of the I different instances
pca_components : float
Number of components to keep when using PCA to reduce dimensionality of instance features. Requires to
set n_feats (> pca_dims).
"""
def __init__(
self,
configspace: ConfigurationSpace,
types: typing.List[int],
bounds: typing.List[typing.Tuple[float, float]],
seed: int,
kernel: Kernel,
normalize_y: bool = True,
n_opt_restarts: int = 10,
instance_features: typing.Optional[np.ndarray] = None,
pca_components: typing.Optional[int] = None,
):
super().__init__(
configspace=configspace,
types=types,
bounds=bounds,
seed=seed,
kernel=kernel,
instance_features=instance_features,
pca_components=pca_components,
)
self.normalize_y = normalize_y
self.n_opt_restarts = n_opt_restarts
self.hypers = np.empty((0, ))
self.is_trained = False
self._n_ll_evals = 0
self._set_has_conditions()
def _train(self, X: np.ndarray, y: np.ndarray, do_optimize: bool = True) -> 'GaussianProcess':
"""
Computes the Cholesky decomposition of the covariance of X and
estimates the GP hyperparameters by optimizing the marginal
loglikelihood. The prior mean of the GP is set to the empirical
mean of X.
Parameters
----------
X: np.ndarray (N, D)
Input data points. The dimensionality of X is (N, D),
with N as the number of points and D is the number of features.
y: np.ndarray (N,)
The corresponding target values.
do_optimize: boolean
If set to true the hyperparameters are optimized otherwise
the default hyperparameters of the kernel are used.
"""
X = self._impute_inactive(X)
if self.normalize_y:
y = self._normalize_y(y)
if len(y.shape) == 1:
self.n_objectives_ = 1
else:
self.n_objectives_ = y.shape[1]
if self.n_objectives_ == 1:
y = y.flatten()
n_tries = 10
for i in range(n_tries):
try:
self.gp = self._get_gp()
self.gp.fit(X, y)
break
except np.linalg.LinAlgError as e:
                if i == n_tries - 1:  # re-raise only after the last retry
raise e
# Assume that the last entry of theta is the noise
theta = np.exp(self.kernel.theta)
theta[-1] += 1
self.kernel.theta = np.log(theta)
if do_optimize:
self._all_priors = self._get_all_priors(add_bound_priors=False)
self.hypers = self._optimize()
self.gp.kernel.theta = self.hypers
self.gp.fit(X, y)
else:
self.hypers = self.gp.kernel.theta
self.is_trained = True
return self
def _get_gp(self) -> GaussianProcessRegressor:
return GaussianProcessRegressor(
kernel=self.kernel,
normalize_y=False,
optimizer=None,
n_restarts_optimizer=-1, # Do not use scikit-learn's optimization routine
alpha=0, # Governed by the kernel
noise=None,
random_state=self.rng,
)
def _nll(self, theta: np.ndarray) -> typing.Tuple[float, np.ndarray]:
"""
Returns the negative marginal log likelihood (+ the prior) for
a hyperparameter configuration theta.
(negative because we use scipy minimize for optimization)
Parameters
----------
theta : np.ndarray(H)
Hyperparameter vector. Note that all hyperparameter are
on a log scale.
Returns
----------
float
lnlikelihood + prior
"""
self._n_ll_evals += 1
try:
lml, grad = self.gp.log_marginal_likelihood(theta, eval_gradient=True)
except np.linalg.LinAlgError:
return 1e25, np.zeros(theta.shape)
for dim, priors in enumerate(self._all_priors):
for prior in priors:
lml += prior.lnprob(theta[dim])
grad[dim] += prior.gradient(theta[dim])
# We add a minus here because scipy is minimizing
if not np.isfinite(lml).all() or not np.all(np.isfinite(grad)):
return 1e25, np.zeros(theta.shape)
else:
return -lml, -grad
def _optimize(self) -> np.ndarray:
"""
Optimizes the marginal log likelihood and returns the best found
hyperparameter configuration theta.
Returns
-------
theta : np.ndarray(H)
Hyperparameter vector that maximizes the marginal log likelihood
"""
log_bounds = [(b[0], b[1]) for b in self.gp.kernel.bounds]
# Start optimization from the previous hyperparameter configuration
p0 = [self.gp.kernel.theta]
if self.n_opt_restarts > 0:
dim_samples = []
prior = None # type: typing.Optional[typing.Union[typing.List[Prior], Prior]]
for dim, hp_bound in enumerate(log_bounds):
prior = self._all_priors[dim]
# Always sample from the first prior
if isinstance(prior, list):
if len(prior) == 0:
prior = None
else:
prior = prior[0]
prior = typing.cast(typing.Optional[Prior], prior)
if prior is None:
try:
sample = self.rng.uniform(
low=hp_bound[0],
high=hp_bound[1],
size=(self.n_opt_restarts,),
)
except OverflowError:
raise ValueError('OverflowError while sampling from (%f, %f)' % (hp_bound[0], hp_bound[1]))
dim_samples.append(sample.flatten())
else:
dim_samples.append(prior.sample_from_prior(self.n_opt_restarts).flatten())
p0 += list(np.vstack(dim_samples).transpose())
theta_star = None
f_opt_star = np.inf
for i, start_point in enumerate(p0):
theta, f_opt, _ = optimize.fmin_l_bfgs_b(self._nll, start_point, bounds=log_bounds)
if f_opt < f_opt_star:
f_opt_star = f_opt
theta_star = theta
return theta_star
def _predict(self, X_test: np.ndarray,
cov_return_type: typing.Optional[str] = 'diagonal_cov') \
-> typing.Tuple[np.ndarray, typing.Optional[np.ndarray]]:
r"""
Returns the predictive mean and variance of the objective function at
the given test points.
Parameters
----------
X_test: np.ndarray (N, D)
Input test points
cov_return_type: typing.Optional[str]
Specifies what to return along with the mean. Refer ``predict()`` for more information.
Returns
----------
np.array(N,)
predictive mean
np.array(N,) or np.array(N, N) or None
predictive variance or standard deviation
"""
if not self.is_trained:
raise Exception('Model has to be trained first!')
X_test = self._impute_inactive(X_test)
if cov_return_type is None:
mu = self.gp.predict(X_test)
var = None
if self.normalize_y:
mu = self._untransform_y(mu)
else:
predict_kwargs = {'return_cov': False, 'return_std': True}
if cov_return_type == 'full_cov':
predict_kwargs = {'return_cov': True, 'return_std': False}
mu, var = self.gp.predict(X_test, **predict_kwargs)
if cov_return_type != 'full_cov':
var = var ** 2 # since we get standard deviation for faster computation
# Clip negative variances and set them to the smallest
# positive float value
var = np.clip(var, VERY_SMALL_NUMBER, np.inf)
if self.normalize_y:
mu, var = self._untransform_y(mu, var)
if cov_return_type == 'diagonal_std':
var = np.sqrt(var) # converting variance to std deviation if specified
return mu, var
def sample_functions(self, X_test: np.ndarray, n_funcs: int = 1) -> np.ndarray:
"""
Samples F function values from the current posterior at the N
specified test points.
Parameters
----------
X_test: np.ndarray (N, D)
Input test points
n_funcs: int
Number of function values that are drawn at each test point.
Returns
----------
function_samples: np.array(F, N)
The F function values drawn at the N test points.
"""
if not self.is_trained:
raise Exception('Model has to be trained first!')
X_test = self._impute_inactive(X_test)
funcs = self.gp.sample_y(X_test, n_samples=n_funcs, random_state=self.rng)
if self.normalize_y:
funcs = self._untransform_y(funcs)
if len(funcs.shape) == 1:
return funcs[None, :]
else:
return funcs
|
py | 1a3cae03fb0faf711c7677b13baa25d19032f580 | #!/usr/bin/env python
"""
@package ion.agents.platform.resource_monitor
@file ion/agents/platform/resource_monitor.py
@author Carlos Rueda
@brief Platform resource monitoring for a set of attributes having same rate
"""
__author__ = 'Carlos Rueda'
import logging
import pprint
from gevent import Greenlet, sleep
from pyon.public import log
from pyon.util.containers import current_time_millis
from ion.agents.platform.platform_driver_event import AttributeValueDriverEvent
from ion.agents.platform.util import ntp_2_ion_ts
# Platform attribute values are reported for the stream name "parsed".
# TODO confirm this.
_STREAM_NAME = "parsed"
# A small "ION System time" compliant increment to the latest received timestamp
# for purposes of the next request so we don't get that last sample repeated.
# Since "ION system time" is in milliseconds, this delta is in milliseconds.
_DELTA_TIME = 10
# _MULT_INTERVAL: for any request, the from_time parameter will be at most
# _MULT_INTERVAL * _rate_secs in the past wrt current time (OOIION-1372):
_MULT_INTERVAL = 3
class ResourceMonitor(object):
"""
Monitor for specific attributes having the same nominal monitoring rate.
"""
def __init__(self, platform_id, rate_secs, attr_defns,
get_attribute_values, notify_driver_event):
"""
Creates a monitor for a specific attribute in a given platform.
Call start to start the monitoring greenlet.
@param platform_id Platform ID
@param rate_secs Monitoring rate in secs
@param attr_defns List of attribute definitions
@param get_attribute_values
Function to retrieve attribute values for the specific
platform, to be called like this:
get_attribute_values(attr_ids, from_time)
@param notify_driver_event
Callback to notify whenever a value is retrieved.
"""
log.debug("%r: ResourceMonitor entered. rate_secs=%s, attr_defns=%s",
platform_id, rate_secs, attr_defns)
self._get_attribute_values = get_attribute_values
self._platform_id = platform_id
self._rate_secs = rate_secs
self._attr_defns = attr_defns
self._notify_driver_event = notify_driver_event
# corresponding attribute IDs to be retrieved
self._attr_ids = []
# and "ION System time" compliant timestamp of last retrieved value for
# each attribute:
self._last_ts_millis = {}
for attr_defn in self._attr_defns:
if 'attr_id' in attr_defn:
attr_id = attr_defn['attr_id']
self._attr_ids.append(attr_id)
self._last_ts_millis[attr_id] = None
else:
log.warn("%r: 'attr_id' key expected in attribute definition: %s",
self._platform_id, attr_defn)
self._active = False
# for debugging purposes
self._pp = pprint.PrettyPrinter()
log.debug("%r: ResourceMonitor created. rate_secs=%s, attr_ids=%s",
platform_id, rate_secs, self._attr_ids)
def __str__(self):
return "%s{platform_id=%r; rate_secs=%s; attr_ids=%s}" % (
self.__class__.__name__,
self._platform_id, self._rate_secs, str(self._attr_ids))
def start(self):
"""
Starts greenlet for resource monitoring.
"""
log.debug("%r: starting resource monitoring %s", self._platform_id, self)
self._active = True
runnable = Greenlet(self._run)
runnable.start()
def _run(self):
"""
The target function for the greenlet.
"""
while self._active:
slept = 0
# loop to incrementally sleep up to rate_secs while promptly
# reacting to request for termination
while self._active and slept < self._rate_secs:
# sleep in increments of 0.5 secs
incr = min(0.5, self._rate_secs - slept)
sleep(incr)
slept += incr
if self._active:
try:
self._retrieve_attribute_values()
except Exception:
log.exception("exception in _retrieve_attribute_values")
log.debug("%r: monitoring greenlet stopped. rate_secs=%s; attr_ids=%s",
self._platform_id, self._rate_secs, self._attr_ids)
def _retrieve_attribute_values(self):
"""
Retrieves the attribute values using the given function and calls
_values_retrieved.
"""
# TODO: note that the "from_time" parameters for the request below
# as well as the expected response are influenced by the RSN case
# (see CI-OMS interface). Need to see whether it also applies to
# CGSN so eventually adjustments may be needed.
#
# note that the "from_time" parameter in each pair (attr_id, from_time)
# for the _get_attribute_values call below, is in millis in UNIX epoch.
curr_time_millis = current_time_millis()
# minimum value for the from_time parameter (OOIION-1372):
min_from_time = curr_time_millis - 1000 * _MULT_INTERVAL * self._rate_secs
# this corresponds to (_MULT_INTERVAL * self._rate_secs) ago.
# determine each from_time for the request:
attrs = []
for attr_id in self._attr_ids:
if self._last_ts_millis[attr_id] is None:
# Very first request for this attribute. Use min_from_time:
from_time = min_from_time
else:
# We've already got values for this attribute. Use the latest
# timestamp + _DELTA_TIME as a basis for the new request:
from_time = int(self._last_ts_millis[attr_id]) + _DELTA_TIME
# but adjust it if it goes too far in the past:
if from_time < min_from_time:
from_time = min_from_time
log.trace("test_resource_monitoring_recent: attr_id=%s, from_time=%s, %s millis ago",
attr_id, from_time, curr_time_millis - from_time)
attrs.append((attr_id, from_time))
log.debug("%r: _retrieve_attribute_values: attrs=%s",
self._platform_id, attrs)
retrieved_vals = self._get_attribute_values(attrs)
if retrieved_vals is None:
# lost connection; nothing else to do here:
return
good_retrieved_vals = {}
# do validation: we expect an array of tuples (val, timestamp) for
# each attribute. If not, log a warning for the attribute and
# continue processing with the other valid attributes:
for attr_id, vals in retrieved_vals.iteritems():
if not isinstance(vals, (list, tuple)):
log.warn("%r: expecting an array for attribute %r, but got: %r",
self._platform_id, attr_id, vals)
continue
if len(vals):
if not isinstance(vals[0], (tuple, list)):
log.warn("%r: expecting elements in array to be tuples "
"(val, ts) for attribute %r, but got: %r",
self._platform_id, attr_id, vals[0])
continue
good_retrieved_vals[attr_id] = vals # even if empty array.
if not good_retrieved_vals:
# nothing else to do. TODO perhaps an additional warning?
return
if log.isEnabledFor(logging.TRACE): # pragma: no cover
# show good_retrieved_vals as retrieved (might be large)
log.trace("%r: _retrieve_attribute_values: _get_attribute_values "
"for attrs=%s returned\n%s",
self._platform_id, attrs, self._pp.pformat(good_retrieved_vals))
elif log.isEnabledFor(logging.DEBUG): # pragma: no cover
summary = {attr_id: "(%d vals)" % len(vals)
for attr_id, vals in good_retrieved_vals.iteritems()}
log.debug("%r: _retrieve_attribute_values: _get_attribute_values "
"for attrs=%s returned %s",
self._platform_id, attrs, summary)
# vals_dict: attributes with non-empty reported values:
vals_dict = {}
for attr_id, from_time in attrs:
if not attr_id in good_retrieved_vals:
log.warn("%r: _retrieve_attribute_values: unexpected: "
"response does not include requested attribute %r. "
"Response is: %s",
self._platform_id, attr_id, good_retrieved_vals)
continue
attr_vals = good_retrieved_vals[attr_id]
if not attr_vals:
log.debug("%r: No values reported for attribute=%r from_time=%f",
self._platform_id, attr_id, from_time)
continue
if log.isEnabledFor(logging.DEBUG):
self._debug_values_retrieved(attr_id, attr_vals)
# ok, include this attribute for the notification:
vals_dict[attr_id] = attr_vals
if vals_dict:
self._values_retrieved(vals_dict)
def _values_retrieved(self, vals_dict):
"""
A values response has been received. Create and notify
corresponding event to platform agent.
"""
# update _last_ts_millis for each retrieved attribute:
for attr_id, attr_vals in vals_dict.iteritems():
_, ntp_ts = attr_vals[-1]
# update _last_ts_millis based on ntp_ts: note that timestamps are reported
# in NTP so we need to convert it to ION system time for a subsequent request:
self._last_ts_millis[attr_id] = ntp_2_ion_ts(ntp_ts)
# finally, notify the values event:
driver_event = AttributeValueDriverEvent(self._platform_id,
_STREAM_NAME,
vals_dict)
self._notify_driver_event(driver_event)
def _debug_values_retrieved(self, attr_id, values): # pragma: no cover
ln = len(values)
# just show a couple of elements
arrstr = "["
if ln <= 3:
vals = [str(e) for e in values[:ln]]
arrstr += ", ".join(vals)
else:
vals = [str(e) for e in values[:2]]
last_e = values[-1]
arrstr += ", ".join(vals)
arrstr += ", ..., " + str(last_e)
arrstr += "]"
log.debug("%r: attr=%r: values retrieved(%s) = %s",
self._platform_id, attr_id, ln, arrstr)
def stop(self):
log.debug("%r: stopping resource monitoring %s", self._platform_id, self)
self._active = False
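# Illustrative wiring sketch (not part of the original module). The platform id,
# attribute id, and the (value, ntp_timestamp) pairs returned by the stub callback
# are made-up placeholders; in practice a platform driver supplies both callbacks.
def _example_monitor():
    def get_attribute_values(attrs):
        # attrs is a list of (attr_id, from_time) pairs; return {attr_id: [(val, ntp_ts)]}
        return {attr_id: [(42.0, 3600000000.0)] for attr_id, _from_time in attrs}
    monitor = ResourceMonitor('platform_1',
                              rate_secs=2.5,
                              attr_defns=[{'attr_id': 'input_voltage'}],
                              get_attribute_values=get_attribute_values,
                              notify_driver_event=lambda evt: log.debug("got event: %s", evt))
    monitor.start()
    return monitor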
|
py | 1a3cb11c65463a45f6ac8a7e48f8518d0da8b0c1 | # Example test file, safe to delete
import pytest
@pytest.fixture
def name_registry_package(deployer):
# returns an ethpm.package instance loaded with a "name_registry" deployment
return deployer.deploy("name_registry")
@pytest.fixture
def name_registry(name_registry_package):
# returns a name_registry web3.contract instance
return name_registry_package.deployments.get_instance("name_registry")
def test_name_registry_lookup(name_registry, name_registry_package):
w3 = name_registry_package.w3
name = b"Ongo Gablogian"
name_registry.functions.register(name, w3.eth.accounts[0]).transact()
registered_address = name_registry.functions.lookup(name).call()
assert registered_address == w3.eth.accounts[0]
|
py | 1a3cb269ac70893d77044ec036283ec0f4304eff | # Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2021 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class InputMessagesFilterDocument(TLObject): # type: ignore
"""This object is a constructor of the base type :obj:`~pyrogram.raw.base.MessagesFilter`.
Details:
- Layer: ``126``
- ID: ``0x9eddf188``
**No parameters required.**
"""
__slots__: List[str] = []
ID = 0x9eddf188
QUALNAME = "types.InputMessagesFilterDocument"
def __init__(self) -> None:
pass
@staticmethod
def read(data: BytesIO, *args: Any) -> "InputMessagesFilterDocument":
# No flags
return InputMessagesFilterDocument()
def write(self) -> bytes:
data = BytesIO()
data.write(Int(self.ID, False))
# No flags
return data.getvalue()
|
py | 1a3cb56c3d0e08e9c8dcba4ea14b7fd6f0f04bd4 | # -*- coding: utf-8 -*-
import numpy as np
from PIL import Image
from skimage.draw import disk
import cv2
defocusKernelDims = [3,5,7,9]
def DefocusBlur_random(img):
kernelidx = np.random.randint(0, len(defocusKernelDims))
kerneldim = defocusKernelDims[kernelidx]
return DefocusBlur(img, kerneldim)
def DefocusBlur(img, dim):
imgarray = np.array(img, dtype="float32")
kernel = DiskKernel(dim)
# convolved = convolve2d(imgarray, kernel, mode='same', fillvalue=255.0).astype("uint8")
convolved = cv2.filter2D(imgarray, -1, kernel).astype("uint8")
img = Image.fromarray(convolved)
return img
def DiskKernel(dim):
kernelwidth = dim
kernel = np.zeros((kernelwidth, kernelwidth), dtype=np.float32)
circleCenterCoord = dim / 2
circleRadius = circleCenterCoord +1
rr, cc = disk((circleCenterCoord, circleCenterCoord), circleRadius-1)
kernel[rr, cc] = 1
if(dim == 3 or dim == 5):
kernel = Adjust(kernel, dim)
normalizationFactor = np.count_nonzero(kernel)
kernel = kernel / normalizationFactor
return kernel
def Adjust(kernel, kernelwidth):
kernel[0,0] = 0
kernel[0,kernelwidth-1]=0
kernel[kernelwidth-1,0]=0
kernel[kernelwidth-1, kernelwidth-1] =0
return kernel
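# Illustrative usage sketch (not part of the original module); the file names below
# are placeholders.
if __name__ == '__main__':
    img = Image.open('example.png')
    blurred = DefocusBlur_random(img)
    blurred.save('example_blurred.png')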
|
py | 1a3cb62bd362a14f47eb73433039d1d5dae939a5 | import io
import ConfigParser
import logging
CONFIGURATION_ENCODING_FORMAT = "utf-8"
class SnipsConfigParser(ConfigParser.SafeConfigParser):
def to_dict(self):
return {section: {option_name: option for option_name, option in self.items(section)} for section in
self.sections()}
def read_configuration_file(configuration_file):
try:
with io.open(configuration_file, encoding=CONFIGURATION_ENCODING_FORMAT) as f:
conf_parser = SnipsConfigParser()
conf_parser.readfp(f)
return conf_parser.to_dict()
except (IOError, ConfigParser.Error) as e:
return dict()
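# Illustrative usage sketch (not part of the original module); the path is a placeholder.
if __name__ == '__main__':
    conf = read_configuration_file('config.ini')
    print(conf)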
|
py | 1a3cb63e44af02c1d6c731686a5275d46095a14b | from enum import Enum, unique
from pyxedit.xelib.wrapper_methods.base import WrapperMethodsBase
@unique
class ConflictThis(Enum):
'''
Specific conflict information for a record used by
``xelib.get_conflict_data``.
.. list-table::
:widths: 20 80
:header-rows: 0
:align: left
* - ``ConflictThis.Unknown``
-
* - ``ConflictThis.Ignored``
-
* - ``ConflictThis.NotDefined``
-
* - ``ConflictThis.IdenticalToMaster``
-
* - ``ConflictThis.OnlyOne``
-
* - ``ConflictThis.HiddenByModGroup``
-
* - ``ConflictThis.Master``
-
* - ``ConflictThis.ConflictBenign``
-
* - ``ConflictThis.Override``
-
* - ``ConflictThis.IdenticalToMasterWinsConflict``
-
* - ``ConflictThis.ConflictWins``
-
* - ``ConflictThis.ConflictLoses``
-
'''
Unknown = 0
Ignored = 1
NotDefined = 2
IdenticalToMaster = 3
OnlyOne = 4
HiddenByModGroup = 5
Master = 6
ConflictBenign = 7
Override = 8
IdenticalToMasterWinsConflict = 9
ConflictWins = 10
ConflictLoses = 11
@unique
class ConflictAll(Enum):
'''
Overall conflict information for a record used by
``xelib.get_conflict_data``.
.. list-table::
:widths: 20 80
:header-rows: 0
:align: left
* - ``ConflictAll.Unknown``
-
* - ``ConflictAll.OnlyOne``
-
* - ``ConflictAll.NoConflict``
-
* - ``ConflictAll.ConflictBenign``
-
* - ``ConflictAll.Override``
-
* - ``ConflictAll.Conflict``
-
* - ``ConflictAll.ConflictCritical``
-
'''
Unknown = 0
OnlyOne = 1
NoConflict = 2
ConflictBenign = 3
Override = 4
Conflict = 5
ConflictCritical = 6
@unique
class GetRefrsFlags(Enum):
'''
Options used by ``xelib.get_refrs``
.. list-table::
:widths: 20 80
:header-rows: 0
:align: left
* - ``GetRefrsFlags.excludeDeleted``
-
* - ``GetRefrsFlags.excludeDisabled``
-
* - ``GetRefrsFlags.excludeXESP``
-
'''
excludeDeleted = 1
excludeDisabled = 2
excludeXESP = 4
class RecordsMethods(WrapperMethodsBase):
ConflictThis = ConflictThis
ConflictAll = ConflictAll
GetRefrsFlags = GetRefrsFlags
def get_form_id(self, id_, native=False, local=False, ex=True):
'''
Returns the FormID of the record
Args:
id\\_ (``int``)
id handle of record
local (``bool``)
whether to return the 'local' FormID, with the load order
bits masked to to 00
Returns:
(``int``) FormID as an integer, use `xelib.get_hex_form_id` to turn
it into a hex string for representational purposes
'''
form_id = self.get_unsigned_integer(
lambda res: self.raw_api.GetFormID(id_, res, native),
error_msg=f'Failed to get FormID for {self.element_context(id_)}',
ex=ex)
if form_id and local:
return form_id & 0xFFFFFF
return form_id
def get_hex_form_id(self, id_, native=False, local=False, ex=True):
'''
Returns the FormID of the record as a hexadecimal string
Args:
id\\_ (``int``)
id handle of record
local (``bool``)
whether to only return the lower 6 hex digits (ignore the upper
2 which are for representing load order)
Returns:
(``str``) FormID in hex string form
'''
form_id = self.get_form_id(id_, native, local, ex=ex)
return f'{form_id:0>6X}' if local else f'{form_id:0>8X}'
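    # Worked example (illustrative): a hypothetical global FormID 0x0301AA5F formats
    # to '0301AA5F'; with local=True the two load-order bytes are masked off
    # (0x0301AA5F & 0xFFFFFF == 0x01AA5F), giving '01AA5F'.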
def set_form_id(self,
id_,
new_form_id,
native=False,
fix_references=True,
ex=True):
'''
Sets the FormID of a record to a new FormID
Args:
id\\_ (``int``)
id handle of record
new_form_id (``int``)
the new FormID to set the record to
native (``bool``)
TODO: figure out what this does, it probably means a 'local'
FormID without the load order bits is given
fix_references (``bool``)
Adjust all references of this FormID to match within the current
xEdit session (I think... xEdit does this by default after all)
'''
return self.verify_execution(
self.raw_api.SetFormId(id_, new_form_id, native, fix_references),
error_msg=f'Failed to set FormID on {self.element_context(id_)} to '
f'{new_form_id}',
ex=ex)
def get_record(self, id_, form_id, search_masters=True, ex=True):
'''
Finds and returns the record with the given FormID
Args:
id\\_ (``int``)
id handle of file to start from. This can be passed a ``0``
to search all loaded files.
form_id (``int``)
the FormID to search for, if searching all files, this should
be a "global" FormID with the load order bits included; if
searching a particular file, this should be a "local" FormID
without the load order bits.
'''
return self.get_handle(
lambda res: self.raw_api.GetRecord(id_, form_id, search_masters, res),
error_msg=f'Failed to get record at {self.element_context(id_)}, '
f'{form_id}',
ex=ex)
def get_records(self, id_, search='', include_overrides=False, ex=True):
'''
Returns a list of all records matching ``search`` found in ``id_``.
Args:
id\\_ (``int``)
id handle of file to search. Pass ``0`` to search all loaded
files
search (``str``)
string to search for, if left empty, all records will be
returned. Strings like ``'ARMO'``, ``'COBJ'``,
``'ARMO,WEAP,MISC'``, or
``'Constructible Object,Non-Player Character (Actor)'`` may
be passed to filter down results.
include_overrides (``bool``)
whether to include override records that originate from master
plugins
Returns:
(``List[int]``) a list of id handles for found records
'''
return self.get_array(
lambda len_:
self.raw_api.GetRecords(id_, search, include_overrides, len_),
error_msg=f'Failed to get {search} records from '
f'{self.element_context(id_)}',
ex=ex)
def get_refrs(self, id_, search, opts=None, ex=True):
'''
Returns an array of all REFR records referencing base records with
signatures in ``search`` found within ``id_``.
Args:
id\\_ (``int``)
id handle to start from
search (``str``)
string to search for, Strings like ``'DOOR'`` can be passed
opts (``List[pyxedit.xelib.GetRefrsFlags]``)
list of options to further tweak behavior
Returns:
(``List[int]``) a list of id handles for found references
'''
opts = opts or []
return self.get_array(
lambda len_:
self.raw_api.GetREFRs(id_, search, self.build_flags(opts), len_),
error_msg=f'Failed to get {search} REFRs from '
f'{self.element_context(id_)}',
ex=ex)
def get_overrides(self, id_, ex=True):
'''
Returns an array of handles corresponding to the overrides of given
record.
Args:
id\\_ (``int``)
id handle of record to get overrides for
Returns:
(``List[int]``) id handles for override records
'''
return self.get_array(
lambda len_: self.raw_api.GetOverrides(id_, len_),
error_msg=f'Failed to get overrides for '
f'{self.element_context(id_)}',
ex=ex)
def get_master_record(self, id_, ex=True):
'''
Returns the master record of given record.
Args:
id\\_ (``int``)
id handle of record to get master record for
Returns:
(``int``) id handle for master record; if given record is already
a master record, a new id handle will be returned
'''
return self.get_handle(
lambda res: self.raw_api.GetMasterRecord(id_, res),
error_msg=f'Failed to get master record for '
f'{self.element_context(id_)}',
ex=ex)
def get_previous_override(self, id_, id2, ex=True):
'''
Returns a handle for the winning override of the given record
in the masters of a given file.
Args:
id\\_ (``int``)
id handle of record to get previous override for
id2 (``int``)
id handle to the given file
Returns:
(``int``) id handle to the winning override on the record
among the given file's master files
'''
return self.get_handle(
lambda res: self.raw_api.GetPreviousOverride(id_, id2, res),
error_msg=f'Failed to get previous override record for '
f'{self.element_context(id_)}, targetting file '
f'{self.element_context(id2)}',
ex=ex)
def get_winning_override(self, id_, ex=True):
'''
Returns a handle for the winning override of given record
Args:
id\\_ (``int``)
id handle of record to get winning override for
Returns:
(``int``) id handle to the winning override record
'''
return self.get_handle(
lambda res: self.raw_api.GetWinningOverride(id_, res),
error_msg=f'Failed to get winning override record for '
f'{self.element_context(id_)}',
ex=ex)
def get_injection_target(self, id_, ex=True):
'''
Returns a handle for the file that the given record is injected into.
Args:
id\\_ (``int``)
id handle of record to get injection target for
Returns:
(``int``) id handle of file the record is injected into.
'''
return self.get_handle(
lambda res: self.raw_api.GetInjectionTarget(id_, res),
error_msg=f'Failed to get injection target for '
f'{self.element_context(id_)}',
ex=ex)
def find_next_record(self, id_, search, by_edid, by_name, ex=True):
'''
Returns the next record after the given record which matches ``search``.
Args:
id\\_ (``int``)
id handle of record to find the next record for
search (``str``)
search filter to limit the search to
by_edid (``bool``)
TODO: figure out what this does
by_name (``bool``)
TODO: figure out what this does
Returns:
(``int``) id handle of next record
'''
return self.get_handle(
lambda res:
self.raw_api.FindNextRecord(id_, search, by_edid, by_name, res),
error_msg=f'Failed to find next record for '
f'{self.element_context(id_)}',
ex=ex)
def find_previous_record(self, id_, search, by_edid, by_name, ex=True):
'''
        Returns the previous record before the given record which matches
``search``.
Args:
id\\_ (``int``)
id handle of record to find the previous record for
search (``str``)
search filter to limit the search to
by_edid (``bool``)
TODO: figure out what this does
by_name (``bool``)
TODO: figure out what this does
Returns:
(``int``) id handle of previous record
'''
return self.get_handle(
lambda res: self.raw_api.FindPreviousRecord(id_,
search,
by_edid,
by_name,
res),
error_msg=f'Failed to find previous record for '
f'{self.element_context(id_)}',
ex=ex)
def find_valid_references(self, id_, signature, search, limit_to, ex=True):
'''
Returns up to ``limit_to`` records matching ``signature``, which can be
referenced by the file containing the given record. Exclude results
which do not contain ``search`` in their long names.
Args:
id\\_ (``int``)
find valid references for the file containing this record
signature (``str``)
return valid referenceable results matching this signature
search (``str``)
limit the search to referenceable results that match this search
string
limit_to (``int``)
limit to this many search results
Returns:
(``List[int]``) a list of referenceable records by the file of the
given record
'''
return self.get_string_array(
lambda len_: self.raw_api.FindValidReferences(id_,
signature,
search,
limit_to,
len_),
error_msg=f'Failed to find valid {signature} references on '
f'{self.element_context(id_)} searching for {search}',
ex=ex)
def get_referenced_by(self, id_, ex=True):
'''
Returns an array of the records that reference the given record.
References must be built with ``xelib.build_references`` to be returned.
Args:
id\\_ (``int``)
id handle of record to find references for
Returns:
(``List[int]``) a list of records that reference it
'''
return self.get_array(
lambda len_: self.raw_api.GetReferencedBy(id_, len_),
error_msg=f'Failed to get referenced by for: '
f'{self.element_context(id_)}',
ex=ex)
def exchange_references(self, id_, old_form_id, new_form_id, ex=True):
'''
Exchanges all references to ``old_form_id`` with references to
``new_form_id`` on the given record.
Args:
id\\_ (``int``)
id handle of record to exchange references on
old_form_id (``int``)
exchange references from this FormID
new_form_id (``int``)
exchange references to this FormID
'''
return self.verify_execution(
self.raw_api.ExchangeReferences(id_, old_form_id, new_form_id),
error_msg=f'Failed to exchange references on '
f'{self.element_context(id_)} from {old_form_id} to '
f'{new_form_id}',
ex=ex)
def is_master(self, id_, ex=True):
'''
Returns true if given record is a master record
Args:
id\\_ (``int``)
id handle of record
Returns:
(``bool``) whether record is master record
'''
return self.get_bool(
lambda res: self.raw_api.IsMaster(id_, res),
ex=ex)
def is_injected(self, id_, ex=True):
'''
        Returns true if given record is an injected record
Args:
id\\_ (``int``)
id handle of record
Returns:
(``bool``) whether record is injected record
'''
return self.get_bool(
lambda res: self.raw_api.IsInjected(id_, res),
ex=ex)
def is_override(self, id_, ex=True):
'''
        Returns true if given record is an override record
Args:
id\\_ (``int``)
id handle of record
Returns:
(``bool``) whether record is override record
'''
return self.get_bool(
lambda res: self.raw_api.IsOverride(id_, res),
ex=ex)
def is_winning_override(self, id_, ex=True):
'''
Returns true if given record is a winning override record
Args:
id\\_ (``int``)
id handle of record
Returns:
(``bool``) whether record is winning override record
'''
return self.get_bool(
lambda res: self.raw_api.IsWinningOverride(id_, res),
ex=ex)
def get_record_def(self, sig, ex=True):
'''
TODO: figure out what this does
'''
return self.get_handle(
lambda res: self.raw_api.GetRecordDef(sig, res),
error_msg=f'Failed to get record def for {sig}',
ex=ex)
def get_nodes(self, id_, ex=True):
'''
Returns a handle pointing to a node tree for ``id_``. The handle
returned by this function must be freed with ``xelib.release_nodes``.
NOTE: this can be slow for very large records like ``NAVI``
Args:
id\\_ (``int``)
id handle of record to get node tree for
Returns:
(``int``) id handle to node tree
'''
return self.get_handle(
lambda res: self.raw_api.GetNodes(id_, res),
error_msg=f'Failed to get nodes for {self.element_context(id_)}',
ex=ex)
def get_conflict_data(self, nodes, handle, as_string=False, ex=False):
'''
Returns conflict for a given element.
Args:
nodes (``int``)
id handle of node tree returned by ``get_nodes`` when called
on ``handle``
handle (``int``)
id handle of element to operate on
as_string (``bool``)
if set to true, results will be returned as strings instead
of enum values
Returns:
(``pyxedit.xelib.ConflictAll, pyxedit.xelib.ConflictThis`` or
``str, str``) conflict information for element
'''
conflict_all, conflict_this = self.get_two_bytes(
lambda res1, res2:
self.raw_api.GetConflictData(nodes, handle, res1, res2),
error_msg=f'GetConflictData failed on {nodes}, {handle}',
ex=ex)
conflict_all = ConflictAll(conflict_all or 0)
conflict_this = ConflictThis(conflict_this or 0)
if as_string:
return conflict_all.name, conflict_this.name
else:
return conflict_all, conflict_this
def get_record_conflict_data(self, element, ex=False):
'''
TODO: figure out what this does
'''
return self.get_conflict_data(0, element, ex=ex)
def get_node_elements(self, nodes, element, ex=True):
'''
Returns handles for the element children of ``element``.
Args:
nodes (``int``)
id handle of node tree returned by ``get_nodes`` when called
on ``handle``
handle (``int``)
id handle of element to get child elements for
Returns:
(``List[Optional[int]]``) A list of handles for child elements
under the given element, including ``None`` placeholders for
unassigned elements.
'''
return self.get_array(
lambda len_: self.raw_api.GetNodeElements(nodes, element, len_),
error_msg=f'GetNodeElements failed on {self.element_context(nodes)}, '
f'{self.element_context(element)}',
ex=ex)
|
py | 1a3cb6cb64239d1a186b1ef32d8c31ce868b81e9 | import numpy as np
import random
import logging
import logging.config
#logging.disable()
logging.config.fileConfig('logging.conf')
# create logger
logger = logging.getLogger('simpleExample')
# Setting the size of the field
cells_number = 12
"""
We are going to show the following path finding algorithms:
Dijkstra’s Algorithm algorithm = Di
A* Search Algorithm algorithm = A*
D* Algorithm algorithm = D*
"""
class Full_field():
def __init__(self):
'''
Status:
        1 = solved
        0 = not visited
        -1 = to be checked next
        -2 = Obstacle
        2 = destination
        3 = shortest path
'''
self.algorithm = "A*"
self.__start_field()
logger.info('Starting status of the board \n:' + str(self.status))
logger.info('Starting distance values\n' + str(self.values))
def __start_field(self):
self.values = np.zeros((cells_number, cells_number))
self.status = np.zeros((cells_number, cells_number))
self.previous_cell = np.full(
(cells_number, cells_number), tuple)
def set_obstacles(
self, obstacle_positions=[], number_of_obstacles=cells_number):
"""
Obstacles will be flagged as cells of value -2
"""
if obstacle_positions == []:
_ = 0
while _ < number_of_obstacles:
i = random.randint(0, cells_number - 1)
j = random.randint(0, cells_number - 1)
if self.status[i, j] not in [1, 2]:
self.status[i, j] = -2
_ += 1
else:
for pos in obstacle_positions:
if self.status[pos] != 1:
self.status[pos] = -2
def set_origin(
self, x0=0, y0=0):
self.x0 = x0
self.y0 = y0
self.status[x0, y0] = 1
def set_destination(
self, xf=cells_number - 1, yf=cells_number - 1
):
self.xf = xf
self.yf = yf
self.status[xf, yf] = 2
def select_algorithm(self, algorithm):
"""
We are going to show the following path finding algorithms:
Dijkstra’s Algorithm algorithm = Di
A* Search Algorithm algorithm = A*
D* Algorithm algorithm = D*
"""
self.algorithm = algorithm
logger.debug("Algorithm has been set to " + str(algorithm))
def find_shortest_path(self):
while self.status[self.xf, self.yf] != 1:
self.check_nodes()
self.min_closest_node()
logger.info('These are the values\n' + str(self.values))
logger.info('This is to see which one is solved\n' +
str(self.status))
def check_nodes(self):
for i in range(cells_number):
for j in range(cells_number):
if self.status[i, j] == 1:
self.find_next_node(i, j)
logger.info("These are the candidates'status\n" + str(self.status))
def min_closest_node(self):
# Search the closest node with the minimum value
node_values = []
node_pos = []
for i in range(cells_number):
for j in range(cells_number):
if self.status[i, j] == -1:
self.status[i, j] = 0
node_values.append(self.values[i, j])
node_pos.append([i, j])
        logger.debug('Candidate node values: ' + str(node_values))
        try:
            pos = np.argmin(node_values)
        except ValueError:
            # No candidate nodes left: the destination cannot be reached.
            logger.error('It is not possible to go from ' + str(self.x0) + str(self.y0) +
                         " to " + str(self.xf) + str(self.yf))
            raise
closest_node = tuple(node_pos[pos])
logger.debug('Node to be set as solved' + str(closest_node))
self.status[closest_node] = 1
def find_next_node(self, x0, y0):
for i in [x0 - 1, x0, x0 + 1]:
for j in [y0 - 1, y0, y0 + 1]:
if (i >= 0) & (i < cells_number) \
& (j >= 0) & (j < cells_number):
x1 = int(i)
y1 = int(j)
self.__set_value(x0, y0, x1, y1)
def __set_value(self, x0, y0, x1, y1):
distance_to_previous = self.__distance_between(x0, y0, x1, y1)
if self.algorithm == "A*":
distance_to_destination_x0 = self.__distance_between(
x0, y0, self.xf, self.yf)
distance_to_destination = self.__distance_between(
x1, y1, self.xf, self.yf)
elif self.algorithm == "Di":
distance_to_destination = 0
distance_to_destination_x0 = 0
cumulative_distance = self.values[x0, y0] - distance_to_destination_x0 + \
distance_to_previous + distance_to_destination
if (self.status[x1, y1] in [0, 2]) or \
((self.status[x1, y1] == -1) and
(cumulative_distance < self.values[x1, y1])):
self.values[x1, y1] = cumulative_distance
self.status[x1, y1] = -1
self.previous_cell[x1, y1] = (x0, y0)
def __distance_between(self, x0, y0, x1, y1):
distance = ((x1 - x0) ** 2 + (y1 - y0) ** 2) ** (1 / 2)
return distance
def show_path(self):
logger.debug('This is the previous_cell\n' + str(self.previous_cell))
x = self.xf
y = self.yf
while (x != self.x0) | (y != self.y0):
self.status[x, y] = 3
if x == self.xf and y == self.yf:
self.status[x, y] = 2
x, y = self.previous_cell[x, y]
logger.info("This is the shortest path from " + str(self.x0) + str(self.y0) +
" to " + str(self.xf) + str(self.yf) + "\n" + str(self.status))
def solve_fast(self):
self.find_shortest_path()
self.show_path()
def solve_one_step(self):
if self.status[self.xf, self.yf] != 1:
self.check_nodes()
class Find_Path(Full_field):
def __init__(self):
Full_field.__init__(self)
self.set_origin()
self.set_destination()
self.set_obstacles()
self.solve_fast()
if __name__ == "__main__":
Find_Path()
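# Illustrative usage sketch (not part of the original script): building a custom
# scenario instead of the random one used by Find_Path. The coordinates and obstacle
# positions below are arbitrary placeholders.
def example_custom_run():
    field = Full_field()
    field.set_origin(0, 0)
    field.set_destination(cells_number - 1, cells_number - 1)
    field.set_obstacles(obstacle_positions=[(3, 3), (3, 4), (4, 3)])
    field.select_algorithm("Di")  # Dijkstra; "A*" is the default heuristic search
    field.solve_fast()
    return field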
|
py | 1a3cb7124c2d37fbf9bba7353037bb83d97812ff | from ethereum import utils
def mk_multisend_code(payments): # expects a dictionary, {address: wei}
kode = b''
for address, wei in payments.items():
kode += b'\x60\x00\x60\x00\x60\x00\x60\x00' # 0 0 0 0
encoded_wei = utils.encode_int(wei) or b'\x00'
kode += utils.ascii_chr(0x5f + len(encoded_wei)) + encoded_wei # value
kode += b'\x73' + utils.normalize_address(address) # to
kode += b'\x60\x00\xf1\x50' # 0 CALL POP
kode += b'\x33\xff' # CALLER SELFDESTRUCT
return kode
def get_multisend_gas(payments):
o = 26002 # 21000 + 2 (CALLER) + 5000 (SELFDESTRUCT)
for address, wei in payments.items():
encoded_wei = utils.encode_int(wei) or b'\x00'
# 20 bytes in txdata for address = 1360
# bytes in txdata for wei = 68 * n
# gas for pushes and pops = 3 * 7 + 2 = 23
# CALL = 9700 + 25000 (possible if new account)
o += 1360 + 68 * len(encoded_wei) + 23 + 34700
return o
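# Illustrative usage sketch (not part of the original module): the recipient
# addresses and amounts are made-up placeholders.
if __name__ == '__main__':
    example_payments = {
        '0x' + '11' * 20: 10 ** 18,        # 1 ether, hypothetical recipient
        '0x' + '22' * 20: 5 * 10 ** 17,    # 0.5 ether, hypothetical recipient
    }
    code = mk_multisend_code(example_payments)
    gas = get_multisend_gas(example_payments)
    print('contract code: %d bytes, gas estimate: %d' % (len(code), gas))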
|
py | 1a3cb859b20deef2da78e9d096d020d9810771fa | import psycopg2
conn = psycopg2.connect(database="lms", user="postgres", password="Aaditya@1432178", host="127.0.0.1", port="12345")
cur = conn.cursor()
class UpdateAddress:
def update_address(self):
cur.execute(""" UPDATE "Address" set city = 'Barwani' where user_id = 1; """)
return conn.commit()
|
py | 1a3cb8a2f8e0cb8354f61796fc9bf64184bacb8f | # Define a procedure, union,
# that takes as inputs two lists.
# It should modify the first input
# list to be the set union of the two
# lists. You may assume the first list
# is a set, that is, it contains no
# repeated elements.
def union(a, b):
    for element in b:
        if element not in a:
            a.append(element)
    return a
# To test, uncomment all lines
# below except those beginning with >>>.
a = [1,2,3]
b = [2,4,6]
union(a,b)
print a
#>>> [1,2,3,4,6]
print b
#>>> [2,4,6]
|
py | 1a3cb93afb9a3cf4922f292d3870190793702b7e | from app import app
|
py | 1a3cb972b0d741497a6be8a1c2903d19727a4ba5 | """Tests for the ProtocolAnalyzer."""
import pytest
from decoy import Decoy
from datetime import datetime
from opentrons.types import MountType, DeckSlotName
from opentrons.protocol_engine import commands as pe_commands, types as pe_types
from opentrons.protocol_runner import ProtocolRunner, ProtocolRunData, JsonPreAnalysis
from robot_server.protocols.analysis_store import AnalysisStore
from robot_server.protocols.protocol_store import ProtocolResource
from robot_server.protocols.protocol_analyzer import ProtocolAnalyzer
@pytest.fixture
def protocol_runner(decoy: Decoy) -> ProtocolRunner:
"""Get a mocked out ProtocolRunner."""
return decoy.mock(cls=ProtocolRunner)
@pytest.fixture
def analysis_store(decoy: Decoy) -> AnalysisStore:
"""Get a mocked out AnalysisStore."""
return decoy.mock(cls=AnalysisStore)
@pytest.fixture
def subject(
protocol_runner: ProtocolRunner,
analysis_store: AnalysisStore,
) -> ProtocolAnalyzer:
"""Get a ProtocolAnalyzer test subject."""
return ProtocolAnalyzer(
protocol_runner=protocol_runner,
analysis_store=analysis_store,
)
async def test_analyze(
decoy: Decoy,
protocol_runner: ProtocolRunner,
analysis_store: AnalysisStore,
subject: ProtocolAnalyzer,
) -> None:
"""It should be able to analyize a protocol."""
protocol_resource = ProtocolResource(
protocol_id="protocol-id",
pre_analysis=JsonPreAnalysis(schema_version=123, metadata={}),
created_at=datetime(year=2021, month=1, day=1),
files=[],
)
analysis_command = pe_commands.Pause(
id="command-id",
status=pe_commands.CommandStatus.SUCCEEDED,
createdAt=datetime(year=2022, month=2, day=2),
data=pe_commands.PauseData(message="hello world"),
)
analysis_labware = pe_types.LoadedLabware(
id="labware-id",
loadName="load-name",
definitionUri="namespace/load-name/42",
location=pe_types.DeckSlotLocation(slot=DeckSlotName.SLOT_1),
)
analysis_pipette = pe_types.LoadedPipette(
id="pipette-id",
pipetteName=pe_types.PipetteName.P300_SINGLE,
mount=MountType.LEFT,
)
decoy.when(await protocol_runner.run(protocol_resource)).then_return(
ProtocolRunData(
commands=[analysis_command],
labware=[analysis_labware],
pipettes=[analysis_pipette],
)
)
await subject.analyze(
protocol_resource=protocol_resource,
analysis_id="analysis-id",
)
decoy.verify(
analysis_store.update(
analysis_id="analysis-id",
commands=[analysis_command],
labware=[analysis_labware],
pipettes=[analysis_pipette],
errors=[],
),
)
async def test_analyze_error(
decoy: Decoy,
protocol_runner: ProtocolRunner,
analysis_store: AnalysisStore,
subject: ProtocolAnalyzer,
) -> None:
"""It should handle errors raised by the runner."""
protocol_resource = ProtocolResource(
protocol_id="protocol-id",
pre_analysis=JsonPreAnalysis(schema_version=123, metadata={}),
created_at=datetime(year=2021, month=1, day=1),
files=[],
)
error = RuntimeError("oh no")
decoy.when(await protocol_runner.run(protocol_resource)).then_raise(error)
await subject.analyze(
protocol_resource=protocol_resource,
analysis_id="analysis-id",
)
decoy.verify(
analysis_store.update(
analysis_id="analysis-id",
commands=[],
labware=[],
pipettes=[],
errors=[error],
),
)
|
py | 1a3cbbd50702e8941bcafa3eaae9476b5568910b | import os
import json
if not os.path.exists('normal'):
os.mkdir('normal')
for data_type in ['train', 'dev', 'test']:
with open('{}.json'.format(data_type), 'r') as f:
data = json.load(f)
dataset = []
for sample in data:
token = sample['token']
h_pos = [sample['subj_start'], sample['subj_end'] + 1]
t_pos = [sample['obj_start'], sample['obj_end'] + 1]
relation = sample['relation']
dataset.append({'token': token,
'h': {'pos': h_pos},
't': {'pos': t_pos},
'relation': relation})
with open('normal/{}.txt'.format(data_type), 'w') as f:
for sample in dataset:
f.write(json.dumps(sample))
f.write('\n')
|
py | 1a3cbc67d5d6e3ece858820f21f470156f9f1315 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: load-vgg19.py
from __future__ import print_function
import argparse
import numpy as np
import os
import cv2
import six
import tensorflow as tf
from tensorpack import *
from tensorpack.dataflow.dataset import ILSVRCMeta
enable_argscope_for_module(tf.layers)
def tower_func(image):
is_training = get_current_tower_context().is_training
with argscope([tf.layers.conv2d], kernel_size=3, activation=tf.nn.relu, padding='same'):
x = image
x = tf.layers.conv2d(x, 64, name='conv1_1')
x = tf.layers.conv2d(x, 64, name='conv1_2')
x = tf.layers.max_pooling2d(x, 2, 2, name='pool1')
x = tf.layers.conv2d(x, 128, name='conv2_1')
x = tf.layers.conv2d(x, 128, name='conv2_2')
x = tf.layers.max_pooling2d(x, 2, 2, name='pool2')
x = tf.layers.conv2d(x, 256, name='conv3_1')
x = tf.layers.conv2d(x, 256, name='conv3_2')
x = tf.layers.conv2d(x, 256, name='conv3_3')
x = tf.layers.conv2d(x, 256, name='conv3_4')
x = tf.layers.max_pooling2d(x, 2, 2, name='pool3')
x = tf.layers.conv2d(x, 512, name='conv4_1')
x = tf.layers.conv2d(x, 512, name='conv4_2')
x = tf.layers.conv2d(x, 512, name='conv4_3')
x = tf.layers.conv2d(x, 512, name='conv4_4')
x = tf.layers.max_pooling2d(x, 2, 2, name='pool4')
x = tf.layers.conv2d(x, 512, name='conv5_1')
x = tf.layers.conv2d(x, 512, name='conv5_2')
x = tf.layers.conv2d(x, 512, name='conv5_3')
x = tf.layers.conv2d(x, 512, name='conv5_4')
x = tf.layers.max_pooling2d(x, 2, 2, name='pool5')
x = tf.layers.flatten(x, name='flatten')
x = tf.layers.dense(x, 4096, activation=tf.nn.relu, name='fc6')
x = tf.layers.dropout(x, rate=0.5, name='drop0', training=is_training)
x = tf.layers.dense(x, 4096, activation=tf.nn.relu, name='fc7')
x = tf.layers.dropout(x, rate=0.5, name='drop1', training=is_training)
logits = tf.layers.dense(x, 1000, activation=tf.identity, name='fc8')
tf.nn.softmax(logits, name='prob')
def run_test(path, input):
param_dict = dict(np.load(path))
param_dict = {k.replace('/W', '/kernel').replace('/b', '/bias'): v for k, v in six.iteritems(param_dict)}
predict_func = OfflinePredictor(PredictConfig(
inputs_desc=[InputDesc(tf.float32, (None, 224, 224, 3), 'input')],
tower_func=tower_func,
session_init=DictRestore(param_dict),
input_names=['input'],
output_names=['prob'] # prob:0 is the probability distribution
))
im = cv2.imread(input)
assert im is not None, input
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
im = cv2.resize(im, (224, 224)).reshape((1, 224, 224, 3)).astype('float32')
    # VGG19 requires channel-wise mean subtraction
VGG_MEAN = [103.939, 116.779, 123.68]
im -= VGG_MEAN[::-1]
outputs = predict_func(im)[0]
prob = outputs[0]
ret = prob.argsort()[-10:][::-1]
print("Top10 predictions:", ret)
meta = ILSVRCMeta().get_synset_words_1000()
print("Top10 class names:", [meta[k] for k in ret])
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
parser.add_argument('--load', required=True,
help='.npz model file generated by tensorpack.utils.loadcaffe')
parser.add_argument('--input', help='an input image', required=True)
args = parser.parse_args()
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
run_test(args.load, args.input)
|
py | 1a3cbe2d9641e75f0d2e61e3e2cf3572c36e9831 | """
Django settings for trainingDjango project.
Generated by 'django-admin startproject' using Django 4.0.1.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
with open('trainingDjango/secret') as f:
SECRET_KEY = f.read().strip()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'users.apps.UsersConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'trainingDjango.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'trainingDjango.wsgi.application'
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = 'static/'
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Custom User Model
AUTH_USER_MODEL = 'users.User' |
py | 1a3cbeb893a9283570525dc92fd89bdd8bcfca23 | import os
class Config:
SSL_REDIRECT = False
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
DEBUG = True
class TestingConfig(Config):
TESTING = True
class ProductionConfig(Config):
@staticmethod
def init_app(app):
Config.init_app(app)
# log to stderr
import logging
from logging import StreamHandler
file_handler = StreamHandler()
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
class HerokuConfig(ProductionConfig):
SSL_REDIRECT = True if os.environ.get('DYNO') else False
@staticmethod
def init_app(app):
ProductionConfig.init_app(app)
# handle reverse proxy server headers
from werkzeug.middleware.proxy_fix import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app)
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'heroku': HerokuConfig,
'default': DevelopmentConfig
} |
py | 1a3cbf10a99d9e656f49ad80eb99aa7c5917cfea | from django.contrib.auth import login, logout
from django.contrib.auth.forms import AuthenticationForm
from django.middleware import csrf
from django.utils.translation import gettext_lazy as _
from rest_framework.authentication import SessionAuthentication
from rest_framework.decorators import action
from rest_framework.exceptions import ParseError, PermissionDenied
from rest_framework.response import Response
from rest_framework.viewsets import ViewSet
from c3nav.api.models import Token
from c3nav.api.utils import get_api_post_data
class SessionViewSet(ViewSet):
"""
Session for Login, Logout, etc…
Don't forget to set X-Csrftoken for POST requests!
/login – POST with fields token or username and password to log in
/get_token – POST with fields username and password to get a login token
/logout - POST to log out
"""
def list(self, request, *args, **kwargs):
return Response({
'is_authenticated': request.user.is_authenticated,
'csrf_token': csrf.get_token(request),
})
@action(detail=False, methods=['post'])
def login(self, request, *args, **kwargs):
# django-rest-framework doesn't do this for logged out requests
SessionAuthentication().enforce_csrf(request)
if request.user.is_authenticated:
raise ParseError(_('Log out first.'))
data = get_api_post_data(request)
if 'token' in data:
try:
token = Token.get_by_token(data['token'])
except Token.DoesNotExist:
raise PermissionDenied(_('This token does not exist or is no longer valid.'))
user = token.user
elif 'username' in data:
form = AuthenticationForm(request, data=data)
if not form.is_valid():
raise ParseError(form.errors)
user = form.user_cache
else:
raise ParseError(_('You need to send a token or username and password.'))
login(request, user)
return Response({
'detail': _('Login successful.'),
'csrf_token': csrf.get_token(request),
})
@action(detail=False, methods=['post'])
def get_token(self, request, *args, **kwargs):
# django-rest-framework doesn't do this for logged out requests
SessionAuthentication().enforce_csrf(request)
data = get_api_post_data(request)
form = AuthenticationForm(request, data=data)
if not form.is_valid():
raise ParseError(form.errors)
token = form.user_cache.login_tokens.create()
return Response({
'token': token.get_token(),
})
@action(detail=False, methods=['post'])
def logout(self, request, *args, **kwargs):
# django-rest-framework doesn't do this for logged out requests
SessionAuthentication().enforce_csrf(request)
if not request.user.is_authenticated:
            raise ParseError(_('Not logged in.'))
logout(request)
return Response({
'detail': _('Logout successful.'),
'csrf_token': csrf.get_token(request),
})
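# Illustrative client-side sketch (not part of this module). The base URL and route
# prefix below are assumptions -- the real paths depend on how the router registers
# SessionViewSet.
#
#     import requests
#     s = requests.Session()
#     base = 'https://example.test/api/session'
#     csrf = s.get(base + '/').json()['csrf_token']
#     resp = s.post(base + '/login/',
#                   data={'username': 'demo', 'password': 'secret'},
#                   headers={'X-Csrftoken': csrf})
#     csrf = resp.json()['csrf_token']   # a fresh token is returned after login
#     s.post(base + '/logout/', headers={'X-Csrftoken': csrf})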
|
py | 1a3cc05f5e39f33d366e307a405783165f1731e5 |
def extractVasaandypresWordpressCom(item):
'''
Parser for 'vasaandypres.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
py | 1a3cc0e385996330edfc645488c901324c69fa6f |
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.modules.flatten import Flatten
# Code for CIFAR ResNet is modified from https://github.com/itchencheng/pytorch-residual-networks
class FashionMNIST(nn.Module):
def __init__(self):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(1, 12, kernel_size=5),
nn.ReLU(),
nn.MaxPool2d(2, 2),
nn.Conv2d(12, 24, 5),
nn.ReLU(),
nn.MaxPool2d(2, 2),
nn.Flatten(),
nn.Linear(24*4*4, 50),
nn.ReLU(),
nn.Linear(50, 10),
nn.ReLU(),
)
def forward(self, x):
x = self.net(x)
return x
class SpeechCommand_Simplified(nn.Module):
def __init__(self, n_input=1, n_output=35, stride=16, n_channel=32):
super().__init__()
self.net = nn.Sequential(
# 1*8000
nn.Conv1d(n_input, n_channel, kernel_size=80, stride=stride),
# 32*496
nn.BatchNorm1d(n_channel),
nn.ReLU(),
nn.MaxPool1d(kernel_size=4, stride=1),
# 32*493
nn.Conv1d(n_channel, n_channel//2, kernel_size=3),
# 16*491
nn.BatchNorm1d(n_channel//2),
nn.ReLU(),
nn.MaxPool1d(kernel_size=4, stride=1),
# 16*488
nn.Conv1d(n_channel//2, n_channel//2, kernel_size=3),
# 16*486
nn.BatchNorm1d(n_channel//2),
nn.ReLU(),
nn.MaxPool1d(kernel_size=4, stride=1),
# 16*483
nn.Flatten(),
nn.Linear(16*483, 512),
nn.Linear(512, n_output),
nn.LogSoftmax(dim=1)
)
def forward(self, x):
x = self.net(x)
return x
class SpeechCommand(nn.Module):
def __init__(self, n_input=1, n_output=35, stride=16, n_channel=32):
super().__init__()
self.conv1 = nn.Conv1d(n_input, n_channel, kernel_size=80, stride=stride)
self.bn1 = nn.BatchNorm1d(n_channel)
self.pool1 = nn.MaxPool1d(4)
self.conv2 = nn.Conv1d(n_channel, n_channel, kernel_size=3)
self.bn2 = nn.BatchNorm1d(n_channel)
self.pool2 = nn.MaxPool1d(4)
self.conv3 = nn.Conv1d(n_channel, 2 * n_channel, kernel_size=3)
self.bn3 = nn.BatchNorm1d(2 * n_channel)
self.pool3 = nn.MaxPool1d(4)
self.conv4 = nn.Conv1d(2 * n_channel, 2 * n_channel, kernel_size=3)
self.bn4 = nn.BatchNorm1d(2 * n_channel)
self.pool4 = nn.MaxPool1d(4)
self.fc1 = nn.Linear(2 * n_channel, n_output)
def forward(self, x):
x = self.conv1(x)
x = F.relu(self.bn1(x))
x = self.pool1(x)
x = self.conv2(x)
x = F.relu(self.bn2(x))
x = self.pool2(x)
x = self.conv3(x)
x = F.relu(self.bn3(x))
x = self.pool3(x)
x = self.conv4(x)
x = F.relu(self.bn4(x))
x = self.pool4(x)
x = F.avg_pool1d(x, x.shape[-1])
x = x.permute(0, 2, 1)
x = self.fc1(x)
return F.log_softmax(x, dim=2)
class AGNEWS(nn.Module):
def __init__(self, vocab_size = 95811, embed_dim = 64, num_class = 4):
super().__init__()
self.embedding = nn.EmbeddingBag(vocab_size, embed_dim, sparse=True)
self.fc = nn.Linear(embed_dim, num_class)
self.init_weights()
def init_weights(self):
initrange = 0.5
self.embedding.weight.data.uniform_(-initrange, initrange)
self.fc.weight.data.uniform_(-initrange, initrange)
self.fc.bias.data.zero_()
def forward(self, text, offsets):
embedded = self.embedding(text, offsets)
return self.fc(embedded)
class CIFAR_CNN(nn.Module):
def __init__(self):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(3, 32, 3, padding=1),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.MaxPool2d(2, 2),
nn.Conv2d(32, 128, 3, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.MaxPool2d(2, 2),
nn.Conv2d(128, 256, 3, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.MaxPool2d(2, 2),
nn.Dropout(0.1),
nn.Flatten(),
nn.Linear(256*4*4, 256),
nn.ReLU(),
nn.Linear(256, 10),
nn.ReLU(),
)
# self.conv1 = nn.Conv2d(3, 6, 5)
# self.pool = nn.MaxPool2d(2, 2)
# self.conv2 = nn.Conv2d(6, 16, 5)
# self.fc1 = nn.Linear(16 * 5 * 5, 120)
# self.fc2 = nn.Linear(120, 84)
# self.fc3 = nn.Linear(84, 10)
def forward(self, x):
# x = self.pool(F.relu(self.conv1(x)))
# x = self.pool(F.relu(self.conv2(x)))
# x = torch.flatten(x, 1) # flatten all dimensions except batch
# x = F.relu(self.fc1(x))
# x = F.relu(self.fc2(x))
# x = self.fc3(x)
x = self.net(x)
return x
class ResBlock(nn.Module):
def __init__(self, in_chann, chann, stride):
super(ResBlock, self).__init__()
self.conv1 = nn.Conv2d(in_chann, chann, kernel_size=3, padding=1, stride=stride)
self.bn1 = nn.BatchNorm2d(chann)
self.conv2 = nn.Conv2d(chann, chann, kernel_size=3, padding=1, stride=1)
self.bn2 = nn.BatchNorm2d(chann)
def forward(self, x):
y = self.conv1(x)
y = self.bn1(y)
y = F.relu(y)
y = self.conv2(y)
y = self.bn2(y)
if (x.shape == y.shape):
z = x
else:
z = F.avg_pool2d(x, kernel_size=2, stride=2)
x_channel = x.size(1)
y_channel = y.size(1)
ch_res = (y_channel - x_channel)//2
pad = (0, 0, 0, 0, ch_res, ch_res)
z = F.pad(z, pad=pad, mode="constant", value=0)
z = z + y
z = F.relu(z)
return z
class BaseNet(nn.Module):
def __init__(self, Block, n):
super(BaseNet, self).__init__()
self.Block = Block
self.conv0 = nn.Conv2d(3, 16, kernel_size=3, padding=1)
self.bn0 = nn.BatchNorm2d(16)
self.convs = self._make_layers(n)
self.avgpool = nn.AvgPool2d(kernel_size=8, stride=1)
self.fc = nn.Linear(64, 10)
def forward(self, x):
x = self.conv0(x)
x = self.bn0(x)
x = F.relu(x)
x = self.convs(x)
x = self.avgpool(x)
x = x.view(x.size(0),-1)
x = self.fc(x)
return x
def _make_layers(self, n):
layers = []
in_chann = 16
chann = 16
stride = 1
for i in range(3):
for j in range(n):
if ((i > 0) and (j == 0)):
in_chann = chann
chann = chann * 2
stride = 2
layers += [self.Block(in_chann, chann, stride)]
stride = 1
in_chann = chann
return nn.Sequential(*layers)
class CIFARResNet(BaseNet):
def __init__(self, n=3):
super().__init__(ResBlock, n) |
py | 1a3cc1e221ff9b256f51c2dd04ccdfb15a30810c | # -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # World News from data (good & bad)
# > Significant changes vs. 10 days ago in transmission rates, ICU demand, and cases & deaths data.
#
# - categories: [world, overview, interactive, news]
# - permalink: /covid-news/
# - author: <a href=https://github.com/artdgn/>artdgn</a>
# - toc: true
# - image: images/news.png
# - hide: false
# > Warning: This dashboard was not built by an epidemiologist.
# > Note: Click a country name to open a search results page for that country's COVID-19 news.
# +
#hide
import pandas as pd
import covid_helpers as covid_helpers
stylers = covid_helpers.PandasStyling
# +
#hide
day_diff = 10
cur_data = covid_helpers.CovidData()
df_cur_all, debug_dfs = cur_data.table_with_projections(projection_days=[30], debug_dfs=True)
df_cur = cur_data.filter_df(df_cur_all)
past_data = covid_helpers.CovidData(-day_diff)
df_past = past_data.filter_df(past_data.table_with_projections(projection_days=[day_diff-1]))
# -
#hide_input
from IPython.display import Markdown
past_date = pd.to_datetime(past_data.dt_cols[-1]).date().isoformat()
Markdown(f"***Based on data up to: {cur_data.cur_date}. \
Compared to ({day_diff} days before): {past_date}***")
# +
#hide
df_data = df_cur.copy()
df_data['transmission_rate_past'] = df_past['transmission_rate']
df_data['transmission_rate_std_past'] = df_past['transmission_rate_std']
df_data['needICU.per100k_past'] = df_past['needICU.per100k']
# deaths toll changes
df_data['Deaths.total.diff'] = df_data['Deaths.total'] - df_past['Deaths.total']
df_data['Deaths.new.per100k.past'] = df_past['Deaths.new.per100k']
df_data['Deaths.new.past'] = df_past['Deaths.new']
df_data['Deaths.diff.per100k'] = df_data['Deaths.total.diff'] / (df_data['population'] / 1e5)
# misses and explanations
df_data['transmission_rate.change'] = (df_data['transmission_rate'] / df_data['transmission_rate_past']) - 1
df_data['affected_ratio.miss'] = (df_cur['affected_ratio.est'] / df_past['affected_ratio.est.+9d']) - 1
df_data['needICU.per100k.miss'] = (df_cur['needICU.per100k'] / df_past['needICU.per100k.+9d']) - 1
df_data['testing_bias.change'] = (df_data['current_testing_bias'] / df_past['current_testing_bias']) - 1
# -
#hide
def emoji_flags(inds):
return ' '.join(df_cur.loc[inds]['emoji_flag'])
# # Transmission rate:
# > Note: "transmission rate" here is a measure of speed of spread of infection, and means how much of the susceptible population each infected person is infecting per day (if everyone is susceptible). E.g. 10% means that 100 infected patients will infect 10 new people per day. Related to [R0](https://en.wikipedia.org/wiki/Basic_reproduction_number). See [Methodology](#Methodology) for details of calculation.
# hide
def style_news_infections(df):
cols = {
'transmission_rate': '<i>Current:</i><br>Estimated<br>daily<br>transmission<br>rate',
'transmission_rate_past': f'<i>{day_diff} days ago:</i><br>Estimated<br>daily<br>transmission<br>rate',
'Cases.new.est': 'Estimated <br> <i>recent</i> cases <br> in last 5 days',
'needICU.per100k': 'Estimated<br>current<br>ICU need<br>per 100k<br>population',
'affected_ratio.est': 'Estimated <br><i>total</i><br>affected<br>population<br>percentage',
}
rate_norm = max(df['transmission_rate'].max(), df['transmission_rate_past'].max())
df_show = stylers.country_index_emoji_link(df)[cols.keys()].rename(columns=cols)
return (df_show.style
.bar(subset=[cols['needICU.per100k']], color='#b21e3e', vmin=0, vmax=10)
.bar(subset=cols['Cases.new.est'], color='#b57b17', vmin=0)
.bar(subset=cols['affected_ratio.est'], color='#5dad64', vmin=0, vmax=1.0)
.apply(stylers.add_bar, color='#f49d5a',
s_v=df['transmission_rate'] / rate_norm, subset=cols['transmission_rate'])
.apply(stylers.add_bar, color='#d8b193',
s_v=df['transmission_rate_past'] / rate_norm,
subset=cols['transmission_rate_past'])
.format('<b>{:.2f}</b>', subset=[cols['needICU.per100k']])
.format('<b>{:,.0f}</b>', subset=cols['Cases.new.est'])
.format('<b>{:.1%}</b>', subset=[cols['affected_ratio.est'],
cols['transmission_rate'],
cols['transmission_rate_past']], na_rep="-"))
# hide
rate_diff = df_data['transmission_rate'] - df_data['transmission_rate_past']
higher_trans = (
(df_data['Cases.new.est'] > 100) &
(rate_diff > 0.02) &
(rate_diff > df_data['transmission_rate_std_past']) &
(df_data['transmission_rate_past'] != 0) # countries reporting infrequently
)
new_waves = rate_diff[higher_trans].sort_values(ascending=False).index
# hide_input
Markdown(f"## ⭕ Bad news: new waves {emoji_flags(new_waves)}")
# > Large increase in transmission rate vs. 10 days ago, that might mean a relapse, new wave, worsening outbreak.
#
# - Countries are sorted by size of change in transmission rate.
# - Includes only countries that were previously active (more than 100 estimated new cases).
# - "Large increase" = at least +2% change.
# hide_input
style_news_infections(df_data.loc[new_waves])
# +
# hide
df_alt_all = pd.concat([d.reset_index() for d in debug_dfs], axis=0)
def infected_plots(countries, title):
return covid_helpers.altair_multiple_countries_infected(
df_alt_all, countries=countries, title=title, marker_day=day_diff)
# -
# > Tip: Click country name in legend to switch countries. Use mouse wheel to zoom Y axis.
#hide_input
infected_plots(new_waves, "Countries with new waves (vs. 10 days ago)")
#hide
lower_trans = (
(rate_diff < -0.02) &
(df_cur['Cases.new.est'] > 100) &
(rate_diff.abs() > df_data['transmission_rate_std']) &
(df_data['transmission_rate'] != 0) # countries reporting infrequently
)
slowing_outbreaks = rate_diff[lower_trans].sort_values().index
#hide_input
Markdown(f"## 🟢 Good news: slowing waves {emoji_flags(slowing_outbreaks)}")
# > Large decrease in transmission rate vs. 10 days ago, that might mean a slowing down / effective control measures.
#
# - Countries are sorted by size of change in transmission rate.
# - Includes only countries that were previously active (more than 100 estimated new cases).
# - "Large decrease" = at least -2% change.
#hide_input
style_news_infections(df_data.loc[slowing_outbreaks])
# > Tip: Click country name in legend to switch countries. Use mouse wheel to zoom Y axis.
#hide_input
infected_plots(slowing_outbreaks, "Countries with slowing waves (vs. 10 days ago)")
# # ICU need
# hide
def style_news_icu(df):
cols = {
'needICU.per100k': '<i>Current:</i><br>Estimated<br>ICU need<br>per 100k<br>population',
'needICU.per100k_past': f'<i>{day_diff} days ago:</i><br>Estimated<br>ICU need<br>per 100k<br>population',
'Cases.new.est': 'Estimated<br><i>recent</i> cases<br> in last 5 days',
'transmission_rate': 'Estimated<br>daily<br>transmission<br>rate',
'affected_ratio.est': 'Estimated <br><i>total</i><br>affected<br>population<br>percentage',
}
df_show = stylers.country_index_emoji_link(df)[cols.keys()].rename(columns=cols)
return (df_show.style
.bar(subset=cols['needICU.per100k'], color='#b21e3e', vmin=0, vmax=10)
.bar(subset=cols['needICU.per100k_past'], color='#c67f8e', vmin=0, vmax=10)
.bar(subset=cols['Cases.new.est'], color='#b57b17', vmin=0)
.bar(subset=cols['affected_ratio.est'], color='#5dad64', vmin=0, vmax=1.0)
.apply(stylers.add_bar, color='#f49d5a',
s_v=df['transmission_rate']/df['transmission_rate'].max(),
subset=cols['transmission_rate'])
.format('<b>{:.2f}</b>', subset=[cols['needICU.per100k'], cols['needICU.per100k_past']])
.format('<b>{:,.0f}</b>', subset=cols['Cases.new.est'])
.format('<b>{:.1%}</b>', subset=[cols['affected_ratio.est'],
cols['transmission_rate']]))
# hide
icu_diff = df_cur['needICU.per100k'] - df_past['needICU.per100k']
icu_increase = icu_diff[icu_diff > 0.2].sort_values(ascending=False).index
# hide_input
Markdown(f"## ⭕ Bad news: higher ICU need {emoji_flags(icu_increase)}")
# > Large increases in need for ICU beds per 100k population vs. 10 days ago.
#
# - Only countries for which the ICU need increased by more than 0.2 (per 100k).
# hide_input
style_news_icu(df_data.loc[icu_increase])
# > Tip: Click country name in legend to switch countries. Use mouse wheel to zoom Y axis.
# hide_input
infected_plots(icu_increase, "Countries with Higher ICU need (vs. 10 days ago)")
# hide
icu_decrease = icu_diff[icu_diff < -0.1].sort_values().index
# hide_input
Markdown(f"## 🟢 Good news: lower ICU need {emoji_flags(icu_decrease)}")
# > Large decreases in need for ICU beds per 100k population vs. 10 days ago.
#
# - Only countries for which the ICU need decreased by more than 0.1 (per 100k).
# hide_input
style_news_icu(df_data.loc[icu_decrease])
# > Tip: Click country name in legend to switch countries. Use mouse wheel to zoom Y axis.
# hide_input
infected_plots(icu_decrease, "Countries with Lower ICU need (vs. 10 days ago)")
# # New cases and deaths:
# hide
new_entries = df_cur.index[~df_cur.index.isin(df_past.index)]
# hide_input
Markdown(f"## ⭕ Bad news: new first significant outbreaks {emoji_flags(new_entries)}")
# > Countries that have started their first significant outbreak (crossed 1000 total reported cases or 20 deaths) vs. 10 days ago.
# hide_input
style_news_infections(df_data.loc[new_entries])
# > Tip: Click country name in legend to switch countries. Use mouse wheel to zoom Y axis.
# hide_input
infected_plots(new_entries, "Countries with first large outbreak (vs. 10 days ago)")
# hide
def style_no_news(df):
cols = {
'Cases.total.est': 'Estimated<br>total<br>cases',
'Deaths.total': 'Total<br>reported<br>deaths',
'last_case_date': 'Date<br>of last<br>reported case',
'last_death_date': 'Date<br>of last<br>reported death',
}
df_show = stylers.country_index_emoji_link(df)[cols.keys()].rename(columns=cols)
return (df_show.style
.format('<b>{:,.0f}</b>', subset=[cols['Cases.total.est'], cols['Deaths.total']]))
#hide
significant_past = ((df_past['Cases.total.est'] > 1000) & (df_past['Deaths.total'] > 10))
active_in_past = ((df_past['Cases.new'] > 0) | (df_past['Deaths.new'] > 0))
no_cases_filt = ((df_cur['Cases.total'] - df_past['Cases.total']) == 0)
no_deaths_filt = ((df_cur['Deaths.total'] - df_past['Deaths.total']) == 0)
no_cases_and_deaths = df_cur.loc[no_cases_filt & no_deaths_filt &
significant_past & active_in_past].index
# hide_input
Markdown(f"## 🟢 Good news: no new cases or deaths {emoji_flags(no_cases_and_deaths)}")
# > New countries with no new cases or deaths vs. 10 days ago.
#
# - Only considering countries that had at least 1000 estimated total cases and at least 10 total deaths and had an active outbreak previously.
# hide_input
style_no_news(df_data.loc[no_cases_and_deaths])
# > Tip: Click country name in legend to switch countries. Use mouse wheel to zoom Y axis.
# hide_input
infected_plots(no_cases_and_deaths, "New countries with no new cases or deaths (vs. 10 days ago)")
# hide
no_deaths = df_cur.loc[no_deaths_filt & (~no_cases_filt) &
significant_past & active_in_past].index
# hide_input
Markdown(f"## Mixed news: no new deaths, only new cases {emoji_flags(no_deaths)}")
# > New countries with no new deaths (only new cases) vs. 10 days ago.
#
# - Only considering countries that had at least 1000 estimated total cases and at least 10 total deaths and had an active outbreak previously.
# hide_input
style_news_infections(df_data.loc[no_deaths])
# > Tip: Click country name in legend to switch countries. Use mouse wheel to zoom Y axis.
# hide_input
infected_plots(no_deaths, "Countries with only new cases (vs. 10 days ago)")
# hide
not_active = df_cur.loc[no_cases_filt & significant_past & ~active_in_past].index
# hide_input
Markdown(f"## No news: continously inactive countries {emoji_flags(not_active)}")
# > Countries that had no new cases or deaths 10 days ago or now.
#
# - Only considering countries that had at least 1000 estimated total cases and at least 10 total deaths.
# - Caveat: these countries may have stopped reporting data like [Tanzania](https://en.wikipedia.org/wiki/COVID-19_pandemic_in_Tanzania).
# hide_input
style_no_news(df_data.loc[not_active])
# > Tip: Click country name in legend to switch countries. Use mouse wheel to zoom Y axis.
# hide_input
infected_plots(not_active, "Continuosly inactive countries (now and 10 days ago)")
# # Deaths burden:
# hide
def style_death_burden(df):
cols = {
'Deaths.new.per100k': f'<i>Current</i>:<br>{cur_data.PREV_LAG} day<br>death<br>burden<br>per 100k',
'Deaths.new.per100k.past': f'<i>{day_diff} days ago</i>:<br>{cur_data.PREV_LAG} day<br>death<br>burden<br>per 100k',
'Deaths.total.diff': f'New<br>reported deaths<br>since {day_diff}<br>days ago',
'needICU.per100k': 'Estimated<br>current<br>ICU need<br>per 100k<br>population',
'affected_ratio.est': 'Estimated <br><i>total</i><br>affected<br>population<br>percentage',
}
df_show = stylers.country_index_emoji_link(df)[cols.keys()].rename(columns=cols)
death_norm = max(df['Deaths.new.per100k'].max(), df['Deaths.new.per100k.past'].max())
return (df_show.style
.bar(subset=cols['needICU.per100k'], color='#b21e3e', vmin=0, vmax=10)
.bar(subset=cols['Deaths.new.per100k'], color='#7b7a7c', vmin=0, vmax=death_norm)
.bar(subset=cols['Deaths.new.per100k.past'], color='#918f93', vmin=0, vmax=death_norm)
.bar(subset=cols['Deaths.total.diff'], color='#6b595d', vmin=0)
.bar(subset=cols['affected_ratio.est'], color='#5dad64', vmin=0, vmax=1.0)
.format('<b>{:.0f}</b>', subset=[cols['Deaths.total.diff'],
])
.format('<b>{:.1f}</b>', subset=cols['needICU.per100k'])
.format('<b>{:.2f}</b>', subset=[cols['Deaths.new.per100k'],
cols['Deaths.new.per100k.past']])
.format('<b>{:.1%}</b>', subset=[cols['affected_ratio.est']], na_rep="-"))
# hide
death_change_ratio = df_data['Deaths.new.per100k'] / df_data['Deaths.new.per100k.past']
filt = (
(df_data['Deaths.new'] > 10) &
(df_data['Deaths.new.past'] > 10) &
(df_data['Deaths.new.per100k'] > 0.1) &
(death_change_ratio > 2))
higher_death_burden = df_data[filt]['Deaths.diff.per100k'].sort_values(ascending=False).index
# hide_input
Markdown(f"## ⭕ Bad news: higher death burden {emoji_flags(higher_death_burden)}")
# > Countries with significantly higher recent death burden per 100k population vs. 10 days ago.
#
# - "Significantly higher" = 100% more.
# - Only considering countries that had at least 10 recent deaths in both timeframes, and death burden of at least 0.1 per 100k.
# hide_input
style_death_burden(df_data.loc[higher_death_burden])
# hide_input
infected_plots(higher_death_burden, "Countries with higher death burden (vs. 10 days ago)")
# hide
filt = (
(df_data['Deaths.new'] > 10) &
(df_data['Deaths.new.past'] > 10) &
(df_data['Deaths.new.per100k.past'] > 0.1) &
(death_change_ratio < 0.5))
lower_death_burden = df_data[filt]['Deaths.diff.per100k'].sort_values(ascending=False).index
# hide_input
Markdown(f"## 🟢 Good news: lower death burden {emoji_flags(lower_death_burden)}")
# > Countries with significantly lower recent death burden per 100k population vs. 10 days ago.
#
# - "Significantly lower" = 50% less
# - Only considering countries that had at least 10 recent deaths in both timeframes, and death burden of at least 0.1 per 100k.
# hide_input
style_death_burden(df_data.loc[lower_death_burden])
# hide_input
infected_plots(lower_death_burden, "Countries with lower death burden (vs. 10 days ago)")
# # Appendix:
# > Note: For interactive map, per country details, projections, and modeling methodology see [Projections of ICU need by Country dashboard](/covid-progress-projections/)
# > Warning: the visualisation below contains the results of a predictive model that was not built by an epidemiologist.
# ## Future model projections plots per country
# > For countries in any of the above groups.
# > Tip: Choose country from the drop-down below the graph.
#hide_input
all_news = (new_waves, slowing_outbreaks,
icu_increase, icu_decrease,
higher_death_burden, lower_death_burden,
not_active, no_deaths, no_cases_and_deaths, new_entries)
news_countries = [c for g in all_news for c in g]
df_alt_filt = df_alt_all[(df_alt_all['day'] > -60) &
(df_alt_all['country'].isin(news_countries))]
covid_helpers.altair_sir_plot(df_alt_filt, new_waves[0])
#hide
df_tot = df_alt_all.rename(columns={'country': cur_data.COL_REGION}
).set_index(cur_data.COL_REGION)
df_tot['population'] = df_cur_all['population']
for c in df_tot.columns[df_alt_all.dtypes == float]:
df_tot[c + '-total'] = df_tot[c] * df_tot['population']
df_tot = df_tot.reset_index()
df_tot.columns = [c.replace('.', '-') for c in df_tot.columns]
#hide_input
df_now = df_tot[df_tot['day'] == 0]
pop = df_now['population'].sum()
s_now = df_now['Susceptible-total'].sum() / pop
i_now = df_now['Infected-total'].sum() / pop
r_now = df_now['Removed-total'].sum() / pop
Markdown("## World totals:\n"
f"Infected 😷: **{i_now:.1%}**, "
f"Removed 😔: **{r_now:.1%}**, "
f"Susceptible 😟: **{s_now:.1%}**")
# ## Future World projections (all countries stacked)
# The outputs of the models for all countries in stacked plots.
# > Tip: Hover the mouse over the area to see which country is which and the country's S/I/R ratios at that point.
#
# > Tip: The plots are zoomable and draggable.
# +
#hide
# filter by days
days = 30
df_tot = df_tot[df_tot['day'].between(-days, days) | (df_tot['day'] % 10 == 0)]
# filter out noisy countries for actively infected plot:
df_tot_filt = df_tot[df_tot[cur_data.COL_REGION].isin(df_cur.index.unique())]
# -
# ### World total estimated actively infected
# +
#hide_input
import altair as alt
alt.data_transformers.disable_max_rows()
# today
today_line = (alt.Chart(pd.DataFrame({'x': [0]}))
.mark_rule(color='orange')
.encode(x='x', size=alt.value(1)))
# make plot
max_y = (df_tot_filt[df_tot_filt['day'].between(-days, days)]
.groupby('day')['Infected-total'].sum().max())
stacked_inf = alt.Chart(df_tot_filt).mark_area().encode(
x=alt.X('day:Q',
title=f'days relative to today ({cur_data.cur_date})',
scale=alt.Scale(domain=(-days, days))),
y=alt.Y("Infected-total:Q", stack=True, title="Number of people",
scale=alt.Scale(domain=(0, max_y))),
color=alt.Color("Country/Region:N", legend=None),
tooltip=['Country/Region', 'Susceptible', 'Infected', 'Removed'],
)
(stacked_inf + today_line).interactive()\
.properties(width=650, height=340)\
.properties(title='Actively infected')\
.configure_title(fontSize=20)
# -
# ### World total estimated recovered or dead
# +
#hide_input
max_y = df_tot_filt[df_tot_filt['day']==days]['Removed-total'].sum()
stacked_rem = alt.Chart(df_tot_filt).mark_area().encode(
x=alt.X('day:Q',
title=f'days relative to today ({cur_data.cur_date})',
scale=alt.Scale(domain=(-days, days))),
y=alt.Y("Removed-total:Q", stack=True, title="Number of people",
scale=alt.Scale(domain=(0, max_y))),
color=alt.Color("Country/Region:N", legend=None),
tooltip=['Country/Region', 'Susceptible', 'Infected', 'Removed']
)
(stacked_rem + today_line).interactive()\
.properties(width=650, height=340)\
.properties(title='Recovered or dead')\
.configure_title(fontSize=20)
# -
# <a id='methodology'></a>
# ## Methodology
# - I'm not an epidemiologist. This is an attempt to understand what's happening, and what the future looks like if current trends remain unchanged.
# - Everything is approximated and depends heavily on underlying assumptions.
# - Transmission rate calculation:
# - Growth rate is calculated over the 5 past days by averaging the daily growth rates.
# - Confidence bounds are calculated from the weighted standard deviation of the growth rate over the last 5 days. Model predictions are calculated for growth rates within 1 STD of the weighted mean. The maximum and minimum values for each day are used as confidence bands.
# Countries with highly noisy transmission rates are excluded from transmission rate change tables ("new waves", "slowing waves").
# - Transmission rate, and its STD are calculated from growth rate and its STD using active cases estimation.
# - For projections (into future) very noisy projections (with broad confidence bounds) are not shown in the tables.
# - Where the rate estimated from [Total Outstanding Cases](https://covid19dashboards.com/outstanding_cases/#Appendix:-Methodology-of-Predicting-Recovered-Cases) is too high (on down-slopes), a recovery probability of 1/20 is used (equivalent to 20 days to recover).
# - Total cases are estimated from the reported deaths for each country:
# - Each country has a different testing policy and capacity and cases are under-reported in some countries. Using an estimated IFR (fatality rate) we can estimate the number of cases some time ago by using the total deaths until today.
# - IFRs for each country are estimated using the age-adjusted IFRs from [May 1 New York paper](https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3590771) and [UN demographic data for 2020](https://population.un.org/wpp/Download/Standard/Population/). These IFRs can be found in `df['age_adjusted_ifr']` column. Some examples: US - 0.98%, UK - 1.1%, Qatar - 0.25%, Italy - 1.4%, Japan - 1.6%.
# - The average fatality lag is assumed to be 8 days on average for a case to go from being confirmed positive (after incubation + testing lag) to death. This is the same figure used by ["Estimating The Infected Population From Deaths"](https://covid19dashboards.com/covid-infected/).
# - Testing bias adjustment: the actual lagged fatality rate is then divided by the IFR to estimate the testing bias in a country. The estimated testing bias then multiplies the reported case numbers to estimate the *true* case numbers (*=case numbers if testing coverage was as comprehensive as in the heavily tested countries*).
# - ICU need is calculated and age-adjusted as follows:
# - UK ICU ratio was reported as [4.4% of active reported cases](https://www.imperial.ac.uk/media/imperial-college/medicine/sph/ide/gida-fellowships/Imperial-College-COVID19-NPI-modelling-16-03-2020.pdf).
# - Using UKs ICU ratio, UK's testing bias, and IFRs corrected for age demographics we can estimate each country's ICU ratio (the number of cases requiring ICU hospitalisation).
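# - As a rough, self-contained illustration of the "cases from deaths" and testing-bias adjustment described above (all numbers below are made-up assumptions; the real calculation lives in `covid_helpers`, not here):
# Illustrative sketch only (assumed inputs, not actual data):
reported_deaths_total = 10_000    # cumulative reported deaths
reported_cases_lagged = 300_000   # reported cases ~8 days ago (the assumed fatality lag)
reported_cases_now = 400_000      # reported cases today
age_adjusted_ifr = 0.01           # assumed age-adjusted infection fatality rate (1%)
# Deaths observed now correspond to infections ~8 days ago, so dividing by the IFR
# estimates how many people were actually infected back then.
estimated_true_cases_lagged = reported_deaths_total / age_adjusted_ifr
# Testing bias: how strongly reported numbers under-count the estimated true numbers.
testing_bias = estimated_true_cases_lagged / reported_cases_lagged
# Scale today's reported cases by the same bias to estimate today's true cases.
estimated_true_cases_now = reported_cases_now * testing_bias
print(f"testing bias ~ {testing_bias:.1f}, estimated true cases now ~ {estimated_true_cases_now:,.0f}")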
# 
|
py | 1a3cc2e20b0a97aff309eb92d56bb0c86aac8512 | #!/usr/bin/env python
#
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Display unicode coverage of a set of cmaps."""
import argparse
import collections
from nototools import cmap_data
from nototools import unicode_data
from nototools import tool_utils
_MISSING_SCRIPTS = frozenset(['<MISSING>'])
_OMITTED_SCRIPTS = frozenset(['(omitted)'])
_OMITTED = tool_utils.parse_int_ranges("""
0001-000c 000e-001f # C0 controls
007f-009f # del and C1 controls
d800-dfff # surrogates
e000-f8ff # pua
fe00-fe0f # variation selectors
feff # BOM
e0000-e007f # tags
e0100-e01ff # supplementary variation selectors
f0000-ffffd # supplementary PUA
# fe000-fe4e4 fe4ef-fe82b fe82d fe838-ffffd # plane 15 PUA - emoji
100000-10ffff # plane 16 pua""")
_LGC_LIST = ['LGC', 'Latn', 'Grek', 'Cyrl']
def _get_scripts(cp, cp_to_scripts):
scripts = cp_to_scripts.get(cp, None)
if not scripts:
scripts = _OMITTED_SCRIPTS if cp in _OMITTED else _MISSING_SCRIPTS
return scripts
def _script_names(scripts):
script_list = []
# sort LGC first
for lgc in _LGC_LIST:
if lgc in scripts:
script_list.append(lgc)
script_list += [s for s in sorted(scripts) if s not in _LGC_LIST]
return ', '.join(script_list)
def _create_cp_to_scripts(data, only_scripts=None):
cp_to_scripts = collections.defaultdict(set)
all_scripts = set()
skip_set = frozenset(['Zinh', 'Zyyy', 'Zzzz'])
cjk_set = frozenset('Bopo,Hang,Hani,Hans,Hant,Hira,Jpan,Kana,Kore'.split(','))
lgc_set = frozenset('Latn,Grek,Cyrl'.split(','))
for row in data.table.rows:
script = row.script
if only_scripts and script not in only_scripts:
continue
if script in skip_set:
continue
if script in cjk_set:
script = 'CJK'
if script in lgc_set:
script = 'LGC'
all_scripts.add(script)
chars = tool_utils.parse_int_ranges(row.ranges)
for cp in chars:
cp_to_scripts[cp].add(script)
return cp_to_scripts, all_scripts
def _list_details(start_cp, limit_cp, defined_cps, defined_count, details):
num = 0
initial_cp = start_cp
while num < details - 1 and num < defined_count:
if initial_cp in defined_cps:
print '%13d %04x %s' % (
num + 1, initial_cp, unicode_data.name(initial_cp, '(unnamed)'))
num += 1
initial_cp += 1
if num < defined_count:
final_cp = limit_cp - 1
final_name = None
while final_cp >= initial_cp:
if final_cp in defined_cps:
final_name = unicode_data.name(final_cp, '(unnamed)')
num += 1
break
final_cp -= 1
if final_name and num < defined_count:
middle_cp = final_cp - 1
while middle_cp >= initial_cp:
if middle_cp in defined_cps:
print '%13s' % '...'
break
middle_cp -= 1
if final_name:
print '%13d %04x %s' % (defined_count, final_cp, final_name)
def _is_empty_scripts(scripts):
return (not scripts
or scripts == _MISSING_SCRIPTS
or scripts == _OMITTED_SCRIPTS)
def _list_range(
start_cp, limit_cp, defined_cps, defined_count, scripts, all_scripts,
only_scripts, details):
if limit_cp != start_cp + 1:
range_text = '%04x-%04x' % (start_cp, limit_cp - 1)
else:
range_text = '%04x' % start_cp
if not scripts:
num_scripts = 0
script_names = '(none)'
elif _is_empty_scripts(scripts):
num_scripts = 0
script_names = iter(scripts).next()
else:
num_scripts = len(scripts)
if scripts == all_scripts and scripts != only_scripts:
# only use 'all' if we're not limiting scripts
script_names = '(all)'
else:
script_names = _script_names(scripts)
print '%13s %6d %3s in %3d %7s: %s' % (
range_text, defined_count, 'cps' if defined_count != 1 else 'cp',
num_scripts, 'scripts' if num_scripts != 1 else 'script',
script_names)
if details > 0:
_list_details(start_cp, limit_cp, defined_cps, defined_count, details)
def _list_blocks(
start, limit, defined_cps, cp_to_scripts, all_scripts, only_scripts,
details):
start_cp = -1
defined_count = 0
block = None
showed_block = False
scripts = None
skip_empty = bool(only_scripts)
for cp in range(start, limit):
is_defined = cp in defined_cps
cp_block = unicode_data.block(cp)
cp_scripts = _get_scripts(cp, cp_to_scripts) if is_defined else None
if cp_block != block or (
cp_scripts and scripts and cp_scripts != scripts):
if block and block != 'No_Block':
if not (skip_empty and _is_empty_scripts(scripts)):
if not showed_block:
print '...' if block == 'No_Block' else block
showed_block = True
_list_range(
start_cp, cp, defined_cps, defined_count, scripts, all_scripts,
only_scripts, details)
start_cp = cp
defined_count = 0
if cp_block != block:
block = cp_block
showed_block = False
scripts = None
if is_defined:
scripts = cp_scripts
defined_count += 1
if not (skip_empty and _is_empty_scripts(scripts)):
if not showed_block:
print '...' if block == 'No_Block' else block
_list_range(
start_cp, limit, defined_cps, defined_count, scripts, all_scripts,
only_scripts, details)
def _summarize_block(block, block_count, defined_count, script_counts):
if block == 'No_Block':
print '...'
return
if block_count == defined_count:
print '%s (%d cps)' % (block, defined_count)
else:
print '%s (%d of %d cps)' % (block, defined_count, block_count)
lower_limit = int(defined_count / 10)
groups = collections.defaultdict(list)
for script, count in script_counts.iteritems():
groupnum = int(count / 5) * 5
if groupnum < lower_limit:
groupnum = 0
groups[groupnum].append((script, count))
for key in sorted(groups, reverse=True):
group_list = groups[key]
low = 0x110000
hi = -1
scripts = set()
for g in group_list:
count = g[1]
if count < low:
low = count
if count > hi:
hi = count
scripts.add(g[0])
if low == hi:
if hi == defined_count:
count = 'all'
else:
count = '%d' % hi
else:
count = '%d-%d' % (low, hi)
script_names = _script_names(scripts)
print '%6s: %s' % (count, script_names)
def _summarize_blocks(start, limit, defined_cps, cp_to_scripts, all_scripts):
block = None
block_count = 0
defined_count = 0
script_counts = None
for cp in range(start, limit):
cp_block = unicode_data.block(cp)
if cp_block != block:
if block:
_summarize_block(
block, block_count, defined_count, script_counts)
block = cp_block
block_count = 0
defined_count = 0
script_counts = collections.defaultdict(int)
block_count += 1
is_defined = cp in defined_cps and cp not in _OMITTED
if not is_defined:
continue
defined_count += 1
scripts = _get_scripts(cp, cp_to_scripts)
for script in scripts:
script_counts[script] += 1
_summarize_block(block, block_count, defined_count, script_counts)
def block_coverage(
cmap_file, start=0, limit=0x20000, only_scripts=None, details=0,
summary=False):
data = cmap_data.read_cmap_data_file(cmap_file)
cp_to_scripts, all_scripts = _create_cp_to_scripts(data, only_scripts)
defined_cps = unicode_data.defined_characters(version=9.0)
if summary:
_summarize_blocks(
start, limit, defined_cps, cp_to_scripts, all_scripts)
else:
_list_blocks(
start, limit, defined_cps, cp_to_scripts, all_scripts, only_scripts,
details)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'cmap_file', help='cmap data file',
metavar='file')
parser.add_argument(
'-d', '--details', help='show details on N characters in each range'
' (3 if no value provided)', metavar='num', default=0, const=3,
type=int, nargs='?')
parser.add_argument(
'-s', '--summary', help='show summary of block usage only',
action='store_true')
parser.add_argument(
'-r', '--range', help='range of characters to show (default 0-1ffff)',
metavar='range', default='0-1ffff')
parser.add_argument(
'-sc', '--scripts', help='limit scripts to show',
metavar='script', nargs='+', default=None)
args = parser.parse_args()
ranges = tool_utils.parse_int_ranges(args.range)
start = min(ranges)
end = max(ranges)
if end > 0x10ffff:
        end = 0x10ffff
limit = end + 1
if args.scripts:
args.scripts = frozenset(args.scripts)
block_coverage(
args.cmap_file, start, limit, args.scripts, args.details, args.summary)
if __name__ == "__main__":
main()
|
py | 1a3cc332a6ee9686afa080dc8ea8fa7ed6a4e267 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from st2common.exceptions import db
from st2common.models.system.common import ResourceReference
def get_ref_from_model(model):
if model is None:
raise ValueError('Model has None value.')
model_id = getattr(model, 'id', None)
if model_id is None:
raise db.StackStormDBObjectMalformedError('model %s must contain id.' % str(model))
reference = {'id': str(model_id),
'name': getattr(model, 'name', None)}
return reference
def get_model_from_ref(db_api, reference):
if reference is None:
raise db.StackStormDBObjectNotFoundError('No reference supplied.')
model_id = reference.get('id', None)
if model_id is not None:
return db_api.get_by_id(model_id)
model_name = reference.get('name', None)
if model_name is None:
raise db.StackStormDBObjectNotFoundError('Both name and id are None.')
return db_api.get_by_name(model_name)
def get_model_by_resource_ref(db_api, ref):
"""
Retrieve a DB model based on the resource reference.
:param db_api: Class of the object to retrieve.
:type db_api: ``object``
:param ref: Resource reference.
:type ref: ``str``
:return: Retrieved object.
"""
ref_obj = ResourceReference.from_string_reference(ref=ref)
result = db_api.query(name=ref_obj.name, pack=ref_obj.pack).first()
return result
def get_resource_ref_from_model(model):
"""
Return a ResourceReference given db_model.
:param model: DB model that contains name and pack.
:type model: ``object``
:return: ResourceReference.
"""
try:
name = model.name
pack = model.pack
except AttributeError:
raise Exception('Cannot build ResourceReference for model: %s. Name or pack missing.',
model)
return ResourceReference(name=name, pack=pack)
def get_str_resource_ref_from_model(model):
"""
Return a resource reference as string given db_model.
:param model: DB model that contains name and pack.
:type model: ``object``
:return: String representation of ResourceReference.
"""
return get_resource_ref_from_model(model).ref
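# Illustrative usage (comments only, not part of the original module; assumes a model
# whose pack is 'examples' and name is 'my_action'):
#   get_resource_ref_from_model(model).ref    # -> 'examples.my_action'
#   get_str_resource_ref_from_model(model)    # -> 'examples.my_action'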
|
py | 1a3cc3de48f94ad797b6f443569c8ff6c7f33627 | from random import randint as r
from time import sleep as s
print("This is a story about an ant")
sugar = 0
bank = 5_000_000
chocolate = 0
chocoxplode = 0
cost = 0
def intro(sugar, bank, chocolate, chocoxplode, cost):
s(2)
print("\nIt is a true story\n")
s(2)
print(f'Your sugar balance is {sugar}')
s(2)
choice = input("\nHow much sugar would you like?")
s(1)
choice = int(choice)
if choice < 10000:
sugar += choice
bank -= choice
sugarbank(sugar, bank, chocolate, chocoxplode, cost)
else:
print('You greedy monkey! No sugar for you.')
sugarbank(sugar, bank, chocolate, chocoxplode, cost)
def sugarbank(sugar, bank, chocolate, chocoxplode, cost):
s(2)
print(f'\n\nYour sugar balance is {sugar}')
print('''
Welcome to the sugar bank!
    Would you like to make a deposit or a withdrawal?
Type CAT for deposit
    Type SNAKEFACE for withdrawal
Type HAMSTER to rob the bank
Type MANGO to light your sugar on fire
Type GORILLA to give the bank your money
Type RAVEN to trade some sugar for chocolate
Type LADYBUG to play the lottery
    Type WOLF to make chocoxplode (exploding chocolate with enhanced flavor)
''')
choice = input("Please pick an option")
if choice == 'CAT':
choice = input("How many sugars do you want to deposit")
choice = int(choice)
print("You deposit {choice} sugars")
sugar -= choice
bank += choice
elif choice == 'HAMSTER':
print("You rob the bank")
print("You now have a huge amount of sugar")
sugar += 50000
bank -= 50000
elif choice == 'MANGO':
print("You light your sugar on fire")
sugar = 0
print("You now have no sugar.")
print("I hope you feel the pain of sugar poverty!\n")
elif choice == 'GORILLA':
bank += sugar
sugar = 0
print("You give all your sugar to the bank. The bank will remember your kindness.")
s(2)
print("But a gorilla hears you calling it so it rampages and nearly kills you. ")
s(2)
print("But it kills you. You die. The end,.")
s(5)
print("Just kidding! The bank remembers your kindness and gives the gorilla 100 sugar. It agrees to leave you alone. ")
bank -= 100
elif choice == 'LADYBUG':
if sugar < 5:
print("You can't afford the lottery!")
else:
winning = []
winning.append(r(1,10))
winning.append(r(1,10))
winning.append(r(1,10))
lottery = []
print("You decide to play the lottery")
print("The lottery costs {cost} sugar")
sugar -= cost
cost *= 1.4
for i in range(3):
choice = input("Pick a number ")
choice = int(choice)
lottery.append(choice)
print(f'The winning lottery numbers are:')
for number in winning:
print(number)
            if lottery == winning:  # win only when all picked numbers match
print("You won the lottery!")
sugar += 10*number
elif choice == 'BREAD':
print("You go to the bread bank")
url = open("bread.txt")
html = url.read()
print(html)
elif choice == 'RAVEN':
print("1 chocobar => 50 sugars (chocolate is better than sugar!)")
chocolate = input("How many chocobars would you like?")
sugar -= int(chocolate)*50
elif choice == 'WOLF':
print("10 chocolates + 100 sugars => chocoxplode")
        new_chocoxplode = int(input("How many chocoxplodes would you like?"))
        chocoxplode += new_chocoxplode
        chocolate -= 10 * new_chocoxplode
        sugar -= 100 * new_chocoxplode
else:
print("You take 100 sugars")
sugar += 100
bank -= 100
print(f'\nYour sugar balance is {sugar}\n')
print(f'\nYour chocolate balance is {chocolate}\n')
print(f'\nYour chocoxplode balance is {chocoxplode}\n')
s(2)
choice = input("Do you want to play again? y/n ")
s(1)
if choice == 'y':
intro(sugar, bank, chocolate, chocoxplode, cost)
intro(sugar, bank, chocolate, chocoxplode, cost)
print("Thanks for playing, game over")
#I had 97093 Sugar on 4/20/2020
|
py | 1a3cc4287e25be5f120b467b8f40cfa2915a380e | from backend.improvements.seastead import Seastead
import pytest
@pytest.fixture(scope="function")
def setup_improvement():
imp = Seastead()
return imp
# Init
testdata = [
('food', 2),
('production', 0),
('gold', 0),
('science', 0),
('culture', 0),
('faith', 0),
('housing', 2),
('appeal', 0),
('power', 0),
('acceptable_terrain', [
'lake',
'coast',
'ocean',
]),
('acceptable_features', None),
('resources', None),
]
@pytest.mark.parametrize("resource, value", testdata)
def test_init(setup_improvement, resource, value):
test_improvement = setup_improvement
assert getattr(test_improvement, resource) == value
|
py | 1a3cc6055a8b5eb4bb70c93fd000bc58e202b92c | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from oslo_log import log as logging
import paste.urlmap
try:
from urllib.request import parse_http_list # pylint: disable=E0611
except ImportError:
from urllib2 import parse_http_list # Python 2
from cinder.api.openstack import wsgi
_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"'
_option_header_piece_re = re.compile(
r';\s*([^\s;=]+|%s)\s*'
r'(?:=\s*([^;]+|%s))?\s*' %
(_quoted_string_re, _quoted_string_re))
LOG = logging.getLogger(__name__)
def unquote_header_value(value):
"""Unquotes a header value.
This does not use the real unquoting but what browsers are actually
using for quoting.
:param value: the header value to unquote.
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
return value
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
:param value: a string with a list header.
:return: :class:`list`
"""
result = []
for item in parse_http_list(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
def parse_options_header(value):
"""Parse 'Content-Type'-like header into a tuple.
Parse a ``Content-Type`` like header into a tuple with the content
type and the options:
>>> parse_options_header('Content-Type: text/html; mimetype=text/html')
('Content-Type:', {'mimetype': 'text/html'})
:param value: the header to parse.
:return: (str, options)
"""
def _tokenize(string):
for match in _option_header_piece_re.finditer(string):
key, value = match.groups()
key = unquote_header_value(key)
if value is not None:
value = unquote_header_value(value)
yield key, value
if not value:
return '', {}
parts = _tokenize(';' + value)
name = next(parts)[0]
extra = dict(parts)
return name, extra
class Accept(object):
def __init__(self, value):
self._content_types = [parse_options_header(v) for v in
parse_list_header(value)]
def best_match(self, supported_content_types):
# FIXME: Should we have a more sophisticated matching algorithm that
# takes into account the version as well?
best_quality = -1
best_content_type = None
best_params = {}
best_match = '*/*'
for content_type in supported_content_types:
for content_mask, params in self._content_types:
try:
quality = float(params.get('q', 1))
except ValueError:
continue
if quality < best_quality:
continue
elif best_quality == quality:
if best_match.count('*') <= content_mask.count('*'):
continue
if self._match_mask(content_mask, content_type):
best_quality = quality
best_content_type = content_type
best_params = params
best_match = content_mask
return best_content_type, best_params
def content_type_params(self, best_content_type):
"""Find parameters in Accept header for given content type."""
for content_type, params in self._content_types:
if best_content_type == content_type:
return params
return {}
def _match_mask(self, mask, content_type):
if '*' not in mask:
return content_type == mask
if mask == '*/*':
return True
mask_major = mask[:-2]
content_type_major = content_type.split('/', 1)[0]
return content_type_major == mask_major
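# Illustrative usage of Accept (comments only; not exercised by this module):
#   accept = Accept('application/json;q=0.8;version=1.1, application/xml;q=0.2')
#   accept.best_match(['application/json', 'application/xml'])
#   # -> ('application/json', {'q': '0.8', 'version': '1.1'})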
def urlmap_factory(loader, global_conf, **local_conf):
if 'not_found_app' in local_conf:
not_found_app = local_conf.pop('not_found_app')
else:
not_found_app = global_conf.get('not_found_app')
if not_found_app:
not_found_app = loader.get_app(not_found_app, global_conf=global_conf)
urlmap = URLMap(not_found_app=not_found_app)
for path, app_name in local_conf.items():
path = paste.urlmap.parse_path_expression(path)
app = loader.get_app(app_name, global_conf=global_conf)
urlmap[path] = app
return urlmap
class URLMap(paste.urlmap.URLMap):
def _match(self, host, port, path_info):
"""Find longest match for a given URL path."""
for (domain, app_url), app in self.applications:
if domain and domain != host and domain != host + ':' + port:
continue
if (path_info == app_url or path_info.startswith(app_url + '/')):
return app, app_url
return None, None
def _set_script_name(self, app, app_url):
def wrap(environ, start_response):
environ['SCRIPT_NAME'] += app_url
return app(environ, start_response)
return wrap
def _munge_path(self, app, path_info, app_url):
def wrap(environ, start_response):
environ['SCRIPT_NAME'] += app_url
environ['PATH_INFO'] = path_info[len(app_url):]
return app(environ, start_response)
return wrap
def _path_strategy(self, host, port, path_info):
"""Check path suffix for MIME type and path prefix for API version."""
mime_type = app = app_url = None
parts = path_info.rsplit('.', 1)
if len(parts) > 1:
possible_type = 'application/' + parts[1]
if possible_type in wsgi.SUPPORTED_CONTENT_TYPES:
mime_type = possible_type
parts = path_info.split('/')
if len(parts) > 1:
possible_app, possible_app_url = self._match(host, port, path_info)
# Don't use prefix if it ends up matching default
if possible_app and possible_app_url:
app_url = possible_app_url
app = self._munge_path(possible_app, path_info, app_url)
return mime_type, app, app_url
def _content_type_strategy(self, host, port, environ):
"""Check Content-Type header for API version."""
app = None
params = parse_options_header(environ.get('CONTENT_TYPE', ''))[1]
if 'version' in params:
app, app_url = self._match(host, port, '/v' + params['version'])
if app:
app = self._set_script_name(app, app_url)
return app
def _accept_strategy(self, host, port, environ, supported_content_types):
"""Check Accept header for best matching MIME type and API version."""
accept = Accept(environ.get('HTTP_ACCEPT', ''))
app = None
# Find the best match in the Accept header
mime_type, params = accept.best_match(supported_content_types)
if 'version' in params:
app, app_url = self._match(host, port, '/v' + params['version'])
if app:
app = self._set_script_name(app, app_url)
return mime_type, app
def __call__(self, environ, start_response):
host = environ.get('HTTP_HOST', environ.get('SERVER_NAME')).lower()
if ':' in host:
host, port = host.split(':', 1)
else:
if environ['wsgi.url_scheme'] == 'http':
port = '80'
else:
port = '443'
path_info = environ['PATH_INFO']
path_info = self.normalize_url(path_info, False)[1]
# The MIME type for the response is determined in one of two ways:
# 1) URL path suffix (eg /servers/detail.json)
# 2) Accept header (eg application/json;q=0.8, application/xml;q=0.2)
# The API version is determined in one of three ways:
# 1) URL path prefix (eg /v1.1/tenant/servers/detail)
# 2) Content-Type header (eg application/json;version=1.1)
# 3) Accept header (eg application/json;q=0.8;version=1.1)
supported_content_types = list(wsgi.SUPPORTED_CONTENT_TYPES)
mime_type, app, app_url = self._path_strategy(host, port, path_info)
# Accept application/atom+xml for the index query of each API
# version mount point as well as the root index
if (app_url and app_url + '/' == path_info) or path_info == '/':
supported_content_types.append('application/atom+xml')
if not app:
app = self._content_type_strategy(host, port, environ)
if not mime_type or not app:
possible_mime_type, possible_app = self._accept_strategy(
host, port, environ, supported_content_types)
if possible_mime_type and not mime_type:
mime_type = possible_mime_type
if possible_app and not app:
app = possible_app
if not mime_type:
mime_type = 'application/json'
if not app:
# Didn't match a particular version, probably matches default
app, app_url = self._match(host, port, path_info)
if app:
app = self._munge_path(app, path_info, app_url)
if app:
environ['cinder.best_content_type'] = mime_type
return app(environ, start_response)
environ['paste.urlmap_object'] = self
return self.not_found_application(environ, start_response)
|
py | 1a3cc6657ef0097faac2dcb6e013708828cf7f48 | test = {
'name': '1.10',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> number_of_words_plotted_in_bar_chart == 30
True
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'
}
]
}
|
py | 1a3cc67d2ddc16b48d53b3db43a082e89a53c18e | import os
import os.path
import sys
import unittest
from subprocess import Popen
from subprocess import PIPE
class TestRun(unittest.TestCase):
maxDiff = None
def setUp(self):
self.cwd = os.getcwd()
os.chdir(os.path.dirname(__file__))
def tearDown(self):
os.chdir(self.cwd)
def _run(self, script):
env = os.environ.copy()
env['PYTHONWARNINGS'] = 'ignore'
args = [sys.executable, '-m', 'gevent.monkey', script, 'patched']
p = Popen(args, stdout=PIPE, stderr=PIPE, env=env)
gout, gerr = p.communicate()
self.assertEqual(0, p.returncode, (gout, gerr))
args = [sys.executable, script, 'stdlib']
p = Popen(args, stdout=PIPE, stderr=PIPE)
pout, perr = p.communicate()
self.assertEqual(0, p.returncode, (pout, perr))
glines = gout.decode("utf-8").splitlines()
plines = pout.decode('utf-8').splitlines()
self.assertEqual(glines, plines)
self.assertEqual(gerr, perr)
return glines, gerr
def test_run_simple(self):
self._run(os.path.join('monkey_package', 'script.py'))
def test_run_package(self):
# Run a __main__ inside a package.
lines, _ = self._run('monkey_package')
self.assertTrue(lines[0].endswith('__main__.py'), lines[0])
self.assertEqual(lines[1], '__main__')
def test_issue_302(self):
lines, _ = self._run(os.path.join('monkey_package', 'issue302monkey.py'))
self.assertEqual(lines[0], 'True')
lines[1] = lines[1].replace('\\', '/') # windows path
self.assertEqual(lines[1], 'monkey_package/issue302monkey.py')
self.assertEqual(lines[2], 'True', lines)
if __name__ == '__main__':
unittest.main()
|
py | 1a3cc695f031c0d3fce21d101100bdaec87ed8af | '''
Coding our First Game in PyGame
-
Cheat Codes and HomeScreen in PyGame
'''
import pygame
import random
pygame.init()
# print(x) # All 6 pygame modules successfully imported
# Colors
white = (255, 255, 255)
red = (255, 0, 0)
black = (0, 0, 0)
# Creating Game Window
screen_width = 900
screen_height = 600
gameWindow = pygame.display.set_mode((screen_width, screen_height)) # Game Window of 1200x500
pygame.display.set_caption("Snake - by Anubhav Madhav") # Title of the Game, which appears at the top of the window
pygame.display.update() # We need to update our display each and everytime we make a change
clock = pygame.time.Clock()
font = pygame.font.SysFont(None, 55)
def text_screen(text, color, x, y):
screen_text = font.render(text, True, color)
gameWindow.blit(screen_text, [x,y])
def plot_snake(gameWindow, color, snk_list, snake_size):
for x,y in snk_list:
pygame.draw.rect(gameWindow, color, [x, y, snake_size, snake_size])
def welcome(): # For Home Screen
exit_game = False
while not exit_game:
gameWindow.fill((233, 220, 229))
text_screen("Welcome to Snakes", black, 260, 250)
text_screen("Developed by Anubhav Madhav", black, 170, 300)
text_screen("Press Space Bar to Play", black, 220, 400)
for event in pygame.event.get():
if event.type == pygame.QUIT:
exit_game = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
gameloop()
pygame.display.update()
clock.tick(30) # hardcoded 'fps' because 'fps' is local variable of gameloop(), therefore we cannot use it in welcome()
# Creating a Game Loop
def gameloop():
# Game Specific Variables
exit_game = False
game_over = False
snake_x = 45 # Initial Position of Snake
snake_y = 55 # Initial Position of Snake
snake_size = 30
init_velocity = 5
velocity_x = 0
velocity_y = 0
    food_x = random.randint(20, screen_width // 2)   # integer bounds for randint
    food_y = random.randint(20, screen_height // 2)
food_size = 30
score = 0
fps = 30 # frames per second
snk_list = []
snk_length = 1
with open("highscore.txt", "r") as f:
hiscore = f.read()
while not exit_game:
if game_over:
gameWindow.fill(white)
text_screen("Game Over!! Press Enter to Continue", red, 100, 250)
with open("highscore.txt", "w") as f:
f.write(str(hiscore))
for event in pygame.event.get(): # This gets all the events which a user can perform in a game, like mouse hover, mouse click, pressing a certain key etc.
# print(event)
if event.type == pygame.QUIT:
exit_game = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
welcome()
else:
for event in pygame.event.get(): # This gets all the events which a user can perform in a game, like mouse hover, mouse click, pressing a certain key etc.
# print(event)
if event.type == pygame.QUIT:
exit_game = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RIGHT:
velocity_x = init_velocity
velocity_y = 0
if event.key == pygame.K_LEFT:
velocity_x = - init_velocity
velocity_y = 0
if event.key == pygame.K_UP:
velocity_y = - init_velocity
velocity_x = 0
if event.key == pygame.K_DOWN:
velocity_y = init_velocity
velocity_x = 0
if event.key == pygame.K_q: # cheatcode
score += 10
snake_x = snake_x + velocity_x
snake_y = snake_y + velocity_y
if abs(snake_x - food_x)<6 and abs(snake_y - food_y)<6: # condition when snake eats the food
score += 10
# print("Score: ", score )
                food_x = random.randint(20, screen_width // 2) # to change the position of food after eating
                food_y = random.randint(20, screen_height // 2)
snk_length += 5
if score>int(hiscore):
hiscore = score
gameWindow.fill(white) # Setting background color as white
pygame.draw.rect(gameWindow, red, [food_x, food_y, food_size, food_size]) # Making Food for Snake using Rectangle
text_screen("Score: " + str(score ) + " HighScore: " + str(hiscore), red, 5, 5)
head = [] # for the starting of the game
head.append(snake_x)
head.append(snake_y)
snk_list.append(head)
if len(snk_list)>snk_length:
del snk_list[0]
if head in snk_list[:-1]:
game_over = True
            if snake_x<0 or snake_x>screen_width or snake_y<0 or snake_y>screen_height:
game_over = True
# print("Game Over!!")
# pygame.draw.rect(gameWindow, black, [snake_x, snake_y, snake_size, snake_size]) # Making Head of Snake using Rectangle
plot_snake(gameWindow, black, snk_list, snake_size)
pygame.display.update() # Need to update display cause we have made changes to gameWindow
clock.tick(fps)
pygame.quit()
quit()
welcome() |
py | 1a3cc789ba22c6a41f72d0100c807b66e53dd971 | import sys
import re
import nltk
from nltk.stem.porter import *
from sklearn.feature_extraction import stop_words
import xml.etree.cElementTree as ET
from collections import Counter
import string
from sklearn.feature_extraction.text import TfidfVectorizer
import zipfile
import os
PARTIALS = False
def gettext(xmltext):
"""
Parse xmltext and return the text from <title> and <text> tags
"""
    xmltext = xmltext.encode('ascii', 'ignore') # ensure there are no weird char
    # Minimal completion (assumes Reuters-style markup where paragraphs are <p>
    # children of a <text> element; adjust if the corpus markup differs).
    root = ET.fromstring(xmltext)
    pieces = []
    title = root.find('.//title')
    if title is not None and title.text:
        pieces.append(title.text)
    for p in root.iterfind('.//text/p'):
        if p.text:
            pieces.append(p.text)
    return ' '.join(pieces)
def tokenize(text):
"""
Tokenize text and return a non-unique list of tokenized words
found in the text. Normalize to lowercase, strip punctuation,
remove stop words, drop words of length < 3, strip digits.
"""
text = text.lower()
text = re.sub('[' + string.punctuation + '0-9\\r\\t\\n]', ' ', text)
tokens = nltk.word_tokenize(text)
tokens = [w for w in tokens if len(w) > 2] # ignore a, an, to, at, be, ...
    tokens = [w for w in tokens if w not in stop_words.ENGLISH_STOP_WORDS]
    return tokens
def stemwords(words):
"""
Given a list of tokens/words, return a new list with each word
stemmed using a PorterStemmer.
"""
def tokenizer(text):
return stemwords(tokenize(text))
def compute_tfidf(corpus):
"""
Create and return a TfidfVectorizer object after training it on
the list of articles pulled from the corpus dictionary. Meaning,
call fit() on the list of document strings, which figures out
all the inverse document frequencies (IDF) for use later by
the transform() function. The corpus argument is a dictionary
mapping file name to xml text.
"""
def summarize(tfidf, text, n):
"""
Given a trained TfidfVectorizer object and some XML text, return
up to n (word,score) pairs in a list. Discard any terms with
scores < 0.09.
"""
def load_corpus(zipfilename):
"""
Given a zip file containing root directory reuters-vol1-disk1-subset
and a bunch of *.xml files, read them from the zip file into
a dictionary of (filename,xmltext) associations. Use namelist() from
ZipFile object to get list of xml files in that zip file.
Convert filename reuters-vol1-disk1-subset/foo.xml to foo.xml
as the keys in the dictionary. The values in the dictionary are the
raw XML text from the various files.
"""
|
py | 1a3cc7ff5af721207224c4cfd5901935f0fb3682 | import cv2
import numpy as np
from skimage.measure import compare_ssim as ssim
def get_mse_psnr(x, y):
if x.ndim == 4:
mse_list = []
psnr_list = []
data_len = len(x)
for k in range(data_len):
img = x[k]
ref = y[k]
mse = np.mean((img - ref) ** 2)
psnr = 10 * np.log10(1. / mse)
mse_list.append(mse)
psnr_list.append(psnr)
return np.asarray(mse_list), np.asarray(psnr_list)
elif x.ndim == 3:
mse = np.mean((x - y) ** 2)
psnr = 10 * np.log10(1. / mse)
return mse, psnr
else:
raise ValueError('Invalid data!')
def get_ssim(x, y):
"""
:param x: input
:param y: reference
:return: SSIM
"""
if x.ndim == 4:
ssim_list = []
data_len = len(x)
for k in range(data_len):
img = x[k]
ref = y[k]
ssim_value = ssim(ref, img, data_range=img.max()-img.min(),
multichannel=True)
ssim_list.append(ssim_value)
return np.asarray(ssim_list)
elif x.ndim == 3:
ssim_value = ssim(y, x, data_range=x.max() - x.min(),
multichannel=True)
return ssim_value
def patch2image(patches, shape):
""" turn patches into an image
:param patches: patches of shape N x H x W x C
:param shape: a tuple (N_H, N_W)
:return: image of shape (N_H x H) x (N_W x W) x C
"""
num_patch = len(patches)
assert num_patch > 0
if num_patch < shape[0] * shape[1]:
patches_shape = patches.shape
patches_zero = np.zeros(
(shape[0] * shape[1] - num_patch,) + patches_shape[1:]
)
patches = np.concatenate((patches, patches_zero))
image = patches.reshape(tuple(shape) + patches.shape[1:])
image = np.swapaxes(image, axis1=1, axis2=2)
img_shape = image.shape
out_shape = (img_shape[0] * img_shape[1], img_shape[2] * img_shape[3],
img_shape[4])
image = image.reshape(out_shape)
return image
def create_patch_mask(size, edge):
""" create a map with all ones, pixels near edge approach 0 linearly
:param size: (h, w)
:param edge: (eh, ew)
:return: a edge_map of size (h, w)
"""
h, w = size
eh, ew = edge
assert eh <= h//2, ew <= w//2
edge_map = np.ones((h, w), dtype=np.float32)
for idx_h in range(eh):
edge_map[idx_h, :] = 1. * (idx_h + 1) / (eh + 1)
edge_map[-1 - idx_h, :] = 1. * (idx_h + 1) / (eh + 1)
for idx_w in range(ew):
temp_column = np.ones((h, ), dtype=np.float32) * (idx_w + 1) / (ew + 1)
edge_map[:, idx_w] = np.minimum(edge_map[:, idx_w], temp_column)
edge_map[:, -1 - idx_w] = np.minimum(
edge_map[:, -1 - idx_w], temp_column
)
return edge_map
def whole2patch(img, size, stride, is_mask=True):
""" split a whole image to overlapped patches
:param img: an input color image
:param size: (h, w), size of each patch
:param stride: (sh, sw), stride of each patch
:param is_mask: use edge mask or not
:return: (patches, positions, count_map)
"""
h, w = size
sh, sw = stride
H, W, C = img.shape
assert sh <= h <= H and sw <= w <= W and C==3
count_map = np.zeros((H, W), dtype=np.float32)
if is_mask:
eh = (h - sh) // 2
ew = (w - sw) // 2
mask = create_patch_mask((h, w), (eh, ew))
# crop
patches = []
positions = []
h_list = list(range(0, H-h, sh)) + [H-h]
w_list = list(range(0, W-w, sw)) + [W-w]
for idx_h in h_list:
for idx_w in w_list:
# position
positions.append([idx_h, idx_w])
# count map
if is_mask:
count_map[idx_h: idx_h + h, idx_w: idx_w + w] += mask
else:
count_map[idx_h: idx_h + h, idx_w: idx_w + w] += 1
# patches
patches.append(img[idx_h: idx_h + h, idx_w: idx_w + w, :])
positions = np.asarray(positions)
patches = np.asarray(patches)
return patches, positions, count_map
def patch2whole(patches, positions, count_map, stride, is_mask=True):
""" this is the inverse function of `whole2patch`
:param patches: cropped patches
:param positions: position for each cropped patch
:param count_map: how many times each pixel is counted
:param stride: (sw, sh)
:param is_mask: whether the count map is calculated with edge mask
:return: image
"""
H, W = count_map.shape # image shape
h, w = patches.shape[1:3] # patch shape
if is_mask:
sh, sw = stride
eh = (h - sh) // 2
ew = (w - sw) // 2
mask = create_patch_mask((h, w), (eh, ew))
mask = np.repeat(np.expand_dims(mask, axis=2), 3, axis=2)
image = np.zeros((H, W, 3), dtype=np.float32)
for patch, pos in zip(patches, positions):
idx_h, idx_w = pos
if is_mask:
image[idx_h: idx_h + h, idx_w: idx_w + w, :] += patch * mask
else:
image[idx_h: idx_h + h, idx_w: idx_w + w, :] += patch
image /= np.repeat(np.expand_dims(count_map, axis=2), 3, axis=2)
return image
def load_images(list_in, list_gt, size=63):
""" load images
:param list_in: input image list
:param list_gt: label image list
:param size: image size
:return: (input, label), RGB images
"""
assert len(list_in) == len(list_gt)
img_num = len(list_in)
imgs_in = np.zeros([img_num, size, size, 3])
imgs_gt = np.zeros([img_num, size, size, 3])
for k in range(img_num):
imgs_in[k, ...] = cv2.imread(list_in[k])[:, :, ::-1] / 255.
imgs_gt[k, ...] = cv2.imread(list_gt[k])[:, :, ::-1] / 255.
return imgs_in, imgs_gt
|
py | 1a3cc88691adc250dcc6f17b7317e0103d4ca69e | import os
import socket
def getIP():
    # Determine the host's outbound IP by "connecting" a UDP socket to a public
    # address; for SOCK_DGRAM this sends no packets, it only selects a route.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(('8.8.8.8', 1))
        return s.getsockname()[0]
    finally:
        s.close()
lsquic_dir = os.path.expanduser('~/oqs/lsquic')
#key_crt_dir = os.path.expanduser('~/SERVER_RSA_FILES/key_crt.pem')
#key_srv_dir = os.path.expanduser('~/SERVER_RSA_FILES/key_srv.pem')
key_crt_dir = f'{lsquic_dir}/certs/secp128r1/key_crt.pem'
key_srv_dir = f'{lsquic_dir}/certs/secp128r1/key_srv.pem'
print(f'Key CRT Directory: {key_crt_dir}')
print(f'Key SRV Directory: {key_srv_dir}')
server_ip = getIP()
print()
print(f'Server IP: {server_ip}')
print()
#myCmd= f'{lsquic_dir}/build/bin/./http_server -L debug -c www.example.com,{key_crt_dir},{key_srv_dir} -s {server_ip}:4433 -p /'
myCmd= f'{lsquic_dir}/build/bin/./http_server -c www.example.com,{key_crt_dir},{key_srv_dir} -s {server_ip}:4433 -p /'
os.system(myCmd)
|
py | 1a3cc890bf7ec57e3666702c2cda5c40d74fb21d | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dialogflow_v2.services.versions import VersionsAsyncClient
from google.cloud.dialogflow_v2.services.versions import VersionsClient
from google.cloud.dialogflow_v2.services.versions import pagers
from google.cloud.dialogflow_v2.services.versions import transports
from google.cloud.dialogflow_v2.types import version
from google.cloud.dialogflow_v2.types import version as gcd_version
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert VersionsClient._get_default_mtls_endpoint(None) is None
assert VersionsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert (
VersionsClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
VersionsClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
VersionsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert VersionsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [VersionsClient, VersionsAsyncClient,])
def test_versions_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dialogflow.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.VersionsGrpcTransport, "grpc"),
(transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_versions_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [VersionsClient, VersionsAsyncClient,])
def test_versions_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dialogflow.googleapis.com:443"
def test_versions_client_get_transport_class():
transport = VersionsClient.get_transport_class()
available_transports = [
transports.VersionsGrpcTransport,
]
assert transport in available_transports
transport = VersionsClient.get_transport_class("grpc")
assert transport == transports.VersionsGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(VersionsClient, transports.VersionsGrpcTransport, "grpc"),
(VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
@mock.patch.object(
VersionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsClient)
)
@mock.patch.object(
VersionsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(VersionsAsyncClient),
)
def test_versions_client_client_options(client_class, transport_class, transport_name):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(VersionsClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(VersionsClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(VersionsClient, transports.VersionsGrpcTransport, "grpc", "true"),
(
VersionsAsyncClient,
transports.VersionsGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(VersionsClient, transports.VersionsGrpcTransport, "grpc", "false"),
(
VersionsAsyncClient,
transports.VersionsGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
VersionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsClient)
)
@mock.patch.object(
VersionsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(VersionsAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_versions_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class", [VersionsClient, VersionsAsyncClient])
@mock.patch.object(
VersionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsClient)
)
@mock.patch.object(
VersionsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(VersionsAsyncClient),
)
def test_versions_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(VersionsClient, transports.VersionsGrpcTransport, "grpc"),
(VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_versions_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(VersionsClient, transports.VersionsGrpcTransport, "grpc", grpc_helpers),
(
VersionsAsyncClient,
transports.VersionsGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_versions_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_versions_client_client_options_from_dict():
with mock.patch(
"google.cloud.dialogflow_v2.services.versions.transports.VersionsGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = VersionsClient(client_options={"api_endpoint": "squid.clam.whelk"})
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(VersionsClient, transports.VersionsGrpcTransport, "grpc", grpc_helpers),
(
VersionsAsyncClient,
transports.VersionsGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_versions_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# test that the credentials from file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"dialogflow.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
scopes=None,
default_host="dialogflow.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize("request_type", [version.ListVersionsRequest, dict,])
def test_list_versions(request_type, transport: str = "grpc"):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = version.ListVersionsResponse(
next_page_token="next_page_token_value",
)
response = client.list_versions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == version.ListVersionsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListVersionsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_versions_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
client.list_versions()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == version.ListVersionsRequest()
@pytest.mark.asyncio
async def test_list_versions_async(
transport: str = "grpc_asyncio", request_type=version.ListVersionsRequest
):
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
version.ListVersionsResponse(next_page_token="next_page_token_value",)
)
response = await client.list_versions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == version.ListVersionsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListVersionsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_versions_async_from_dict():
await test_list_versions_async(request_type=dict)
def test_list_versions_field_headers():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = version.ListVersionsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
call.return_value = version.ListVersionsResponse()
client.list_versions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_versions_field_headers_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = version.ListVersionsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
version.ListVersionsResponse()
)
await client.list_versions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_versions_flattened():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = version.ListVersionsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_versions(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_versions_flattened_error():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_versions(
version.ListVersionsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_versions_flattened_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
version.ListVersionsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_versions(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_versions_flattened_error_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_versions(
version.ListVersionsRequest(), parent="parent_value",
)
def test_list_versions_pager(transport_name: str = "grpc"):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
version.ListVersionsResponse(
versions=[version.Version(), version.Version(), version.Version(),],
next_page_token="abc",
),
version.ListVersionsResponse(versions=[], next_page_token="def",),
version.ListVersionsResponse(
versions=[version.Version(),], next_page_token="ghi",
),
version.ListVersionsResponse(
versions=[version.Version(), version.Version(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_versions(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, version.Version) for i in results)
def test_list_versions_pages(transport_name: str = "grpc"):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
version.ListVersionsResponse(
versions=[version.Version(), version.Version(), version.Version(),],
next_page_token="abc",
),
version.ListVersionsResponse(versions=[], next_page_token="def",),
version.ListVersionsResponse(
versions=[version.Version(),], next_page_token="ghi",
),
version.ListVersionsResponse(
versions=[version.Version(), version.Version(),],
),
RuntimeError,
)
pages = list(client.list_versions(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_versions_async_pager():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_versions), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
version.ListVersionsResponse(
versions=[version.Version(), version.Version(), version.Version(),],
next_page_token="abc",
),
version.ListVersionsResponse(versions=[], next_page_token="def",),
version.ListVersionsResponse(
versions=[version.Version(),], next_page_token="ghi",
),
version.ListVersionsResponse(
versions=[version.Version(), version.Version(),],
),
RuntimeError,
)
async_pager = await client.list_versions(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, version.Version) for i in responses)
@pytest.mark.asyncio
async def test_list_versions_async_pages():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_versions), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
version.ListVersionsResponse(
versions=[version.Version(), version.Version(), version.Version(),],
next_page_token="abc",
),
version.ListVersionsResponse(versions=[], next_page_token="def",),
version.ListVersionsResponse(
versions=[version.Version(),], next_page_token="ghi",
),
version.ListVersionsResponse(
versions=[version.Version(), version.Version(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_versions(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [version.GetVersionRequest, dict,])
def test_get_version(request_type, transport: str = "grpc"):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = version.Version(
name="name_value",
description="description_value",
version_number=1518,
status=version.Version.VersionStatus.IN_PROGRESS,
)
response = client.get_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == version.GetVersionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, version.Version)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.version_number == 1518
assert response.status == version.Version.VersionStatus.IN_PROGRESS
def test_get_version_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_version), "__call__") as call:
client.get_version()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == version.GetVersionRequest()
@pytest.mark.asyncio
async def test_get_version_async(
transport: str = "grpc_asyncio", request_type=version.GetVersionRequest
):
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
version.Version(
name="name_value",
description="description_value",
version_number=1518,
status=version.Version.VersionStatus.IN_PROGRESS,
)
)
response = await client.get_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == version.GetVersionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, version.Version)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.version_number == 1518
assert response.status == version.Version.VersionStatus.IN_PROGRESS
@pytest.mark.asyncio
async def test_get_version_async_from_dict():
await test_get_version_async(request_type=dict)
def test_get_version_field_headers():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = version.GetVersionRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_version), "__call__") as call:
call.return_value = version.Version()
client.get_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_version_field_headers_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = version.GetVersionRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_version), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(version.Version())
await client.get_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_version_flattened():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = version.Version()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_version(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_version_flattened_error():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_version(
version.GetVersionRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_version_flattened_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(version.Version())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_version(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_version_flattened_error_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_version(
version.GetVersionRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [gcd_version.CreateVersionRequest, dict,])
def test_create_version(request_type, transport: str = "grpc"):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_version.Version(
name="name_value",
description="description_value",
version_number=1518,
status=gcd_version.Version.VersionStatus.IN_PROGRESS,
)
response = client.create_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_version.CreateVersionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_version.Version)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.version_number == 1518
assert response.status == gcd_version.Version.VersionStatus.IN_PROGRESS
def test_create_version_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_version), "__call__") as call:
client.create_version()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_version.CreateVersionRequest()
@pytest.mark.asyncio
async def test_create_version_async(
transport: str = "grpc_asyncio", request_type=gcd_version.CreateVersionRequest
):
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_version.Version(
name="name_value",
description="description_value",
version_number=1518,
status=gcd_version.Version.VersionStatus.IN_PROGRESS,
)
)
response = await client.create_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_version.CreateVersionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_version.Version)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.version_number == 1518
assert response.status == gcd_version.Version.VersionStatus.IN_PROGRESS
@pytest.mark.asyncio
async def test_create_version_async_from_dict():
await test_create_version_async(request_type=dict)
def test_create_version_field_headers():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_version.CreateVersionRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_version), "__call__") as call:
call.return_value = gcd_version.Version()
client.create_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_version_field_headers_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_version.CreateVersionRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_version), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_version.Version())
await client.create_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_version_flattened():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_version.Version()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_version(
parent="parent_value", version=gcd_version.Version(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].version
mock_val = gcd_version.Version(name="name_value")
assert arg == mock_val
def test_create_version_flattened_error():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_version(
gcd_version.CreateVersionRequest(),
parent="parent_value",
version=gcd_version.Version(name="name_value"),
)
@pytest.mark.asyncio
async def test_create_version_flattened_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_version.Version())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_version(
parent="parent_value", version=gcd_version.Version(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].version
mock_val = gcd_version.Version(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_version_flattened_error_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_version(
gcd_version.CreateVersionRequest(),
parent="parent_value",
version=gcd_version.Version(name="name_value"),
)
@pytest.mark.parametrize("request_type", [gcd_version.UpdateVersionRequest, dict,])
def test_update_version(request_type, transport: str = "grpc"):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_version.Version(
name="name_value",
description="description_value",
version_number=1518,
status=gcd_version.Version.VersionStatus.IN_PROGRESS,
)
response = client.update_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_version.UpdateVersionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_version.Version)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.version_number == 1518
assert response.status == gcd_version.Version.VersionStatus.IN_PROGRESS
def test_update_version_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_version), "__call__") as call:
client.update_version()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_version.UpdateVersionRequest()
@pytest.mark.asyncio
async def test_update_version_async(
transport: str = "grpc_asyncio", request_type=gcd_version.UpdateVersionRequest
):
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_version.Version(
name="name_value",
description="description_value",
version_number=1518,
status=gcd_version.Version.VersionStatus.IN_PROGRESS,
)
)
response = await client.update_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_version.UpdateVersionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_version.Version)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.version_number == 1518
assert response.status == gcd_version.Version.VersionStatus.IN_PROGRESS
@pytest.mark.asyncio
async def test_update_version_async_from_dict():
await test_update_version_async(request_type=dict)
def test_update_version_field_headers():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_version.UpdateVersionRequest()
request.version.name = "version.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_version), "__call__") as call:
call.return_value = gcd_version.Version()
client.update_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "version.name=version.name/value",) in kw[
"metadata"
]
@pytest.mark.asyncio
async def test_update_version_field_headers_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_version.UpdateVersionRequest()
request.version.name = "version.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_version), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_version.Version())
await client.update_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "version.name=version.name/value",) in kw[
"metadata"
]
def test_update_version_flattened():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_version.Version()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_version(
version=gcd_version.Version(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].version
mock_val = gcd_version.Version(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
def test_update_version_flattened_error():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_version(
gcd_version.UpdateVersionRequest(),
version=gcd_version.Version(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_version_flattened_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_version.Version())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_version(
version=gcd_version.Version(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].version
mock_val = gcd_version.Version(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_version_flattened_error_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_version(
gcd_version.UpdateVersionRequest(),
version=gcd_version.Version(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.parametrize("request_type", [version.DeleteVersionRequest, dict,])
def test_delete_version(request_type, transport: str = "grpc"):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == version.DeleteVersionRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_version_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
client.delete_version()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == version.DeleteVersionRequest()
@pytest.mark.asyncio
async def test_delete_version_async(
transport: str = "grpc_asyncio", request_type=version.DeleteVersionRequest
):
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == version.DeleteVersionRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_version_async_from_dict():
await test_delete_version_async(request_type=dict)
def test_delete_version_field_headers():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = version.DeleteVersionRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
call.return_value = None
client.delete_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_version_field_headers_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = version.DeleteVersionRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_version_flattened():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_version(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_version_flattened_error():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_version(
version.DeleteVersionRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_version_flattened_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_version(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_version_flattened_error_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_version(
version.DeleteVersionRequest(), name="name_value",
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.VersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.VersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = VersionsClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.VersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = VersionsClient(client_options=options, transport=transport,)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = VersionsClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.VersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = VersionsClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.VersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = VersionsClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.VersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.VersionsGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport,],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.VersionsGrpcTransport,)
def test_versions_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.VersionsTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_versions_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.dialogflow_v2.services.versions.transports.VersionsTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.VersionsTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"list_versions",
"get_version",
"create_version",
"update_version",
"delete_version",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_versions_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.dialogflow_v2.services.versions.transports.VersionsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.VersionsTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
def test_versions_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.dialogflow_v2.services.versions.transports.VersionsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.VersionsTransport()
adc.assert_called_once()
def test_versions_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
VersionsClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport,],
)
def test_versions_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.VersionsGrpcTransport, grpc_helpers),
(transports.VersionsGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_versions_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"dialogflow.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
scopes=["1", "2"],
default_host="dialogflow.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport],
)
def test_versions_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_versions_host_no_port():
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com"
),
)
assert client.transport._host == "dialogflow.googleapis.com:443"
def test_versions_host_with_port():
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com:8000"
),
)
assert client.transport._host == "dialogflow.googleapis.com:8000"
def test_versions_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.VersionsGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials == None
def test_versions_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.VersionsGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials == None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport],
)
def test_versions_transport_channel_mtls_with_client_cert_source(transport_class):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport],
)
def test_versions_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_version_path():
project = "squid"
version = "clam"
expected = "projects/{project}/agent/versions/{version}".format(
project=project, version=version,
)
actual = VersionsClient.version_path(project, version)
assert expected == actual
def test_parse_version_path():
expected = {
"project": "whelk",
"version": "octopus",
}
path = VersionsClient.version_path(**expected)
# Check that the path construction is reversible.
actual = VersionsClient.parse_version_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "oyster"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = VersionsClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nudibranch",
}
path = VersionsClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = VersionsClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "cuttlefish"
expected = "folders/{folder}".format(folder=folder,)
actual = VersionsClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "mussel",
}
path = VersionsClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = VersionsClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "winkle"
expected = "organizations/{organization}".format(organization=organization,)
actual = VersionsClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nautilus",
}
path = VersionsClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = VersionsClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "scallop"
expected = "projects/{project}".format(project=project,)
actual = VersionsClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "abalone",
}
path = VersionsClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = VersionsClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "squid"
location = "clam"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = VersionsClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "whelk",
"location": "octopus",
}
path = VersionsClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = VersionsClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.VersionsTransport, "_prep_wrapped_messages"
) as prep:
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.VersionsTransport, "_prep_wrapped_messages"
) as prep:
transport_class = VersionsClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(VersionsClient, transports.VersionsGrpcTransport),
(VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
|
py | 1a3cca6df05061179bf824d1d05e1fc269531854 | """Support for deCONZ lights."""
from __future__ import annotations
from pydeconz.group import DeconzGroup as Group
from pydeconz.light import (
ALERT_LONG,
ALERT_SHORT,
EFFECT_COLOR_LOOP,
EFFECT_NONE,
Light,
)
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_FLASH,
ATTR_HS_COLOR,
ATTR_TRANSITION,
ATTR_XY_COLOR,
COLOR_MODE_BRIGHTNESS,
COLOR_MODE_COLOR_TEMP,
COLOR_MODE_HS,
COLOR_MODE_ONOFF,
COLOR_MODE_XY,
DOMAIN,
EFFECT_COLORLOOP,
FLASH_LONG,
FLASH_SHORT,
SUPPORT_EFFECT,
SUPPORT_FLASH,
SUPPORT_TRANSITION,
LightEntity,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.util.color import color_hs_to_xy
from .const import DOMAIN as DECONZ_DOMAIN, NEW_GROUP, NEW_LIGHT, POWER_PLUGS
from .deconz_device import DeconzDevice
from .gateway import get_gateway_from_config_entry
DECONZ_GROUP = "is_deconz_group"
EFFECT_TO_DECONZ = {EFFECT_COLORLOOP: EFFECT_COLOR_LOOP, "None": EFFECT_NONE}
FLASH_TO_DECONZ = {FLASH_SHORT: ALERT_SHORT, FLASH_LONG: ALERT_LONG}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the deCONZ lights and groups from a config entry."""
gateway = get_gateway_from_config_entry(hass, config_entry)
gateway.entities[DOMAIN] = set()
@callback
def async_add_light(lights=gateway.api.lights.values()):
"""Add light from deCONZ."""
entities = []
for light in lights:
if (
isinstance(light, Light)
and light.type not in POWER_PLUGS
and light.unique_id not in gateway.entities[DOMAIN]
):
entities.append(DeconzLight(light, gateway))
if entities:
async_add_entities(entities)
config_entry.async_on_unload(
async_dispatcher_connect(
hass, gateway.async_signal_new_device(NEW_LIGHT), async_add_light
)
)
@callback
def async_add_group(groups=gateway.api.groups.values()):
"""Add group from deCONZ."""
if not gateway.option_allow_deconz_groups:
return
entities = []
for group in groups:
if not group.lights:
continue
known_groups = set(gateway.entities[DOMAIN])
new_group = DeconzGroup(group, gateway)
if new_group.unique_id not in known_groups:
entities.append(new_group)
if entities:
async_add_entities(entities)
config_entry.async_on_unload(
async_dispatcher_connect(
hass, gateway.async_signal_new_device(NEW_GROUP), async_add_group
)
)
async_add_light()
async_add_group()
class DeconzBaseLight(DeconzDevice, LightEntity):
"""Representation of a deCONZ light."""
TYPE = DOMAIN
def __init__(self, device, gateway):
"""Set up light."""
super().__init__(device, gateway)
self._attr_supported_color_modes = set()
if device.color_temp is not None:
self._attr_supported_color_modes.add(COLOR_MODE_COLOR_TEMP)
if device.hue is not None and device.saturation is not None:
self._attr_supported_color_modes.add(COLOR_MODE_HS)
if device.xy is not None:
self._attr_supported_color_modes.add(COLOR_MODE_XY)
if not self._attr_supported_color_modes and device.brightness is not None:
self._attr_supported_color_modes.add(COLOR_MODE_BRIGHTNESS)
if not self._attr_supported_color_modes:
self._attr_supported_color_modes.add(COLOR_MODE_ONOFF)
if device.brightness is not None:
self._attr_supported_features |= SUPPORT_FLASH
self._attr_supported_features |= SUPPORT_TRANSITION
if device.effect is not None:
self._attr_supported_features |= SUPPORT_EFFECT
self._attr_effect_list = [EFFECT_COLORLOOP]
@property
def color_mode(self) -> str:
"""Return the color mode of the light."""
if self._device.color_mode == "ct":
color_mode = COLOR_MODE_COLOR_TEMP
elif self._device.color_mode == "hs":
color_mode = COLOR_MODE_HS
elif self._device.color_mode == "xy":
color_mode = COLOR_MODE_XY
elif self._device.brightness is not None:
color_mode = COLOR_MODE_BRIGHTNESS
else:
color_mode = COLOR_MODE_ONOFF
return color_mode
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._device.brightness
@property
def color_temp(self):
"""Return the CT color value."""
return self._device.color_temp
@property
def hs_color(self) -> tuple:
"""Return the hs color value."""
return (self._device.hue / 65535 * 360, self._device.saturation / 255 * 100)
@property
def xy_color(self) -> tuple | None:
"""Return the XY color value."""
return self._device.xy
@property
def is_on(self):
"""Return true if light is on."""
return self._device.state
async def async_turn_on(self, **kwargs):
"""Turn on light."""
data = {"on": True}
if ATTR_BRIGHTNESS in kwargs:
data["brightness"] = kwargs[ATTR_BRIGHTNESS]
if ATTR_COLOR_TEMP in kwargs:
data["color_temperature"] = kwargs[ATTR_COLOR_TEMP]
if ATTR_HS_COLOR in kwargs:
if COLOR_MODE_XY in self._attr_supported_color_modes:
data["xy"] = color_hs_to_xy(*kwargs[ATTR_HS_COLOR])
else:
data["hue"] = int(kwargs[ATTR_HS_COLOR][0] / 360 * 65535)
data["saturation"] = int(kwargs[ATTR_HS_COLOR][1] / 100 * 255)
if ATTR_XY_COLOR in kwargs:
data["xy"] = kwargs[ATTR_XY_COLOR]
if ATTR_TRANSITION in kwargs:
data["transition_time"] = int(kwargs[ATTR_TRANSITION] * 10)
elif "IKEA" in self._device.manufacturer:
data["transition_time"] = 0
if (alert := FLASH_TO_DECONZ.get(kwargs.get(ATTR_FLASH))) is not None:
data["alert"] = alert
del data["on"]
if (effect := EFFECT_TO_DECONZ.get(kwargs.get(ATTR_EFFECT))) is not None:
data["effect"] = effect
await self._device.set_state(**data)
async def async_turn_off(self, **kwargs):
"""Turn off light."""
if not self._device.state:
return
data = {"on": False}
if ATTR_TRANSITION in kwargs:
data["brightness"] = 0
data["transition_time"] = int(kwargs[ATTR_TRANSITION] * 10)
if (alert := FLASH_TO_DECONZ.get(kwargs.get(ATTR_FLASH))) is not None:
data["alert"] = alert
del data["on"]
await self._device.set_state(**data)
@property
def extra_state_attributes(self):
"""Return the device state attributes."""
return {DECONZ_GROUP: isinstance(self._device, Group)}
class DeconzLight(DeconzBaseLight):
"""Representation of a deCONZ light."""
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
return self._device.max_color_temp or super().max_mireds
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
return self._device.min_color_temp or super().min_mireds
class DeconzGroup(DeconzBaseLight):
"""Representation of a deCONZ group."""
def __init__(self, device, gateway):
"""Set up group and create an unique id."""
self._unique_id = f"{gateway.bridgeid}-{device.deconz_id}"
super().__init__(device, gateway)
@property
def unique_id(self):
"""Return a unique identifier for this device."""
return self._unique_id
@property
def device_info(self):
"""Return a device description for device registry."""
return {
"identifiers": {(DECONZ_DOMAIN, self.unique_id)},
"manufacturer": "Dresden Elektronik",
"model": "deCONZ group",
"name": self._device.name,
"via_device": (DECONZ_DOMAIN, self.gateway.api.config.bridge_id),
}
@property
def extra_state_attributes(self):
"""Return the device state attributes."""
attributes = dict(super().extra_state_attributes)
attributes["all_on"] = self._device.all_on
return attributes
|
py | 1a3ccb263dae3151313422ef6aa4c5940e7e46ce | from typing import List, Tuple
from mock import Mock
from synapse.events import EventBase
from synapse.federation.sender import PerDestinationQueue, TransactionManager
from synapse.federation.units import Edu
from synapse.rest import admin
from synapse.rest.client.v1 import login, room
from tests.test_utils import event_injection, make_awaitable
from tests.unittest import FederatingHomeserverTestCase, override_config
class FederationCatchUpTestCases(FederatingHomeserverTestCase):
servlets = [
admin.register_servlets,
room.register_servlets,
login.register_servlets,
]
def make_homeserver(self, reactor, clock):
return self.setup_test_homeserver(
federation_transport_client=Mock(spec=["send_transaction"]),
)
def prepare(self, reactor, clock, hs):
# stub out get_current_hosts_in_room
state_handler = hs.get_state_handler()
# This mock is crucial for destination_rooms to be populated.
state_handler.get_current_hosts_in_room = Mock(
return_value=make_awaitable(["test", "host2"])
)
# whenever send_transaction is called, record the pdu data
self.pdus = []
self.failed_pdus = []
self.is_online = True
self.hs.get_federation_transport_client().send_transaction.side_effect = (
self.record_transaction
)
async def record_transaction(self, txn, json_cb):
if self.is_online:
data = json_cb()
self.pdus.extend(data["pdus"])
return {}
else:
data = json_cb()
self.failed_pdus.extend(data["pdus"])
raise IOError("Failed to connect because this is a test!")
def get_destination_room(self, room: str, destination: str = "host2") -> dict:
"""
Gets the destination_rooms entry for a (destination, room_id) pair.
Args:
room: room ID
destination: what destination, default is "host2"
Returns:
Dictionary of { event_id: str, stream_ordering: int }
"""
event_id, stream_ordering = self.get_success(
self.hs.get_datastore().db_pool.execute(
"test:get_destination_rooms",
None,
"""
SELECT event_id, stream_ordering
FROM destination_rooms dr
JOIN events USING (stream_ordering)
WHERE dr.destination = ? AND dr.room_id = ?
""",
destination,
room,
)
)[0]
return {"event_id": event_id, "stream_ordering": stream_ordering}
@override_config({"send_federation": True})
def test_catch_up_destination_rooms_tracking(self):
"""
Tests that we populate the `destination_rooms` table as needed.
"""
self.register_user("u1", "you the one")
u1_token = self.login("u1", "you the one")
room = self.helper.create_room_as("u1", tok=u1_token)
self.get_success(
event_injection.inject_member_event(self.hs, room, "@user:host2", "join")
)
event_id_1 = self.helper.send(room, "wombats!", tok=u1_token)["event_id"]
row_1 = self.get_destination_room(room)
event_id_2 = self.helper.send(room, "rabbits!", tok=u1_token)["event_id"]
row_2 = self.get_destination_room(room)
# check: events correctly registered in order
self.assertEqual(row_1["event_id"], event_id_1)
self.assertEqual(row_2["event_id"], event_id_2)
self.assertEqual(row_1["stream_ordering"], row_2["stream_ordering"] - 1)
@override_config({"send_federation": True})
def test_catch_up_last_successful_stream_ordering_tracking(self):
"""
Tests that we populate the `destination_rooms` table as needed.
"""
self.register_user("u1", "you the one")
u1_token = self.login("u1", "you the one")
room = self.helper.create_room_as("u1", tok=u1_token)
# take the remote offline
self.is_online = False
self.get_success(
event_injection.inject_member_event(self.hs, room, "@user:host2", "join")
)
self.helper.send(room, "wombats!", tok=u1_token)
self.pump()
lsso_1 = self.get_success(
self.hs.get_datastore().get_destination_last_successful_stream_ordering(
"host2"
)
)
self.assertIsNone(
lsso_1,
"There should be no last successful stream ordering for an always-offline destination",
)
# bring the remote online
self.is_online = True
event_id_2 = self.helper.send(room, "rabbits!", tok=u1_token)["event_id"]
lsso_2 = self.get_success(
self.hs.get_datastore().get_destination_last_successful_stream_ordering(
"host2"
)
)
row_2 = self.get_destination_room(room)
self.assertEqual(
self.pdus[0]["content"]["body"],
"rabbits!",
"Test fault: didn't receive the right PDU",
)
self.assertEqual(
row_2["event_id"],
event_id_2,
"Test fault: destination_rooms not updated correctly",
)
self.assertEqual(
lsso_2,
row_2["stream_ordering"],
"Send succeeded but not marked as last_successful_stream_ordering",
)
@override_config({"send_federation": True}) # critical to federate
def test_catch_up_from_blank_state(self):
"""
Runs an overall test of federation catch-up from scratch.
Further tests will focus on more narrow aspects and edge-cases, but I
hope to provide an overall view with this test.
"""
# bring the other server online
self.is_online = True
# let's make some events for the other server to receive
self.register_user("u1", "you the one")
u1_token = self.login("u1", "you the one")
room_1 = self.helper.create_room_as("u1", tok=u1_token)
room_2 = self.helper.create_room_as("u1", tok=u1_token)
# also critical to federate
self.get_success(
event_injection.inject_member_event(self.hs, room_1, "@user:host2", "join")
)
self.get_success(
event_injection.inject_member_event(self.hs, room_2, "@user:host2", "join")
)
self.helper.send_state(
room_1, event_type="m.room.topic", body={"topic": "wombat"}, tok=u1_token
)
# check: PDU received for topic event
self.assertEqual(len(self.pdus), 1)
self.assertEqual(self.pdus[0]["type"], "m.room.topic")
# take the remote offline
self.is_online = False
# send another event
self.helper.send(room_1, "hi user!", tok=u1_token)
# check: things didn't go well since the remote is down
self.assertEqual(len(self.failed_pdus), 1)
self.assertEqual(self.failed_pdus[0]["content"]["body"], "hi user!")
# let's delete the federation transmission queue
# (this pretends we are starting up fresh.)
self.assertFalse(
self.hs.get_federation_sender()
._per_destination_queues["host2"]
.transmission_loop_running
)
del self.hs.get_federation_sender()._per_destination_queues["host2"]
# let's also clear any backoffs
self.get_success(
self.hs.get_datastore().set_destination_retry_timings("host2", None, 0, 0)
)
# bring the remote online and clear the received pdu list
self.is_online = True
self.pdus = []
# now we need to initiate a federation transaction somehow…
# to do that, let's send another event (because it's simple to do)
# (do it to another room otherwise the catch-up logic decides it doesn't
# need to catch up room_1 — something I overlooked when first writing
# this test)
self.helper.send(room_2, "wombats!", tok=u1_token)
# we should now have received both PDUs
self.assertEqual(len(self.pdus), 2)
self.assertEqual(self.pdus[0]["content"]["body"], "hi user!")
self.assertEqual(self.pdus[1]["content"]["body"], "wombats!")
def make_fake_destination_queue(
self, destination: str = "host2"
) -> Tuple[PerDestinationQueue, List[EventBase]]:
"""
Makes a fake per-destination queue.
"""
transaction_manager = TransactionManager(self.hs)
per_dest_queue = PerDestinationQueue(self.hs, transaction_manager, destination)
results_list = []
async def fake_send(
destination_tm: str,
pending_pdus: List[EventBase],
_pending_edus: List[Edu],
) -> bool:
assert destination == destination_tm
results_list.extend(pending_pdus)
return True # success!
transaction_manager.send_new_transaction = fake_send
return per_dest_queue, results_list
@override_config({"send_federation": True})
def test_catch_up_loop(self):
"""
Tests the behaviour of _catch_up_transmission_loop.
"""
# ARRANGE:
# - a local user (u1)
# - 3 rooms which u1 is joined to (and remote user @user:host2 is
# joined to)
# - some events (1 to 5) in those rooms
# we have 'already sent' events 1 and 2 to host2
per_dest_queue, sent_pdus = self.make_fake_destination_queue()
self.register_user("u1", "you the one")
u1_token = self.login("u1", "you the one")
room_1 = self.helper.create_room_as("u1", tok=u1_token)
room_2 = self.helper.create_room_as("u1", tok=u1_token)
room_3 = self.helper.create_room_as("u1", tok=u1_token)
self.get_success(
event_injection.inject_member_event(self.hs, room_1, "@user:host2", "join")
)
self.get_success(
event_injection.inject_member_event(self.hs, room_2, "@user:host2", "join")
)
self.get_success(
event_injection.inject_member_event(self.hs, room_3, "@user:host2", "join")
)
# create some events
self.helper.send(room_1, "you hear me!!", tok=u1_token)
event_id_2 = self.helper.send(room_2, "wombats!", tok=u1_token)["event_id"]
self.helper.send(room_3, "Matrix!", tok=u1_token)
event_id_4 = self.helper.send(room_2, "rabbits!", tok=u1_token)["event_id"]
event_id_5 = self.helper.send(room_3, "Synapse!", tok=u1_token)["event_id"]
# destination_rooms should already be populated, but let us pretend that we already
# sent (successfully) up to and including event id 2
event_2 = self.get_success(self.hs.get_datastore().get_event(event_id_2))
# also fetch event 5 so we know its last_successful_stream_ordering later
event_5 = self.get_success(self.hs.get_datastore().get_event(event_id_5))
self.get_success(
self.hs.get_datastore().set_destination_last_successful_stream_ordering(
"host2", event_2.internal_metadata.stream_ordering
)
)
# ACT
self.get_success(per_dest_queue._catch_up_transmission_loop())
# ASSERT, noticing in particular:
# - event 3 not sent out, because event 5 replaces it
# - order is least recent first, so event 5 comes after event 4
# - catch-up is completed
self.assertEqual(len(sent_pdus), 2)
self.assertEqual(sent_pdus[0].event_id, event_id_4)
self.assertEqual(sent_pdus[1].event_id, event_id_5)
self.assertFalse(per_dest_queue._catching_up)
self.assertEqual(
per_dest_queue._last_successful_stream_ordering,
event_5.internal_metadata.stream_ordering,
)
@override_config({"send_federation": True})
def test_catch_up_on_synapse_startup(self):
"""
Tests the behaviour of get_catch_up_outstanding_destinations and
_wake_destinations_needing_catchup.
"""
# list of sorted server names (note that there are more servers than the batch
# size used in get_catch_up_outstanding_destinations).
server_names = ["server%02d" % number for number in range(42)] + ["zzzerver"]
# ARRANGE:
# - a local user (u1)
# - a room which u1 is joined to (and remote users @user:serverXX are
# joined to)
# mark the remotes as online
self.is_online = True
self.register_user("u1", "you the one")
u1_token = self.login("u1", "you the one")
room_id = self.helper.create_room_as("u1", tok=u1_token)
for server_name in server_names:
self.get_success(
event_injection.inject_member_event(
self.hs, room_id, "@user:%s" % server_name, "join"
)
)
# create an event
self.helper.send(room_id, "deary me!", tok=u1_token)
# ASSERT:
# - All servers are up to date so none should have outstanding catch-up
outstanding_when_successful = self.get_success(
self.hs.get_datastore().get_catch_up_outstanding_destinations(None)
)
self.assertEqual(outstanding_when_successful, [])
# ACT:
# - Make the remote servers unreachable
self.is_online = False
# - Mark zzzerver as being backed-off from
now = self.clock.time_msec()
self.get_success(
self.hs.get_datastore().set_destination_retry_timings(
"zzzerver", now, now, 24 * 60 * 60 * 1000 # retry in 1 day
)
)
# - Send an event
self.helper.send(room_id, "can anyone hear me?", tok=u1_token)
# ASSERT (get_catch_up_outstanding_destinations):
# - all remotes are outstanding
# - they are returned in batches of 25, in order
outstanding_1 = self.get_success(
self.hs.get_datastore().get_catch_up_outstanding_destinations(None)
)
self.assertEqual(len(outstanding_1), 25)
self.assertEqual(outstanding_1, server_names[0:25])
outstanding_2 = self.get_success(
self.hs.get_datastore().get_catch_up_outstanding_destinations(
outstanding_1[-1]
)
)
self.assertNotIn("zzzerver", outstanding_2)
self.assertEqual(len(outstanding_2), 17)
self.assertEqual(outstanding_2, server_names[25:-1])
# ACT: call _wake_destinations_needing_catchup
# patch wake_destination to just count the destinations instead
woken = []
def wake_destination_track(destination):
woken.append(destination)
self.hs.get_federation_sender().wake_destination = wake_destination_track
# cancel the pre-existing timer for _wake_destinations_needing_catchup
# this is because we are calling it manually rather than waiting for it
# to be called automatically
self.hs.get_federation_sender()._catchup_after_startup_timer.cancel()
self.get_success(
self.hs.get_federation_sender()._wake_destinations_needing_catchup(), by=5.0
)
# ASSERT (_wake_destinations_needing_catchup):
# - all remotes are woken up, save for zzzerver
self.assertNotIn("zzzerver", woken)
# - all destinations are woken exactly once; they appear once in woken.
self.assertCountEqual(woken, server_names[:-1])
|
py | 1a3ccc14a8075454a2f7ea7cb7cee965bf8db181 | #!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test descriptor wallet function."""
from test_framework.test_framework import VIDCoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error
)
class WalletDescriptorTest(VIDCoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [['-keypool=100']]
self.wallet_names = []
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
self.skip_if_no_sqlite()
def run_test(self):
# Make a legacy wallet and check it is BDB
self.nodes[0].createwallet(wallet_name="legacy1", descriptors=False)
wallet_info = self.nodes[0].getwalletinfo()
assert_equal(wallet_info['format'], 'bdb')
self.nodes[0].unloadwallet("legacy1")
# Make a descriptor wallet
self.log.info("Making a descriptor wallet")
self.nodes[0].createwallet(wallet_name="desc1", descriptors=True)
# A descriptor wallet should have 100 addresses * 3 types = 300 keys
self.log.info("Checking wallet info")
wallet_info = self.nodes[0].getwalletinfo()
assert_equal(wallet_info['format'], 'sqlite')
assert_equal(wallet_info['keypoolsize'], 300)
assert_equal(wallet_info['keypoolsize_hd_internal'], 300)
assert 'keypoololdest' not in wallet_info
# Check that getnewaddress works
self.log.info("Test that getnewaddress and getrawchangeaddress work")
addr = self.nodes[0].getnewaddress("", "legacy")
addr_info = self.nodes[0].getaddressinfo(addr)
assert addr_info['desc'].startswith('pkh(')
assert_equal(addr_info['hdkeypath'], 'm/44\'/1\'/0\'/0/0')
addr = self.nodes[0].getnewaddress("", "p2sh-segwit")
addr_info = self.nodes[0].getaddressinfo(addr)
assert addr_info['desc'].startswith('sh(wpkh(')
assert_equal(addr_info['hdkeypath'], 'm/49\'/1\'/0\'/0/0')
addr = self.nodes[0].getnewaddress("", "bech32")
addr_info = self.nodes[0].getaddressinfo(addr)
assert addr_info['desc'].startswith('wpkh(')
assert_equal(addr_info['hdkeypath'], 'm/84\'/1\'/0\'/0/0')
# Check that getrawchangeaddress works
addr = self.nodes[0].getrawchangeaddress("legacy")
addr_info = self.nodes[0].getaddressinfo(addr)
assert addr_info['desc'].startswith('pkh(')
assert_equal(addr_info['hdkeypath'], 'm/44\'/1\'/0\'/1/0')
addr = self.nodes[0].getrawchangeaddress("p2sh-segwit")
addr_info = self.nodes[0].getaddressinfo(addr)
assert addr_info['desc'].startswith('sh(wpkh(')
assert_equal(addr_info['hdkeypath'], 'm/49\'/1\'/0\'/1/0')
addr = self.nodes[0].getrawchangeaddress("bech32")
addr_info = self.nodes[0].getaddressinfo(addr)
assert addr_info['desc'].startswith('wpkh(')
assert_equal(addr_info['hdkeypath'], 'm/84\'/1\'/0\'/1/0')
# Make a wallet to receive coins at
self.nodes[0].createwallet(wallet_name="desc2", descriptors=True)
recv_wrpc = self.nodes[0].get_wallet_rpc("desc2")
send_wrpc = self.nodes[0].get_wallet_rpc("desc1")
# Generate some coins
send_wrpc.generatetoaddress(101, send_wrpc.getnewaddress())
# Make transactions
self.log.info("Test sending and receiving")
addr = recv_wrpc.getnewaddress()
send_wrpc.sendtoaddress(addr, 10)
# Make sure things are disabled
self.log.info("Test disabled RPCs")
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.importprivkey, "cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW")
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.importpubkey, send_wrpc.getaddressinfo(send_wrpc.getnewaddress()))
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.importaddress, recv_wrpc.getnewaddress())
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.importmulti, [])
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.addmultisigaddress, 1, [recv_wrpc.getnewaddress()])
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.dumpprivkey, recv_wrpc.getnewaddress())
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.dumpwallet, 'wallet.dump')
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.importwallet, 'wallet.dump')
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.sethdseed)
self.log.info("Test encryption")
# Get the master fingerprint before encrypt
info1 = send_wrpc.getaddressinfo(send_wrpc.getnewaddress())
# Encrypt wallet 0
send_wrpc.encryptwallet('pass')
send_wrpc.walletpassphrase('pass', 10)
addr = send_wrpc.getnewaddress()
info2 = send_wrpc.getaddressinfo(addr)
assert info1['hdmasterfingerprint'] != info2['hdmasterfingerprint']
send_wrpc.walletlock()
assert 'hdmasterfingerprint' in send_wrpc.getaddressinfo(send_wrpc.getnewaddress())
info3 = send_wrpc.getaddressinfo(addr)
assert_equal(info2['desc'], info3['desc'])
self.log.info("Test that getnewaddress still works after keypool is exhausted in an encrypted wallet")
for _ in range(500):
send_wrpc.getnewaddress()
self.log.info("Test that unlock is needed when deriving only hardened keys in an encrypted wallet")
send_wrpc.walletpassphrase('pass', 10)
send_wrpc.importdescriptors([{
"desc": "wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/*h)#y4dfsj7n",
"timestamp": "now",
"range": [0,10],
"active": True
}])
send_wrpc.walletlock()
# Exhaust keypool of 100
for _ in range(100):
send_wrpc.getnewaddress(address_type='bech32')
# This should now error
assert_raises_rpc_error(-12, "Keypool ran out, please call keypoolrefill first", send_wrpc.getnewaddress, '', 'bech32')
self.log.info("Test born encrypted wallets")
self.nodes[0].createwallet('desc_enc', False, False, 'pass', False, True)
enc_rpc = self.nodes[0].get_wallet_rpc('desc_enc')
enc_rpc.getnewaddress() # Makes sure that we can get a new address from a born encrypted wallet
self.log.info("Test blank descriptor wallets")
self.nodes[0].createwallet(wallet_name='desc_blank', blank=True, descriptors=True)
blank_rpc = self.nodes[0].get_wallet_rpc('desc_blank')
assert_raises_rpc_error(-4, 'This wallet has no available keys', blank_rpc.getnewaddress)
self.log.info("Test descriptor wallet with disabled private keys")
self.nodes[0].createwallet(wallet_name='desc_no_priv', disable_private_keys=True, descriptors=True)
nopriv_rpc = self.nodes[0].get_wallet_rpc('desc_no_priv')
assert_raises_rpc_error(-4, 'This wallet has no available keys', nopriv_rpc.getnewaddress)
if __name__ == '__main__':
    WalletDescriptorTest().main()
|
py | 1a3ccc178d459a912a41974e6cf4ce9be0a6b717 | from __future__ import unicode_literals
import requests
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.core.oauth2.provider import OAuth2Provider
class MicrosoftGraphAccount(ProviderAccount):
def to_str(self):
name = self.account.extra_data.get('displayName')
        if name and name.strip() != '':
return name
return super(MicrosoftGraphAccount, self).to_str()
class MicrosoftGraphProvider(OAuth2Provider):
id = str('microsoft')
name = 'Microsoft Graph'
account_class = MicrosoftGraphAccount
tenant = 'common'
access_token_url = 'https://login.microsoftonline.com/{tenant}/oauth2/v2.0/token'
authorize_url = 'https://login.microsoftonline.com/{tenant}/oauth2/v2.0/authorize'
profile_url = 'https://graph.microsoft.com/v1.0/me/'
def get_default_scope(self):
"""
Doc on scopes available at
https://developer.microsoft.com/en-us/graph/docs/concepts/permissions_reference
"""
return ['User.Read']
def extract_uid(self, data):
return str(data['id'])
def extract_common_fields(self, data):
return {
'email': data.get('mail') or data.get('userPrincipalName'),
'first_name': data.get('givenName'),
'last_name': data.get('surname')
}
def complete_login(self, request, app, token, **kwargs):
headers = {'Authorization': 'Bearer {0}'.format(token.token)}
resp = requests.get(self.get_profile_url(request), headers=headers)
extra_data = resp.json()
return self.sociallogin_from_response(request, extra_data)
provider_classes = [MicrosoftGraphProvider]
|
py | 1a3ccc62da8fc50d9cb5a25cdd8fa8bcb8460e4e | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
# port to userbot from uniborg by @keselekpermen69
import io
import re
from asyncio import sleep
import userbot.modules.sql_helper.blacklist_sql as sql
from userbot import CMD_HELP
from userbot.events import register
@register(incoming=True, disable_edited=True, disable_errors=True)
async def on_new_message(event):
# TODO: exempt admins from locks
name = event.raw_text
snips = sql.get_chat_blacklist(event.chat_id)
for snip in snips:
pattern = r"( |^|[^\w])" + re.escape(snip) + r"( |$|[^\w])"
if re.search(pattern, name, flags=re.IGNORECASE):
            try:
                await event.delete()
            except Exception:
                # Deletion failed (no DELETE permission here): warn briefly,
                # then drop the trigger so the warning is not repeated.
                reply = await event.reply(
                    "I do not have DELETE permission in this chat"
                )
                await sleep(1)
                await reply.delete()
                sql.rm_from_blacklist(event.chat_id, snip.lower())
            break
@register(outgoing=True, pattern="^.addbl(?: |$)(.*)")
async def on_add_black_list(addbl):
text = addbl.pattern_match.group(1)
to_blacklist = list(
{trigger.strip() for trigger in text.split("\n") if trigger.strip()}
)
for trigger in to_blacklist:
sql.add_to_blacklist(addbl.chat_id, trigger.lower())
await addbl.edit("`Added` **{}** `to the blacklist in the current chat`".format(text))
@register(outgoing=True, pattern="^.listbl(?: |$)(.*)")
async def on_view_blacklist(listbl):
all_blacklisted = sql.get_chat_blacklist(listbl.chat_id)
OUT_STR = "Blacklists in the Current Chat:\n"
if len(all_blacklisted) > 0:
for trigger in all_blacklisted:
OUT_STR += f"`{trigger}`\n"
else:
OUT_STR = "`There are no blacklist in current chat.`"
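    # Telegram messages are capped at 4096 characters, so longer lists are sent as a file instead.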
if len(OUT_STR) > 4096:
with io.BytesIO(str.encode(OUT_STR)) as out_file:
out_file.name = "blacklist.text"
await listbl.client.send_file(
listbl.chat_id,
out_file,
force_document=True,
allow_cache=False,
caption="BlackLists in the Current Chat",
reply_to=listbl
)
await listbl.delete()
else:
await listbl.edit(OUT_STR)
@register(outgoing=True, pattern="^.rmbl(?: |$)(.*)")
async def on_delete_blacklist(rmbl):
text = rmbl.pattern_match.group(1)
to_unblacklist = list(
{trigger.strip() for trigger in text.split("\n") if trigger.strip()}
)
successful = 0
for trigger in to_unblacklist:
if sql.rm_from_blacklist(rmbl.chat_id, trigger.lower()):
successful += 1
if not successful:
await rmbl.edit("`Blacklist` **{}** `doesn't exist.`".format(text))
else:
await rmbl.edit("`Blacklist` **{}** `was deleted successfully`".format(text))
CMD_HELP.update({"blacklist": ">`.listbl`"
"\nUsage: Lists all active userbot blacklist in a chat."
"\n\n>`.addbl <keyword>`"
"\nUsage: Saves the message to the 'blacklist keyword'."
"\nThe bot will delete to the message whenever 'blacklist keyword' is mentioned."
"\n\n>`.rmbl <keyword>`"
"\nUsage: Stops the specified blacklist."})
|
py | 1a3cccc713126ea151e9e0d8c4481c3fb4d42065 | import console
n = console.get_value("n")
sum = 0
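# The loop below adds 5 for each pair removed from n, i.e. 5 * (n // 2) for a non-negative integer n.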
while n > 1:
sum += 5
n -= 2
console.set_value("z", sum)
print(console.get_value("z"))
|
py | 1a3ccdb4c8931ccd96087844ea972b3918e15541 | from setuptools import find_packages
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='Sequoya',
version='0.9.1',
description='Solving Multiple Sequence Alignments with Python',
author='Antonio Benítez-Hidalgo',
author_email='[email protected]',
maintainer='Antonio Benítez-Hidalgo',
maintainer_email='[email protected]',
python_requires='>=3',
license='MIT',
url='https://github.com/benhid/Sequoya',
long_description=open('README.md').read(),
packages=find_packages(exclude=["test.*", "tests"]),
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Programming Language :: Python :: 3.6'
],
install_requires=[
'jmetalpy==1.5.4',
'pyMSA==0.5.1',
'bokeh==1.1.0' # optional for Dask web interface
]
)
|
py | 1a3cce6f97e8b235fc40b56dc964d5a9563ceeee | from subprocess import call
from os import path
import hitchpostgres
import hitchselenium
import hitchpython
import hitchserve
import hitchredis
import hitchtest
import hitchsmtp
# Get directory above this file
PROJECT_DIRECTORY = path.abspath(path.join(path.dirname(__file__), '..'))
class ExecutionEngine(hitchtest.ExecutionEngine):
"""Engine for orchestating and interacting with the app."""
def set_up(self):
"""Ensure virtualenv present, then run all services."""
python_package = hitchpython.PythonPackage(
python_version=self.preconditions['python_version']
)
python_package.build()
python_package.verify()
call([
python_package.pip, "install", "-r",
path.join(PROJECT_DIRECTORY, "requirements/local.txt")
])
postgres_package = hitchpostgres.PostgresPackage(
version=self.settings["postgres_version"],
)
postgres_package.build()
postgres_package.verify()
redis_package = hitchredis.RedisPackage(version="2.8.4")
redis_package.build()
redis_package.verify()
self.services = hitchserve.ServiceBundle(
project_directory=PROJECT_DIRECTORY,
startup_timeout=float(self.settings["startup_timeout"]),
shutdown_timeout=5.0,
)
postgres_user = hitchpostgres.PostgresUser("meet-me", "password")
self.services['Postgres'] = hitchpostgres.PostgresService(
postgres_package=postgres_package,
users=[postgres_user, ],
databases=[hitchpostgres.PostgresDatabase("meet-me", postgres_user), ]
)
self.services['HitchSMTP'] = hitchsmtp.HitchSMTPService(port=1025)
self.services['Django'] = hitchpython.DjangoService(
python=python_package.python,
port=8000,
version=str(self.settings.get("django_version")),
settings="config.settings.local",
needs=[self.services['Postgres'], ],
env_vars=self.settings['environment_variables'],
)
self.services['Redis'] = hitchredis.RedisService(
redis_package=redis_package,
port=16379,
)
self.services['Firefox'] = hitchselenium.SeleniumService(
xvfb=self.settings.get("quiet", False),
no_libfaketime=True,
)
# import hitchcron
# self.services['Cron'] = hitchcron.CronService(
# run=self.services['Django'].manage("trigger").command,
# every=1,
# needs=[ self.services['Django'], ],
# )
self.services.startup(interactive=False)
# Configure selenium driver
self.driver = self.services['Firefox'].driver
self.driver.set_window_size(self.settings['window_size']['height'], self.settings['window_size']['width'])
self.driver.set_window_position(0, 0)
self.driver.implicitly_wait(2.0)
self.driver.accept_next_alert = True
def pause(self, message=None):
"""Stop. IPython time."""
if hasattr(self, 'services'):
self.services.start_interactive_mode()
self.ipython(message)
if hasattr(self, 'services'):
self.services.stop_interactive_mode()
def load_website(self):
"""Navigate to website in Firefox."""
self.driver.get(self.services['Django'].url())
def click(self, on):
"""Click on HTML id."""
self.driver.find_element_by_id(on).click()
def fill_form(self, **kwargs):
"""Fill in a form with id=value."""
for element, text in kwargs.items():
self.driver.find_element_by_id(element).send_keys(text)
def click_submit(self):
"""Click on a submit button if it exists."""
self.driver.find_element_by_css_selector("button[type=\"submit\"]").click()
def confirm_emails_sent(self, number):
"""Count number of emails sent by app."""
assert len(self.services['HitchSMTP'].logs.json()) == int(number)
def wait_for_email(self, containing=None):
"""Wait for, and return email."""
self.services['HitchSMTP'].logs.out.tail.until_json(
lambda email: containing in email['payload'] or containing in email['subject'],
timeout=25,
lines_back=1,
)
def time_travel(self, days=""):
"""Make all services think that time has skipped forward."""
self.services.time_travel(days=int(days))
def on_failure(self):
"""Stop and IPython."""
if not self.settings['quiet']:
if self.settings.get("pause_on_failure", False):
self.pause(message=self.stacktrace.to_template())
def on_success(self):
"""Pause on success if enabled."""
if self.settings.get("pause_on_success", False):
self.pause(message="SUCCESS")
def tear_down(self):
"""Shut down services required to run your test."""
if hasattr(self, 'services'):
self.services.shutdown()
|
py | 1a3ccf6663cb60a1494864b51092a886f8955b9a | '''
This is a sample class that you can use to control the mouse pointer.
It uses the pyautogui library. You can set the precision for mouse movement
(how much the mouse moves) and the speed (how fast it moves) by changing
precision_dict and speed_dict.
Calling the move function with the x and y output of the gaze estimation model
will move the pointer.
This class is provided to help get you started; you can choose whether you want to use it or create your own from scratch.
'''
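# A minimal usage sketch (illustrative values; assumes a gaze-estimation model
# that yields small normalized x/y offsets, as described in the docstring above):
#
#   controller = MouseController(precision='medium', speed='fast')
#   controller.move(0.1, -0.05)   # nudges the pointer right and slightly down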
import pyautogui
class MouseController:
def __init__(self, precision, speed):
precision_dict={'high':100, 'low':1000, 'medium':500}
speed_dict={'fast':1, 'slow':10, 'medium':5}
self.precision=precision_dict[precision]
self.speed=speed_dict[speed]
def move(self, x, y):
pyautogui.moveRel(x*self.precision, -1*y*self.precision, duration=self.speed) |
py | 1a3cd0b5e688077385710966a270bc52d71ffb34 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
class View(nn.Module):
def __init__(self, size):
super(View, self).__init__()
self.size = size
def forward(self, tensor):
return tensor.view(self.size)
class VAE(nn.Module):
"""Encoder-Decoder architecture for both WAE-MMD and WAE-GAN."""
def __init__(self, z_dim=32, nc=3):
super(VAE, self).__init__()
self.z_dim = z_dim
self.nc = nc
self.encoder = nn.Sequential(
nn.Conv2d(nc, 128, 4, 2, 1, bias=False), # B, 128, 32, 32
nn.BatchNorm2d(128),
nn.ReLU(True),
nn.Conv2d(128, 256, 4, 2, 1, bias=False), # B, 256, 16, 16
nn.BatchNorm2d(256),
nn.ReLU(True),
nn.Conv2d(256, 512, 4, 2, 1, bias=False), # B, 512, 8, 8
nn.BatchNorm2d(512),
nn.ReLU(True),
nn.Conv2d(512, 1024, 4, 2, 1, bias=False), # B, 1024, 4, 4
nn.BatchNorm2d(1024),
nn.ReLU(True),
            View((-1, 1024*2*2)),                 # B, 1024*2*2
)
self.fc_mu = nn.Linear(1024*2*2, z_dim) # B, z_dim
self.fc_logvar = nn.Linear(1024*2*2, z_dim) # B, z_dim
self.decoder = nn.Sequential(
            nn.Linear(z_dim, 1024*4*4),                           # B, 1024*4*4
            View((-1, 1024, 4, 4)),                               # B, 1024, 4, 4
nn.ConvTranspose2d(1024, 512, 4, 2, 1, bias=False), # B, 512, 16, 16
nn.BatchNorm2d(512),
nn.ReLU(True),
nn.ConvTranspose2d(512, 256, 4, 2, 1, bias=False), # B, 256, 32, 32
nn.BatchNorm2d(256),
nn.ReLU(True),
nn.ConvTranspose2d(256, 128, 4, 2, 1, bias=False), # B, 128, 64, 64
nn.BatchNorm2d(128),
nn.ReLU(True),
nn.ConvTranspose2d(128, nc, 1), # B, nc, 64, 64
)
self.weight_init()
def weight_init(self):
for block in self._modules:
try:
for m in self._modules[block]:
kaiming_init(m)
except:
kaiming_init(block)
def forward(self, x):
z = self._encode(x)
mu, logvar = self.fc_mu(z), self.fc_logvar(z)
z = self.reparameterize(mu, logvar)
x_recon = self._decode(z)
return x_recon, z, mu, logvar
def reparameterize(self, mu, logvar):
stds = (0.5 * logvar).exp()
epsilon = torch.randn(*mu.size())
if mu.is_cuda:
stds, epsilon = stds.cuda(), epsilon.cuda()
latents = epsilon * stds + mu
return latents
def _encode(self, x):
return self.encoder(x)
def _decode(self, z):
return self.decoder(z)
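def _example_shape_check():
    # Illustrative sketch: a forward pass with the 3x32x32 input size implied by
    # the 1024*2*2 flatten in the encoder above (batch size 8 is arbitrary).
    model = VAE(z_dim=32, nc=3)
    x = torch.randn(8, 3, 32, 32)
    x_recon, z, mu, logvar = model(x)
    assert x_recon.shape == (8, 3, 32, 32)
    assert z.shape == mu.shape == logvar.shape == (8, 32)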
class Discriminator(nn.Module):
"""Adversary architecture(Discriminator) for WAE-GAN."""
def __init__(self, z_dim=10, n_classes=1):
super(Discriminator, self).__init__()
self.z_dim = z_dim
self.net = nn.Sequential(
nn.Linear(z_dim, 512),
nn.ReLU(True),
nn.Linear(512, 512),
nn.ReLU(True),
nn.Linear(512, 512),
nn.ReLU(True),
nn.Linear(512, 512),
nn.ReLU(True),
nn.Linear(512, n_classes),
nn.Softmax()
)
self.weight_init()
def weight_init(self):
for block in self._modules:
for m in self._modules[block]:
kaiming_init(m)
def forward(self, z):
return self.net(z)
def kaiming_init(m):
if isinstance(m, (nn.Linear, nn.Conv2d)):
init.kaiming_normal(m.weight)
if m.bias is not None:
m.bias.data.fill_(0)
elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
m.weight.data.fill_(1)
if m.bias is not None:
m.bias.data.fill_(0)
def normal_init(m, mean, std):
if isinstance(m, (nn.Linear, nn.Conv2d)):
m.weight.data.normal_(mean, std)
if m.bias.data is not None:
m.bias.data.zero_()
elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):
m.weight.data.fill_(1)
if m.bias.data is not None:
m.bias.data.zero_()
|
py | 1a3cd1011b1c7d7a304e4fd338059259703d3222 | """
WSGI config for spotcollage project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "spotcollage.settings")
application = get_wsgi_application()
|
py | 1a3cd137897dd93760e345919bf69fa3e3cf5f30 | try:
import matplotlib.pyplot as plt
except:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
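# Assumed structure of `q` (inferred from the lookups below): a nested dict keyed
# by frozensets of possible hand values, mapping each (player, dealer) pair to
# [expected_reward_if_hit, expected_reward_if_stay], e.g.
#
#   q = {frozenset({13}): {frozenset({1, 11}): [0.12, -0.34]}}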
def plot_no_player_aces(q):
fig, axis = plt.subplots(3, 6, sharey=True)
fig.suptitle('Player Hand Values (No Aces) vs Dealer First Card Value', fontsize=10)
size = 5
for index, player_start in enumerate(range(4, 22)):
p = frozenset(set([player_start]))
hits = list()
stays = list()
for dealer_start in range(1, 12):
d = frozenset(set([dealer_start]))
if dealer_start == 1 or dealer_start == 11:
d = frozenset([1, 11])
if p in q and d in q[p]:
values = q[p][d]
hits.append(values[0])
stays.append(values[1])
else:
hits.append(None)
stays.append(None)
plt.subplot(3, 6, 1 + index)
plt.title('player hand value: ' + str(player_start), fontsize=6)
ax = plt.gca()
ax.set_xlabel('dealer first card', fontsize=5)
ax.set_ylabel('reward', fontsize=5)
ax.tick_params(axis='both', which='major', labelsize=5)
ax.tick_params(axis='both', which='minor', labelsize=10)
plt.plot(range(1, len(hits) + 1), hits, 'g-', label='hits')
plt.plot(range(1, len(stays) + 1), stays, 'r-', label='stays')
if player_start == 21:
handles, labels = ax.get_legend_handles_labels()
fig.legend(handles, labels, loc='lower right')
plt.tight_layout(pad=0.2, h_pad=0.4, w_pad=-1.4)
plt.subplots_adjust(top=0.9, bottom=0.2)
plt.show()
def print_no_player_aces(q):
for player_start in range(4, 22):
for dealer_start in range(1, 12):
p = frozenset(set([player_start]))
d = frozenset(set([dealer_start]))
if dealer_start == 1 or dealer_start == 11:
d = frozenset([1, 11])
if p in q and d in q[p]:
values = q[p][d]
hit = str(round(values[0], 3)).ljust(6, '0')
stay = str(round(values[1], 3)).ljust(6, '0')
else:
hit = "-"
stay = "-"
print('{} / {}: [{}, {}]'.format(str(player_start).rjust(2, ' '), str(dealer_start).rjust(2, ' '), hit, stay))
def print_one_player_ace(q):
print('fit, one player ace, dealer ace = 1 or 11:')
for player_start in range(2, 21):
for dealer_start in range(1, 12):
p = frozenset(set([player_start + 1, player_start + 11]))
d = frozenset(set([dealer_start]))
if dealer_start == 1 or dealer_start == 11:
d = frozenset([1, 11])
if p in q and d in q[p]:
values = q[p][d]
hit = str(round(values[0], 3)).ljust(6, '0')
stay = str(round(values[1], 3)).ljust(6, '0')
else:
hit = "-"
stay = "-"
print('{} / {}: [{}, {}]'.format(str([player_start + 1, player_start + 11]).rjust(8, ' '), str(dealer_start).rjust(2, ' '), hit, stay))
|
py | 1a3cd17549d9f4b32c2c47a34034097ece343d15 | from cryptography.fernet import Fernet
key = 'TluxwB3fV_GWuLkR1_BzGs1Zk90TYAuhNMZP_0q4WyM='
# Oh no! The code is going over the edge! What are you going to do?
message = b'gAAAAABc9-3vl7axhmpucu0xhOTDMROLu0ihsnHeqph6OSUUFPS27-Y58CkRZY4O1RgjsJwM5caNw6BDCdq3-cGaH1N69k-B126NmnSb9EL9ARyq4oRgslIFSOmBQHHIZOvjjaV-ZzCzojfz0duX9Cs2KjqI5USn8GUjzWXxwhV8zY4hECNAN3c='
def main():
f = Fernet(key)
print(f.decrypt(message))
if __name__ == "__main__":
main() |
py | 1a3cd1f1c335a4729a71cf4df8b946783ddac973 | import math
tcase = int(input())
while(tcase):
num = int(input())
lst = []
while(num / 10 != 0):
lst.append(num % 10)
num = math.floor(num / 10)
print(lst[0] + lst[-1])
tcase -= 1
|
py | 1a3cd282c2d1ede5c60a7c2c84846cbeed7808f0 | # Copyright (c) Facebook, Inc. and its affiliates.
import itertools
import logging
from typing import Dict, List
import torch
from detectron2.config import configurable
from detectron2.layers import ShapeSpec, batched_nms_rotated, cat
from detectron2.structures import Instances, RotatedBoxes, pairwise_iou_rotated
from detectron2.utils.memory import retry_if_cuda_oom
from ..box_regression import Box2BoxTransformRotated
from .build import PROPOSAL_GENERATOR_REGISTRY
from .proposal_utils import _is_tracing
from .rpn import RPN
logger = logging.getLogger(__name__)
def find_top_rrpn_proposals(
proposals,
pred_objectness_logits,
image_sizes,
nms_thresh,
pre_nms_topk,
post_nms_topk,
min_box_size,
training,
):
"""
For each feature map, select the `pre_nms_topk` highest scoring proposals,
apply NMS, clip proposals, and remove small boxes. Return the `post_nms_topk`
highest scoring proposals among all the feature maps if `training` is True,
otherwise, returns the highest `post_nms_topk` scoring proposals for each
feature map.
Args:
proposals (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A, 5).
All proposal predictions on the feature maps.
pred_objectness_logits (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A).
image_sizes (list[tuple]): sizes (h, w) for each image
nms_thresh (float): IoU threshold to use for NMS
pre_nms_topk (int): number of top k scoring proposals to keep before applying NMS.
When RRPN is run on multiple feature maps (as in FPN) this number is per
feature map.
post_nms_topk (int): number of top k scoring proposals to keep after applying NMS.
When RRPN is run on multiple feature maps (as in FPN) this number is total,
over all feature maps.
min_box_size(float): minimum proposal box side length in pixels (absolute units wrt
input images).
training (bool): True if proposals are to be used in training, otherwise False.
This arg exists only to support a legacy bug; look for the "NB: Legacy bug ..."
comment.
Returns:
proposals (list[Instances]): list of N Instances. The i-th Instances
stores post_nms_topk object proposals for image i.
"""
num_images = len(image_sizes)
device = proposals[0].device
# 1. Select top-k anchor for every level and every image
topk_scores = [] # #lvl Tensor, each of shape N x topk
topk_proposals = []
level_ids = [] # #lvl Tensor, each of shape (topk,)
batch_idx = torch.arange(num_images, device=device)
for level_id, proposals_i, logits_i in zip(
itertools.count(), proposals, pred_objectness_logits
):
Hi_Wi_A = logits_i.shape[1]
if isinstance(Hi_Wi_A, torch.Tensor): # it's a tensor in tracing
num_proposals_i = torch.clamp(Hi_Wi_A, max=pre_nms_topk)
else:
num_proposals_i = min(Hi_Wi_A, pre_nms_topk)
topk_scores_i, topk_idx = logits_i.topk(num_proposals_i, dim=1)
# each is N x topk
topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx] # N x topk x 5
topk_proposals.append(topk_proposals_i)
topk_scores.append(topk_scores_i)
level_ids.append(torch.full((num_proposals_i,), level_id, dtype=torch.int64, device=device))
# 2. Concat all levels together
topk_scores = cat(topk_scores, dim=1)
topk_proposals = cat(topk_proposals, dim=1)
level_ids = cat(level_ids, dim=0)
# 3. For each image, run a per-level NMS, and choose topk results.
results = []
for n, image_size in enumerate(image_sizes):
boxes = RotatedBoxes(topk_proposals[n])
scores_per_img = topk_scores[n]
lvl = level_ids
valid_mask = torch.isfinite(boxes.tensor).all(dim=1) & torch.isfinite(scores_per_img)
if not valid_mask.all():
if training:
raise FloatingPointError(
"Predicted boxes or scores contain Inf/NaN. Training has diverged."
)
boxes = boxes[valid_mask]
scores_per_img = scores_per_img[valid_mask]
lvl = lvl[valid_mask]
boxes.clip(image_size)
# filter empty boxes
keep = boxes.nonempty(threshold=min_box_size)
if _is_tracing() or keep.sum().item() != len(boxes):
boxes, scores_per_img, lvl = (boxes[keep], scores_per_img[keep], lvl[keep])
keep = batched_nms_rotated(boxes.tensor, scores_per_img, lvl, nms_thresh)
# In Detectron1, there was different behavior during training vs. testing.
# (https://github.com/facebookresearch/Detectron/issues/459)
# During training, topk is over the proposals from *all* images in the training batch.
# During testing, it is over the proposals for each image separately.
# As a result, the training behavior becomes batch-dependent,
# and the configuration "POST_NMS_TOPK_TRAIN" end up relying on the batch size.
# This bug is addressed in Detectron2 to make the behavior independent of batch size.
keep = keep[:post_nms_topk]
res = Instances(image_size)
res.proposal_boxes = boxes[keep]
res.objectness_logits = scores_per_img[keep]
results.append(res)
return results
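def _example_find_top_rrpn_proposals():
    # Illustrative sketch with hypothetical shapes: one feature level, a batch of
    # two 800x800 images, and random (x_ctr, y_ctr, w, h, angle) placeholder boxes.
    proposals = [torch.rand(2, 1000, 5) * 100]
    objectness = [torch.rand(2, 1000)]
    return find_top_rrpn_proposals(
        proposals,
        objectness,
        image_sizes=[(800, 800), (800, 800)],
        nms_thresh=0.7,
        pre_nms_topk=2000,
        post_nms_topk=1000,
        min_box_size=0.0,
        training=False,
    )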
@PROPOSAL_GENERATOR_REGISTRY.register()
class RRPN(RPN):
"""
Rotated Region Proposal Network described in :paper:`RRPN`.
"""
@configurable
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.anchor_boundary_thresh >= 0:
raise NotImplementedError(
"anchor_boundary_thresh is a legacy option not implemented for RRPN."
)
@classmethod
def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
ret = super().from_config(cfg, input_shape)
ret["box2box_transform"] = Box2BoxTransformRotated(weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS)
return ret
@torch.no_grad()
def label_and_sample_anchors(self, anchors: List[RotatedBoxes], gt_instances: List[Instances]):
"""
Args:
anchors (list[RotatedBoxes]): anchors for each feature map.
gt_instances: the ground-truth instances for each image.
Returns:
list[Tensor]:
List of #img tensors. i-th element is a vector of labels whose length is
the total number of anchors across feature maps. Label values are in {-1, 0, 1},
with meanings: -1 = ignore; 0 = negative class; 1 = positive class.
list[Tensor]:
i-th element is a Nx5 tensor, where N is the total number of anchors across
feature maps. The values are the matched gt boxes for each anchor.
Values are undefined for those anchors not labeled as 1.
"""
anchors = RotatedBoxes.cat(anchors)
gt_boxes = [x.gt_boxes for x in gt_instances]
del gt_instances
gt_labels = []
matched_gt_boxes = []
for gt_boxes_i in gt_boxes:
"""
gt_boxes_i: ground-truth boxes for i-th image
"""
match_quality_matrix = retry_if_cuda_oom(pairwise_iou_rotated)(gt_boxes_i, anchors)
matched_idxs, gt_labels_i = retry_if_cuda_oom(self.anchor_matcher)(match_quality_matrix)
# Matching is memory-expensive and may result in CPU tensors. But the result is small
gt_labels_i = gt_labels_i.to(device=gt_boxes_i.device)
# A vector of labels (-1, 0, 1) for each anchor
gt_labels_i = self._subsample_labels(gt_labels_i)
if len(gt_boxes_i) == 0:
# These values won't be used anyway since the anchor is labeled as background
matched_gt_boxes_i = torch.zeros_like(anchors.tensor)
else:
# TODO wasted indexing computation for ignored boxes
matched_gt_boxes_i = gt_boxes_i[matched_idxs].tensor
gt_labels.append(gt_labels_i) # N,AHW
matched_gt_boxes.append(matched_gt_boxes_i)
return gt_labels, matched_gt_boxes
@torch.no_grad()
def predict_proposals(self, anchors, pred_objectness_logits, pred_anchor_deltas, image_sizes):
pred_proposals = self._decode_proposals(anchors, pred_anchor_deltas)
return find_top_rrpn_proposals(
pred_proposals,
pred_objectness_logits,
image_sizes,
self.nms_thresh,
self.pre_nms_topk[self.training],
self.post_nms_topk[self.training],
self.min_box_size,
self.training,
)
|
py | 1a3cd303db3e13b1899d9f8f924bf4840ba6c8b2 | import glob
import os
import torch
import numpy as np
from torch.utils.data import Dataset
from PIL import Image
import torchvision.transforms as transforms
class ImageDataset(Dataset):
def __init__(self, root, transforms_=None, mode='train'):
self.transforms_ = transforms.Compose(transforms_)
self.files = sorted(glob.glob(os.path.join(root, mode) + '/*.*'))
def __getitem__(self, index):
img = Image.open(self.files[index % len(self.files)])
w, h = img.size
img_A = img.crop((0, 0, w/2, h))
img_B = img.crop((w/2, 0, w, h))
if np.random.random() < 0.5:
img_A = Image.fromarray(np.array(img_A)[:, ::-1, :], 'RGB')
img_B = Image.fromarray(np.array(img_B)[:, ::-1, :], 'RGB')
img_A = self.transforms_(img_A)
img_B = self.transforms_(img_B)
return {'A': img_A, 'B' : img_B}
def __len__(self):
return len(self.files) |
py | 1a3cd3d08b2ca31b3fcce39c29f6ad0205913fd4 | from spaceone.core.service import *
from spaceone.identity.manager import DomainManager
from spaceone.identity.manager.domain_secret_manager import DomainSecretManager
from spaceone.identity.model import Domain
@authentication_handler(exclude=['create', 'list', 'get_public_key'])
@authorization_handler(exclude=['create', 'list', 'get_public_key'])
@mutation_handler(exclude=['create', 'list', 'get_public_key'])
@event_handler
class DomainService(BaseService):
def __init__(self, metadata):
super().__init__(metadata)
self.domain_mgr: DomainManager = self.locator.get_manager('DomainManager')
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['name'])
def create(self, params):
""" Create domain
Args:
params (dict): {
'name': 'str',
'config': 'dict',
'plugin_info': 'dict',
'tags': 'list'
}
Returns:
domain_vo (object)
"""
# Create Domain
domain_vo: Domain = self.domain_mgr.create_domain(params)
# Create domain secret
domain_secret_mgr: DomainSecretManager = self._get_domain_secret_manager()
domain_secret_mgr.create_domain_secret(domain_vo.domain_id)
return domain_vo
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['domain_id'])
def update(self, params):
""" Update domain
Args:
params (dict): {
'domain_id': 'str',
'config': 'dict',
'tags': 'list'
}
Returns:
domain_vo (object)
"""
return self.domain_mgr.update_domain(params)
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['domain_id'])
def delete(self, params):
""" Delete domain
Args:
params (dict): {
'domain_id': 'str'
}
Returns:
None
"""
self.domain_mgr.delete_domain(params['domain_id'])
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['domain_id'])
def enable(self, params):
""" Enable domain
Args:
params (dict): {
'domain_id': 'str'
}
Returns:
domain_vo (object)
"""
return self.domain_mgr.enable_domain(params['domain_id'])
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['domain_id'])
def disable(self, params):
""" Disable domain
Args:
params (dict): {
'domain_id': 'str'
}
Returns:
domain_vo (object)
"""
return self.domain_mgr.disable_domain(params['domain_id'])
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['domain_id'])
def get(self, params):
""" Disable domain
Args:
params (dict): {
'domain_id': 'str',
'only': 'list'
}
Returns:
domain_vo (object)
"""
return self.domain_mgr.get_domain(params['domain_id'], params.get('only'))
@transaction(append_meta={'auth.scope': 'SYSTEM'})
@check_required(['domain_id'])
def get_public_key(self, params):
""" Get domain's public key for authentication
Args:
params (dict): {
'domain_id': 'str'
}
Returns:
result (dict): {
'pub_jwk': 'str',
'domain_id': 'str'
}
"""
domain_id = params['domain_id']
domain_secret_mgr: DomainSecretManager = self._get_domain_secret_manager()
pub_jwk = domain_secret_mgr.get_domain_public_key(domain_id=domain_id)
return {
'pub_jwk': pub_jwk,
'domain_id': domain_id
}
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@append_query_filter(['domain_id', 'name'])
@change_tag_filter('tags')
@append_keyword_filter(['domain_id', 'name'])
def list(self, params):
""" List api keys
Args:
params (dict): {
'domain_id': 'str',
'name': 'str',
'query': 'dict (spaceone.api.core.v1.Query)'
}
Returns:
results (list): 'list of domain_vo'
total_count (int)
"""
query = params.get('query', {})
return self.domain_mgr.list_domains(query)
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['query'])
@change_tag_filter('tags')
@append_keyword_filter(['domain_id', 'name'])
def stat(self, params):
"""
Args:
params (dict): {
'query': 'dict (spaceone.api.core.v1.StatisticsQuery)'
}
Returns:
values (list): 'list of statistics data'
total_count (int)
"""
query = params.get('query', {})
return self.domain_mgr.stat_domains(query)
def _get_domain_secret_manager(self):
return self.locator.get_manager('DomainSecretManager')
|
py | 1a3cd44f4b81e8fc666d396e89670fe97cb9f761 | # -*- coding: utf-8 -*-
#from DevTemplate import Ui_Form
from PyQt4 import QtCore, QtGui
from acq4.pyqtgraph.WidgetGroup import WidgetGroup
from acq4.pyqtgraph.parametertree import *
import collections
class CameraDeviceGui(QtGui.QWidget):
def __init__(self, dev, win):
#pdb.set_trace()
QtGui.QWidget.__init__(self)
self.dev = dev
self.win = win
#self.cam = self.dev.cam
#self.ui = Ui_Form()
#self.ui.setupUi(self)
self.layout = QtGui.QGridLayout()
self.layout.setContentsMargins(0,0,0,0)
self.setLayout(self.layout)
self.params = self.dev.listParams()
self.stateGroup = WidgetGroup([])
#self.labels = {}
params = []
for k, p in self.params.iteritems():
try:
val = self.dev.getParam(k)
except:
continue
if not p[1]: ## read-only param
params.append({'name': k, 'readonly': True, 'value': val, 'type': 'str'})
#w = QtGui.QLabel()
#w.setText(str(val))
#self.labels[k] = w
else: ## parameter is writable
if type(p[0]) is tuple:
if len(p[0]) == 3:
(mn, mx, step) = p[0]
elif len(p[0]) == 2:
(mn, mx) = p[0]
step = 1
if type(mx) in [int, long] and type(mn) in [int, long]:
params.append({'name': k, 'type': 'int', 'value': val, 'limits': (mn, mx), 'step': step})
else:
params.append({'name': k, 'type': 'float', 'value': val, 'limits': (mn, mx), 'dec': True, 'step': 1})
if k == 'exposure':
params[-1]['suffix'] = 's'
params[-1]['siPrefix'] = True
params[-1]['minStep'] = 1e-6
elif type(p[0]) is list:
#print k, val, p
params.append({'name': k, 'type': 'list', 'value': val, 'values': p[0]})
#elif 'BOOL' in typ:
# w = QtGui.QCheckBox()
# w.setChecked(val)
else:
print " Ignoring parameter '%s': %s" % (k, str(p))
continue
#self.stateGroup.addWidget(w, k)
#self.ui.formLayout_2.addRow(k, w)
#self.stateGroup.sigChanged.connect(self.stateChanged)
self.paramSet = Parameter(name='cameraParams', type='group', children=params)
self.paramWidget = ParameterTree()
self.paramWidget.setParameters(self.paramSet, showTop=False)
self.layout.addWidget(self.paramWidget)
self.paramSet.sigTreeStateChanged.connect(self.stateChanged)
#self.ui.reconnectBtn.clicked.connect(self.reconnect)
self.dev.sigParamsChanged.connect(self.paramsChanged)
#for k, p in self.params.iteritems():
##p = self.params[k]
##print p
##if not p[1]:
##continue
#try:
#val = self.dev.getParam(k)
#except:
#continue
##typ = self.cam.getParamTypeName(p)
#if not p[1]: ## read-only param
#w = QtGui.QLabel()
#w.setText(str(val))
#self.labels[k] = w
#else: ## parameter is writable
#if type(p[0]) is tuple:
#if len(p[0]) == 3:
#(mn, mx, step) = p[0]
#elif len(p[0]) == 2:
#(mn, mx) = p[0]
#step = 1
#if type(mx) in [int, long] and type(mn) in [int, long]:
#w = QtGui.QSpinBox()
#intmax = (2**16)-1
#if mx is None or mx > intmax:
#mx = intmax
#mn = int(mn)
#mx = int(mx)
#step = int(step)
#w.setRange(mn, mx)
#w.setSingleStep(step)
##print k, "val:", val, type(val)
#w.setValue(val)
#else:
#w = SpinBox()
#w.setOpts(value=val, range=(mn, mx), dec=True, step=1)
#elif type(p[0]) is list:
#w = QtGui.QComboBox()
##(opts, vals) = self.cam.getEnumList(p)
#for i in range(len(p[0])):
#w.addItem(str(p[0][i]))
#if p[0][i] == val:
#w.setCurrentIndex(i)
##elif 'BOOL' in typ:
## w = QtGui.QCheckBox()
## w.setChecked(val)
#else:
#print " Ignoring parameter '%s': %s" % (k, str(p))
#continue
#self.stateGroup.addWidget(w, k)
#self.ui.formLayout_2.addRow(k, w)
##QtCore.QObject.connect(self.stateGroup, QtCore.SIGNAL('changed'), self.stateChanged)
#self.stateGroup.sigChanged.connect(self.stateChanged)
##QtCore.QObject.connect(self.ui.reconnectBtn, QtCore.SIGNAL('clicked()'), self.reconnect)
#self.ui.reconnectBtn.clicked.connect(self.reconnect)
##QtCore.QObject.connect(self.dev, QtCore.SIGNAL('paramsChanged'), self.paramsChanged)
#self.dev.sigParamsChanged.connect(self.paramsChanged)
##print "Done with UI"
def stateChanged(self, param, changes):
#print "tree state changed:"
## called when state is changed by user
vals = collections.OrderedDict()
for param, change, data in changes:
if change == 'value':
#print param.name(), param.value()
vals[param.name()] = param.value()
self.dev.setParams(vals)
def paramsChanged(self, params):
#print "Camera param changed:", params
## Called when state of camera has changed
for p in params.keys()[:]: ## flatten out nested dicts
if isinstance(params[p], dict):
for k in params[p]:
params[k] = params[p][k]
try: ## need to ignore tree-change signals while updating it.
self.paramSet.sigTreeStateChanged.disconnect(self.stateChanged)
for k, v in params.iteritems():
self.paramSet[k] = v
for p2 in self.params[k][3]: ## Update bounds if needed
newBounds = self.dev.listParams([p2])[p2][0]
self.paramSet.param(p2).setLimits(newBounds)
finally:
self.paramSet.sigTreeStateChanged.connect(self.stateChanged)
#self.stateGroup.blockSignals(True)
#self.stateGroup.setState(params)
#print "State:", self.stateGroup.state()
#for p in params:
#if not self.params[p][1]:
#self.labels[p].setText(str(params[p])) ## Update read-only labels
#else:
#for p2 in self.params[p][3]: ## Update bounds if needed
#newBounds = self.dev.listParams([p2])[p2][0]
#w = self.stateGroup.findWidget(p2)
##print "Update bounds for %s: %s" % (p2, str(newBounds))
#if type(newBounds) is tuple:
#(mn, mx, step) = newBounds
#if isinstance(w, QtGui.QSpinBox):
#intmax = (2**16)-1
#if mx is None or mx > intmax:
#mx = intmax
#mn = int(mn)
#mx = int(mx)
#step = int(step)
#w.setRange(mn, mx)
#w.setSingleStep(step)
#else:
#w.setOpts(range=(mn, mx))
##elif type(newBounds) is list:
##w.clear()
##for i in range(len(newBounds)):
##w.addItem(str(p[0][i]))
##if p[0][i] == val:
##w.setCurrentIndex(i)
#self.stateGroup.blockSignals(False)
def reconnect(self):
self.dev.reconnect()
|
py | 1a3cd4945ced4133e8f36e4f34d8b6b40d158436 | """Test EQ3 Max! Thermostats."""
from datetime import timedelta
from maxcube.cube import MaxCube
from maxcube.device import (
MAX_DEVICE_MODE_AUTOMATIC,
MAX_DEVICE_MODE_BOOST,
MAX_DEVICE_MODE_MANUAL,
MAX_DEVICE_MODE_VACATION,
)
from maxcube.thermostat import MaxThermostat
from maxcube.wallthermostat import MaxWallThermostat
import pytest
from homeassistant.components.climate.const import (
ATTR_CURRENT_TEMPERATURE,
ATTR_HVAC_ACTION,
ATTR_HVAC_MODE,
ATTR_HVAC_MODES,
ATTR_MAX_TEMP,
ATTR_MIN_TEMP,
ATTR_PRESET_MODE,
ATTR_PRESET_MODES,
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
DOMAIN as CLIMATE_DOMAIN,
HVAC_MODE_AUTO,
HVAC_MODE_DRY,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_BOOST,
PRESET_COMFORT,
PRESET_ECO,
PRESET_NONE,
SERVICE_SET_HVAC_MODE,
SERVICE_SET_PRESET_MODE,
SERVICE_SET_TEMPERATURE,
ClimateEntityFeature,
)
from homeassistant.components.maxcube.climate import (
MAX_TEMPERATURE,
MIN_TEMPERATURE,
OFF_TEMPERATURE,
ON_TEMPERATURE,
PRESET_ON,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
ATTR_SUPPORTED_FEATURES,
ATTR_TEMPERATURE,
)
from homeassistant.helpers import entity_registry as er
from homeassistant.util import utcnow
from tests.common import async_fire_time_changed
ENTITY_ID = "climate.testroom_testthermostat"
WALL_ENTITY_ID = "climate.testroom_testwallthermostat"
VALVE_POSITION = "valve_position"
async def test_setup_thermostat(hass, cube: MaxCube):
"""Test a successful setup of a thermostat device."""
entity_registry = er.async_get(hass)
assert entity_registry.async_is_registered(ENTITY_ID)
entity = entity_registry.async_get(ENTITY_ID)
assert entity.unique_id == "AABBCCDD01"
state = hass.states.get(ENTITY_ID)
assert state.state == HVAC_MODE_AUTO
assert state.attributes.get(ATTR_FRIENDLY_NAME) == "TestRoom TestThermostat"
assert state.attributes.get(ATTR_HVAC_ACTION) == CURRENT_HVAC_HEAT
assert state.attributes.get(ATTR_HVAC_MODES) == [
HVAC_MODE_OFF,
HVAC_MODE_AUTO,
HVAC_MODE_HEAT,
]
assert state.attributes.get(ATTR_PRESET_MODES) == [
PRESET_NONE,
PRESET_BOOST,
PRESET_COMFORT,
PRESET_ECO,
PRESET_AWAY,
PRESET_ON,
]
assert state.attributes.get(ATTR_PRESET_MODE) == PRESET_NONE
assert (
state.attributes.get(ATTR_SUPPORTED_FEATURES)
== ClimateEntityFeature.TARGET_TEMPERATURE | ClimateEntityFeature.PRESET_MODE
)
assert state.attributes.get(ATTR_MAX_TEMP) == MAX_TEMPERATURE
assert state.attributes.get(ATTR_MIN_TEMP) == 5.0
assert state.attributes.get(ATTR_CURRENT_TEMPERATURE) == 19.0
assert state.attributes.get(ATTR_TEMPERATURE) == 20.5
assert state.attributes.get(VALVE_POSITION) == 25
async def test_setup_wallthermostat(hass, cube: MaxCube):
"""Test a successful setup of a wall thermostat device."""
entity_registry = er.async_get(hass)
assert entity_registry.async_is_registered(WALL_ENTITY_ID)
entity = entity_registry.async_get(WALL_ENTITY_ID)
assert entity.unique_id == "AABBCCDD02"
state = hass.states.get(WALL_ENTITY_ID)
assert state.state == HVAC_MODE_OFF
assert state.attributes.get(ATTR_FRIENDLY_NAME) == "TestRoom TestWallThermostat"
assert state.attributes.get(ATTR_HVAC_ACTION) == CURRENT_HVAC_HEAT
assert state.attributes.get(ATTR_PRESET_MODE) == PRESET_NONE
assert state.attributes.get(ATTR_MAX_TEMP) == 29.0
assert state.attributes.get(ATTR_MIN_TEMP) == 5.0
assert state.attributes.get(ATTR_CURRENT_TEMPERATURE) == 19.0
assert state.attributes.get(ATTR_TEMPERATURE) is None
async def test_thermostat_set_hvac_mode_off(
hass, cube: MaxCube, thermostat: MaxThermostat
):
"""Turn off thermostat."""
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_HVAC_MODE: HVAC_MODE_OFF},
blocking=True,
)
cube.set_temperature_mode.assert_called_once_with(
thermostat, OFF_TEMPERATURE, MAX_DEVICE_MODE_MANUAL
)
thermostat.mode = MAX_DEVICE_MODE_MANUAL
thermostat.target_temperature = OFF_TEMPERATURE
thermostat.valve_position = 0
async_fire_time_changed(hass, utcnow() + timedelta(minutes=5))
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state.state == HVAC_MODE_OFF
assert state.attributes.get(ATTR_TEMPERATURE) is None
assert state.attributes.get(ATTR_HVAC_ACTION) == CURRENT_HVAC_OFF
assert state.attributes.get(VALVE_POSITION) == 0
wall_state = hass.states.get(WALL_ENTITY_ID)
assert wall_state.attributes.get(ATTR_HVAC_ACTION) == CURRENT_HVAC_OFF
async def test_thermostat_set_hvac_mode_heat(
hass, cube: MaxCube, thermostat: MaxThermostat
):
"""Set hvac mode to heat."""
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_HVAC_MODE: HVAC_MODE_HEAT},
blocking=True,
)
cube.set_temperature_mode.assert_called_once_with(
thermostat, 20.5, MAX_DEVICE_MODE_MANUAL
)
thermostat.mode = MAX_DEVICE_MODE_MANUAL
async_fire_time_changed(hass, utcnow() + timedelta(minutes=5))
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state.state == HVAC_MODE_HEAT
async def test_thermostat_set_invalid_hvac_mode(
hass, cube: MaxCube, thermostat: MaxThermostat
):
"""Set hvac mode to heat."""
with pytest.raises(ValueError):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_HVAC_MODE: HVAC_MODE_DRY},
blocking=True,
)
cube.set_temperature_mode.assert_not_called()
async def test_thermostat_set_temperature(
hass, cube: MaxCube, thermostat: MaxThermostat
):
"""Set hvac mode to heat."""
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_TEMPERATURE: 10.0},
blocking=True,
)
cube.set_temperature_mode.assert_called_once_with(thermostat, 10.0, None)
thermostat.target_temperature = 10.0
thermostat.valve_position = 0
async_fire_time_changed(hass, utcnow() + timedelta(minutes=5))
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state.state == HVAC_MODE_AUTO
assert state.attributes.get(ATTR_TEMPERATURE) == 10.0
assert state.attributes.get(ATTR_HVAC_ACTION) == CURRENT_HVAC_IDLE
async def test_thermostat_set_no_temperature(
hass, cube: MaxCube, thermostat: MaxThermostat
):
"""Set hvac mode to heat."""
with pytest.raises(ValueError):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{
ATTR_ENTITY_ID: ENTITY_ID,
ATTR_TARGET_TEMP_HIGH: 29.0,
ATTR_TARGET_TEMP_LOW: 10.0,
},
blocking=True,
)
cube.set_temperature_mode.assert_not_called()
async def test_thermostat_set_preset_on(hass, cube: MaxCube, thermostat: MaxThermostat):
"""Set preset mode to on."""
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_PRESET_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_PRESET_MODE: PRESET_ON},
blocking=True,
)
cube.set_temperature_mode.assert_called_once_with(
thermostat, ON_TEMPERATURE, MAX_DEVICE_MODE_MANUAL
)
thermostat.mode = MAX_DEVICE_MODE_MANUAL
thermostat.target_temperature = ON_TEMPERATURE
async_fire_time_changed(hass, utcnow() + timedelta(minutes=5))
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state.state == HVAC_MODE_HEAT
assert state.attributes.get(ATTR_TEMPERATURE) is None
assert state.attributes.get(ATTR_PRESET_MODE) == PRESET_ON
async def test_thermostat_set_preset_comfort(
hass, cube: MaxCube, thermostat: MaxThermostat
):
"""Set preset mode to comfort."""
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_PRESET_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_PRESET_MODE: PRESET_COMFORT},
blocking=True,
)
cube.set_temperature_mode.assert_called_once_with(
thermostat, thermostat.comfort_temperature, MAX_DEVICE_MODE_MANUAL
)
thermostat.mode = MAX_DEVICE_MODE_MANUAL
thermostat.target_temperature = thermostat.comfort_temperature
async_fire_time_changed(hass, utcnow() + timedelta(minutes=5))
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state.state == HVAC_MODE_HEAT
assert state.attributes.get(ATTR_TEMPERATURE) == thermostat.comfort_temperature
assert state.attributes.get(ATTR_PRESET_MODE) == PRESET_COMFORT
async def test_thermostat_set_preset_eco(
hass, cube: MaxCube, thermostat: MaxThermostat
):
"""Set preset mode to eco."""
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_PRESET_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_PRESET_MODE: PRESET_ECO},
blocking=True,
)
cube.set_temperature_mode.assert_called_once_with(
thermostat, thermostat.eco_temperature, MAX_DEVICE_MODE_MANUAL
)
thermostat.mode = MAX_DEVICE_MODE_MANUAL
thermostat.target_temperature = thermostat.eco_temperature
async_fire_time_changed(hass, utcnow() + timedelta(minutes=5))
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state.state == HVAC_MODE_HEAT
assert state.attributes.get(ATTR_TEMPERATURE) == thermostat.eco_temperature
assert state.attributes.get(ATTR_PRESET_MODE) == PRESET_ECO
async def test_thermostat_set_preset_away(
hass, cube: MaxCube, thermostat: MaxThermostat
):
"""Set preset mode to away."""
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_PRESET_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_PRESET_MODE: PRESET_AWAY},
blocking=True,
)
cube.set_temperature_mode.assert_called_once_with(
thermostat, None, MAX_DEVICE_MODE_VACATION
)
thermostat.mode = MAX_DEVICE_MODE_VACATION
thermostat.target_temperature = thermostat.eco_temperature
async_fire_time_changed(hass, utcnow() + timedelta(minutes=5))
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state.state == HVAC_MODE_HEAT
assert state.attributes.get(ATTR_TEMPERATURE) == thermostat.eco_temperature
assert state.attributes.get(ATTR_PRESET_MODE) == PRESET_AWAY
async def test_thermostat_set_preset_boost(
hass, cube: MaxCube, thermostat: MaxThermostat
):
"""Set preset mode to boost."""
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_PRESET_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_PRESET_MODE: PRESET_BOOST},
blocking=True,
)
cube.set_temperature_mode.assert_called_once_with(
thermostat, None, MAX_DEVICE_MODE_BOOST
)
thermostat.mode = MAX_DEVICE_MODE_BOOST
thermostat.target_temperature = thermostat.eco_temperature
async_fire_time_changed(hass, utcnow() + timedelta(minutes=5))
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state.state == HVAC_MODE_AUTO
assert state.attributes.get(ATTR_TEMPERATURE) == thermostat.eco_temperature
assert state.attributes.get(ATTR_PRESET_MODE) == PRESET_BOOST
async def test_thermostat_set_preset_none(
hass, cube: MaxCube, thermostat: MaxThermostat
):
"""Set preset mode to boost."""
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_PRESET_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_PRESET_MODE: PRESET_NONE},
blocking=True,
)
cube.set_temperature_mode.assert_called_once_with(
thermostat, None, MAX_DEVICE_MODE_AUTOMATIC
)
async def test_thermostat_set_invalid_preset(
hass, cube: MaxCube, thermostat: MaxThermostat
):
"""Set hvac mode to heat."""
with pytest.raises(ValueError):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_PRESET_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_PRESET_MODE: "invalid"},
blocking=True,
)
cube.set_temperature_mode.assert_not_called()
async def test_wallthermostat_set_hvac_mode_heat(
hass, cube: MaxCube, wallthermostat: MaxWallThermostat
):
"""Set wall thermostat hvac mode to heat."""
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: WALL_ENTITY_ID, ATTR_HVAC_MODE: HVAC_MODE_HEAT},
blocking=True,
)
cube.set_temperature_mode.assert_called_once_with(
wallthermostat, MIN_TEMPERATURE, MAX_DEVICE_MODE_MANUAL
)
wallthermostat.target_temperature = MIN_TEMPERATURE
async_fire_time_changed(hass, utcnow() + timedelta(minutes=5))
await hass.async_block_till_done()
state = hass.states.get(WALL_ENTITY_ID)
assert state.state == HVAC_MODE_HEAT
assert state.attributes.get(ATTR_TEMPERATURE) == MIN_TEMPERATURE
async def test_wallthermostat_set_hvac_mode_auto(
hass, cube: MaxCube, wallthermostat: MaxWallThermostat
):
"""Set wall thermostat hvac mode to auto."""
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: WALL_ENTITY_ID, ATTR_HVAC_MODE: HVAC_MODE_AUTO},
blocking=True,
)
cube.set_temperature_mode.assert_called_once_with(
wallthermostat, None, MAX_DEVICE_MODE_AUTOMATIC
)
wallthermostat.mode = MAX_DEVICE_MODE_AUTOMATIC
wallthermostat.target_temperature = 23.0
async_fire_time_changed(hass, utcnow() + timedelta(minutes=5))
await hass.async_block_till_done()
state = hass.states.get(WALL_ENTITY_ID)
assert state.state == HVAC_MODE_AUTO
assert state.attributes.get(ATTR_TEMPERATURE) == 23.0
|
py | 1a3cd5d63877b943e9226f42c039a10ab3473d34 | """pylint should detect yield and return mix inside genrators"""
__revision__ = None
def somegen():
"""this is a bad generator"""
if True:
return 1
else:
yield 2
def moregen():
"""this is another bad generator"""
if True:
yield 1
else:
return 2
|
py | 1a3cd5f7791426f0b3ff480d8de7e73fee82deb7 | # Copyright (c) 2008-2009 Pedro Matiello <[email protected]>
# Salim Fadhley <[email protected]>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
Exceptions.
"""
# Graph errors
class GraphError(RuntimeError):
"""
    A base-class for the various kinds of errors that occur in the python-graph class.
"""
pass
class AdditionError(GraphError):
"""
This error is raised when trying to add a node or edge already added to the graph or digraph.
"""
pass
class NodeUnreachable(GraphError):
"""
Goal could not be reached from start.
"""
def __init__(self, start, goal):
msg = "Node %s could not be reached from node %s" % ( repr(goal), repr(start) )
        GraphError.__init__(self, msg)
self.start = start
self.goal = goal
class InvalidGraphType(GraphError):
"""
Invalid graph type.
"""
pass
# Algorithm errors
class AlgorithmError(RuntimeError):
"""
    A base-class for the various kinds of errors that occur in the
algorithms package.
"""
pass
class NegativeWeightCycleError(AlgorithmError):
"""
Algorithms like the Bellman-Ford algorithm can detect and raise an exception
when they encounter a negative weight cycle.
@see: pygraph.algorithms.shortest_path_bellman_ford
"""
pass
|
py | 1a3cd68fc6a9c5b5854d47ca0330e832eaaf69ce | #!/usr/bin/env python
# -*- coding: utf-8
# Functions dealing with image cropping
import logging
import numpy as np
from .image import Image, zeros_like
logger = logging.getLogger(__name__)
class BoundingBox(object):
def __init__(self, xmin=None, xmax=None, ymin=None, ymax=None, zmin=None, zmax=None):
self.xmin = xmin
self.xmax = xmax
self.ymin = ymin
self.ymax = ymax
self.zmin = zmin
self.zmax = zmax
def get_minmax(self, img=None):
"""
Get voxel-based bounding box from coordinates. Replaces '-1' with max dim along each axis, '-2' with max dim
minus 1, etc.
:param img: Image object to get dimensions
:return:
"""
def _get_min_value(input):
if input is None:
return 0
else:
return input
def _get_max_value(input, dim):
# If empty, return dim+1 (corresponds to the maximum of the given dimension, e.g. nx)
if input is None:
return dim + 1
# If negative sign, return dim+1 if -1, dim if -2, dim-1 if -3, etc.
elif np.sign(input) == -1:
return input + dim + 1
# If user specified a non-negative value, use that
else:
return input
xyz_to_num = {'x': 0, 'y': 1, 'z': 2}
bbox_voxel = BoundingBox()
for attr, value in self.__dict__.items():
if attr[-3:] == 'min':
bbox_voxel.__setattr__(attr, _get_min_value(self.__getattribute__(attr)))
elif attr[-3:] == 'max':
bbox_voxel.__setattr__(attr, _get_max_value(self.__getattribute__(attr), img.dim[xyz_to_num[attr[0]]]))
else:
raise Exception(ValueError)
return bbox_voxel
class ImageCropper(object):
def __init__(self, img_in, mask=None, bbox=BoundingBox(), ref=None):
"""
:param img_in:
:param mask:
:param bbox: BoundingBox object with min and max values for each dimension, used for cropping.
:param ref:
"""
self.img_in = img_in
self.mask = mask
self.bbox = bbox
self.ref = ref
def crop(self, background=None):
"""
Crop image (change dimension)
:param background: int: If set, the output image will not be cropped. Instead, voxels outside the bounding
box will be set to the value specified by this parameter.
:return Image: img_out
"""
bbox = self.bbox
logger.info("Bounding box: x=[{}, {}], y=[{}, {}], z=[{}, {}]"
.format(bbox.xmin, bbox.xmax+1, bbox.ymin, bbox.ymax+1, bbox.zmin, bbox.zmax+1))
# Crop the image
if background is None:
logger.info("Cropping the image...")
data_crop = self.img_in.data[bbox.xmin:bbox.xmax+1, bbox.ymin:bbox.ymax+1, bbox.zmin:bbox.zmax+1]
img_out = Image(param=data_crop, hdr=self.img_in.hdr)
# adapt the origin in the sform and qform matrix
new_origin = np.dot(img_out.hdr.get_qform(), [bbox.xmin, bbox.ymin, bbox.zmin, 1])
img_out.hdr.structarr['qoffset_x'] = new_origin[0]
img_out.hdr.structarr['qoffset_y'] = new_origin[1]
img_out.hdr.structarr['qoffset_z'] = new_origin[2]
img_out.hdr.structarr['srow_x'][-1] = new_origin[0]
img_out.hdr.structarr['srow_y'][-1] = new_origin[1]
img_out.hdr.structarr['srow_z'][-1] = new_origin[2]
# Set voxels outside the bbox to the value 'background'
else:
logger.info("Setting voxels outside the bounding box to: {}".format(background))
img_out = self.img_in.copy()
img_out.data[:] = background
img_out.data[bbox.xmin:bbox.xmax+1, bbox.ymin:bbox.ymax+1, bbox.zmin:bbox.zmax+1] = \
self.img_in.data[bbox.xmin:bbox.xmax+1, bbox.ymin:bbox.ymax+1, bbox.zmin:bbox.zmax+1]
return img_out
def get_bbox_from_minmax(self, bbox=None):
"""
Get voxel bounding box from xmin, xmax, ymin, ymax, zmin, zmax user input
"""
self.bbox = bbox.get_minmax(img=self.img_in)
def get_bbox_from_mask(self, img_mask):
"""
Get bounding box from input binary mask, by looking at min/max values of the binary object in each dimension.
"""
data_nonzero = np.nonzero(img_mask.data)
# find min and max boundaries of the mask
dim = len(data_nonzero)
self.bbox.xmin, self.bbox.ymin, self.bbox.zmin = [min(data_nonzero[i]) for i in range(dim)]
self.bbox.xmax, self.bbox.ymax, self.bbox.zmax = [max(data_nonzero[i]) for i in range(dim)]
def get_bbox_from_ref(self, img_ref):
"""
Get bounding box from input reference image, by looking at min/max indices in each dimension.
img_ref and self.img_in should have the same dimensions.
"""
from spinalcordtoolbox.resampling import resample_nib
# Check that img_ref has the same length as img_in
if not len(img_ref.data.shape) == len(self.img_in.data.shape):
logger.error("Inconsistent dimensions: n_dim(img_ref)={}; n_dim(img_in)={}"
.format(len(img_ref.data.shape), len(self.img_in.data.shape)))
raise Exception(ValueError)
# Fill reference data with ones
img_ref.data[:] = 1
# Resample new image (in reference coordinates) into input image
img_ref_r = resample_nib(img_ref, image_dest=self.img_in, interpolation='nn', mode='constant')
# img_ref_r.save('test.nii') # for debug
# Get bbox from this resampled mask
self.get_bbox_from_mask(img_ref_r)
def get_bbox_from_gui(self):
"""
Launch a GUI. The medial sagittal plane of the image is shown. User selects two points: top-left and bottom-
right of the cropping window.
Note: There is no cropping along the right-left direction.
:return:
"""
from spinalcordtoolbox.gui import base
from spinalcordtoolbox.gui.sagittal import launch_sagittal_dialog
# Change orientation to SAL (for displaying sagittal view in the GUI)
native_orientation = self.img_in.orientation
self.img_in.change_orientation('SAL')
# Launch GUI
params = base.AnatomicalParams()
params.vertebraes = [1, 2] # TODO: Have user draw a sliding rectangle instead (more intuitive)
params.subtitle = "Click on the top-left (Label 1) and bottom-right (Label 2) of the image to select your " \
"cropping window."
img_labels = zeros_like(self.img_in)
launch_sagittal_dialog(self.img_in, img_labels, params)
# Extract coordinates
img_labels.change_orientation(native_orientation)
cropping_coord = img_labels.getNonZeroCoordinates(sorting='value')
# Since there is no cropping along the R-L direction, xmin/xmax are based on image dimension
self.bbox.xmin, self.bbox.ymin, self.bbox.zmin = (
0,
min(cropping_coord[0].y, cropping_coord[1].y),
min(cropping_coord[0].z, cropping_coord[1].z),
)
self.bbox.xmax, self.bbox.ymax, self.bbox.zmax = (
img_labels.dim[0],
max(cropping_coord[0].y, cropping_coord[1].y),
max(cropping_coord[0].z, cropping_coord[1].z),
)
# Put back input image in native orientation
self.img_in.change_orientation(native_orientation)
|
py | 1a3cd87072706bee7c3d5785470e6936a23b2dcb | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 23 06:08:57 2019
@author: Khoi To
"""
class Characteristics(object):
def __init__(self, chromosome_length, number_of_genes):
self.chromosome_length = chromosome_length
self.number_of_genes = number_of_genes |
py | 1a3cd8c13ffd6d0c2ffc613a4c3bdedf255a8886 | # Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import sys
import time
from google.cloud.environment_vars import CREDENTIALS as TEST_CREDENTIALS
# From shell environ. May be None.
CREDENTIALS = os.getenv(TEST_CREDENTIALS)
ENVIRON_ERROR_MSG = """\
To run the system tests, you need to set some environment variables.
Please check the CONTRIBUTING guide for instructions.
"""
class EmulatorCreds(object):
"""A mock credential object.
Used to avoid unnecessary token refreshing or reliance on the network
while an emulator is running.
"""
@staticmethod
def create_scoped_required():
return False
def check_environ():
err_msg = None
if CREDENTIALS is None:
err_msg = '\nMissing variables: ' + TEST_CREDENTIALS
elif not os.path.isfile(CREDENTIALS):
err_msg = '\nThe %s path %r is not a file.' % (TEST_CREDENTIALS,
CREDENTIALS)
if err_msg is not None:
msg = ENVIRON_ERROR_MSG + err_msg
print(msg, file=sys.stderr)
sys.exit(1)
def unique_resource_id(delimiter='_'):
"""A unique identifier for a resource.
Intended to help locate resources created in particular
testing environments and at particular times.
"""
build_id = os.getenv('TRAVIS_BUILD_ID', '')
if build_id == '':
return '%s%d' % (delimiter, 1000 * time.time())
else:
return '%s%s%s%d' % (delimiter, build_id,
delimiter, time.time())
|
py | 1a3cd8c1649f6e285b5eb7c8da68af8136676f63 | # Copyright (c) Chris Choy ([email protected]) and Wei Dong ([email protected])
#
# Please cite the following papers if you use any part of the code.
# - Christopher Choy, Wei Dong, Vladlen Koltun, Deep Global Registration, CVPR 2020
# - Christopher Choy, Jaesik Park, Vladlen Koltun, Fully Convolutional Geometric Features, ICCV 2019
# - Christopher Choy, JunYoung Gwak, Silvio Savarese, 4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural Networks, CVPR 2019
import copy
import numpy as np
import math
import open3d as o3d
from submodule.DeepGlobalRegistration.core.knn import find_knn_cpu
def make_open3d_point_cloud(xyz, color=None):
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(xyz)
if color is not None:
if len(color) != len(xyz):
color = np.tile(color, (len(xyz), 1))
pcd.colors = o3d.utility.Vector3dVector(color)
return pcd
def make_open3d_feature(data, dim, npts):
feature = o3d.registration.Feature()
feature.resize(dim, npts)
feature.data = data.cpu().numpy().astype('d').transpose()
return feature
def make_open3d_feature_from_numpy(data):
assert isinstance(data, np.ndarray)
assert data.ndim == 2
feature = o3d.registration.Feature()
feature.resize(data.shape[1], data.shape[0])
feature.data = data.astype('d').transpose()
return feature
def pointcloud_to_spheres(pcd, voxel_size, color, sphere_size=0.6):
spheres = o3d.geometry.TriangleMesh()
s = o3d.geometry.TriangleMesh.create_sphere(radius=voxel_size * sphere_size)
s.compute_vertex_normals()
s.paint_uniform_color(color)
if isinstance(pcd, o3d.geometry.PointCloud):
pcd = np.array(pcd.points)
for i, p in enumerate(pcd):
si = copy.deepcopy(s)
trans = np.identity(4)
trans[:3, 3] = p
si.transform(trans)
# si.paint_uniform_color(pcd.colors[i])
spheres += si
return spheres
def prepare_single_pointcloud(pcd, voxel_size):
pcd.estimate_normals(o3d.KDTreeSearchParamHybrid(radius=voxel_size * 2.0, max_nn=30))
return pcd
def prepare_pointcloud(filename, voxel_size):
pcd = o3d.io.read_point_cloud(filename)
T = get_random_transformation(pcd)
pcd.transform(T)
pcd_down = pcd.voxel_down_sample(voxel_size)
return pcd_down, T
def compute_overlap_ratio(pcd0, pcd1, trans, voxel_size):
pcd0_down = pcd0.voxel_down_sample(voxel_size)
pcd1_down = pcd1.voxel_down_sample(voxel_size)
matching01 = get_matching_indices(pcd0_down, pcd1_down, trans, voxel_size, 1)
matching10 = get_matching_indices(pcd1_down, pcd0_down, np.linalg.inv(trans),
voxel_size, 1)
overlap0 = len(matching01) / len(pcd0_down.points)
overlap1 = len(matching10) / len(pcd1_down.points)
return max(overlap0, overlap1)
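# Usage sketch (hypothetical inputs): given two roughly-aligned point clouds and
# an estimated 4x4 transform, report the fraction of overlapping points at 5 cm
# resolution.
#
#   ratio = compute_overlap_ratio(pcd0, pcd1, trans=np.eye(4), voxel_size=0.05)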
def get_matching_indices(source, target, trans, search_voxel_size, K=None):
source_copy = copy.deepcopy(source)
target_copy = copy.deepcopy(target)
source_copy.transform(trans)
pcd_tree = o3d.geometry.KDTreeFlann(target_copy)
match_inds = []
for i, point in enumerate(source_copy.points):
[_, idx, _] = pcd_tree.search_radius_vector_3d(point, search_voxel_size)
if K is not None:
idx = idx[:K]
for j in idx:
match_inds.append((i, j))
return match_inds
def evaluate_feature(pcd0, pcd1, feat0, feat1, trans_gth, search_voxel_size):
match_inds = get_matching_indices(pcd0, pcd1, trans_gth, search_voxel_size)
pcd_tree = o3d.geometry.KDTreeFlann(feat1)
dist = []
for ind in match_inds:
k, idx, _ = pcd_tree.search_knn_vector_xd(feat0.data[:, ind[0]], 1)
dist.append(
np.clip(np.power(pcd1.points[ind[1]] - pcd1.points[idx[0]], 2),
a_min=0.0,
a_max=1.0))
return np.mean(dist)
def valid_feat_ratio(pcd0, pcd1, feat0, feat1, trans_gth, thresh=0.1):
pcd0_copy = copy.deepcopy(pcd0)
pcd0_copy.transform(trans_gth)
inds = find_knn_cpu(feat0, feat1)
dist = np.sqrt(((np.array(pcd0_copy.points) - np.array(pcd1.points)[inds])**2).sum(1))
return np.mean(dist < thresh)
def evaluate_feature_3dmatch(pcd0, pcd1, feat0, feat1, trans_gth, inlier_thresh=0.1):
r"""Return the hit ratio (ratio of inlier correspondences and all correspondences).
    inlier_thresh is the inlier threshold in meters.
"""
if len(pcd0.points) < len(pcd1.points):
hit = valid_feat_ratio(pcd0, pcd1, feat0, feat1, trans_gth, inlier_thresh)
else:
hit = valid_feat_ratio(pcd1, pcd0, feat1, feat0, np.linalg.inv(trans_gth),
inlier_thresh)
return hit
def get_matching_matrix(source, target, trans, voxel_size, debug_mode):
source_copy = copy.deepcopy(source)
target_copy = copy.deepcopy(target)
source_copy.transform(trans)
pcd_tree = o3d.geometry.KDTreeFlann(target_copy)
matching_matrix = np.zeros((len(source_copy.points), len(target_copy.points)))
for i, point in enumerate(source_copy.points):
[k, idx, _] = pcd_tree.search_radius_vector_3d(point, voxel_size * 1.5)
if k >= 1:
            matching_matrix[i, idx[0]] = 1 # TODO: only the closest?
return matching_matrix
def get_random_transformation(pcd_input):
def rot_x(x):
out = np.zeros((3, 3))
c = math.cos(x)
s = math.sin(x)
out[0, 0] = 1
out[1, 1] = c
out[1, 2] = -s
out[2, 1] = s
out[2, 2] = c
return out
def rot_y(x):
out = np.zeros((3, 3))
c = math.cos(x)
s = math.sin(x)
out[0, 0] = c
out[0, 2] = s
out[1, 1] = 1
out[2, 0] = -s
out[2, 2] = c
return out
def rot_z(x):
out = np.zeros((3, 3))
c = math.cos(x)
s = math.sin(x)
out[0, 0] = c
out[0, 1] = -s
out[1, 0] = s
out[1, 1] = c
out[2, 2] = 1
return out
pcd_output = copy.deepcopy(pcd_input)
mean = np.mean(np.asarray(pcd_output.points), axis=0).transpose()
xyz = np.random.uniform(0, 2 * math.pi, 3)
R = np.dot(np.dot(rot_x(xyz[0]), rot_y(xyz[1])), rot_z(xyz[2]))
T = np.zeros((4, 4))
T[:3, :3] = R
T[:3, 3] = np.dot(-R, mean)
T[3, 3] = 1
return T
def draw_registration_result(source, target, transformation):
source_temp = copy.deepcopy(source)
target_temp = copy.deepcopy(target)
source_temp.paint_uniform_color([1, 0.706, 0])
target_temp.paint_uniform_color([0, 0.651, 0.929])
source_temp.transform(transformation)
o3d.visualization.draw_geometries([source_temp, target_temp])
|
py | 1a3cd8fcf3d1b73668631d4ac975e4b61fb61a89 | import sys
import socket
import threading
import time
server_ip_address = sys.argv[1]
server_port = int(sys.argv[2])
users = int(sys.argv[3])
type_of_test = sys.argv[4]
tests = 32
output_array = []
output = open(type_of_test+"_"+str(tests)+"_"+str(users)+".txt", 'w')
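# Usage sketch (hypothetical host/port; the script name is a placeholder):
# measure TCP connect RTT for 16 concurrent users against a server at
# 192.168.1.10 port 8080, labelling the run "tcp":
#
#   python rtt_client.py 192.168.1.10 8080 16 tcp
#
# Each of the 32 test rounds records one RTT in milliseconds (or "failed") per
# user; results are written to tcp_32_16.txt once all measurements are in.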
def writer(output_array):
    for line in output_array:
        output.write(line + "\n")
def worker(test, user, server_ip_address, server_port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
global output_array
try:
start = time.perf_counter()
s.connect((server_ip_address, server_port))
        s.close()
        # RTT measured in milliseconds
        RTT = (time.perf_counter() - start) * 1000
output_array.append(str(RTT))
except:
output_array.append("failed")
if len(output_array)==tests*users:
writer(output_array)
return
for test in range(tests):
threads = []
for user in range(users):
threads.append(threading.Thread(target = worker, args=(test, user, server_ip_address, server_port,)))
for thrd in range(len(threads)):
threads[thrd].start()
for thrd in range(len(threads)):
threads[thrd].join()
|
py | 1a3cd9305bcc7b337e07413b11d6c4c26a5db088 | from .smarticle import Smarticle
from .flashlight import Flashlight
from .helpers import time_to_steps, load_smarticles
|
py | 1a3cd953639dc7fa0e82ef27239cc637fc5dbfa0 | """
WSGI config for tutoria project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tutoria.settings')
application = get_wsgi_application()
|
py | 1a3cd9d2ade9c42a3d6de8340c68a2e968fc7fa2 | """This module contains the general information for FabricFcEstcCloud ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class FabricFcEstcCloudConsts:
pass
class FabricFcEstcCloud(ManagedObject):
"""This is FabricFcEstcCloud class."""
consts = FabricFcEstcCloudConsts()
naming_props = set([])
mo_meta = MoMeta("FabricFcEstcCloud", "fabricFcEstcCloud", "fc-estc", VersionMeta.Version141i, "InputOutput", 0x1f, [], ["admin", "ext-san-config", "ext-san-policy"], [u'fabricEp'], [u'fabricBHVlan', u'fabricFcEstc', u'fabricFcZoneProfile', u'fabricVsan', u'statsThresholdPolicy'], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version141i, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version141i, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"rn": "rn",
"sacl": "sacl",
"status": "status",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.sacl = None
self.status = None
ManagedObject.__init__(self, "FabricFcEstcCloud", parent_mo_or_dn, **kwargs)
|
py | 1a3cdba34372dc5b749aa4dd1fde4f302519d8f5 | import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.TSO/Serif_12/udhr_Latn.TSO_Serif_12.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
|
py | 1a3cdca49d20a60aea3a41168488062579747758 | """
Wrappers around common file operations:
    ● recursively collect all file paths under a directory into a list
    ● recursively remove empty directories
    ● delete files in batch
"""
import os
def get_all_files(targetDir):
"""
    Recursively collect all file paths under targetDir into a list.
    :param targetDir: root directory to walk
    :return: list of file paths
"""
files = []
listFiles = os.listdir(targetDir)
for i in range(0, len(listFiles)):
path = os.path.join(targetDir, listFiles[i])
if os.path.isdir(path):
files.extend(get_all_files(path))
elif os.path.isfile(path):
files.append(path)
return files
def remove_empty_dir(path):
"""
    Recursively remove empty directories under path, bottom-up.
    :param path: root directory to clean up
    :return: None
"""
for root, dirs, files in os.walk(path, topdown=False):
if not files and not dirs:
os.rmdir(root)
def delete_files(delete_list: list):
"""
    Delete the given files, ignoring any that no longer exist.
    :param delete_list: list of file paths to delete
    :return: None
"""
for file_path in delete_list:
try:
os.remove(file_path)
        except FileNotFoundError:
pass
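# Usage sketch (paths are placeholders): collect every file under a directory,
# delete the ones ending in ".tmp", then prune any directories left empty.
#
#   files = get_all_files('/data/cache')
#   delete_files([f for f in files if f.endswith('.tmp')])
#   remove_empty_dir('/data/cache')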
|
py | 1a3cddf173020cecb578cb2d1d801a7cdb94869e | #!/usr/bin/env python
# coding=utf-8
# Copyright (C) 2018 Copter Express Technologies
#
# Author: Oleg Kalachev <[email protected]>
#
# Distributed under MIT License (available at https://opensource.org/licenses/MIT).
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
import os
import math
import subprocess
import re
from collections import OrderedDict
import traceback
from threading import Event
import numpy
import rospy
import tf2_ros
import tf2_geometry_msgs
from pymavlink import mavutil
from std_srvs.srv import Trigger
from sensor_msgs.msg import BatteryState, Image, CameraInfo, NavSatFix, Imu, Range
from mavros_msgs.msg import State, OpticalFlowRad, Mavlink
from mavros_msgs.srv import ParamGet
from geometry_msgs.msg import PoseStamped, TwistStamped, PoseWithCovarianceStamped, Vector3Stamped
from visualization_msgs.msg import MarkerArray as VisualizationMarkerArray
import tf.transformations as t
from aruco_pose.msg import MarkerArray
from mavros import mavlink
# TODO: check attitude is present
# TODO: disk free space
# TODO: map, base_link, body
# TODO: rc service
# TODO: perform commander check, ekf2 status on PX4
# TODO: check if FCU params setter succeed
# TODO: selfcheck ROS service (with blacklists for checks)
rospy.init_node('selfcheck')
tf_buffer = tf2_ros.Buffer()
tf_listener = tf2_ros.TransformListener(tf_buffer)
failures = []
infos = []
current_check = None
def failure(text, *args):
msg = text % args
rospy.logwarn('%s: %s', current_check, msg)
failures.append(msg)
def info(text, *args):
msg = text % args
rospy.loginfo('%s: %s', current_check, msg)
infos.append(msg)
def check(name):
def inner(fn):
def wrapper(*args, **kwargs):
failures[:] = []
infos[:] = []
global current_check
current_check = name
try:
fn(*args, **kwargs)
except Exception as e:
traceback.print_exc()
rospy.logerr('%s: exception occurred', name)
return
if not failures and not infos:
rospy.loginfo('%s: OK', name)
return wrapper
return inner
param_get = rospy.ServiceProxy('mavros/param/get', ParamGet)
def get_param(name):
try:
res = param_get(param_id=name)
except rospy.ServiceException as e:
failure('%s: %s', name, str(e))
return None
if not res.success:
failure('unable to retrieve PX4 parameter %s', name)
else:
if res.value.integer != 0:
return res.value.integer
return res.value.real
recv_event = Event()
link = mavutil.mavlink.MAVLink('', 255, 1)
mavlink_pub = rospy.Publisher('mavlink/to', Mavlink, queue_size=1)
mavlink_recv = ''
def mavlink_message_handler(msg):
global mavlink_recv
if msg.msgid == 126:
mav_bytes_msg = mavlink.convert_to_bytes(msg)
mav_msg = link.decode(mav_bytes_msg)
mavlink_recv += ''.join(chr(x) for x in mav_msg.data[:mav_msg.count])
if 'nsh>' in mavlink_recv:
# Remove the last line, including newline before prompt
mavlink_recv = mavlink_recv[:mavlink_recv.find('nsh>') - 1]
recv_event.set()
mavlink_sub = rospy.Subscriber('mavlink/from', Mavlink, mavlink_message_handler)
# FIXME: not sleeping here still breaks things
rospy.sleep(0.5)
def mavlink_exec(cmd, timeout=3.0):
global mavlink_recv
mavlink_recv = ''
recv_event.clear()
if not cmd.endswith('\n'):
cmd += '\n'
msg = mavutil.mavlink.MAVLink_serial_control_message(
device=mavutil.mavlink.SERIAL_CONTROL_DEV_SHELL,
flags=mavutil.mavlink.SERIAL_CONTROL_FLAG_RESPOND | mavutil.mavlink.SERIAL_CONTROL_FLAG_EXCLUSIVE |
mavutil.mavlink.SERIAL_CONTROL_FLAG_MULTI,
timeout=3,
baudrate=0,
count=len(cmd),
data=map(ord, cmd.ljust(70, '\0')))
msg.pack(link)
ros_msg = mavlink.convert_to_rosmsg(msg)
mavlink_pub.publish(ros_msg)
recv_event.wait(timeout)
return mavlink_recv
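# Usage note: mavlink_exec() runs an nsh shell command on the FCU over the
# MAVLink SERIAL_CONTROL interface and returns its textual output, e.g.
# mavlink_exec('ver all') yields the firmware version listing parsed in
# check_fcu() below (assumes a connected flight controller).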
BOARD_ROTATIONS = {
0: 'no rotation',
1: 'yaw 45°',
2: 'yaw 90°',
3: 'yaw 135°',
4: 'yaw 180°',
5: 'yaw 225°',
6: 'yaw 270°',
7: 'yaw 315°',
8: 'roll 180°',
9: 'roll 180°, yaw 45°',
10: 'roll 180°, yaw 90°',
11: 'roll 180°, yaw 135°',
12: 'pitch 180°',
13: 'roll 180°, yaw 225°',
14: 'roll 180°, yaw 270°',
15: 'roll 180°, yaw 315°',
16: 'roll 90°',
17: 'roll 90°, yaw 45°',
18: 'roll 90°, yaw 90°',
19: 'roll 90°, yaw 135°',
20: 'roll 270°',
21: 'roll 270°, yaw 45°',
22: 'roll 270°, yaw 90°',
23: 'roll 270°, yaw 135°',
24: 'pitch 90°',
25: 'pitch 270°',
26: 'roll 270°, yaw 270°',
27: 'roll 180°, pitch 270°',
28: 'pitch 90°, yaw 180',
29: 'pitch 90°, roll 90°',
30: 'yaw 293°, pitch 68°, roll 90°',
31: 'pitch 90°, roll 270°',
32: 'pitch 9°, yaw 180°',
33: 'pitch 45°',
34: 'pitch 315°',
}
@check('FCU')
def check_fcu():
try:
state = rospy.wait_for_message('mavros/state', State, timeout=3)
if not state.connected:
failure('no connection to the FCU (check wiring)')
return
# Make sure the console is available to us
mavlink_exec('\n')
version_str = mavlink_exec('ver all')
if version_str == '':
info('no version data available from SITL')
r = re.compile(r'^FW (git tag|version): (v?\d\.\d\.\d.*)$')
is_clover_firmware = False
for ver_line in version_str.split('\n'):
match = r.search(ver_line)
if match is not None:
field, version = match.groups()
info('firmware %s: %s' % (field, version))
if 'clover' in version or 'clever' in version:
is_clover_firmware = True
if not is_clover_firmware:
failure('not running Clover PX4 firmware, https://clover.coex.tech/firmware')
est = get_param('SYS_MC_EST_GROUP')
if est == 1:
info('selected estimator: LPE')
fuse = get_param('LPE_FUSION')
if fuse & (1 << 4):
info('LPE_FUSION: land detector fusion is enabled')
else:
info('LPE_FUSION: land detector fusion is disabled')
if fuse & (1 << 7):
info('LPE_FUSION: barometer fusion is enabled')
else:
info('LPE_FUSION: barometer fusion is disabled')
mag_yaw_w = get_param('ATT_W_MAG')
if mag_yaw_w == 0:
info('magnetometer weight (ATT_W_MAG) is zero, better for indoor flights')
else:
info('magnetometer weight (ATT_W_MAG) is non-zero (%.2f), better for outdoor flights', mag_yaw_w)
elif est == 2:
info('selected estimator: EKF2')
else:
failure('unknown selected estimator: %s', est)
rot = get_param('SENS_BOARD_ROT')
if rot is not None:
try:
info('board rotation: %s', BOARD_ROTATIONS[rot])
except KeyError:
failure('unknown board rotation %s', rot)
cbrk_usb_chk = get_param('CBRK_USB_CHK')
if cbrk_usb_chk != 197848:
failure('set parameter CBRK_USB_CHK to 197848 for flying with USB connected')
try:
battery = rospy.wait_for_message('mavros/battery', BatteryState, timeout=3)
if not battery.cell_voltage:
failure('cell voltage is not available, https://clover.coex.tech/power')
else:
cell = battery.cell_voltage[0]
if cell > 4.3 or cell < 3.0:
failure('incorrect cell voltage: %.2f V, https://clover.coex.tech/power', cell)
elif cell < 3.7:
failure('critically low cell voltage: %.2f V, recharge battery', cell)
except rospy.ROSException:
failure('no battery state')
except rospy.ROSException:
failure('no MAVROS state (check wiring)')
def describe_direction(v):
if v.x > 0.9:
return 'forward'
elif v.x < - 0.9:
return 'backward'
elif v.y > 0.9:
return 'left'
elif v.y < -0.9:
return 'right'
elif v.z > 0.9:
return 'upward'
elif v.z < -0.9:
return 'downward'
else:
return None
def check_camera(name):
try:
img = rospy.wait_for_message(name + '/image_raw', Image, timeout=1)
except rospy.ROSException:
failure('%s: no images (is the camera connected properly?)', name)
return
try:
camera_info = rospy.wait_for_message(name + '/camera_info', CameraInfo, timeout=1)
except rospy.ROSException:
failure('%s: no calibration info', name)
return
if img.width != camera_info.width:
failure('%s: calibration width doesn\'t match image width (%d != %d)', name, camera_info.width, img.width)
if img.height != camera_info.height:
failure('%s: calibration height doesn\'t match image height (%d != %d))', name, camera_info.height, img.height)
try:
optical = Vector3Stamped()
optical.header.frame_id = img.header.frame_id
optical.vector.z = 1
cable = Vector3Stamped()
cable.header.frame_id = img.header.frame_id
cable.vector.y = 1
optical = describe_direction(tf_buffer.transform(optical, 'base_link').vector)
cable = describe_direction(tf_buffer.transform(cable, 'base_link').vector)
if not optical or not cable:
info('%s: custom camera orientation detected', name)
else:
info('camera is oriented %s, cable from camera goes %s', optical, cable)
except tf2_ros.TransformException:
failure('cannot transform from base_link to camera frame')
@check('Main camera')
def check_main_camera():
check_camera('main_camera')
def is_process_running(binary, exact=False, full=False):
try:
args = ['pgrep']
if exact:
args.append('-x') # match exactly with the command name
if full:
args.append('-f') # use full process name to match
args.append(binary)
subprocess.check_output(args)
return True
except subprocess.CalledProcessError:
return False
@check('ArUco markers')
def check_aruco():
if is_process_running('aruco_detect', full=True):
try:
info('aruco_detect/length = %g m', rospy.get_param('aruco_detect/length'))
except KeyError:
failure('aruco_detect/length parameter is not set')
known_tilt = rospy.get_param('aruco_detect/known_tilt', '')
if known_tilt == 'map':
known_tilt += ' (ALL markers are on the floor)'
elif known_tilt == 'map_flipped':
known_tilt += ' (ALL markers are on the ceiling)'
info('aruco_detector/known_tilt = %s', known_tilt)
try:
rospy.wait_for_message('aruco_detect/markers', MarkerArray, timeout=1)
except rospy.ROSException:
failure('no markers detection')
return
else:
info('aruco_detect is not running')
return
if is_process_running('aruco_map', full=True):
known_tilt = rospy.get_param('aruco_map/known_tilt', '')
if known_tilt == 'map':
known_tilt += ' (marker\'s map is on the floor)'
elif known_tilt == 'map_flipped':
known_tilt += ' (marker\'s map is on the ceiling)'
info('aruco_map/known_tilt = %s', known_tilt)
try:
visualization = rospy.wait_for_message('aruco_map/visualization', VisualizationMarkerArray, timeout=1)
info('map has %s markers', len(visualization.markers))
except:
failure('cannot read aruco_map/visualization topic')
try:
rospy.wait_for_message('aruco_map/pose', PoseWithCovarianceStamped, timeout=1)
except rospy.ROSException:
failure('no map detection')
else:
info('aruco_map is not running')
@check('Vision position estimate')
def check_vpe():
vis = None
try:
vis = rospy.wait_for_message('mavros/vision_pose/pose', PoseStamped, timeout=1)
except rospy.ROSException:
try:
vis = rospy.wait_for_message('mavros/mocap/pose', PoseStamped, timeout=1)
except rospy.ROSException:
failure('no VPE or MoCap messages')
# check if vpe_publisher is running
try:
subprocess.check_output(['pgrep', '-x', 'vpe_publisher'])
except subprocess.CalledProcessError:
return # it's not running, skip following checks
# check PX4 settings
est = get_param('SYS_MC_EST_GROUP')
if est == 1:
ext_yaw = get_param('ATT_EXT_HDG_M')
if ext_yaw != 1:
failure('vision yaw is disabled, change ATT_EXT_HDG_M parameter')
vision_yaw_w = get_param('ATT_W_EXT_HDG')
if vision_yaw_w == 0:
failure('vision yaw weight is zero, change ATT_W_EXT_HDG parameter')
else:
info('Vision yaw weight: %.2f', vision_yaw_w)
fuse = get_param('LPE_FUSION')
if not fuse & (1 << 2):
failure('vision position fusion is disabled, change LPE_FUSION parameter')
delay = get_param('LPE_VIS_DELAY')
if delay != 0:
failure('LPE_VIS_DELAY parameter is %s, but it should be zero', delay)
info('LPE_VIS_XY is %.2f m, LPE_VIS_Z is %.2f m', get_param('LPE_VIS_XY'), get_param('LPE_VIS_Z'))
elif est == 2:
fuse = get_param('EKF2_AID_MASK')
if not fuse & (1 << 3):
failure('vision position fusion is disabled, change EKF2_AID_MASK parameter')
if not fuse & (1 << 4):
failure('vision yaw fusion is disabled, change EKF2_AID_MASK parameter')
delay = get_param('EKF2_EV_DELAY')
if delay != 0:
failure('EKF2_EV_DELAY is %.2f, but it should be zero', delay)
info('EKF2_EVA_NOISE is %.3f, EKF2_EVP_NOISE is %.3f',
get_param('EKF2_EVA_NOISE'),
get_param('EKF2_EVP_NOISE'))
if not vis:
return
# check vision pose and estimated pose inconsistency
try:
pose = rospy.wait_for_message('mavros/local_position/pose', PoseStamped, timeout=1)
except:
return
horiz = math.hypot(vis.pose.position.x - pose.pose.position.x, vis.pose.position.y - pose.pose.position.y)
if horiz > 0.5:
failure('horizontal position inconsistency: %.2f m', horiz)
vert = vis.pose.position.z - pose.pose.position.z
if abs(vert) > 0.5:
failure('vertical position inconsistency: %.2f m', vert)
op = pose.pose.orientation
ov = vis.pose.orientation
yawp, _, _ = t.euler_from_quaternion((op.x, op.y, op.z, op.w), axes='rzyx')
yawv, _, _ = t.euler_from_quaternion((ov.x, ov.y, ov.z, ov.w), axes='rzyx')
yawdiff = yawp - yawv
yawdiff = math.degrees((yawdiff + 180) % 360 - 180)
if abs(yawdiff) > 8:
failure('yaw inconsistency: %.2f deg', yawdiff)
@check('Simple offboard node')
def check_simpleoffboard():
try:
rospy.wait_for_service('navigate', timeout=3)
rospy.wait_for_service('get_telemetry', timeout=3)
rospy.wait_for_service('land', timeout=3)
except rospy.ROSException:
failure('no simple_offboard services')
@check('IMU')
def check_imu():
try:
rospy.wait_for_message('mavros/imu/data', Imu, timeout=1)
except rospy.ROSException:
failure('no IMU data (check flight controller calibration)')
@check('Local position')
def check_local_position():
try:
pose = rospy.wait_for_message('mavros/local_position/pose', PoseStamped, timeout=1)
o = pose.pose.orientation
_, pitch, roll = t.euler_from_quaternion((o.x, o.y, o.z, o.w), axes='rzyx')
MAX_ANGLE = math.radians(2)
if abs(pitch) > MAX_ANGLE:
failure('pitch is %.2f deg; place copter horizontally or redo level horizon calib',
math.degrees(pitch))
if abs(roll) > MAX_ANGLE:
failure('roll is %.2f deg; place copter horizontally or redo level horizon calib',
math.degrees(roll))
except rospy.ROSException:
failure('no local position')
@check('Velocity estimation')
def check_velocity():
try:
velocity = rospy.wait_for_message('mavros/local_position/velocity_local', TwistStamped, timeout=1)
horiz = math.hypot(velocity.twist.linear.x, velocity.twist.linear.y)
vert = velocity.twist.linear.z
if abs(horiz) > 0.1:
failure('horizontal velocity estimation is %.2f m/s; is copter staying still?' % horiz)
if abs(vert) > 0.1:
failure('vertical velocity estimation is %.2f m/s; is copter staying still?' % vert)
velocity = rospy.wait_for_message('mavros/local_position/velocity_body', TwistStamped, timeout=1)
angular = velocity.twist.angular
ANGULAR_VELOCITY_LIMIT = 0.1
        if abs(angular.x) > ANGULAR_VELOCITY_LIMIT:
            failure('roll rate estimation is %.2f rad/s (%.2f deg/s); is copter staying still?',
                    angular.x, math.degrees(angular.x))
        if abs(angular.y) > ANGULAR_VELOCITY_LIMIT:
            failure('pitch rate estimation is %.2f rad/s (%.2f deg/s); is copter staying still?',
                    angular.y, math.degrees(angular.y))
        if abs(angular.z) > ANGULAR_VELOCITY_LIMIT:
            failure('yaw rate estimation is %.2f rad/s (%.2f deg/s); is copter staying still?',
                    angular.z, math.degrees(angular.z))
except rospy.ROSException:
failure('no velocity estimation')
@check('Global position (GPS)')
def check_global_position():
try:
rospy.wait_for_message('mavros/global_position/global', NavSatFix, timeout=1)
except rospy.ROSException:
info('no global position')
@check('Optical flow')
def check_optical_flow():
    # TODO: check FPS!
try:
rospy.wait_for_message('mavros/px4flow/raw/send', OpticalFlowRad, timeout=0.5)
# check PX4 settings
rot = get_param('SENS_FLOW_ROT')
if rot != 0:
failure('SENS_FLOW_ROT parameter is %s, but it should be zero', rot)
est = get_param('SYS_MC_EST_GROUP')
if est == 1:
fuse = get_param('LPE_FUSION')
if not fuse & (1 << 1):
failure('optical flow fusion is disabled, change LPE_FUSION parameter')
            if not fuse & (1 << 6):
                failure('flow gyro compensation is disabled, change LPE_FUSION parameter')
scale = get_param('LPE_FLW_SCALE')
if not numpy.isclose(scale, 1.0):
failure('LPE_FLW_SCALE parameter is %.2f, but it should be 1.0', scale)
info('LPE_FLW_QMIN is %s, LPE_FLW_R is %.4f, LPE_FLW_RR is %.4f, SENS_FLOW_MINHGT is %.3f, SENS_FLOW_MAXHGT is %.3f',
get_param('LPE_FLW_QMIN'),
get_param('LPE_FLW_R'),
get_param('LPE_FLW_RR'),
get_param('SENS_FLOW_MINHGT'),
get_param('SENS_FLOW_MAXHGT'))
elif est == 2:
fuse = get_param('EKF2_AID_MASK')
if not fuse & (1 << 1):
failure('optical flow fusion is disabled, change EKF2_AID_MASK parameter')
delay = get_param('EKF2_OF_DELAY')
if delay != 0:
failure('EKF2_OF_DELAY is %.2f, but it should be zero', delay)
info('EKF2_OF_QMIN is %s, EKF2_OF_N_MIN is %.4f, EKF2_OF_N_MAX is %.4f, SENS_FLOW_MINHGT is %.3f, SENS_FLOW_MAXHGT is %.3f',
get_param('EKF2_OF_QMIN'),
get_param('EKF2_OF_N_MIN'),
get_param('EKF2_OF_N_MAX'),
get_param('SENS_FLOW_MINHGT'),
get_param('SENS_FLOW_MAXHGT'))
except rospy.ROSException:
failure('no optical flow data (from Raspberry)')
@check('Rangefinder')
def check_rangefinder():
# TODO: check FPS!
rng = False
try:
rospy.wait_for_message('rangefinder/range', Range, timeout=4)
rng = True
except rospy.ROSException:
failure('no rangefinder data from Raspberry')
try:
rospy.wait_for_message('mavros/distance_sensor/rangefinder', Range, timeout=4)
rng = True
except rospy.ROSException:
failure('no rangefinder data from PX4')
if not rng:
return
est = get_param('SYS_MC_EST_GROUP')
if est == 1:
fuse = get_param('LPE_FUSION')
if not fuse & (1 << 5):
info('"pub agl as lpos down" in LPE_FUSION is disabled, NOT operating over flat surface')
else:
info('"pub agl as lpos down" in LPE_FUSION is enabled, operating over flat surface')
elif est == 2:
hgt = get_param('EKF2_HGT_MODE')
if hgt != 2:
info('EKF2_HGT_MODE != Range sensor, NOT operating over flat surface')
else:
info('EKF2_HGT_MODE = Range sensor, operating over flat surface')
aid = get_param('EKF2_RNG_AID')
if aid != 1:
info('EKF2_RNG_AID != 1, range sensor aiding disabled')
else:
info('EKF2_RNG_AID = 1, range sensor aiding enabled')
@check('Boot duration')
def check_boot_duration():
output = subprocess.check_output('systemd-analyze')
r = re.compile(r'([\d\.]+)s\s*$', flags=re.MULTILINE)
duration = float(r.search(output).groups()[0])
if duration > 15:
failure('long Raspbian boot duration: %ss (systemd-analyze for analyzing)', duration)
@check('CPU usage')
def check_cpu_usage():
WHITELIST = 'nodelet',
CMD = "top -n 1 -b -i | tail -n +8 | awk '{ printf(\"%-8s\\t%-8s\\t%-8s\\n\", $1, $9, $12); }'"
output = subprocess.check_output(CMD, shell=True)
processes = output.split('\n')
for process in processes:
if not process:
continue
pid, cpu, cmd = process.split('\t')
if cmd.strip() not in WHITELIST and float(cpu) > 30:
failure('high CPU usage (%s%%) detected: %s (PID %s)',
cpu.strip(), cmd.strip(), pid.strip())
@check('clover.service')
def check_clover_service():
try:
output = subprocess.check_output('systemctl show -p ActiveState --value clover.service'.split(),
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
failure('systemctl returned %s: %s', e.returncode, e.output)
return
if 'inactive' in output:
failure('service is not running, try sudo systemctl restart clover')
return
elif 'failed' in output:
failure('service failed to run, check your launch-files')
r = re.compile(r'^(.*)\[(FATAL|ERROR)\] \[\d+.\d+\]: (.*?)(\x1b(.*))?$')
error_count = OrderedDict()
try:
for line in open('/tmp/clover.err', 'r'):
node_error = r.search(line)
if node_error:
msg = node_error.groups()[1] + ': ' + node_error.groups()[2]
if msg in error_count:
error_count[msg] += 1
else:
error_count.update({msg: 1})
else:
error_count.update({line.strip(): 1})
for error in error_count:
if error_count[error] == 1:
failure(error)
else:
failure('%s (%d)', error, error_count[error])
except IOError as e:
failure('%s', e)
@check('Image')
def check_image():
try:
info('version: %s', open('/etc/clover_version').read().strip())
except IOError:
info('no /etc/clover_version file, not the Clover image?')
@check('Preflight status')
def check_preflight_status():
# Make sure the console is available to us
mavlink_exec('\n')
cmdr_output = mavlink_exec('commander check')
if cmdr_output == '':
failure('no data from FCU')
return
cmdr_lines = cmdr_output.split('\n')
r = re.compile(r'^(.*)(Preflight|Prearm) check: (.*)')
for line in cmdr_lines:
if 'WARN' in line:
failure(line[line.find(']') + 2:])
continue
match = r.search(line)
if match is not None:
check_status = match.groups()[2]
if check_status != 'OK':
failure(' '.join([match.groups()[1], 'check:', check_status]))
@check('Network')
def check_network():
ros_hostname = os.environ.get('ROS_HOSTNAME', '').strip()
if not ros_hostname:
failure('no ROS_HOSTNAME is set')
elif ros_hostname.endswith('.local'):
# using mdns hostname
hosts = open('/etc/hosts', 'r')
for line in hosts:
parts = line.split()
if len(parts) < 2:
continue
ip = parts.pop(0).split('.')
if ip[0] == '127': # loopback ip
if ros_hostname in parts:
break
else:
failure('not found %s in /etc/hosts, ROS will malfunction if network interfaces are down, https://clover.coex.tech/hostname', ros_hostname)
@check('RPi health')
def check_rpi_health():
# `vcgencmd get_throttled` output codes taken from
# https://github.com/raspberrypi/documentation/blob/JamesH65-patch-vcgencmd-vcdbg-docs/raspbian/applications/vcgencmd.md#get_throttled
# TODO: support more base platforms?
FLAG_UNDERVOLTAGE_NOW = 0x1
FLAG_FREQ_CAP_NOW = 0x2
FLAG_THROTTLING_NOW = 0x4
FLAG_THERMAL_LIMIT_NOW = 0x8
FLAG_UNDERVOLTAGE_OCCURRED = 0x10000
FLAG_FREQ_CAP_OCCURRED = 0x20000
FLAG_THROTTLING_OCCURRED = 0x40000
FLAG_THERMAL_LIMIT_OCCURRED = 0x80000
FLAG_DESCRIPTIONS = (
(FLAG_THROTTLING_NOW, 'system throttled to prevent damage'),
(FLAG_THROTTLING_OCCURRED, 'your system is susceptible to throttling'),
(FLAG_UNDERVOLTAGE_NOW, 'not enough power for onboard computer, flight inadvisable'),
(FLAG_UNDERVOLTAGE_OCCURRED, 'power supply cannot provide enough power'),
(FLAG_FREQ_CAP_NOW, 'CPU reached thermal limit and is throttled now'),
(FLAG_FREQ_CAP_OCCURRED, 'CPU may overheat during drone operation, consider additional cooling'),
(FLAG_THERMAL_LIMIT_NOW, 'CPU reached soft thermal limit, frequency reduced'),
(FLAG_THERMAL_LIMIT_OCCURRED, 'CPU may reach soft thermal limit, consider additional cooling'),
)
try:
# vcgencmd outputs a single string in a form of
# <parameter>=<value>
# In case of `get_throttled`, <value> is a hexadecimal number
# with some of the FLAGs OR'ed together
output = subprocess.check_output(['vcgencmd', 'get_throttled'])
except OSError:
failure('could not call vcgencmd binary; not a Raspberry Pi?')
return
throttle_mask = int(output.split('=')[1], base=16)
for flag_description in FLAG_DESCRIPTIONS:
if throttle_mask & flag_description[0]:
failure(flag_description[1])
@check('Board')
def check_board():
try:
info('%s', open('/proc/device-tree/model').readline())
except IOError:
info('could not open /proc/device-tree/model, not a Raspberry Pi?')
def selfcheck():
check_image()
check_board()
check_clover_service()
check_network()
check_fcu()
check_imu()
check_local_position()
check_velocity()
check_global_position()
check_preflight_status()
check_main_camera()
check_aruco()
check_simpleoffboard()
check_optical_flow()
check_vpe()
check_rangefinder()
check_rpi_health()
check_cpu_usage()
check_boot_duration()
if __name__ == '__main__':
rospy.loginfo('Performing selfcheck...')
selfcheck()
|
py | 1a3cde08abfdced670d629798a01abb563885e00 | #!/usr/bin/env python
# Copyright 2012 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Uses different APIs to touch a file."""
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(
__file__.decode(sys.getfilesystemencoding())))
def main():
  print('Only check whether a file exists; do not open it.')
assert len(sys.argv) == 2
path = os.path.join(BASE_DIR, 'test_file.txt')
command = sys.argv[1]
if command == 'access':
return not os.access(path, os.R_OK)
if command == 'isfile':
return not os.path.isfile(path)
if command == 'stat':
return not os.stat(path).st_size
return 1
if __name__ == '__main__':
sys.exit(main())
|
py | 1a3cdf368652300451285105f40d2027091f0f2f | """
Copyright (c) 2019-2022, Zihao Ding/Carnegie Mellon University
All rights reserved.
********************************************************************
Project: imgprocess.py
MODULE: util
Author: Zihao Ding, Carnegie Mellon University
Brief:
-------------
Image processing func
Date:
-------------
2022/03/17 ZD 1.0 public version
"""
from copy import deepcopy
from math import exp, floor, log10, ceil
import cv2
import matplotlib.pyplot as plt
import numpy as np
import scipy
from PIL import Image
# Adaptive histogram equalization
def clahe(img, limit=10, tileGridSize=(10, 10)):
'''
Contrast Limited Adaptive Histogram Equalization \\
Refer to https://docs.opencv.org/3.4/d5/daf/tutorial_py_histogram_equalization.html
Input:
------
img: images, 3darray, shape: (n, h, w)
limit: clipLimit, int, default: 10
tileGridSize: grid size, (int, int), default: (10, 10)
Output:
------
img: images after applying clahe, 3darray, shape: (n, h, w)
'''
clahe = cv2.createCLAHE(clipLimit=limit, tileGridSize=tileGridSize)
temp = np.zeros_like(img)
for i in range(img.shape[0]):
temp[i] = clahe.apply(img[i])
return temp
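# Usage sketch (file name is a placeholder): the functions in this module expect
# uint8 image stacks of shape (n, h, w), e.g.
#
#   imgs = np.load('patterns.npy')                      # (n, h, w), dtype uint8
#   imgs = clahe(imgs, limit=10, tileGridSize=(10, 10))
#   imgs = circularmask(imgs)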
# circular mask
def circularmask(img):
'''
Apply largest circular mask
Input:
------
img: images, 3darray, shape: (n, h, w)
Output:
------
img: images with circular mask applied, 3darray, shape: (n, h, w)
'''
center = [int(img.shape[2]/2), int(img.shape[1]/2)]
radius = min(center[0], center[1], img.shape[2]-center[0], img.shape[1]-center[1])
Y, X = np.ogrid[:img.shape[1], :img.shape[2]]
dist_from_center = np.sqrt((X - center[0])**2 + (Y-center[1])**2)
mask_array = dist_from_center <= radius
temp = img
temp[:,~mask_array] = 0
return temp
# square mask
def squaremask(img):
'''
Apply largest square mask inside image with circular mask \\
Will change the input size
Input:
------
img: images, 3darray, shape: (n, h, w)
Output:
------
img: images with square mask applied, 3darray, shape: (n, 0.707*min(h, w), 0.707*min(h, w))
'''
n = img.shape[1]
start = ceil(n*0.5*(1.-0.5**0.5))
end = floor(n-n*0.5*(1.-0.5**0.5))
return img[:,start-1:end,start-1:end]
def poisson_noise(img, c=1.):
'''
Apply Poisson noise
Input:
------
img: images, 3darray, shape: (n, h, w)
c: inverse of noise level (smaller c brings higher noise level), float, default: 1.
Output:
------
img: images with Poisson noise
'''
temp = np.zeros_like(img)
for i in range(img.shape[0]):
vals = len(np.unique(img[i]))
vals = 2 ** np.ceil(np.log2(vals))
temp[i] = np.random.poisson(img[i] * c * vals) / float(vals) / c
return temp
def bac(img, a=1, b=0):
'''
Adjust brightness and contrast
Input:
------
img: images, 3darray, shape: (n, h, w)
a: contrast ratio, float, default: 1
b: brightness offset, float, default: 0
Output:
------
img: images, 3darray, shape: (n, h, w)
'''
temp = np.clip(a*img+b, 0., 255.)
temp = temp.astype(np.uint8)
return temp
def gamma_trans(img, gamma):
'''
Apply Gamma correction \\
    If gamma < 1:
        the whole image becomes brighter and the contrast of dark regions is increased;
        vice versa for gamma > 1.
Input:
------
img: images, 3darray, shape: (n, h, w)
gamma: Gamma value, float
Output:
------
img: images, 3darray, shape: (n, h, w)
'''
gamma_table = [np.power(x/255.0, gamma)*255.0 for x in range(256)]
gamma_table = np.round(np.array(gamma_table)).astype(np.uint8)
temp = np.zeros_like(img)
for i in range(img.shape[0]):
temp[i] = cv2.LUT(img[i], gamma_table)
temp = temp.astype(np.uint8)
return temp
def autogamma(img):
'''
Apply automatic Gamma correction (gamma = 2.2)
Input:
------
img: images, 3darray, shape: (n, h, w)
Output:
------
img: images, 3darray, shape: (n, h, w)
'''
meanGrayVal = np.sum(img) / np.prod(img.shape)
gamma = log10(1/2.2) / log10(meanGrayVal/255.0)
return gamma_trans(img, gamma) |
py | 1a3cdfcae3280c5371f13f8d0ef2caf520736a2f | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unittests for fake_pathlib.
As most of fake_pathlib is a wrapper around fake_filesystem methods, the tests
are there mostly to ensure basic functionality.
Note that many of the tests are directly taken from examples in the
python docs.
"""
import errno
import os
import pathlib
import stat
import sys
import unittest
from pyfakefs.fake_filesystem import is_root
from pyfakefs import fake_pathlib, fake_filesystem
from pyfakefs.helpers import IS_PYPY
from pyfakefs.tests.test_utils import RealFsTestCase
is_windows = sys.platform == 'win32'
class RealPathlibTestCase(RealFsTestCase):
def __init__(self, methodName='runTest'):
super(RealPathlibTestCase, self).__init__(methodName)
self.pathlib = pathlib
self.path = None
def setUp(self):
super().setUp()
if not self.use_real_fs():
self.pathlib = fake_pathlib.FakePathlibModule(self.filesystem)
self.path = self.pathlib.Path
class FakePathlibInitializationTest(RealPathlibTestCase):
def test_initialization_type(self):
"""Make sure tests for class type will work"""
path = self.path('/test')
if is_windows:
self.assertTrue(isinstance(path, self.pathlib.WindowsPath))
self.assertTrue(isinstance(path, self.pathlib.PureWindowsPath))
self.assertTrue(self.pathlib.PurePosixPath())
# in fake fs, we allow to use the other OS implementation
if self.use_real_fs():
with self.assertRaises(NotImplementedError):
self.pathlib.PosixPath()
else:
self.assertTrue(self.pathlib.PosixPath())
else:
self.assertTrue(isinstance(path, self.pathlib.PosixPath))
self.assertTrue(isinstance(path, self.pathlib.PurePosixPath))
self.assertTrue(self.pathlib.PureWindowsPath())
if self.use_real_fs():
with self.assertRaises(NotImplementedError):
self.pathlib.WindowsPath()
else:
self.assertTrue(self.pathlib.WindowsPath())
def test_init_with_segments(self):
"""Basic initialization tests - taken from pathlib.Path documentation
"""
self.assertEqual(self.path('/', 'foo', 'bar', 'baz'),
self.path('/foo/bar/baz'))
self.assertEqual(self.path(), self.path('.'))
self.assertEqual(self.path(self.path('foo'), self.path('bar')),
self.path('foo/bar'))
self.assertEqual(self.path('/etc') / 'init.d' / 'reboot',
self.path('/etc/init.d/reboot'))
def test_init_collapse(self):
"""Tests for collapsing path during initialization.
Taken from pathlib.PurePath documentation.
"""
self.assertEqual(self.path('foo//bar'), self.path('foo/bar'))
self.assertEqual(self.path('foo/./bar'), self.path('foo/bar'))
self.assertNotEqual(self.path('foo/../bar'), self.path('foo/bar'))
self.assertEqual(self.path('/etc', '/usr', 'lib64'),
self.path('/usr/lib64'))
def test_path_parts(self):
sep = self.os.path.sep
path = self.path(sep + self.os.path.join('foo', 'bar', 'setup.py'))
self.assertEqual(path.parts, (sep, 'foo', 'bar', 'setup.py'))
self.assertEqual(path.drive, '')
self.assertEqual(path.root, sep)
self.assertEqual(path.anchor, sep)
self.assertEqual(path.name, 'setup.py')
self.assertEqual(path.stem, 'setup')
self.assertEqual(path.suffix, '.py')
self.assertEqual(path.parent,
self.path(sep + self.os.path.join('foo', 'bar')))
self.assertEqual(path.parents[0],
self.path(sep + self.os.path.join('foo', 'bar')))
self.assertEqual(path.parents[1], self.path(sep + 'foo'))
self.assertEqual(path.parents[2], self.path(sep))
@unittest.skipIf(is_windows, 'POSIX specific behavior')
def test_is_absolute_posix(self):
self.assertTrue(self.path('/a/b').is_absolute())
self.assertFalse(self.path('a/b').is_absolute())
self.assertFalse(self.path('d:/b').is_absolute())
@unittest.skipIf(not is_windows, 'Windows specific behavior')
def test_is_absolute_windows(self):
self.assertFalse(self.path('/a/b').is_absolute())
self.assertFalse(self.path('a/b').is_absolute())
self.assertTrue(self.path('d:/b').is_absolute())
class RealPathlibInitializationTest(FakePathlibInitializationTest):
def use_real_fs(self):
return True
@unittest.skipIf(not is_windows, 'Windows specific behavior')
class FakePathlibInitializationWithDriveTest(RealPathlibTestCase):
def test_init_with_segments(self):
"""Basic initialization tests - taken from pathlib.Path
documentation"""
self.assertEqual(self.path('c:/', 'foo', 'bar', 'baz'),
self.path('c:/foo/bar/baz'))
self.assertEqual(self.path(), self.path('.'))
self.assertEqual(self.path(self.path('foo'), self.path('bar')),
self.path('foo/bar'))
self.assertEqual(self.path('c:/Users') / 'john' / 'data',
self.path('c:/Users/john/data'))
def test_init_collapse(self):
"""Tests for collapsing path during initialization.
Taken from pathlib.PurePath documentation.
"""
self.assertEqual(self.path('c:/Windows', 'd:bar'),
self.path('d:bar'))
self.assertEqual(self.path('c:/Windows', '/Program Files'),
self.path('c:/Program Files'))
def test_path_parts(self):
path = self.path(
self.os.path.join('d:', 'python scripts', 'setup.py'))
self.assertEqual(path.parts, ('d:', 'python scripts', 'setup.py'))
self.assertEqual(path.drive, 'd:')
self.assertEqual(path.root, '')
self.assertEqual(path.anchor, 'd:')
self.assertEqual(path.name, 'setup.py')
self.assertEqual(path.stem, 'setup')
self.assertEqual(path.suffix, '.py')
self.assertEqual(path.parent,
self.path(
self.os.path.join('d:', 'python scripts')))
self.assertEqual(path.parents[0],
self.path(
self.os.path.join('d:', 'python scripts')))
self.assertEqual(path.parents[1], self.path('d:'))
    @unittest.skipIf(not is_windows, 'Windows-specific behavior')
def test_is_absolute(self):
self.assertTrue(self.path('c:/a/b').is_absolute())
self.assertFalse(self.path('/a/b').is_absolute())
self.assertFalse(self.path('c:').is_absolute())
self.assertTrue(self.path('//some/share').is_absolute())
class RealPathlibInitializationWithDriveTest(
FakePathlibInitializationWithDriveTest):
def use_real_fs(self):
return True
class FakePathlibPurePathTest(RealPathlibTestCase):
"""Tests functionality present in PurePath class."""
@unittest.skipIf(is_windows, 'POSIX specific behavior')
def test_is_reserved_posix(self):
self.assertFalse(self.path('/dev').is_reserved())
self.assertFalse(self.path('/').is_reserved())
self.assertFalse(self.path('COM1').is_reserved())
self.assertFalse(self.path('nul.txt').is_reserved())
@unittest.skipIf(not is_windows, 'Windows specific behavior')
def test_is_reserved_windows(self):
self.check_windows_only()
self.assertFalse(self.path('/dev').is_reserved())
self.assertFalse(self.path('/').is_reserved())
self.assertTrue(self.path('COM1').is_reserved())
self.assertTrue(self.path('nul.txt').is_reserved())
def test_joinpath(self):
self.assertEqual(self.path('/etc').joinpath('passwd'),
self.path('/etc/passwd'))
self.assertEqual(self.path('/etc').joinpath(self.path('passwd')),
self.path('/etc/passwd'))
self.assertEqual(self.path('/foo').joinpath('bar', 'baz'),
self.path('/foo/bar/baz'))
def test_joinpath_drive(self):
self.check_windows_only()
self.assertEqual(self.path('c:').joinpath('/Program Files'),
self.path('c:/Program Files'))
def test_match(self):
self.assertTrue(self.path('a/b.py').match('*.py'))
self.assertTrue(self.path('/a/b/c.py').match('b/*.py'))
self.assertFalse(self.path('/a/b/c.py').match('a/*.py'))
self.assertTrue(self.path('/a.py').match('/*.py'))
self.assertFalse(self.path('a/b.py').match('/*.py'))
def test_relative_to(self):
self.assertEqual(self.path('/etc/passwd').relative_to('/'),
self.path('etc/passwd'))
self.assertEqual(self.path('/etc/passwd').relative_to('/'),
self.path('etc/passwd'))
with self.assertRaises(ValueError):
self.path('passwd').relative_to('/usr')
@unittest.skipIf(sys.version_info < (3, 9),
'is_relative_to new in Python 3.9')
def test_is_relative_to(self):
path = self.path('/etc/passwd')
self.assertTrue(path.is_relative_to('/etc'))
self.assertFalse(path.is_relative_to('/src'))
def test_with_name(self):
self.check_windows_only()
self.assertEqual(
self.path('c:/Downloads/pathlib.tar.gz').with_name('setup.py'),
self.path('c:/Downloads/setup.py'))
with self.assertRaises(ValueError):
self.path('c:/').with_name('setup.py')
def test_with_suffix(self):
self.assertEqual(
self.path('c:/Downloads/pathlib.tar.gz').with_suffix('.bz2'),
self.path('c:/Downloads/pathlib.tar.bz2'))
self.assertEqual(self.path('README').with_suffix('.txt'),
self.path('README.txt'))
class RealPathlibPurePathTest(FakePathlibPurePathTest):
def use_real_fs(self):
return True
class FakePathlibFileObjectPropertyTest(RealPathlibTestCase):
def setUp(self):
super(FakePathlibFileObjectPropertyTest, self).setUp()
self.file_path = self.make_path('home', 'jane', 'test.py')
self.create_file(self.file_path, contents=b'a' * 100)
self.create_dir(self.make_path('home', 'john'))
try:
self.skip_if_symlink_not_supported()
except unittest.SkipTest:
return
self.create_symlink(self.make_path('john'),
self.make_path('home', 'john'))
self.file_link_path = self.make_path('test.py')
self.create_symlink(self.file_link_path, self.file_path)
self.create_symlink(self.make_path('broken_dir_link'),
self.make_path('home', 'none'))
self.create_symlink(self.make_path('broken_file_link'),
self.make_path('home', 'none', 'test.py'))
def test_exists(self):
self.skip_if_symlink_not_supported()
self.assertTrue(self.path(self.file_path).exists())
self.assertTrue(self.path(
self.make_path('home', 'jane')).exists())
self.assertFalse(self.path(
self.make_path('home', 'jane', 'test')).exists())
self.assertTrue(self.path(
self.make_path('john')).exists())
self.assertTrue(self.path(
self.file_link_path).exists())
self.assertFalse(self.path(
self.make_path('broken_dir_link')).exists())
self.assertFalse(self.path(
self.make_path('broken_file_link')).exists())
def test_is_dir(self):
self.skip_if_symlink_not_supported()
self.assertFalse(self.path(
self.file_path).is_dir())
self.assertTrue(self.path(
self.make_path('home/jane')).is_dir())
self.assertTrue(self.path(
self.make_path('john')).is_dir())
self.assertFalse(self.path(
self.file_link_path).is_dir())
self.assertFalse(self.path(
self.make_path('broken_dir_link')).is_dir())
self.assertFalse(self.path(
self.make_path('broken_file_link')).is_dir())
def test_is_file(self):
self.skip_if_symlink_not_supported()
self.assertTrue(self.path(
self.make_path('home/jane/test.py')).is_file())
self.assertFalse(self.path(
self.make_path('home/jane')).is_file())
self.assertFalse(self.path(
self.make_path('john')).is_file())
self.assertTrue(self.path(
self.file_link_path).is_file())
self.assertFalse(self.path(
self.make_path('broken_dir_link')).is_file())
self.assertFalse(self.path(
self.make_path('broken_file_link')).is_file())
def test_is_symlink(self):
self.skip_if_symlink_not_supported()
self.assertFalse(self.path(
self.make_path('home/jane/test.py')).is_symlink())
self.assertFalse(self.path(
self.make_path('home/jane')).is_symlink())
self.assertTrue(self.path(
self.make_path('john')).is_symlink())
self.assertTrue(self.path(
self.file_link_path).is_symlink())
self.assertTrue(self.path(
self.make_path('broken_dir_link')).is_symlink())
self.assertTrue(self.path(
self.make_path('broken_file_link')).is_symlink())
def test_stat(self):
self.skip_if_symlink_not_supported()
file_stat = self.os.stat(self.file_path)
stat_result = self.path(self.file_link_path).stat()
self.assertFalse(stat_result.st_mode & stat.S_IFDIR)
self.assertTrue(stat_result.st_mode & stat.S_IFREG)
self.assertEqual(stat_result.st_ino, file_stat.st_ino)
self.assertEqual(stat_result.st_size, 100)
self.assertEqual(stat_result.st_mtime, file_stat.st_mtime)
self.assertEqual(stat_result[stat.ST_MTIME],
int(file_stat.st_mtime))
def check_lstat(self, expected_size):
self.skip_if_symlink_not_supported()
link_stat = self.os.lstat(self.file_link_path)
stat_result = self.path(self.file_link_path).lstat()
self.assertTrue(stat_result.st_mode & stat.S_IFREG)
self.assertTrue(stat_result.st_mode & stat.S_IFLNK)
self.assertEqual(stat_result.st_ino, link_stat.st_ino)
self.assertEqual(stat_result.st_size, expected_size)
self.assertEqual(stat_result.st_mtime, link_stat.st_mtime)
@unittest.skipIf(is_windows, 'POSIX specific behavior')
def test_lstat_posix(self):
self.check_lstat(len(self.file_path))
@unittest.skipIf(not is_windows, 'Windows specific behavior')
def test_lstat_windows(self):
self.skip_if_symlink_not_supported()
self.check_lstat(0)
@unittest.skipIf(is_windows, 'Linux specific behavior')
def test_chmod(self):
self.check_linux_only()
file_stat = self.os.stat(self.file_path)
self.assertEqual(file_stat.st_mode, stat.S_IFREG | 0o666)
link_stat = self.os.lstat(self.file_link_path)
# we get stat.S_IFLNK | 0o755 under MacOs
self.assertEqual(link_stat.st_mode, stat.S_IFLNK | 0o777)
def test_lchmod(self):
self.skip_if_symlink_not_supported()
file_stat = self.os.stat(self.file_path)
link_stat = self.os.lstat(self.file_link_path)
if not hasattr(os, "lchmod"):
with self.assertRaises(NotImplementedError):
self.path(self.file_link_path).lchmod(0o444)
else:
self.path(self.file_link_path).lchmod(0o444)
self.assertEqual(file_stat.st_mode, stat.S_IFREG | 0o666)
# the exact mode depends on OS and Python version
self.assertEqual(link_stat.st_mode & 0o777700,
stat.S_IFLNK | 0o700)
@unittest.skipIf(sys.version_info < (3, 10),
"follow_symlinks argument new in Python 3.10")
def test_chmod_no_followsymlinks(self):
self.skip_if_symlink_not_supported()
file_stat = self.os.stat(self.file_path)
link_stat = self.os.lstat(self.file_link_path)
if os.chmod not in os.supports_follow_symlinks or IS_PYPY:
with self.assertRaises(NotImplementedError):
self.path(self.file_link_path).chmod(0o444,
follow_symlinks=False)
else:
self.path(self.file_link_path).chmod(0o444, follow_symlinks=False)
self.assertEqual(file_stat.st_mode, stat.S_IFREG | 0o666)
# the exact mode depends on OS and Python version
self.assertEqual(link_stat.st_mode & 0o777700,
stat.S_IFLNK | 0o700)
def test_resolve(self):
self.create_dir(self.make_path('antoine', 'docs'))
self.create_file(self.make_path('antoine', 'setup.py'))
self.os.chdir(self.make_path('antoine'))
# use real path to handle symlink /var to /private/var in MacOs
self.assert_equal_paths(self.path().resolve(),
self.path(self.os.path.realpath(
self.make_path('antoine'))))
self.assert_equal_paths(
self.path(
self.os.path.join('docs', '..', 'setup.py')).resolve(),
self.path(
self.os.path.realpath(
self.make_path('antoine', 'setup.py'))))
def test_stat_file_in_unreadable_dir(self):
self.check_posix_only()
dir_path = self.make_path('some_dir')
file_path = self.os.path.join(dir_path, 'some_file')
self.create_file(file_path)
self.os.chmod(dir_path, 0o000)
if not is_root():
self.assert_raises_os_error(
errno.EACCES, self.path(file_path).stat)
else:
self.assertEqual(0, self.path(file_path).stat().st_size)
def test_iterdir_in_unreadable_dir(self):
self.check_posix_only()
dir_path = self.make_path('some_dir')
file_path = self.os.path.join(dir_path, 'some_file')
self.create_file(file_path)
self.os.chmod(dir_path, 0o000)
iter = self.path(dir_path).iterdir()
if not is_root():
self.assert_raises_os_error(errno.EACCES, list, iter)
else:
path = str(list(iter)[0])
self.assertTrue(path.endswith('some_file'))
def test_resolve_nonexisting_file(self):
path = self.path(
self.make_path('/path', 'to', 'file', 'this can not exist'))
self.assertEqual(path, path.resolve())
def test_cwd(self):
dir_path = self.make_path('jane')
self.create_dir(dir_path)
self.os.chdir(dir_path)
self.assert_equal_paths(self.path.cwd(),
self.path(self.os.path.realpath(dir_path)))
def test_expanduser(self):
if is_windows:
self.assertEqual(self.path('~').expanduser(),
self.path(
os.environ['USERPROFILE'].replace('\\',
'/')))
else:
self.assertEqual(self.path('~').expanduser(),
self.path(os.environ['HOME']))
def test_home(self):
if is_windows:
self.assertEqual(self.path(
os.environ['USERPROFILE'].replace('\\', '/')),
self.path.home())
else:
self.assertEqual(self.path(os.environ['HOME']),
self.path.home())
class RealPathlibFileObjectPropertyTest(FakePathlibFileObjectPropertyTest):
def use_real_fs(self):
return True
class FakePathlibPathFileOperationTest(RealPathlibTestCase):
"""Tests methods related to file and directory handling."""
def test_exists(self):
self.skip_if_symlink_not_supported()
self.create_file(self.make_path('home', 'jane', 'test.py'))
self.create_dir(self.make_path('home', 'john'))
self.create_symlink(
self.make_path('john'), self.make_path('home', 'john'))
self.create_symlink(
self.make_path('none'), self.make_path('home', 'none'))
self.assertTrue(
self.path(self.make_path('home', 'jane', 'test.py')).exists())
self.assertTrue(self.path(self.make_path('home', 'jane')).exists())
self.assertTrue(self.path(self.make_path('john')).exists())
self.assertFalse(self.path(self.make_path('none')).exists())
self.assertFalse(
self.path(self.make_path('home', 'jane', 'test')).exists())
def test_open(self):
self.create_dir(self.make_path('foo'))
with self.assertRaises(OSError):
self.path(self.make_path('foo', 'bar.txt')).open()
self.path(self.make_path('foo', 'bar.txt')).open('w').close()
self.assertTrue(
self.os.path.exists(self.make_path('foo', 'bar.txt')))
def test_read_text(self):
self.create_file(self.make_path('text_file'), contents='foo')
file_path = self.path(self.make_path('text_file'))
self.assertEqual(file_path.read_text(), 'foo')
def test_read_text_with_encoding(self):
self.create_file(self.make_path('text_file'),
contents='ерунда', encoding='cyrillic')
file_path = self.path(self.make_path('text_file'))
self.assertEqual(file_path.read_text(encoding='cyrillic'),
'ерунда')
def test_write_text(self):
path_name = self.make_path('text_file')
file_path = self.path(path_name)
file_path.write_text(str('foo'))
self.assertTrue(self.os.path.exists(path_name))
self.check_contents(path_name, 'foo')
def test_write_text_with_encoding(self):
path_name = self.make_path('text_file')
file_path = self.path(path_name)
file_path.write_text('ανοησίες', encoding='greek')
self.assertTrue(self.os.path.exists(path_name))
self.check_contents(path_name, 'ανοησίες'.encode('greek'))
@unittest.skipIf(sys.version_info < (3, 10),
"newline argument new in Python 3.10")
def test_write_with_newline_arg(self):
path = self.path(self.make_path('some_file'))
path.write_text('1\r\n2\n3\r4', newline='')
self.check_contents(path, b'1\r\n2\n3\r4')
path.write_text('1\r\n2\n3\r4', newline='\n')
self.check_contents(path, b'1\r\n2\n3\r4')
path.write_text('1\r\n2\n3\r4', newline='\r\n')
self.check_contents(path, b'1\r\r\n2\r\n3\r4')
path.write_text('1\r\n2\n3\r4', newline='\r')
self.check_contents(path, b'1\r\r2\r3\r4')
def test_read_bytes(self):
path_name = self.make_path('binary_file')
self.create_file(path_name, contents=b'Binary file contents')
file_path = self.path(path_name)
self.assertEqual(file_path.read_bytes(), b'Binary file contents')
def test_write_bytes(self):
path_name = self.make_path('binary_file')
file_path = self.path(path_name)
file_path.write_bytes(b'Binary file contents')
self.assertTrue(self.os.path.exists(path_name))
self.check_contents(path_name, b'Binary file contents')
def test_rename(self):
file_name = self.make_path('foo', 'bar.txt')
self.create_file(file_name, contents='test')
new_file_name = self.make_path('foo', 'baz.txt')
self.path(file_name).rename(new_file_name)
self.assertFalse(self.os.path.exists(file_name))
self.check_contents(new_file_name, 'test')
def test_replace(self):
self.create_file(self.make_path('foo', 'bar.txt'), contents='test')
self.create_file(self.make_path('bar', 'old.txt'),
contents='replaced')
self.path(self.make_path('bar', 'old.txt')).replace(
self.make_path('foo', 'bar.txt'))
self.assertFalse(
self.os.path.exists(self.make_path('bar', 'old.txt')))
self.check_contents(self.make_path('foo', 'bar.txt'), 'replaced')
def test_unlink(self):
file_path = self.make_path('foo', 'bar.txt')
self.create_file(file_path, contents='test')
self.assertTrue(self.os.path.exists(file_path))
self.path(file_path).unlink()
self.assertFalse(self.os.path.exists(file_path))
def test_touch_non_existing(self):
self.create_dir(self.make_path('foo'))
file_name = self.make_path('foo', 'bar.txt')
self.path(file_name).touch(mode=0o444)
self.check_contents(file_name, '')
self.assertTrue(self.os.stat(file_name).st_mode,
stat.S_IFREG | 0o444)
self.os.chmod(file_name, mode=0o666)
def test_touch_existing(self):
file_name = self.make_path('foo', 'bar.txt')
self.create_file(file_name, contents='test')
file_path = self.path(file_name)
self.assert_raises_os_error(
errno.EEXIST, file_path.touch, exist_ok=False)
file_path.touch()
self.check_contents(file_name, 'test')
def test_samefile(self):
file_name = self.make_path('foo', 'bar.txt')
self.create_file(file_name)
file_name2 = self.make_path('foo', 'baz.txt')
self.create_file(file_name2)
with self.assertRaises(OSError):
self.path(self.make_path('foo', 'other')).samefile(
self.make_path('foo', 'other.txt'))
path = self.path(file_name)
other_name = self.make_path('foo', 'other.txt')
with self.assertRaises(OSError):
path.samefile(other_name)
with self.assertRaises(OSError):
path.samefile(self.path(other_name))
self.assertFalse(path.samefile(file_name2))
self.assertFalse(path.samefile(self.path(file_name2)))
self.assertTrue(
path.samefile(self.make_path('foo', '..', 'foo', 'bar.txt')))
self.assertTrue(path.samefile(
self.path(self.make_path('foo', '..', 'foo', 'bar.txt'))))
def test_symlink_to(self):
self.skip_if_symlink_not_supported()
file_name = self.make_path('foo', 'bar.txt')
self.create_file(file_name)
link_name = self.make_path('link_to_bar')
path = self.path(link_name)
path.symlink_to(file_name)
self.assertTrue(self.os.path.exists(link_name))
self.assertTrue(path.is_symlink())
@unittest.skipIf(sys.version_info < (3, 8),
'link_to new in Python 3.8')
def test_link_to(self):
self.skip_if_symlink_not_supported()
file_name = self.make_path('foo', 'bar.txt')
self.create_file(file_name)
self.assertEqual(1, self.os.stat(file_name).st_nlink)
link_name = self.make_path('link_to_bar')
path = self.path(file_name)
path.link_to(link_name)
self.assertTrue(self.os.path.exists(link_name))
self.assertFalse(path.is_symlink())
self.assertEqual(2, self.os.stat(file_name).st_nlink)
@unittest.skipIf(sys.version_info < (3, 10),
'hardlink_to new in Python 3.10')
def test_hardlink_to(self):
self.skip_if_symlink_not_supported()
file_name = self.make_path('foo', 'bar.txt')
self.create_file(file_name)
self.assertEqual(1, self.os.stat(file_name).st_nlink)
link_path = self.path(self.make_path('link_to_bar'))
path = self.path(file_name)
link_path.hardlink_to(path)
self.assertTrue(self.os.path.exists(link_path))
self.assertFalse(path.is_symlink())
self.assertEqual(2, self.os.stat(file_name).st_nlink)
@unittest.skipIf(sys.version_info < (3, 9),
'readlink new in Python 3.9')
def test_readlink(self):
self.skip_if_symlink_not_supported()
link_path = self.make_path('foo', 'bar', 'baz')
target = self.make_path('tarJAY')
self.create_symlink(link_path, target)
path = self.path(link_path)
self.assert_equal_paths(path.readlink(), self.path(target))
def test_mkdir(self):
dir_name = self.make_path('foo', 'bar')
self.assert_raises_os_error(errno.ENOENT,
self.path(dir_name).mkdir)
self.path(dir_name).mkdir(parents=True)
self.assertTrue(self.os.path.exists(dir_name))
self.assert_raises_os_error(errno.EEXIST,
self.path(dir_name).mkdir)
def test_mkdir_exist_ok(self):
dir_name = self.make_path('foo', 'bar')
self.create_dir(dir_name)
self.path(dir_name).mkdir(exist_ok=True)
file_name = self.os.path.join(dir_name, 'baz')
self.create_file(file_name)
self.assert_raises_os_error(errno.EEXIST,
self.path(file_name).mkdir,
exist_ok=True)
def test_rmdir(self):
dir_name = self.make_path('foo', 'bar')
self.create_dir(dir_name)
self.path(dir_name).rmdir()
self.assertFalse(self.os.path.exists(dir_name))
self.assertTrue(self.os.path.exists(self.make_path('foo')))
self.create_file(self.make_path('foo', 'baz'))
with self.assertRaises(OSError):
self.path(self.make_path('foo')).rmdir()
self.assertTrue(self.os.path.exists(self.make_path('foo')))
def test_iterdir(self):
self.create_file(self.make_path('foo', 'bar', 'file1'))
self.create_file(self.make_path('foo', 'bar', 'file2'))
self.create_file(self.make_path('foo', 'bar', 'file3'))
path = self.path(self.make_path('foo', 'bar'))
contents = [entry for entry in path.iterdir()]
self.assertEqual(3, len(contents))
self.assertIn(self.path(self.make_path('foo', 'bar', 'file2')),
contents)
def test_glob(self):
self.create_file(self.make_path('foo', 'setup.py'))
self.create_file(self.make_path('foo', 'all_tests.py'))
self.create_file(self.make_path('foo', 'README.md'))
self.create_file(self.make_path('foo', 'setup.pyc'))
path = self.path(self.make_path('foo'))
self.assertEqual(sorted(path.glob('*.py')),
[self.path(self.make_path('foo', 'all_tests.py')),
self.path(self.make_path('foo', 'setup.py'))])
@unittest.skipIf(not is_windows, 'Windows specific test')
def test_glob_case_windows(self):
self.create_file(self.make_path('foo', 'setup.py'))
self.create_file(self.make_path('foo', 'all_tests.PY'))
self.create_file(self.make_path('foo', 'README.md'))
self.create_file(self.make_path('foo', 'example.Py'))
path = self.path(self.make_path('foo'))
self.assertEqual(sorted(path.glob('*.py')),
[self.path(self.make_path('foo', 'all_tests.PY')),
self.path(self.make_path('foo', 'example.Py')),
self.path(self.make_path('foo', 'setup.py'))])
@unittest.skipIf(is_windows, 'Posix specific test')
def test_glob_case_posix(self):
self.check_posix_only()
self.create_file(self.make_path('foo', 'setup.py'))
self.create_file(self.make_path('foo', 'all_tests.PY'))
self.create_file(self.make_path('foo', 'README.md'))
self.create_file(self.make_path('foo', 'example.Py'))
path = self.path(self.make_path('foo'))
self.assertEqual(sorted(path.glob('*.py')),
[self.path(self.make_path('foo', 'setup.py'))])
class RealPathlibPathFileOperationTest(FakePathlibPathFileOperationTest):
def use_real_fs(self):
return True
@unittest.skipIf(sys.version_info < (3, 6),
'path-like objects new in Python 3.6')
class FakePathlibUsageInOsFunctionsTest(RealPathlibTestCase):
"""Test that many os / os.path functions accept a path-like object
since Python 3.6. The functionality of these functions is tested
    elsewhere; here we just check that they accept a fake path object as an
argument.
"""
def test_join(self):
dir1 = 'foo'
dir2 = 'bar'
dir = self.os.path.join(dir1, dir2)
self.assertEqual(dir, self.os.path.join(self.path(dir1), dir2))
self.assertEqual(dir, self.os.path.join(dir1, self.path(dir2)))
self.assertEqual(dir,
self.os.path.join(self.path(dir1),
self.path(dir2)))
def test_normcase(self):
dir1 = self.make_path('Foo', 'Bar', 'Baz')
self.assertEqual(self.os.path.normcase(dir1),
self.os.path.normcase(self.path(dir1)))
def test_normpath(self):
dir1 = self.make_path('foo', 'bar', '..', 'baz')
self.assertEqual(self.os.path.normpath(dir1),
self.os.path.normpath(self.path(dir1)))
def test_realpath(self):
dir1 = self.make_path('foo', 'bar', '..', 'baz')
self.assertEqual(self.os.path.realpath(dir1),
self.os.path.realpath(self.path(dir1)))
def test_relpath(self):
path_foo = self.make_path('path', 'to', 'foo')
path_bar = self.make_path('path', 'to', 'bar')
rel_path = self.os.path.relpath(path_foo, path_bar)
self.assertEqual(rel_path,
self.os.path.relpath(self.path(path_foo),
path_bar))
self.assertEqual(rel_path,
self.os.path.relpath(path_foo,
self.path(path_bar)))
self.assertEqual(rel_path,
self.os.path.relpath(self.path(path_foo),
self.path(path_bar)))
def test_split(self):
dir1 = self.make_path('Foo', 'Bar', 'Baz')
self.assertEqual(self.os.path.split(dir1),
self.os.path.split(self.path(dir1)))
def test_splitdrive(self):
dir1 = self.make_path('C:', 'Foo', 'Bar', 'Baz')
self.assertEqual(self.os.path.splitdrive(dir1),
self.os.path.splitdrive(self.path(dir1)))
def test_abspath(self):
dir1 = self.make_path('foo', 'bar', '..', 'baz')
self.assertEqual(self.os.path.abspath(dir1),
self.os.path.abspath(self.path(dir1)))
def test_exists(self):
dir1 = self.make_path('foo', 'bar', '..', 'baz')
self.assertEqual(self.os.path.exists(dir1),
self.os.path.exists(self.path(dir1)))
def test_lexists(self):
dir1 = self.make_path('foo', 'bar', '..', 'baz')
self.assertEqual(self.os.path.lexists(dir1),
self.os.path.lexists(self.path(dir1)))
def test_expanduser(self):
dir1 = self.os.path.join('~', 'foo')
self.assertEqual(self.os.path.expanduser(dir1),
self.os.path.expanduser(self.path(dir1)))
def test_getmtime(self):
self.skip_real_fs()
dir1 = self.make_path('foo', 'bar1.txt')
path_obj = self.filesystem.create_file(dir1)
path_obj._st_mtime = 24
self.assertEqual(self.os.path.getmtime(dir1),
self.os.path.getmtime(self.path(dir1)))
def test_getctime(self):
self.skip_real_fs()
dir1 = self.make_path('foo', 'bar1.txt')
path_obj = self.filesystem.create_file(dir1)
path_obj.st_ctime = 42
self.assertEqual(self.os.path.getctime(dir1),
self.os.path.getctime(self.path(dir1)))
def test_getatime(self):
self.skip_real_fs()
dir1 = self.make_path('foo', 'bar1.txt')
path_obj = self.filesystem.create_file(dir1)
path_obj.st_atime = 11
self.assertEqual(self.os.path.getatime(dir1),
self.os.path.getatime(self.path(dir1)))
def test_getsize(self):
path = self.make_path('foo', 'bar', 'baz')
self.create_file(path, contents='1234567')
self.assertEqual(self.os.path.getsize(path),
self.os.path.getsize(self.path(path)))
def test_isabs(self):
path = self.make_path('foo', 'bar', '..', 'baz')
self.assertEqual(self.os.path.isabs(path),
self.os.path.isabs(self.path(path)))
def test_isfile(self):
path = self.make_path('foo', 'bar', 'baz')
self.create_file(path)
self.assertEqual(self.os.path.isfile(path),
self.os.path.isfile(self.path(path)))
def test_isfile_not_readable(self):
path = self.make_path('foo', 'bar', 'baz')
self.create_file(path, perm=0)
self.assertEqual(self.os.path.isfile(path),
self.os.path.isfile(self.path(path)))
def test_islink(self):
path = self.make_path('foo', 'bar', 'baz')
self.create_file(path)
self.assertEqual(self.os.path.islink(path),
self.os.path.islink(self.path(path)))
def test_isdir(self):
path = self.make_path('foo', 'bar', 'baz')
self.create_file(path)
self.assertEqual(self.os.path.isdir(path),
self.os.path.isdir(self.path(path)))
def test_ismount(self):
path = self.os.path.sep
self.assertEqual(self.os.path.ismount(path),
self.os.path.ismount(self.path(path)))
def test_access(self):
path = self.make_path('foo', 'bar', 'baz')
self.create_file(path, contents='1234567')
self.assertEqual(self.os.access(path, os.R_OK),
self.os.access(self.path(path), os.R_OK))
def test_chdir(self):
path = self.make_path('foo', 'bar', 'baz')
self.create_dir(path)
self.os.chdir(self.path(path))
        # use real path to handle symlink /var to /private/var on macOS
self.assert_equal_paths(self.os.path.realpath(path), self.os.getcwd())
def test_chmod(self):
path = self.make_path('some_file')
self.create_file(path)
self.os.chmod(self.path(path), 0o444)
self.assertEqual(stat.S_IMODE(0o444),
stat.S_IMODE(self.os.stat(path).st_mode))
self.os.chmod(self.path(path), 0o666)
def test_link(self):
self.skip_if_symlink_not_supported()
file1_path = self.make_path('test_file1')
file2_path = self.make_path('test_file2')
self.create_file(file1_path)
self.os.link(self.path(file1_path), file2_path)
self.assertTrue(self.os.path.exists(file2_path))
self.os.unlink(file2_path)
self.os.link(self.path(file1_path), self.path(file2_path))
self.assertTrue(self.os.path.exists(file2_path))
self.os.unlink(file2_path)
self.os.link(file1_path, self.path(file2_path))
self.assertTrue(self.os.path.exists(file2_path))
def test_listdir(self):
path = self.make_path('foo', 'bar')
self.create_dir(path)
self.create_file(path + 'baz.txt')
self.assertEqual(self.os.listdir(path),
self.os.listdir(self.path(path)))
def test_mkdir(self):
path = self.make_path('foo')
self.os.mkdir(self.path(path))
self.assertTrue(self.os.path.exists(path))
def test_makedirs(self):
path = self.make_path('foo', 'bar')
self.os.makedirs(self.path(path))
self.assertTrue(self.os.path.exists(path))
@unittest.skipIf(is_windows and sys.version_info < (3, 8),
                     'os.readlink does not support path-like objects '
'under Windows before Python 3.8')
def test_readlink(self):
self.skip_if_symlink_not_supported()
link_path = self.make_path('foo', 'bar', 'baz')
target = self.make_path('tarJAY')
self.create_symlink(link_path, target)
self.assert_equal_paths(self.os.readlink(self.path(link_path)), target)
@unittest.skipIf(is_windows and sys.version_info < (3, 8),
                     'os.readlink does not support path-like objects '
'under Windows before Python 3.8')
def test_readlink_bytes(self):
self.skip_if_symlink_not_supported()
link_path = self.make_path(b'foo', b'bar', b'baz')
target = self.make_path(b'tarJAY')
self.create_symlink(link_path, target)
self.assert_equal_paths(self.os.readlink(self.path(link_path)), target)
def test_remove(self):
path = self.make_path('test.txt')
self.create_file(path)
self.os.remove(self.path(path))
self.assertFalse(self.os.path.exists(path))
def test_rename(self):
path1 = self.make_path('test1.txt')
path2 = self.make_path('test2.txt')
self.create_file(path1)
self.os.rename(self.path(path1), path2)
self.assertTrue(self.os.path.exists(path2))
self.os.rename(self.path(path2), self.path(path1))
self.assertTrue(self.os.path.exists(path1))
def test_replace(self):
path1 = self.make_path('test1.txt')
path2 = self.make_path('test2.txt')
self.create_file(path1)
self.os.replace(self.path(path1), path2)
self.assertTrue(self.os.path.exists(path2))
self.os.replace(self.path(path2), self.path(path1))
self.assertTrue(self.os.path.exists(path1))
def test_rmdir(self):
path = self.make_path('foo', 'bar')
self.create_dir(path)
self.os.rmdir(self.path(path))
self.assertFalse(self.os.path.exists(path))
def test_scandir(self):
directory = self.make_path('xyzzy', 'plugh')
self.create_dir(directory)
self.create_file(self.os.path.join(directory, 'test.txt'))
dir_entries = [entry for entry in
self.os.scandir(self.path(directory))]
self.assertEqual(1, len(dir_entries))
def test_symlink(self):
self.skip_if_symlink_not_supported()
file_path = self.make_path('test_file1')
link_path = self.make_path('link')
self.create_file(file_path)
self.os.symlink(self.path(file_path), link_path)
self.assertTrue(self.os.path.exists(link_path))
self.os.remove(link_path)
self.os.symlink(self.path(file_path), self.path(link_path))
self.assertTrue(self.os.path.exists(link_path))
def test_stat(self):
path = self.make_path('foo', 'bar', 'baz')
self.create_file(path, contents='1234567')
self.assertEqual(self.os.stat(path), self.path(path).stat())
@unittest.skipIf(sys.version_info < (3, 10), "New in Python 3.10")
def test_stat_follow_symlinks(self):
self.check_posix_only()
directory = self.make_path('foo')
base_name = 'bar'
file_path = self.path(self.os.path.join(directory, base_name))
link_path = self.path(self.os.path.join(directory, 'link'))
contents = "contents"
self.create_file(file_path, contents=contents)
self.create_symlink(link_path, base_name)
self.assertEqual(len(contents),
link_path.stat(follow_symlinks=True)[stat.ST_SIZE])
self.assertEqual(len(base_name),
link_path.stat(follow_symlinks=False)[stat.ST_SIZE])
def test_utime(self):
path = self.make_path('some_file')
self.create_file(path, contents='test')
self.os.utime(self.path(path), times=(1, 2))
st = self.os.stat(path)
self.assertEqual(1, st.st_atime)
self.assertEqual(2, st.st_mtime)
def test_truncate(self):
path = self.make_path('some_file')
self.create_file(path, contents='test_test')
self.os.truncate(self.path(path), length=4)
st = self.os.stat(path)
self.assertEqual(4, st.st_size)
@unittest.skipIf(sys.platform == 'win32',
'no pwd and grp modules in Windows')
def test_owner_and_group_posix(self):
self.check_posix_only()
path = self.make_path('some_file')
self.create_file(path)
self.assertTrue(self.path(path).owner())
self.assertTrue(self.path(path).group())
def test_owner_and_group_windows(self):
self.check_windows_only()
path = self.make_path('some_file')
self.create_file(path)
with self.assertRaises(NotImplementedError):
self.path(path).owner()
with self.assertRaises(NotImplementedError):
self.path(path).group()
class RealPathlibUsageInOsFunctionsTest(FakePathlibUsageInOsFunctionsTest):
def use_real_fs(self):
return True
@unittest.skipIf(sys.version_info < (3, 6),
'Path-like objects new in Python 3.6')
class FakeFilesystemPathLikeObjectTest(unittest.TestCase):
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem(
path_separator='/')
self.pathlib = fake_pathlib.FakePathlibModule(self.filesystem)
self.os = fake_filesystem.FakeOsModule(self.filesystem)
def test_create_dir_with_pathlib_path(self):
dir_path_string = 'foo/bar/baz'
dir_path = self.pathlib.Path(dir_path_string)
self.filesystem.create_dir(dir_path)
self.assertTrue(self.os.path.exists(dir_path_string))
self.assertEqual(stat.S_IFDIR,
self.os.stat(
dir_path_string).st_mode & stat.S_IFDIR)
def test_create_file_with_pathlib_path(self):
file_path_string = 'foo/bar/baz'
file_path = self.pathlib.Path(file_path_string)
self.filesystem.create_file(file_path)
self.assertTrue(self.os.path.exists(file_path_string))
self.assertEqual(stat.S_IFREG,
self.os.stat(
file_path_string).st_mode & stat.S_IFREG)
def test_create_symlink_with_pathlib_path(self):
file_path = self.pathlib.Path('foo/bar/baz')
link_path_string = 'foo/link'
link_path = self.pathlib.Path(link_path_string)
self.filesystem.create_symlink(link_path, file_path)
self.assertTrue(self.os.path.lexists(link_path_string))
self.assertEqual(stat.S_IFLNK,
self.os.lstat(link_path_string).st_mode &
stat.S_IFLNK)
def test_add_existing_real_file_with_pathlib_path(self):
real_file_path_string = os.path.abspath(__file__)
real_file_path = self.pathlib.Path(real_file_path_string)
self.filesystem.add_real_file(real_file_path)
fake_filepath_string = real_file_path_string.replace(
os.sep, self.os.sep)
self.assertTrue(self.os.path.exists(fake_filepath_string))
self.assertEqual(stat.S_IFREG, self.os.stat(
fake_filepath_string).st_mode & stat.S_IFREG)
def test_add_existing_real_directory_with_pathlib_path(self):
real_dirpath_string = os.path.dirname(os.path.abspath(__file__))
real_dir_path = self.pathlib.Path(real_dirpath_string)
self.filesystem.add_real_directory(real_dir_path)
fake_dirpath_string = real_dirpath_string.replace(
os.sep, self.os.sep)
self.assertTrue(self.os.path.exists(fake_dirpath_string))
self.assertEqual(stat.S_IFDIR, self.os.stat(
fake_dirpath_string).st_mode & stat.S_IFDIR)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
py | 1a3ce005d64cb635a32c8cc90f61757005496ab8 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# pylint: disable=no-self-use
from ._deserialize import (
process_storage_error)
from ._generated.models import (
StorageErrorException,
)
from ._shared.response_handlers import return_response_headers
from ._shared.uploads import (
upload_data_chunks,
DataLakeFileChunkUploader)
def _any_conditions(modified_access_conditions=None, **kwargs): # pylint: disable=unused-argument
return any([
modified_access_conditions.if_modified_since,
modified_access_conditions.if_unmodified_since,
modified_access_conditions.if_none_match,
modified_access_conditions.if_match
])
def upload_datalake_file( # pylint: disable=unused-argument
client=None,
stream=None,
length=None,
overwrite=None,
validate_content=None,
max_concurrency=None,
**kwargs):
try:
if length == 0:
return {}
properties = kwargs.pop('properties', None)
umask = kwargs.pop('umask', None)
permissions = kwargs.pop('permissions', None)
path_http_headers = kwargs.pop('path_http_headers', None)
modified_access_conditions = kwargs.pop('modified_access_conditions', None)
chunk_size = kwargs.pop('chunk_size', 100 * 1024 * 1024)
if not overwrite:
# if customers didn't specify access conditions, they cannot flush data to existing file
if not _any_conditions(modified_access_conditions):
modified_access_conditions.if_none_match = '*'
if properties or umask or permissions:
raise ValueError("metadata, umask and permissions can be set only when overwrite is enabled")
if overwrite:
response = client.create(
resource='file',
path_http_headers=path_http_headers,
properties=properties,
modified_access_conditions=modified_access_conditions,
umask=umask,
permissions=permissions,
cls=return_response_headers,
**kwargs)
            # this modified_access_conditions will be applied to flush_data to make sure
            # no other flush happens between the create above and the flush_data call below
modified_access_conditions.if_match = response['etag']
modified_access_conditions.if_none_match = None
modified_access_conditions.if_modified_since = None
modified_access_conditions.if_unmodified_since = None
upload_data_chunks(
service=client,
uploader_class=DataLakeFileChunkUploader,
total_size=length,
chunk_size=chunk_size,
stream=stream,
max_concurrency=max_concurrency,
validate_content=validate_content,
**kwargs)
return client.flush_data(position=length,
path_http_headers=path_http_headers,
modified_access_conditions=modified_access_conditions,
close=True,
cls=return_response_headers,
**kwargs)
except StorageErrorException as error:
process_storage_error(error)
|
py | 1a3ce0536e84f978b69a40a59fb6beb5fa882db8 | # -*- coding: utf-8 -*-
import functools
from mock import Mock
from bravado_core.model import _post_process_spec
from bravado_core.spec import Spec
def test_empty():
swagger_spec = Spec({})
callback = Mock()
_post_process_spec(
spec_dict=swagger_spec.spec_dict,
spec_resolver=swagger_spec.resolver,
on_container_callbacks=[callback],
)
assert callback.call_count == 0
def test_single_key():
spec_dict = {'definitions': {}}
swagger_spec = Spec(spec_dict)
callback = Mock()
_post_process_spec(
spec_dict=swagger_spec.spec_dict,
spec_resolver=swagger_spec.resolver,
on_container_callbacks=[callback],
)
assert callback.call_count == 1
callback.assert_called_once_with(spec_dict, '#/definitions')
def test_visits_refs_only_once():
# bar should only be de-reffed once even though there are two refs to it
spec_dict = {
'ref_one': {'$ref': '#/bar'},
'ref_two': {'$ref': '#/bar'},
'bar': 'baz'
}
swagger_spec = Spec(spec_dict)
# Yech! mock doesn't make this easy
mutable = {'cnt': 0}
def callback(container, json_reference, mutable):
# Bump the mutable counter every time bar is de-reffed
if json_reference.endswith('/bar'):
mutable['cnt'] += 1
_post_process_spec(
spec_dict=swagger_spec.spec_dict,
spec_resolver=swagger_spec.resolver,
on_container_callbacks=[
functools.partial(
callback,
mutable=mutable,
),
],
)
assert mutable['cnt'] == 1
|
py | 1a3ce1334de165f711c6fe15aa3e4a1d91ae1e22 | import unittest
from mock import patch, Mock
import pytest
from nose.tools import * # noqa (PEP8 asserts)
from admin.rdm_addons.utils import get_rdm_addon_option
from osf_tests.factories import (
fake_email,
AuthUserFactory,
InstitutionFactory,
ExternalAccountFactory,
UserFactory,
ProjectFactory
)
from addons.dropboxbusiness.models import NodeSettings
from admin_tests.rdm_addons import factories as rdm_addon_factories
pytestmark = pytest.mark.django_db
class DropboxBusinessAccountFactory(ExternalAccountFactory):
provider = 'dropboxbusiness'
FILEACCESS_NAME = 'dropboxbusiness'
MANAGEMENT_NAME = 'dropboxbusiness_manage'
DBXBIZ = 'addons.dropboxbusiness'
class TestDropboxBusiness(unittest.TestCase):
def setUp(self):
super(TestDropboxBusiness, self).setUp()
self.institution = InstitutionFactory()
self.user = UserFactory()
self.user.eppn = fake_email()
self.user.affiliated_institutions.add(self.institution)
self.user.save()
self.f_option = get_rdm_addon_option(self.institution.id,
FILEACCESS_NAME)
self.m_option = get_rdm_addon_option(self.institution.id,
MANAGEMENT_NAME)
f_account = ExternalAccountFactory(provider=FILEACCESS_NAME)
m_account = ExternalAccountFactory(provider=MANAGEMENT_NAME)
self.f_option.external_accounts.add(f_account)
self.m_option.external_accounts.add(m_account)
def _new_project(self):
with patch(DBXBIZ + '.utils.TeamInfo') as mock1, \
patch(DBXBIZ + '.utils.get_current_admin_group_and_sync') as mock2, \
patch(DBXBIZ + '.utils.get_current_admin_dbmid') as mock3, \
patch(DBXBIZ + '.utils.create_team_folder') as mock4:
mock2.return_value = (Mock(), Mock())
mock3.return_value = 'dbmid:dummy'
mock4.return_value = ('dbtid:dummy', 'g:dummy')
self.project = ProjectFactory(creator=self.user)
def _allowed(self):
self.f_option.is_allowed = True
self.f_option.save()
def test_dropboxbusiness_default_is_not_allowed(self):
assert_false(self.f_option.is_allowed)
self._new_project()
result = self.project.get_addon('dropboxbusiness')
assert_equal(result, None)
def test_dropboxbusiness_no_eppn(self):
self.user.eppn = None
self.user.save()
self._allowed()
self._new_project()
result = self.project.get_addon('dropboxbusiness')
assert_equal(result, None)
def test_dropboxbusiness_no_institution(self):
self.user.affiliated_institutions.clear()
self._allowed()
self._new_project()
result = self.project.get_addon('dropboxbusiness')
assert_equal(result, None)
def test_dropboxbusiness_no_addon_option(self):
self.f_option.delete()
self._allowed()
self._new_project()
result = self.project.get_addon('dropboxbusiness')
assert_equal(result, None)
def test_dropboxbusiness_automount(self):
self.f_option.is_allowed = True
self.f_option.save()
self._new_project()
result = self.project.get_addon('dropboxbusiness')
assert_true(isinstance(result, NodeSettings))
assert_equal(result.admin_dbmid, 'dbmid:dummy')
assert_equal(result.team_folder_id, 'dbtid:dummy')
assert_equal(result.group_id, 'g:dummy')
|
py | 1a3ce257b8f8736847b544b5643ba024d39c7449 | from django.conf.urls import url,include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls)
]
|
py | 1a3ce3d31a10f1689bcfee3588c71e92c6cd1ddf | # Copyright Contributors to the Rez project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Print information about the current rez context, or a given context file.
'''
from __future__ import print_function
# Disable the following:
# - context tracking
# - package caching
#
# Use of rez-context isn't really 'using' the context, so much as inspecting it.
# Since features such as context tracking are related to context use only, we
# disable them in this tool.
#
import os
import json
import sys
os.environ.update({
"REZ_CONTEXT_TRACKING_HOST": '',
"REZ_WRITE_PACKAGE_CACHE": "False"
})
from rez.rex import OutputStyle # noqa
def setup_parser(parser, completions=False):
from rez.system import system
from rez.shells import get_shell_types
formats = get_shell_types() + ['dict', 'table']
if json is not None:
formats.append('json')
output_styles = [e.name for e in OutputStyle]
parser.add_argument(
"--req", "--print-request", dest="print_request",
action="store_true",
help="print only the request list (not including implicits)")
parser.add_argument(
"--res", "--print-resolve", dest="print_resolve",
action="store_true",
help="print only the resolve list. Use with --su to print package URIs")
parser.add_argument(
"--so", "--source-order", dest="source_order", action="store_true",
help="print resolved packages in order they are sorted, rather than "
"alphabetical order")
parser.add_argument(
"--su", "--show-uris", dest="show_uris", action="store_true",
help="list resolved package's URIs, rather than the default 'root' "
"filepath")
parser.add_argument(
"-t", "--tools", action="store_true",
help="print a list of the executables available in the context")
parser.add_argument(
"--which", type=str, metavar="CMD",
help="locate a program within the context")
parser.add_argument(
"-g", "--graph", action="store_true",
help="display the resolve graph as an image")
parser.add_argument(
"-d", "--dependency-graph", action="store_true",
help="display the (simpler) dependency graph. Works in combination "
"with other graph options")
parser.add_argument(
"--pg", "--print-graph", dest="print_graph", action="store_true",
help="print the resolve graph as a string")
parser.add_argument(
"--wg", "--write-graph", dest="write_graph", type=str,
metavar='FILE', help="write the resolve graph to FILE")
parser.add_argument(
"--pp", "--prune-package", dest="prune_pkg", metavar="PKG",
type=str, help="prune the graph down to PKG")
parser.add_argument(
"-i", "--interpret", action="store_true",
help="interpret the context and print the resulting code")
parser.add_argument(
"-f", "--format", type=str, choices=formats, default=system.shell,
help="print interpreted output in the given format. Ignored if "
"--interpret is not present (default: %(default)s). If one of "
"table, dict or json, the environ dict is printed.")
parser.add_argument(
"-s", "--style", type=str, default="file", choices=output_styles,
help="Set code output style. Ignored if --interpret is not present "
"(default: %(default)s)")
parser.add_argument(
"--no-env", dest="no_env", action="store_true",
help="interpret the context in an empty environment")
diff_action = parser.add_argument(
"--diff", type=str, metavar="RXT",
help="diff the current context against the given context")
parser.add_argument(
"--fetch", action="store_true",
help="diff the current context against a re-resolved copy of the "
"current context")
RXT_action = parser.add_argument(
"RXT", type=str, nargs='?',
help="rez context file (current context if not supplied). Use '-' to "
"read the context from stdin")
if completions:
from rez.cli._complete_util import FilesCompleter
rxt_completer = FilesCompleter(dirs=False, file_patterns=["*.rxt"])
RXT_action.completer = rxt_completer
diff_action.completer = rxt_completer
def command(opts, parser, extra_arg_groups=None):
from rez.cli._util import print_items
from rez.status import status
from rez.utils.formatting import columnise, PackageRequest
from rez.resolved_context import ResolvedContext
from rez.utils.graph_utils import save_graph, view_graph, prune_graph
from pprint import pformat
rxt_file = opts.RXT if opts.RXT else status.context_file
if not rxt_file:
print("not in a resolved environment context.", file=sys.stderr)
sys.exit(1)
if rxt_file == '-': # read from stdin
rc = ResolvedContext.read_from_buffer(sys.stdin, 'STDIN')
else:
rc = ResolvedContext.load(rxt_file)
def _graph():
if rc.has_graph:
if opts.dependency_graph:
return rc.get_dependency_graph(as_dot=True)
else:
return rc.graph(as_dot=True)
else:
print("The context does not contain a graph.", file=sys.stderr)
sys.exit(1)
parent_env = {} if opts.no_env else None
if not opts.interpret:
if opts.print_request:
print_items(rc.requested_packages(False))
elif opts.print_resolve:
if opts.show_uris:
print_items(x.uri for x in rc.resolved_packages)
else:
print_items(x.qualified_package_name for x in rc.resolved_packages)
elif opts.tools:
rc.print_tools()
elif opts.diff:
rc_other = ResolvedContext.load(opts.diff)
rc.print_resolve_diff(rc_other, True)
elif opts.fetch:
rc_new = ResolvedContext(rc.requested_packages(),
package_paths=rc.package_paths,
verbosity=opts.verbose)
rc.print_resolve_diff(rc_new, heading=("current", "updated"))
elif opts.which:
cmd = opts.which
path = rc.which(cmd, parent_environ=parent_env)
if path:
print(path)
else:
print("'%s' not found in the context" % cmd, file=sys.stderr)
elif opts.print_graph:
gstr = _graph()
print(gstr)
elif opts.graph or opts.dependency_graph or opts.write_graph:
gstr = _graph()
if opts.prune_pkg:
req = PackageRequest(opts.prune_pkg)
gstr = prune_graph(gstr, req.name)
func = view_graph if (opts.graph or opts.dependency_graph) else save_graph
func(gstr, dest_file=opts.write_graph)
else:
rc.print_info(verbosity=opts.verbose,
source_order=opts.source_order,
show_resolved_uris=opts.show_uris)
return
if opts.format in ("dict", "table", "json"):
env = rc.get_environ(parent_environ=parent_env)
if opts.format == 'table':
rows = [x for x in sorted(env.items())]
print('\n'.join(columnise(rows)))
elif opts.format == 'dict':
print(pformat(env))
else: # json
print(json.dumps(env, sort_keys=True, indent=4))
else:
code = rc.get_shell_code(shell=opts.format,
parent_environ=parent_env,
style=OutputStyle[opts.style])
print(code)
|
py | 1a3ce404dbab3bec0c6f36e90536b68ada9cd9b2 | # Copyright 2010 Chet Luther <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
version = '1.0.4'
setup(
name='snmposter',
version=version,
description="SNMP Agent Simulator",
long_description="""
""",
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'Intended Audience :: Telecommunications Industry',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Testing',
'Topic :: System :: Systems Administration',
'Topic :: Utilities',
],
keywords='snmp agent simulator snmpwalk',
author='Chet Luther',
author_email='[email protected]',
url='http://github.com/cluther/snmposter',
license='Apache 2',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
'Twisted==12.0.0',
#'TwistedSNMP', Not currently installable via PyPI.
#'pysnmp-se', Not currently installable via PyPI.
],
entry_points={
'console_scripts': [
'snmposter = snmposter.scripts:launcher',
]
},
)
|
py | 1a3ce419850097090334ac1204a0e7ece22cdbc2 | import rocksdbpy
from rocksdbpy import Option
def get_db(path):
"""
Open the database by given path
"""
opts = Option()
opts.create_if_missing(True)
return rocksdbpy.open(path, opts)
def destroy(db, path):
"""
Close active database and destroy
"""
db.close()
rocksdbpy.destroy(path)
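# A minimal usage sketch (illustrative path; the set/get calls assume the
# rocksdbpy binding's byte-oriented API, which may differ between versions):
#
#     db = get_db('/tmp/example_db')
#     db.set(b'key', b'value')
#     assert db.get(b'key') == b'value'
#     destroy(db, '/tmp/example_db')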
|
bzl | 1a3ce488b7a8822f5aca102c832cc2cd4e78e0ba | # This is how we set compiler options globally - we define them here, import
# this variable in every build file that has C code, and then set copts on every
# target manually. This is clumsy, but it's also very simple. There will
# probably be an easier way to do this in the future.
#
# See Bazel discussion: https://github.com/bazelbuild/bazel/issues/5198
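#
# A minimal sketch of the intended usage in a package's BUILD file; the
# load label and the target/source names below are hypothetical:
#
#     load("//tools:copts.bzl", "COPTS")
#
#     cc_library(
#         name = "example",
#         srcs = ["example.c"],
#         copts = COPTS,
#     )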
COPTS_BASE = [
# Ubuntu 18 LTS uses GCC 7.4, but c17 is not supported until GCC 8.
"-std=c11",
# "-D_DEFAULT_SOURCE",
]
_CWARN = [
"-Wall",
"-Wextra",
"-Wpointer-arith",
"-Wwrite-strings",
"-Wmissing-prototypes",
"-Wdouble-promotion",
"-Werror=implicit-function-declaration",
"-Winit-self",
"-Wstrict-prototypes",
]
COPTS = (
COPTS_BASE +
select({
"//tools:warnings_off": [],
"//tools:warnings_on": _CWARN,
"//tools:warnings_error": _CWARN + ["-Werror"],
})
)
CXXOPTS_BASE = [
"-std=c++17",
]
_CXXWARN = [
"-Wall",
"-Wextra",
]
CXXOPTS = (
CXXOPTS_BASE +
select({
"//tools:warnings_off": [],
"//tools:warnings_on": _CXXWARN,
"//tools:warnings_error": _CXXWARN + ["-Werror"],
})
)
|
py | 1a3ce553379c7cad7b711c4a1e121c7b7ece85e2 | import os
from cs50 import SQL
from flask import Flask, flash, jsonify, redirect, render_template, request, session
from flask_session import Session
from tempfile import mkdtemp
from werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError
from werkzeug.security import check_password_hash, generate_password_hash
from helpers import apology, login_required, lookup, usd
# Configure application
app = Flask(__name__)
# Ensure templates are auto-reloaded
app.config["TEMPLATES_AUTO_RELOAD"] = True
# Ensure responses aren't cached
@app.after_request
def after_request(response):
response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
response.headers["Expires"] = 0
response.headers["Pragma"] = "no-cache"
return response
# Custom filter
app.jinja_env.filters["usd"] = usd
# Configure session to use filesystem (instead of signed cookies)
app.config["SESSION_FILE_DIR"] = mkdtemp()
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# Configure CS50 Library to use SQLite database
db = SQL("sqlite:///finance.db")
# Make sure API key is set
if not os.environ.get("API_KEY"):
raise RuntimeError("API_KEY not set")
@app.route("/")
@login_required
def index():
"""Show portfolio of stocks"""
if request.method == "GET":
user_id = session["user_id"]
user_current_stocks = db.execute("SELECT * FROM holdings WHERE user_id = :user_id", user_id=user_id)
cash = db.execute("SELECT cash FROM users WHERE id = :user_id", user_id=user_id)[0]["cash"]
filtered_stocks = []
filtered_total = cash
for stock in user_current_stocks:
response = lookup(stock["symbol"])
name = response["name"]
price = response["price"]
total = price * stock["shares"]
filtered_total += total
tmp_stock = {
"symbol": stock["symbol"],
"name": name,
"shares": stock["shares"],
"price": price,
"total": total
}
filtered_stocks.append(tmp_stock)
return render_template("index.html", stocks=filtered_stocks, cash=cash, total=filtered_total)
@app.route("/buy", methods=["GET", "POST"])
@login_required
def buy():
"""Buys stocks and register"""
if request.method == "GET":
return render_template("buy.html")
elif request.method == "POST":
shares = int(request.form.get("shares"))
symbol = request.form.get("symbol")
# Initial validations
if not shares or not symbol:
return apology("You must complete all the fields")
elif shares < 1:
return apology("Shares must be positive")
# IEX request and validation
response = lookup(symbol)
if response == None:
return apology("This Symbol doesn't exist")
stock_name = response["name"]
price = response["price"]
user_id = session["user_id"]
# Getting user current cash and validate to the shares * price
user_current_cash = db.execute("SELECT cash FROM users WHERE id = :user_id", user_id=user_id)
user_new_cash = user_current_cash[0]["cash"] - (shares * price)
if user_new_cash < 0:
return apology("Can't afford")
# Subtracting the value from user's cash
db.execute("UPDATE users SET cash = :user_new_cash WHERE id = :user_id", user_new_cash=user_new_cash, user_id=user_id)
# Registering the buy in HISTORY table
db.execute("INSERT INTO history (user_id, symbol, stock_name, price, shares, type) VALUES (:user_id, :symbol, :stock_name, :price, :shares, 'buy')", user_id=user_id, symbol=symbol, stock_name=stock_name, price=price, shares=shares)
# Registering the buy in HOLDINGS table
check_hold = db.execute("SELECT * FROM holdings WHERE user_id = :user_id AND symbol = :symbol", user_id=user_id, symbol=symbol)
if not check_hold:
db.execute("INSERT INTO holdings (user_id, symbol, shares) VALUES (:user_id, :symbol, :shares)", user_id=user_id, symbol=symbol, shares=shares)
else:
db.execute("UPDATE holdings SET shares = shares + :shares WHERE user_id = :user_id AND symbol = :symbol", shares=shares, user_id=user_id, symbol=symbol)
# future possible implementation (Conditional INSERT/UPDATE)
# db.execute("CASE WHEN (SELECT * FROM holdings WHERE id = :user_id AND symbol = :symbol) IS NOT NULL THEN UPDATE holdings SET shares = shares + :shares ELSE INSERT INTO holdings (user_id, symbol, shares) VALUES (:user_id, :symbol, :shares) END", user_id=user_id, symbol=symbol, shares=shares)
return redirect("/")
@app.route("/history")
@login_required
def history():
"""Show history of transactions"""
user_id = session["user_id"]
if request.method == "GET":
stocks_history = db.execute("SELECT * FROM history WHERE user_id = :user_id", user_id=user_id)
for stock in stocks_history:
if stock["type"] == "sell":
stock["shares"] = -stock["shares"]
stock["price"] = stock["price"]
return render_template("history.html", stocks_history=stocks_history)
@app.route("/login", methods=["GET", "POST"])
def login():
"""Log user in"""
# Forget any user_id
session.clear()
# User reached route via POST (as by submitting a form via POST)
if request.method == "POST":
# Ensure username was submitted
if not request.form.get("username"):
return apology("must provide username", 403)
# Ensure password was submitted
elif not request.form.get("password"):
return apology("must provide password", 403)
# Query database for username
rows = db.execute("SELECT * FROM users WHERE username = :username",
username=request.form.get("username"))
# Ensure username exists and password is correct
if len(rows) != 1 or not check_password_hash(rows[0]["hash"], request.form.get("password")):
return apology("invalid username and/or password", 403)
# Remember which user has logged in
session["user_id"] = rows[0]["id"]
# Redirect user to home page
return redirect("/")
# User reached route via GET (as by clicking a link or via redirect)
else:
return render_template("login.html")
@app.route("/logout")
def logout():
"""Log user out"""
# Forget any user_id
session.clear()
# Redirect user to login form
return redirect("/")
@app.route("/quote", methods=["GET", "POST"])
@login_required
def quote():
"""Shows price of stock"""
if request.method == "GET":
return render_template("quote.html")
elif request.method == "POST":
symbol = request.form.get("symbol")
if not symbol:
return apology("You must complete all the fields")
response = lookup(symbol)
if response == None:
return apology("This Symbol doesn't exist")
name = response["name"]
symbol = response["symbol"]
price = response["price"]
return render_template("quoted.html", name=name, symbol=symbol, price=price)
@app.route("/register", methods=["GET", "POST"])
def register():
if request.method == "GET":
return render_template("register.html")
elif request.method == "POST":
primary_password = request.form.get("password")
confirmed_password = request.form.get("confirmation")
username = request.form.get("username")
if not primary_password or not confirmed_password or not username:
return apology("You must complete all the fields")
elif primary_password != confirmed_password:
return apology("The passwords don't match")
check_username = db.execute("SELECT * FROM users WHERE username = :username", username=username)
if check_username:
return apology("This username already exists")
else:
password_hash = generate_password_hash(primary_password, method='pbkdf2:sha256', salt_length=8)
user_id = db.execute("INSERT INTO users (username, hash) VALUES (:username, :password_hash)", username=username, password_hash=password_hash)
session["user_id"] = user_id
return redirect("/")
@app.route("/sell", methods=["GET", "POST"])
@login_required
def sell():
"""Sell shares of stock"""
user_id = session["user_id"]
if request.method == "GET":
symbols = db.execute("SELECT symbol FROM holdings WHERE user_id = :user_id", user_id=user_id)
return render_template("sell.html", symbols=symbols)
elif request.method == "POST":
symbol = request.form.get("symbol")
print(request.form)
shares = int(request.form.get("shares"))
user_shares = int(db.execute("SELECT shares FROM holdings WHERE user_id = :user_id AND symbol = :symbol", user_id=user_id, symbol=symbol)[0]["shares"])
print(type(user_shares))
# Initial validations
if not shares or not symbol:
return apology("You must complete all the fields")
elif shares < 1:
return apology("Shares must be positive")
elif user_shares < shares:
return apology("Too many shares")
# Getting IEX current price
response = lookup(symbol)
# Calculating return
total_return = response["price"] * shares
# Updating holdings
db.execute("UPDATE holdings SET shares = shares - :shares WHERE :user_id AND symbol = :symbol", shares=shares, user_id=user_id, symbol=symbol)
# Updating cash
db.execute("UPDATE users SET cash = cash + :total_return WHERE id = :user_id", total_return=total_return, user_id=user_id)
# Inserting history
db.execute("INSERT INTO history (user_id, symbol, stock_name, price, shares, type) VALUES (:user_id, :symbol, :stock_name, :price, :shares, 'sell')", user_id=user_id, symbol=symbol, stock_name=response["name"], price=response["price"], shares=shares)
return redirect("/")
def errorhandler(e):
"""Handle error"""
if not isinstance(e, HTTPException):
e = InternalServerError()
return apology(e.name, e.code)
# Listen for errors
for code in default_exceptions:
app.errorhandler(code)(errorhandler)
|
py | 1a3ce553474beeec9d7422a972215f0d0d7fefb2 | import os
import sys
sys.path.insert(0, os.getcwd())
import time
import glob
import numpy as np
import random
import torch
import darts.cnn.utils as utils
import logging
import torch.nn as nn
import darts.cnn.genotypes
import torch.utils
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
from collections import namedtuple
from darts.cnn.model import NetworkCIFAR as Network
class Train:
def __init__(self):
self.data='./data'
self.batch_size= 96
self.learning_rate= 0.025
self.momentum= 0.9
self.weight_decay = 3e-4
self.load_weights = 0
self.report_freq = 500
self.gpu = 0
self.epochs = 50
self.init_channels = 32
self.layers = 8
self.auxiliary = True
self.auxiliary_weight = 0.4
self.cutout = True
self.cutout_length = 16
self.drop_path_prob = 0.2
self.save = 'EXP'
self.seed = 0
self.grad_clip = 5
self.train_portion = 0.9
self.validation_set = True
self.CIFAR_CLASSES = 10
self.count = 1
def main(self, seed, arch, epochs=50, gpu=0, load_weights=False, train_portion=0.9, save='model_search'):
# Set up save file and logging
self.save = save
self.save = '{}'.format(self.save)
if not os.path.exists(self.save):
utils.create_exp_dir(self.save, scripts_to_save=glob.glob('*.py'))
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(self.save, 'log-seed{}.txt'.format(seed)))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
else:
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(self.save, 'log-seed{}.txt'.format(seed)))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
self.arch = arch
self.epochs = epochs
self.load_weights = load_weights
self.gpu = gpu
self.train_portion = train_portion
if self.train_portion == 1:
self.validation_set = False
self.seed = seed
# cpu-gpu switch
if not torch.cuda.is_available():
torch.manual_seed(self.seed)
device = torch.device('cpu')
else:
torch.cuda.manual_seed_all(self.seed)
random.seed(self.seed)
torch.manual_seed(self.seed)
device = torch.device(self.gpu)
cudnn.benchmark = False
cudnn.enabled=True
cudnn.deterministic=True
genotype = arch
logging.info('counter: {} genotypes: {}'.format(self.count, genotype))
model = Network(self.init_channels, self.CIFAR_CLASSES, self.layers, self.auxiliary, genotype)
model = model.to(device)
logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
total_params = sum(x.data.nelement() for x in model.parameters())
logging.info('Model total parameters: {}'.format(total_params))
criterion = nn.CrossEntropyLoss()
criterion = criterion.to(device)
optimizer = torch.optim.SGD(
model.parameters(),
self.learning_rate,
momentum=self.momentum,
weight_decay=self.weight_decay
)
train_transform, test_transform = utils._data_transforms_cifar10(self.cutout, self.cutout_length)
train_data = dset.CIFAR10(root=self.data, train=True, transform=train_transform)
test_data = dset.CIFAR10(root=self.data, train=False, transform=test_transform)
num_train = len(train_data)
indices = list(range(num_train))
if self.validation_set:
split = int(np.floor(self.train_portion * num_train))
else:
split = num_train
train_queue = torch.utils.data.DataLoader(
train_data, batch_size=self.batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
pin_memory=True, num_workers=4)
if self.validation_set:
valid_queue = torch.utils.data.DataLoader(
train_data, batch_size=self.batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),
pin_memory=True, num_workers=4)
test_queue = torch.utils.data.DataLoader(
test_data, batch_size=self.batch_size, shuffle=False, pin_memory=True, num_workers=4)
if self.load_weights:
logging.info('loading saved weights')
ml = 'cuda:{}'.format(self.gpu) if torch.cuda.is_available() else 'cpu'
model.load_state_dict(torch.load('weights.pt', map_location = ml))
logging.info('loaded saved weights')
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(self.epochs))
valid_accs = []
test_accs = []
for epoch in range(self.epochs):
logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
model.drop_path_prob = self.drop_path_prob * epoch / self.epochs
train_acc, train_obj = self.train(train_queue, model, criterion, optimizer)
if self.validation_set:
valid_acc, valid_obj = self.infer(valid_queue, model, criterion)
else:
valid_acc, valid_obj = 0, 0
test_acc, test_obj = self.infer(test_queue, model, criterion, test_data=True)
logging.info('train_acc: {:.4f}, valid_acc: {:.4f}, test_acc: {:.4f}'.format(train_acc, valid_acc, test_acc))
if epoch in list(range(max(0, epochs - 5), epochs)):
valid_accs.append((epoch, valid_acc))
test_accs.append((epoch, test_acc))
scheduler.step()
self.count += 1
return valid_accs, test_accs
def train(self, train_queue, model, criterion, optimizer):
objs = utils.AvgrageMeter()
top1 = utils.AvgrageMeter()
top5 = utils.AvgrageMeter()
model.train()
for step, (input, target) in enumerate(train_queue):
device = torch.device('cuda:{}'.format(self.gpu) if torch.cuda.is_available() else 'cpu')
input = input.to(device)
target = target.to(device)
optimizer.zero_grad()
logits, logits_aux = model(input)
loss = criterion(logits, target)
if self.auxiliary:
loss_aux = criterion(logits_aux, target)
loss += self.auxiliary_weight*loss_aux
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), self.grad_clip)
optimizer.step()
prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
n = input.size(0)
objs.update(loss.item(), n)
top1.update(prec1.item(), n)
top5.update(prec5.item(), n)
return top1.avg, objs.avg
def infer(self, valid_queue, model, criterion, test_data=False):
objs = utils.AvgrageMeter()
top1 = utils.AvgrageMeter()
top5 = utils.AvgrageMeter()
model.eval()
device = torch.device('cuda:{}'.format(self.gpu) if torch.cuda.is_available() else 'cpu')
for step, (input, target) in enumerate(valid_queue):
with torch.no_grad():
input = input.to(device)
target = target.to(device)
logits, _ = model(input)
loss = criterion(logits, target)
prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
n = input.size(0)
objs.update(loss.item(), n)
top1.update(prec1.item(), n)
top5.update(prec5.item(), n)
        return top1.avg, objs.avg
|
py | 1a3ce572a6b71052f942d8e5d047e9dead90489f | # -*- coding: utf-8 -*-
"""Utility methods for list objects.
AUTHORS:
- Thomas McTavish
"""
# While this software is under the permissive MIT License,
# (http://www.opensource.org/licenses/mit-license.php)
# We ask that you cite the neuronpy package (or tools used in this package)
# in any publications and contact the author with your referenced publication.
#
# Format:
# McTavish, T.S. NeuronPy library, version 0.1, http://bitbucket.org/tommctavish/neuronpy
#
# Copyright (c) 2010 Thomas S. McTavish
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
class ListEmptyError(Exception):
"""Alert when a list is empty"""
def __init__(self, list_name=None):
self.list_name = list_name
def __str__(self):
the_list_name=''
if self.list_name is not None:
the_list_name = '\''+self.list_name+'\''
errstr='List %s is empty.'%the_list_name
return repr(errstr)
def nonempty_copy(list):
"""
Makes a copy of the list and then removes any empty elements.
"""
list_copy=list[:]
remove_empties(list_copy)
return list_copy
def remove_empties(list):
"""
Removes any empty elements from the list.
"""
contains_empties = True
while contains_empties is True:
# We may have to re-loop if there are adjacent empty elements.
contains_empties = False
for i in iter(list):
if len(i)==0:
contains_empties=True
list.remove(i)
def flatten_from_2d(list_of_lists):
"""
Returns a 1d, flattened version of a 2d array or list of lists.
This also removes any empty elements.
"""
vec = []
for row in iter(list_of_lists):
vec.extend(row)
    return vec
|
py | 1a3ce77866b70a46cbf09a201895e6ae73632afa | F = open('random_distribution.tsv')
O = open('gene_expression.csv', 'w')
O2 = open('gene_expression-few.csv', 'w')
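# Note (description inferred from the code below): for each tab-separated
# input line this writes "gene<N>,<third column>" to gene_expression.csv,
# and the same value with the gene index cycling 1..10 to
# gene_expression-few.csv.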
n = 0; m = 0
for line in F:
n = n + 1
m = m + 1
#O.write('gene' + str(n) + ',' + ','.join(line.split('\t')))
O.write('gene' + str(n) + ',' + line.split('\t')[2] + '\n')
O2.write('gene' + str(m) + ',' + line.split('\t')[2] + '\n')
if m == 10: m = 0
O.close()
O2.close()
|
py | 1a3ce79ba04bec39d9da553f033f9b2ff90d3b97 | # Adaptation of the original code from
# https://github.com/idiap/fast-transformers/blob/master/fast_transformers/causal_product/__init__.py
# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
# Written by Angelos Katharopoulos <[email protected]>,
# Apoorv Vyas <[email protected]>
#
# Modifications Copyright (c) 2021 Kazuki Irie
import torch
import torch.nn.functional as F
from torch.utils.cpp_extension import load
# Just in time import
# https://pytorch.org/tutorials/advanced/cpp_extens
# The extra arg `extra_cuda_cflags=['--ftemplate-depth=1024']` solves:
# ```
# pybind11/detail/common.h(461):
# error: excessive recursion at instantiation of class
# ```
mod_causal_dot_product_cuda = load(
extra_cuda_cflags=['--ftemplate-depth=1024'],
name="fast_lstm_v3_forward",
sources=["utils/fast_lstm_v3/fast_lstm_v3_cuda.cu"], verbose=True)
mod_causal_dot_backward_cuda = load(
extra_cuda_cflags=['--ftemplate-depth=1024'],
name="fast_lstm_v3_backward",
sources=["utils/fast_lstm_v3/fast_lstm_v3_cuda.cu"], verbose=True)
causal_dot_product_cuda = mod_causal_dot_product_cuda.fast_lstm_v3_forward
causal_dot_backward_cuda = mod_causal_dot_backward_cuda.fast_lstm_v3_backward
class FastLSTMv3(torch.autograd.Function):
"""Fast LSTM with the FWM update rule."""
dot = {
# "cpu": causal_dot_product_cpu,
"cuda": causal_dot_product_cuda
}
dot_backward = {
# "cpu": causal_dot_backward_cpu,
"cuda": causal_dot_backward_cuda
}
@staticmethod
def forward(ctx,
Zi, Ki, Vi, bi, Wi,
Zu, Ku, Vu, bu, Wu,
Zo, Ko, Vo, bo, Wo, h0, c0):
# Computations:
# fast weights with sum update rule: R_t = R_t-1 + v_t (x) k_t
# output: h_t = tanh(R_t * h_t-1 + z_t)
# z_t is the output of a feed-forward fast weight layer.
# h0 is the initial RNN state.
# E = M.
# Create the output tensor
device = Zi.device
N, H, L, _ = Zi.shape
_, _, _, M = Vi.shape
assert Ki.shape == (N, H, L, M)
assert Vi.shape == (N, H, L, M)
assert h0.shape == (N, H, 1, M)
assert Wi.shape == (N, H, M, M)
rnn_out = torch.zeros((N, H, L, M), device=device, dtype=Zi.dtype)
cell_out = torch.zeros((N, H, L, M), device=device, dtype=Zi.dtype)
gate_i = torch.zeros((N, H, L, M), device=device, dtype=Zi.dtype)
update_u = torch.zeros((N, H, L, M), device=device, dtype=Zi.dtype)
gate_o = torch.zeros((N, H, L, M), device=device, dtype=Zi.dtype)
out_del_nmz = torch.zeros((N, H, L, M), device=device, dtype=Zi.dtype)
h_init = h0.detach().clone()
c_init = c0.detach().clone()
V_old_i = torch.zeros((N, H, L, M), device=device, dtype=Zi.dtype)
V_old_u = torch.zeros((N, H, L, M), device=device, dtype=Zi.dtype)
V_old_o = torch.zeros((N, H, L, M), device=device, dtype=Zi.dtype)
# W = torch.zeros((N, H, E, M), device=device, dtype=Z.dtype)
# h0 = torch.zeros((N, H, M), device=device, dtype=Z.dtype)
# Actually perform the dot product
FastLSTMv3.dot[device.type](
Zi.data, # input gate
Ki.data,
Vi.data,
bi.data,
Zu.data, # update candidate
Ku.data,
Vu.data,
bu.data,
Zo.data, # ouput gate
Ko.data,
Vo.data,
bo.data,
h0.data, # init hidden states
c0.data, # init cell states
Wi,
Wu,
Wo,
rnn_out,
out_del_nmz,
cell_out,
gate_i,
update_u,
gate_o,
V_old_i,
V_old_u,
V_old_o
)
ctx.save_for_backward(rnn_out, out_del_nmz,
cell_out, gate_i, update_u, gate_o,
Zi, Ki, Vi, Wi, bi,
Zu, Ku, Vu, Wu, bu,
Zo, Ko, Vo, Wo, bo,
V_old_i, V_old_u, V_old_o,
h_init, c_init)
return rnn_out, cell_out
@staticmethod
def backward(ctx, grad_out, grad_cell):
# Extract the saved tensors
(rnn_out, rnn_out_delayed, cell_out, gate_i, update_u, gate_o,
Zi, Ki, Vi, Wi, bi,
Zu, Ku, Vu, Wu, bu,
Zo, Ko, Vo, Wo, bo,
V_old_i, V_old_u, V_old_o,
h0, c0) = ctx.saved_tensors
# Allocate memory for the gradients
grad_Zi = torch.zeros_like(Zi)
grad_Ki = torch.zeros_like(Ki)
grad_Vi = torch.zeros_like(Vi)
grad_bi = torch.zeros_like(bi)
grad_Zu = torch.zeros_like(Zu)
grad_Ku = torch.zeros_like(Ku)
grad_Vu = torch.zeros_like(Vu)
grad_bu = torch.zeros_like(bu)
grad_Zo = torch.zeros_like(Zo)
grad_Ko = torch.zeros_like(Ko)
grad_Vo = torch.zeros_like(Vo)
grad_bo = torch.zeros_like(bo)
# Prepare delayed RNN outputs
# shape of rnn_out: N, H, L, M
# dim2 is the time dim.
# shape of h0: N, H, 1, M
# rnn_out_delayed = torch.cat([h0, rnn_out[:, :, :-1]], dim=2)
c_out_delayed = torch.cat([c0, cell_out[:, :, :-1]], dim=2)
# In the backward pass, we need u_t - cell_{t-1} and not delayed cell.
u_minus_c = update_u - c_out_delayed
# Compute the gradients
FastLSTMv3.dot_backward[Zi.device.type](
grad_out,
Ki.data,
Vi.data,
bi.data,
Ku.data,
Vu.data,
bu.data,
Ko.data,
Vo.data,
bo.data,
V_old_i.data,
V_old_u.data,
V_old_o.data,
rnn_out,
rnn_out_delayed,
cell_out,
u_minus_c,
gate_i,
update_u,
gate_o,
Wi.data,
Wu.data,
Wo.data,
grad_Zi,
grad_Ki,
grad_Vi,
grad_bi,
grad_Zu,
grad_Ku,
grad_Vu,
grad_bu,
grad_Zo,
grad_Ko,
grad_Vo,
grad_bo,
)
return (grad_Zi, grad_Ki, grad_Vi, grad_bi, None,
grad_Zu, grad_Ku, grad_Vu, grad_bu, None,
grad_Zo, grad_Ko, grad_Vo, grad_bo, None,
None, None)
# Alias the autograd functions to python style snake case naming
fast_lstm_v3 = FastLSTMv3.apply
if __name__ == '__main__':
import torch
torch.manual_seed(111)
# Tests pass if the relative difference compared with
# the corresponding torch autograd computation
# is smaller than a threshold.
# Ideally should be tested with double...
rel_threshold = 1e-2
# from https://github.com/idiap/fast-transformers/blob/master/tests/causal_product/test_causal_product_gpu.py
def max_relative_error(a, b, eps=1e-8):
return float(torch.abs((b - a) / (torch.abs(b) + eps)).max().item())
print('##########################')
print('# Test forward pass')
print('##########################')
bsz, n_head, slen, d_head = 3, 5, 11, 18
v_dim = d_head
h0 = torch.zeros(bsz, n_head, 1, v_dim, device='cuda')
c0 = torch.zeros(bsz, n_head, 1, v_dim, device='cuda')
# (B, H, len, dim)
k0i = torch.rand(bsz, n_head, slen, d_head, device='cuda')
v0i = torch.rand(bsz, n_head, slen, v_dim, device='cuda')
z0i = torch.rand(bsz, n_head, slen, v_dim, device='cuda')
b0i = torch.sigmoid(torch.rand(bsz, n_head, slen, 1, device='cuda'))
k0u = torch.rand(bsz, n_head, slen, d_head, device='cuda')
v0u = torch.rand(bsz, n_head, slen, v_dim, device='cuda')
z0u = torch.rand(bsz, n_head, slen, v_dim, device='cuda')
b0u = torch.sigmoid(torch.rand(bsz, n_head, slen, 1, device='cuda'))
k0o = torch.rand(bsz, n_head, slen, d_head, device='cuda')
v0o = torch.rand(bsz, n_head, slen, v_dim, device='cuda')
z0o = torch.rand(bsz, n_head, slen, v_dim, device='cuda')
b0o = torch.sigmoid(torch.rand(bsz, n_head, slen, 1, device='cuda'))
# key sum norm
k0i = F.softmax(k0i, dim=-1)
k0u = F.softmax(k0u, dim=-1)
k0o = F.softmax(k0o, dim=-1)
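    # Softmax puts each key on the probability simplex, so every retrieval
    # W @ k in the reference loop below is a convex combination of the
    # columns of the fast-weight matrix.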
k1i = torch.zeros(
bsz, n_head, slen, d_head, requires_grad=True, device='cuda')
v1i = torch.zeros(
bsz, n_head, slen, v_dim, requires_grad=True, device='cuda')
z1i = torch.zeros(
bsz, n_head, slen, v_dim, requires_grad=True, device='cuda')
b1i = torch.zeros(
bsz, n_head, slen, 1, requires_grad=True, device='cuda')
# update candidate
k1u = torch.zeros(
bsz, n_head, slen, d_head, requires_grad=True, device='cuda')
v1u = torch.zeros(
bsz, n_head, slen, v_dim, requires_grad=True, device='cuda')
z1u = torch.zeros(
bsz, n_head, slen, v_dim, requires_grad=True, device='cuda')
b1u = torch.zeros(
bsz, n_head, slen, 1, requires_grad=True, device='cuda')
# output gate
k1o = torch.zeros(
bsz, n_head, slen, d_head, requires_grad=True, device='cuda')
v1o = torch.zeros(
bsz, n_head, slen, v_dim, requires_grad=True, device='cuda')
z1o = torch.zeros(
bsz, n_head, slen, v_dim, requires_grad=True, device='cuda')
b1o = torch.zeros(
bsz, n_head, slen, 1, requires_grad=True, device='cuda')
# q1.data = q0.data
k1i.data = k0i.data
v1i.data = v0i.data
b1i.data = b0i.data
z1i.data = z0i.data
k1u.data = k0u.data
v1u.data = v0u.data
b1u.data = b0u.data
z1u.data = z0u.data
k1o.data = k0o.data
v1o.data = v0o.data
b1o.data = b0o.data
z1o.data = z0o.data
W1i = torch.zeros(bsz, n_head, d_head, v_dim, device='cuda')
W1u = torch.zeros(bsz, n_head, d_head, v_dim, device='cuda')
W1o = torch.zeros(bsz, n_head, d_head, v_dim, device='cuda')
# h0 = torch.zeros(n_head, d_head, v_dim, device='cuda')
print("Forwarding custom kernel...")
out1, _ = fast_lstm_v3(z1i, k1i, v1i, b1i, W1i,
z1u, k1u, v1u, b1u, W1u,
z1o, k1o, v1o, b1o, W1o,
h0, c0)
print("done.")
# compute using torch
k2i = torch.zeros(
bsz, n_head, slen, d_head, requires_grad=True, device='cuda')
v2i = torch.zeros(
bsz, n_head, slen, v_dim, requires_grad=True, device='cuda')
z2i = torch.zeros(
bsz, n_head, slen, v_dim, requires_grad=True, device='cuda')
b2i = torch.zeros(
bsz, n_head, slen, 1, requires_grad=True, device='cuda')
# update candidate
k2u = torch.zeros(
bsz, n_head, slen, d_head, requires_grad=True, device='cuda')
v2u = torch.zeros(
bsz, n_head, slen, v_dim, requires_grad=True, device='cuda')
z2u = torch.zeros(
bsz, n_head, slen, v_dim, requires_grad=True, device='cuda')
b2u = torch.zeros(
bsz, n_head, slen, 1, requires_grad=True, device='cuda')
# output gate
k2o = torch.zeros(
bsz, n_head, slen, d_head, requires_grad=True, device='cuda')
v2o = torch.zeros(
bsz, n_head, slen, v_dim, requires_grad=True, device='cuda')
z2o = torch.zeros(
bsz, n_head, slen, v_dim, requires_grad=True, device='cuda')
b2o = torch.zeros(
bsz, n_head, slen, 1, requires_grad=True, device='cuda')
# q1.data = q0.data
k2i.data = k0i.data
v2i.data = v0i.data
b2i.data = b0i.data
z2i.data = z0i.data
k2u.data = k0u.data
v2u.data = v0u.data
b2u.data = b0u.data
z2u.data = z0u.data
k2o.data = k0o.data
v2o.data = v0o.data
b2o.data = b0o.data
z2o.data = z0o.data
# (len, B, H, dim)
# input
z_2i = z2i.permute(2, 0, 1, 3)
slen, bsz, n_head, d_head = z_2i.shape
z_2i = z_2i.reshape(slen, bsz * n_head, d_head)
k_2i = k2i.permute(2, 0, 1, 3)
k_2i = k_2i.reshape(slen, bsz * n_head, d_head)
v_2i = v2i.permute(2, 0, 1, 3)
v_2i = v_2i.reshape(slen, bsz * n_head, v_dim)
b_2i = b2i.permute(2, 0, 1, 3)
b_2i = b_2i.reshape(slen, bsz * n_head, 1)
# update
z_2u = z2u.permute(2, 0, 1, 3)
z_2u = z_2u.reshape(slen, bsz * n_head, d_head)
k_2u = k2u.permute(2, 0, 1, 3)
k_2u = k_2u.reshape(slen, bsz * n_head, d_head)
v_2u = v2u.permute(2, 0, 1, 3)
v_2u = v_2u.reshape(slen, bsz * n_head, v_dim)
b_2u = b2u.permute(2, 0, 1, 3)
b_2u = b_2u.reshape(slen, bsz * n_head, 1)
# output gate
z_2o = z2o.permute(2, 0, 1, 3)
z_2o = z_2o.reshape(slen, bsz * n_head, d_head)
k_2o = k2o.permute(2, 0, 1, 3)
k_2o = k_2o.reshape(slen, bsz * n_head, d_head)
v_2o = v2o.permute(2, 0, 1, 3)
v_2o = v_2o.reshape(slen, bsz * n_head, v_dim)
b_2o = b2o.permute(2, 0, 1, 3)
b_2o = b_2o.reshape(slen, bsz * n_head, 1)
Wi = torch.zeros(bsz * n_head, v_dim, d_head, device='cuda')
Wu = torch.zeros(bsz * n_head, v_dim, d_head, device='cuda')
Wo = torch.zeros(bsz * n_head, v_dim, d_head, device='cuda')
h = torch.zeros(bsz * n_head, d_head, device='cuda')
cell = torch.zeros(bsz * n_head, d_head, device='cuda')
out_list = []
print("Forwarding PyTorch code...")
for pos in range(slen):
# get old values
v_old_i = torch.bmm(Wi, k_2i[pos].unsqueeze(2)).squeeze()
v_old_u = torch.bmm(Wu, k_2u[pos].unsqueeze(2)).squeeze()
v_old_o = torch.bmm(Wo, k_2o[pos].unsqueeze(2)).squeeze()
v_insert_i = b_2i[pos] * (v_2i[pos] - v_old_i)
v_insert_u = b_2u[pos] * (v_2u[pos] - v_old_u)
v_insert_o = b_2o[pos] * (v_2o[pos] - v_old_o)
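        # Together with the outer-product updates below, this is the delta
        # rule W <- W + beta * (v_new - W k) k^T, applied separately to the
        # input-gate, update-candidate and output-gate fast weights.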
# update fast weights
Wi = Wi + torch.bmm(v_insert_i.unsqueeze(2), k_2i[pos].unsqueeze(1))
Wu = Wu + torch.bmm(v_insert_u.unsqueeze(2), k_2u[pos].unsqueeze(1))
Wo = Wo + torch.bmm(v_insert_o.unsqueeze(2), k_2o[pos].unsqueeze(1))
h = F.softmax(h, dim=-1)
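        # The previous hidden state is softmax-normalized before it queries
        # the fast-weight matrices (this appears to correspond to
        # out_del_nmz in the custom kernel).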
gate_i = torch.sigmoid(
torch.bmm(Wi, h.unsqueeze(2)).squeeze() + z_2i[pos])
rec_u = torch.sigmoid(
torch.bmm(Wu, h.unsqueeze(2)).squeeze() + z_2u[pos])
gate_o = torch.sigmoid(
torch.bmm(Wo, h.unsqueeze(2)).squeeze() + z_2o[pos])
cell = gate_i * rec_u + (1. - gate_i) * cell
        # modified LSTM for FWM update rule, with a skip connection
        # from the update-candidate input z_u.
        h = cell * gate_o + z_2u[pos]
out_list.append(h.clone())
print("done.")
out2 = torch.stack(out_list)
out2 = out2.view(slen, bsz, n_head, v_dim)
out1 = out1.permute(2, 0, 1, 3)
for s in range(slen):
for b in range(bsz):
for h in range(n_head):
print(f"forward: s={s} b={b} h={h}")
print(f"out: {out1[s][b][h]}")
print(f"ref: {out2[s][b][h]}")
assert max_relative_error(
out1[s][b][h], out2[s][b][h]) < rel_threshold
print("pass!")
print('##########################')
print('# Test Backward pass')
print('##########################')
# grad
loss1 = out1.sum()
z1i.retain_grad()
k1i.retain_grad()
v1i.retain_grad()
b1i.retain_grad()
z1u.retain_grad()
k1u.retain_grad()
v1u.retain_grad()
b1u.retain_grad()
z1o.retain_grad()
k1o.retain_grad()
v1o.retain_grad()
b1o.retain_grad()
loss1.backward()
loss2 = out2.sum()
z2i.retain_grad()
k2i.retain_grad()
v2i.retain_grad()
b2i.retain_grad()
z2u.retain_grad()
k2u.retain_grad()
v2u.retain_grad()
b2u.retain_grad()
z2o.retain_grad()
k2o.retain_grad()
v2o.retain_grad()
b2o.retain_grad()
loss2.backward()
thr = 1e-6
for s in reversed(range(slen)):
for b in reversed(range(bsz)):
for h in range(n_head):
print(f" === backward: s={s}, b={b}, h={h} ===")
# Output gate
print("Output gate ---")
print(f"grad input out: {z1o.grad[b][h][s]}")
print(f"grad input ref: {z2o.grad[b][h][s]}")
assert max_relative_error(
z1o.grad[b][h][s], z2o.grad[b][h][s]) < rel_threshold
print("pass!")
print(f"grad key out: {k1o.grad[b][h][s]}")
print(f"grad key ref: {k2o.grad[b][h][s]}")
assert max_relative_error(
k1o.grad[b][h][s], k2o.grad[b][h][s]) < rel_threshold
print("pass!")
print(f"grad beta out: {b1o.grad[b][h][s]}")
print(f"grad beta ref: {b2o.grad[b][h][s]}")
assert max_relative_error(
b1o.grad[b][h][s], b2o.grad[b][h][s]) < rel_threshold
print("pass!")
print(f"grad value out: {v1o.grad[b][h][s]}")
print(f"grad value ref: {v2o.grad[b][h][s]}")
assert max_relative_error(
v1o.grad[b][h][s], v2o.grad[b][h][s]) < rel_threshold
print("pass!")
# Update term
print("Update candidate ---")
print(f"grad input out: {z1u.grad[b][h][s]}")
print(f"grad input ref: {z2u.grad[b][h][s]}")
assert max_relative_error(
z1u.grad[b][h][s], z2u.grad[b][h][s]) < rel_threshold
print("pass!")
print(f"grad key out: {k1u.grad[b][h][s]}")
print(f"grad key ref: {k2u.grad[b][h][s]}")
assert max_relative_error(
k1u.grad[b][h][s], k2u.grad[b][h][s]) < rel_threshold
print("pass!")
print(f"grad value out: {v1u.grad[b][h][s]}")
print(f"grad value ref: {v2u.grad[b][h][s]}")
assert max_relative_error(
v1u.grad[b][h][s], v2u.grad[b][h][s]) < rel_threshold
print("pass!")
print(f"grad beta out: {b1u.grad[b][h][s]}")
print(f"grad beta ref: {b2u.grad[b][h][s]}")
assert max_relative_error(
b1u.grad[b][h][s], b2u.grad[b][h][s]) < rel_threshold
print("pass!")
# Input gate
print("Input gate ---")
print(f"grad input out: {z1i.grad[b][h][s]}")
print(f"grad input ref: {z2i.grad[b][h][s]}")
assert max_relative_error(
z1i.grad[b][h][s], z2i.grad[b][h][s]) < rel_threshold
print("pass!")
print(f"grad key out: {k1i.grad[b][h][s]}")
print(f"grad key ref: {k2i.grad[b][h][s]}")
assert max_relative_error(
k1i.grad[b][h][s], k2i.grad[b][h][s]) < rel_threshold
print("pass!")
print(f"grad value out: {v1i.grad[b][h][s]}")
print(f"grad value ref: {v2i.grad[b][h][s]}")
assert max_relative_error(
v1i.grad[b][h][s], v2i.grad[b][h][s]) < rel_threshold
print("pass!")
print(f"grad beta out: {b1i.grad[b][h][s]}")
print(f"grad beta ref: {b2i.grad[b][h][s]}")
assert max_relative_error(
b1i.grad[b][h][s], b2i.grad[b][h][s]) < rel_threshold
print("pass!")
print("All tests pass.")
|
py | 1a3ce7febff33843795ad3017a4c658f837c0ae9 | # Copyright (c) 2014 Clinton Knight. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder import exception
from cinder import test
import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes
import cinder.volume.drivers.netapp.common as na_common
import cinder.volume.drivers.netapp.dataontap.fc_cmode as fc_cmode
import cinder.volume.drivers.netapp.utils as na_utils
class NetAppDriverFactoryTestCase(test.TestCase):
def setUp(self):
super(NetAppDriverFactoryTestCase, self).setUp()
self.mock_object(na_common, 'LOG')
def test_new(self):
self.mock_object(na_utils.OpenStackInfo, 'info',
return_value='fake_info')
mock_create_driver = self.mock_object(na_common.NetAppDriver,
'create_driver')
config = na_fakes.create_configuration()
config.netapp_storage_family = 'fake_family'
config.netapp_storage_protocol = 'fake_protocol'
kwargs = {'configuration': config}
na_common.NetAppDriver(**kwargs)
kwargs['app_version'] = 'fake_info'
mock_create_driver.assert_called_with('fake_family', 'fake_protocol',
*(), **kwargs)
def test_new_missing_config(self):
self.mock_object(na_utils.OpenStackInfo, 'info')
self.mock_object(na_common.NetAppDriver, 'create_driver')
self.assertRaises(exception.InvalidInput, na_common.NetAppDriver, **{})
def test_new_missing_family(self):
self.mock_object(na_utils.OpenStackInfo, 'info')
self.mock_object(na_common.NetAppDriver, 'create_driver')
config = na_fakes.create_configuration()
config.netapp_storage_protocol = 'fake_protocol'
config.netapp_storage_family = None
kwargs = {'configuration': config}
self.assertRaises(exception.InvalidInput,
na_common.NetAppDriver,
**kwargs)
def test_new_missing_protocol(self):
self.mock_object(na_utils.OpenStackInfo, 'info')
self.mock_object(na_common.NetAppDriver, 'create_driver')
config = na_fakes.create_configuration()
config.netapp_storage_family = 'fake_family'
kwargs = {'configuration': config}
self.assertRaises(exception.InvalidInput,
na_common.NetAppDriver,
**kwargs)
def test_create_driver(self):
def get_full_class_name(obj):
return obj.__module__ + '.' + obj.__class__.__name__
kwargs = {
'configuration': na_fakes.create_configuration(),
'app_version': 'fake_info',
'host': 'fakehost@fakebackend',
}
registry = na_common.NETAPP_UNIFIED_DRIVER_REGISTRY
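        # The registry maps each storage family to a dict of
        # {protocol: fully-qualified driver class name}, iterated below.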
for family in registry:
for protocol, full_class_name in registry[family].items():
driver = na_common.NetAppDriver.create_driver(
family, protocol, **kwargs)
self.assertEqual(full_class_name, get_full_class_name(driver))
def test_create_driver_case_insensitive(self):
kwargs = {
'configuration': na_fakes.create_configuration(),
'app_version': 'fake_info',
'host': 'fakehost@fakebackend',
}
driver = na_common.NetAppDriver.create_driver('ONTAP_CLUSTER', 'FC',
**kwargs)
self.assertIsInstance(driver, fc_cmode.NetAppCmodeFibreChannelDriver)
def test_create_driver_invalid_family(self):
kwargs = {
'configuration': na_fakes.create_configuration(),
'app_version': 'fake_info',
'host': 'fakehost@fakebackend',
}
self.assertRaises(exception.InvalidInput,
na_common.NetAppDriver.create_driver,
'kardashian', 'iscsi', **kwargs)
def test_create_driver_invalid_protocol(self):
kwargs = {
'configuration': na_fakes.create_configuration(),
'app_version': 'fake_info',
'host': 'fakehost@fakebackend',
}
self.assertRaises(exception.InvalidInput,
na_common.NetAppDriver.create_driver,
'ontap_7mode', 'carrier_pigeon', **kwargs)
|
py | 1a3ceb1ceed1013622a2b234c4b31ad2b1fdb875 | from connexion.operations import OpenAPIOperation
from connexion.resolver import RelativeResolver, Resolver, RestyResolver
COMPONENTS = {'parameters': {'myparam': {'in': 'path', 'schema': {'type': 'integer'}}}}
def test_standard_resolve_x_router_controller():
operation = OpenAPIOperation(
api=None,
method='GET',
path='endpoint',
path_parameters=[],
operation={
'x-openapi-router-controller': 'fakeapi.hello',
'operationId': 'post_greeting',
},
app_security=[],
components=COMPONENTS,
resolver=Resolver()
)
assert operation.operation_id == 'fakeapi.hello.post_greeting'
def test_relative_resolve_x_router_controller():
operation = OpenAPIOperation(
api=None,
method='GET',
path='endpoint',
path_parameters=[],
operation={
'x-openapi-router-controller': 'fakeapi.hello',
'operationId': 'post_greeting',
},
app_security=[],
components=COMPONENTS,
resolver=RelativeResolver('root_path')
)
assert operation.operation_id == 'fakeapi.hello.post_greeting'
def test_relative_resolve_operation_id():
operation = OpenAPIOperation(
api=None,
method='GET',
path='endpoint',
path_parameters=[],
operation={
'operationId': 'hello.post_greeting',
},
app_security=[],
components=COMPONENTS,
resolver=RelativeResolver('fakeapi')
)
assert operation.operation_id == 'fakeapi.hello.post_greeting'
def test_relative_resolve_operation_id_with_module():
import fakeapi
operation = OpenAPIOperation(
api=None,
method='GET',
path='endpoint',
path_parameters=[],
operation={
'operationId': 'hello.post_greeting',
},
app_security=[],
components=COMPONENTS,
resolver=RelativeResolver(fakeapi)
)
assert operation.operation_id == 'fakeapi.hello.post_greeting'
def test_resty_resolve_operation_id():
operation = OpenAPIOperation(
api=None,
method='GET',
path='endpoint',
path_parameters=[],
operation={
'operationId': 'fakeapi.hello.post_greeting',
},
app_security=[],
components=COMPONENTS,
resolver=RestyResolver('fakeapi')
)
assert operation.operation_id == 'fakeapi.hello.post_greeting'
def test_resty_resolve_x_router_controller_with_operation_id():
operation = OpenAPIOperation(
api=None,
method='GET',
path='endpoint',
path_parameters=[],
operation={
'x-openapi-router-controller': 'fakeapi.hello',
'operationId': 'post_greeting',
},
app_security=[],
components=COMPONENTS,
resolver=RestyResolver('fakeapi')
)
assert operation.operation_id == 'fakeapi.hello.post_greeting'
def test_resty_resolve_x_router_controller_without_operation_id():
operation = OpenAPIOperation(
api=None,
method='GET',
path='/hello/{id}',
path_parameters=[],
operation={'x-openapi-router-controller': 'fakeapi.hello'},
app_security=[],
components=COMPONENTS,
resolver=RestyResolver('fakeapi'))
assert operation.operation_id == 'fakeapi.hello.get'
def test_resty_resolve_with_default_module_name():
operation = OpenAPIOperation(
api=None,
method='GET',
path='/hello/{id}',
path_parameters=[],
operation={},
app_security=[],
components=COMPONENTS,
resolver=RestyResolver('fakeapi')
)
assert operation.operation_id == 'fakeapi.hello.get'
def test_resty_resolve_with_default_module_name_nested():
operation = OpenAPIOperation(
api=None,
method='GET',
path='/hello/{id}/world',
path_parameters=[],
operation={},
app_security=[],
components=COMPONENTS,
resolver=RestyResolver('fakeapi')
)
assert operation.operation_id == 'fakeapi.hello.world.search'
def test_resty_resolve_with_default_module_name_lowercase_verb():
operation = OpenAPIOperation(
api=None,
method='get',
path='/hello/{id}',
path_parameters=[],
operation={},
app_security=[],
components=COMPONENTS,
resolver=RestyResolver('fakeapi')
)
assert operation.operation_id == 'fakeapi.hello.get'
def test_resty_resolve_with_default_module_name_lowercase_verb_nested():
operation = OpenAPIOperation(
api=None,
method='get',
path='/hello/world/{id}',
path_parameters=[],
operation={},
app_security=[],
components=COMPONENTS,
resolver=RestyResolver('fakeapi')
)
assert operation.operation_id == 'fakeapi.hello.world.get'
def test_resty_resolve_with_default_module_name_will_translate_dashes_in_resource_name():
operation = OpenAPIOperation(
api=None,
method='GET',
path='/foo-bar',
path_parameters=[],
operation={},
app_security=[],
components=COMPONENTS,
resolver=RestyResolver('fakeapi')
)
assert operation.operation_id == 'fakeapi.foo_bar.search'
def test_resty_resolve_with_default_module_name_can_resolve_api_root():
operation = OpenAPIOperation(
api=None,
method='GET',
path='/',
path_parameters=[],
operation={},
app_security=[],
components=COMPONENTS,
resolver=RestyResolver('fakeapi')
)
assert operation.operation_id == 'fakeapi.get'
def test_resty_resolve_with_default_module_name_will_resolve_resource_root_get_as_search():
operation = OpenAPIOperation(
api=None,
method='GET',
path='/hello',
path_parameters=[],
operation={},
app_security=[],
components=COMPONENTS,
resolver=RestyResolver('fakeapi')
)
assert operation.operation_id == 'fakeapi.hello.search'
def test_resty_resolve_with_default_module_name_and_x_router_controller_will_resolve_resource_root_get_as_search():
operation = OpenAPIOperation(
api=None,
method='GET',
path='/hello',
path_parameters=[],
operation={
'x-openapi-router-controller': 'fakeapi.hello',
},
app_security=[],
components=COMPONENTS,
resolver=RestyResolver('fakeapi')
)
assert operation.operation_id == 'fakeapi.hello.search'
def test_resty_resolve_with_default_module_name_will_resolve_resource_root_as_configured():
operation = OpenAPIOperation(
api=None,
method='GET',
path='/hello',
path_parameters=[],
operation={},
app_security=[],
components=COMPONENTS,
resolver=RestyResolver('fakeapi', 'api_list')
)
assert operation.operation_id == 'fakeapi.hello.api_list'
def test_resty_resolve_with_default_module_name_will_resolve_resource_root_post_as_post():
operation = OpenAPIOperation(
api=None,
method='POST',
path='/hello',
path_parameters=[],
operation={},
app_security=[],
components=COMPONENTS,
resolver=RestyResolver('fakeapi')
)
assert operation.operation_id == 'fakeapi.hello.post'
|
py | 1a3ceb55d1690aa47364fb352526903fbe239cac | import zeit.cms.testing
def test_suite():
return zeit.cms.testing.FunctionalDocFileSuite(
'README.txt',
'feed.txt',
'oldchannel.txt',
package='zeit.cms.syndication'
)
|