prompt: string, lengths 19 to 879k
completion: string, lengths 3 to 53.8k
api: string, lengths 8 to 59
# -*- coding: utf-8 -*- """ :mod:`orion.algo.robo.rbayes -- TODO ============================================ .. module:: robo :platform: Unix :synopsis: TODO TODO: Write long description """ import george import numpy from robo.acquisition_functions.ei import EI from robo.acquisition_functions.lcb import LCB from robo.acquisition_functions.log_ei import LogEI from robo.acquisition_functions.marginalization import MarginalizationGPMCMC from robo.acquisition_functions.pi import PI from robo.initial_design import init_latin_hypercube_sampling from robo.maximizers.differential_evolution import DifferentialEvolution from robo.maximizers.random_sampling import RandomSampling from robo.maximizers.scipy_optimizer import SciPyOptimizer from robo.models.gaussian_process import GaussianProcess from robo.models.gaussian_process_mcmc import GaussianProcessMCMC from robo.models.random_forest import RandomForest from robo.models.wrapper_bohamiann import WrapperBohamiann from robo.priors.default_priors import DefaultPrior from robo.solver.bayesian_optimization import BayesianOptimization from orion.algo.base import BaseAlgorithm from orion.algo.space import Space def build_bounds(space): """ Build bounds of optimization space :param space: """ lower = numpy.zeros(len(space.keys())) upper = numpy.zeros(len(space.keys())) for i, (_name, dim) in enumerate(space.items()): lower[i], upper[i] = dim.interval() if dim.prior_name == 'reciprocal': lower[i] = numpy.log(lower[i]) upper[i] = numpy.log(upper[i]) return lower, upper def build_optimizer(model, maximizer="random", acquisition_func="log_ei", maximizer_seed=1): """ General interface for Bayesian optimization for global black box optimization problems. Parameters ---------- maximizer: {"random", "scipy", "differential_evolution"} The optimizer for the acquisition function. acquisition_func: {"ei", "log_ei", "lcb", "pi"} The acquisition function maximizer_seed: int Seed for random number generator of the acquisition function maximizer Returns ------- Optimizer :param maximizer_seed: :param acquisition_func: :param maximizer: :param model: """ if acquisition_func == "ei": a = EI(model) elif acquisition_func == "log_ei": a = LogEI(model) elif acquisition_func == "pi": a = PI(model) elif acquisition_func == "lcb": a = LCB(model) else: raise ValueError("'{}' is not a valid acquisition function" .format(acquisition_func)) if isinstance(model, GaussianProcessMCMC): acquisition_func = MarginalizationGPMCMC(a) else: acquisition_func = a maximizer_rng =
numpy.random.RandomState(maximizer_seed)
numpy.random.RandomState
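Worked reference for this row: the prompt is cut inside build_optimizer, right where the acquisition-function maximizer's random state is created. A minimal runnable sketch of just that seeding step, using only names visible in the prompt (the maximizer construction that presumably follows is omitted):

import numpy

maximizer_seed = 1
# The completion closes the truncated assignment with a seeded NumPy RNG,
# so repeated runs of the acquisition maximizer are reproducible.
maximizer_rng = numpy.random.RandomState(maximizer_seed)
print(maximizer_rng.uniform(size=3))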
import numpy as np # Simple feedforward neural network: # # x (input layer) -> hidden layers -> y (output layer) -> backpropagation -> ... # # Forward pass: # a_i = sigmoid(w_i.a_{i-1} + b_i) # => a = (a_1, ..., a_m) # # Backpropagation: # - updates weights and biases by trying to find the minimum of the # loss function (i.e. stochastic gradient descent) # # Note: use automatic differentiation instead of symbolic differentiation. (How???) class FNN(): ''' How is this used: 1. Initialize the neural network model = fnn(input_size, output_size, hidden_layer_size, activation_function) model.initialize_weights_and_biases() 2. Set the loss funciton and optimizer and compile 3. Training 4. Prediction ''' def __init__(self, input_size, output_size, hidden_layer_sizes, activation_function): self.input_size = input_size self.output_size = output_size self.hidden_layer_sizes = hidden_layer_sizes self.activation_function = activation_function self.Ws = None self.Bs = None self.loss_function = None self.optimizer = None def initialize_weights_and_biases(self): ''' Set the weights and biases to some random values ''' Ws = [] Bs = [] # Mapping from input layer into the 1. hidden layer W = np.random.normal(size=(self.hidden_layer_sizes[0], self.input_size)) B = np.random.normal(size=self.hidden_layer_sizes[0]) Ws.append(W) Bs.append(B) for i in range(1, len(self.hidden_layer_sizes)): # Mapping from i-1. to i. hidden layer W = np.random.normal(size=(self.hidden_layer_sizes[i], self.hidden_layer_sizes[i-1])) B = np.random.normal(size=self.hidden_layer_sizes[i]) Ws.append(W) Bs.append(B) # Mapping from the last hidden layer into the output layer W = np.random.normal(size=(self.output_size, self.hidden_layer_sizes[-1])) B = np.random.normal(size=self.output_size) Ws.append(W) Bs.append(B) self.Ws = Ws self.Bs = Bs def forward_propagation(self, x): ''' Calculation from the input layer to the output layer using the currect weights and biases. ''' # Input layer: a = x.copy() # Hidden layers + output layer: for i in range(len(self.hidden_layer_sizes) + 1): # Error: ????? Wi = self.Ws[i] Bi = self.Bs[i] a = self.activation_function(Wi.dot(a) + Bi) # == activation_function(matmul(Wi, a) + Bi) return a # # Should this particular loss function and gradient descent for it # # be done in a separate class? # def loss_function(self, y_true, y_pred): # ''' # Mean squarred error (MSE) = (1/n) sum_{i=1}^n (y_true - y_pred)^2 # where n is the number of observartion. # ''' # # sum over all of (y_pred - y_true)**2 # pass # def gradient_descent(self): # ''' # -grad(loss_function(Ws, Bs)) = -grad(C(Ws, Bs)) # = - dC/dw1 - dC/dw2 - ... - dC/dB1 - ... # where each weight and bias of each layer are summed. # ''' # # Can this be done more simply and quickly, using Numpy for example? # gradient = [] # for W in Ws: # for w in W: # # Do the gradient with w # for B in Bs: # for b in B: # # do the gradient with b # return gradient def backpropagation(self): ''' Updates the weighs and biases ''' pass def compile(self): pass def training(self, epochs): ''' forwar propagation + backpropagtion ''' pass # Some (common) loss function class MeanAbsoluteError: ''' The loss/cost function: L = sum_{i=1}^n (y_pred_i - y_true_i)^2 and its ''' def __init__(self): pass def loss_function(self, y_pred, y_true): return np.sum((y_pred - y_true)**2) def gradient_descent(self): pass # Some (common) activation function def sigmoid(x): return 1 / (1 +
np.exp(-x)
numpy.exp
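Worked reference for this row: the cut falls inside the sigmoid helper at the bottom of the prompt. A self-contained sketch of the completed function:

import numpy as np

def sigmoid(x):
    # The completion np.exp(-x) closes the open parenthesis in the prompt.
    return 1 / (1 + np.exp(-x))

print(sigmoid(np.array([-2.0, 0.0, 2.0])))  # approx [0.119, 0.5, 0.881]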
#!/usr/bin/env python # -*- coding: utf-8 -*- import dataclasses from typing import List import numpy as np from scipy.misc import derivative from scipy.interpolate import interp1d from scipy.integrate import solve_ivp from .ship_obj_3dof import ShipObj3dof @dataclasses.dataclass class Mmg3DofBasicParams: """Dataclass for setting basic parameters of MMG 3DOF. Attributes: L_pp (float): Ship length between perpendiculars [m] B (float): Ship breadth [m] d (float): Ship draft [m] x_G (float): Longitudinal coordinate of center of gravity of ship [m] D_p (float): Propeller diameter [m] m (float): Ship mass [kg] I_zG (float): Moment of inertia of ship around center of gravity A_R (float): Profile area of movable part of mariner rudder [m^2] η (float): Ratio of propeller diameter to rudder span (=D_p/HR) m_x (float): Added masses of x axis direction [kg] m_y (float): Added masses of y axis direction [kg] J_z (float): Added moment of inertia f_α (float): Rudder lift gradient coefficient ϵ (float): Ratio of wake fraction at propeller and rudder positions t_R (float): Steering resistance deduction factor a_H (float): Rudder force increase factor x_H (float): Longitudinal coordinate of acting point of the additional lateral force component induced by steering γ_R_minus (float): Flow straightening coefficient if βR < 0 γ_R_plus (float): Flow straightening coefficient if βR > 0 l_R (float): Effective longitudinal coordinate of rudder position in formula of βR κ (float): An experimental constant for expressing uR t_P (float): Thrust deduction factor w_P0 (float): Wake coefficient at propeller position in straight moving x_P (float): Effective Longitudinal coordinate of propeller position in formula of βP Note: For more information, please see the following articles. - <NAME>., <NAME>. (2015) Introduction of MMG standard method for ship maneuvering predictions. J Mar Sci Technol 20, 37–52 https://doi.org/10.1007/s00773-014-0293-y """ L_pp: float B: float d: float x_G: float D_p: float m: float I_zG: float A_R: float η: float m_x: float m_y: float J_z: float f_α: float ϵ: float t_R: float a_H: float x_H: float γ_R_minus: float γ_R_plus: float l_R: float κ: float t_P: float w_P0: float x_P: float @dataclasses.dataclass class Mmg3DofManeuveringParams: """Dataclass for setting maneuvering parameters of MMG 3ODF. 
Attributes: k_0 (float): One of manuevering parameters of coefficients representing K_T k_1 (float): One of manuevering parameters of coefficients representing K_T k_2 (float): One of manuevering parameters of coefficients representing K_T R_0_dash (float): One of manuevering parameters of Ship resistance coefficient in straight moving X_vv_dash (float): One of manuevering parameters of MMG 3DOF X_vr_dash (float): One of manuevering parameters of MMG 3DOF X_rr_dash (float): One of manuevering parameters of MMG 3DOF X_vvvv_dash (float): One of manuevering parameters of MMG 3DOF Y_v_dash (float): One of manuevering parameters of MMG 3DOF Y_r_dash (float): One of manuevering parameters of MMG 3DOF Y_vvv_dash (float): One of manuevering parameters of MMG 3DOF Y_vvr_dash (float): One of manuevering parameters of MMG 3DOF Y_vrr_dash (float): One of manuevering parameters of MMG 3DOF Y_rrr_dash (float): One of manuevering parameters of MMG 3DOF N_v_dash (float): One of manuevering parameters of MMG 3DOF N_r_dash (float): One of manuevering parameters of MMG 3DOF N_vvv_dash (float): One of manuevering parameters of MMG 3DOF N_vvr_dash (float): One of manuevering parameters of MMG 3DOF N_vrr_dash (float): One of manuevering parameters of MMG 3DOF N_rrr_dash (float): One of manuevering parameters of MMG 3DOF Note: For more information, please see the following articles. - <NAME>., <NAME>. (2015) Introduction of MMG standard method for ship maneuvering predictions. J Mar Sci Technol 20, 37–52 https://doi.org/10.1007/s00773-014-0293-y """ k_0: float k_1: float k_2: float R_0_dash: float X_vv_dash: float X_vr_dash: float X_rr_dash: float X_vvvv_dash: float Y_v_dash: float Y_r_dash: float Y_vvv_dash: float Y_vvr_dash: float Y_vrr_dash: float Y_rrr_dash: float N_v_dash: float N_r_dash: float N_vvv_dash: float N_vvr_dash: float N_vrr_dash: float N_rrr_dash: float def simulate_mmg_3dof( basic_params: Mmg3DofBasicParams, maneuvering_params: Mmg3DofManeuveringParams, time_list: List[float], δ_list: List[float], npm_list: List[float], u0: float = 0.0, v0: float = 0.0, r0: float = 0.0, ρ: float = 1.025, method: str = "RK45", t_eval=None, events=None, vectorized=False, **options ): """MMG 3DOF simulation MMG 3DOF simulation by follwoing equation of motion. .. math:: m (\\dot{u}-vr)&=-m_x\\dot{u}+m_yvr+X_H+X_P+X_R m (\\dot{v}+ur)&=-m_y\\dot{v}+m_xur+Y_H+Y_R I_{zG}\\dot{r}&=-J_Z\\dot{r}+N_H+N_R Args: basic_params (Mmg3DofBasicParams): Basic paramters for MMG 3DOF simulation. maneuvering_params (Mmg3DofManeuveringParams): Maneuvering parameters for MMG 3DOF simulation. time_list (list[float]): time list of simulation. δ_list (list[float]): rudder angle list of simulation. npm_list (List[float]): npm list of simulation. u0 (float, optional): axial velocity [m/s] in initial condition (`time_list[0]`). Defaults to 0.0. v0 (float, optional): lateral velocity [m/s] in initial condition (`time_list[0]`). Defaults to 0.0. r0 (float, optional): rate of turn [rad/s] in initial condition (`time_list[0]`). Defaults to 0.0. ρ (float, optional): seawater density [kg/m^3] Defaults to 1.025. method (str, optional): Integration method to use in `scipy.integrate.solve_ivp() <https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.solve_ivp.html>`_: "RK45" (default): Explicit Runge-Kutta method of order 5(4). The error is controlled assuming accuracy of the fourth-order method, but steps are taken using the fifth-order accurate formula (local extrapolation is done). 
A quartic interpolation polynomial is used for the dense output. Can be applied in the complex domain. "RK23": Explicit Runge-Kutta method of order 3(2). The error is controlled assuming accuracy of the second-order method, but steps are taken using the third-order accurate formula (local extrapolation is done). A cubic Hermite polynomial is used for the dense output. Can be applied in the complex domain. "DOP853": Explicit Runge-Kutta method of order 8. Python implementation of the “DOP853” algorithm originally written in Fortran. A 7-th order interpolation polynomial accurate to 7-th order is used for the dense output. Can be applied in the complex domain. "Radau": Implicit Runge-Kutta method of the Radau IIA family of order 5. The error is controlled with a third-order accurate embedded formula. A cubic polynomial which satisfies the collocation conditions is used for the dense output. "BDF": Implicit multi-step variable-order (1 to 5) method based on a backward differentiation formula for the derivative approximation. A quasi-constant step scheme is used and accuracy is enhanced using the NDF modification. Can be applied in the complex domain. "LSODA": Adams/BDF method with automatic stiffness detection and switching. This is a wrapper of the Fortran solver from ODEPACK. t_eval (array_like or None, optional): Times at which to store the computed solution, must be sorted and lie within t_span. If None (default), use points selected by the solver. events (callable, or list of callables, optional): Events to track. If None (default), no events will be tracked. Each event occurs at the zeros of a continuous function of time and state. Each function must have the signature event(t, y) and return a float. The solver will find an accurate value of t at which event(t, y(t)) = 0 using a root-finding algorithm. By default, all zeros will be found. The solver looks for a sign change over each step, so if multiple zero crossings occur within one step, events may be missed. Additionally each event function might have the following attributes: terminal (bool, optional): Whether to terminate integration if this event occurs. Implicitly False if not assigned. direction (float, optional): Direction of a zero crossing. If direction is positive, event will only trigger when going from negative to positive, and vice versa if direction is negative. If 0, then either direction will trigger event. Implicitly 0 if not assigned. You can assign attributes like `event.terminal = True` to any function in Python. vectorized (bool, optional): Whether `fun` is implemented in a vectorized fashion. Default is False. options: Options passed to a chosen solver. All options available for already implemented solvers are listed in `scipy.integrate.solve_ivp() <https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.solve_ivp.html>`_: Returns: Bunch object with the following fields defined: t (ndarray, shape (`n_points`,)): Time points. y (ndarray, shape (`n_points`,)): Values of the solution at t. sol (OdeSolution): Found solution as OdeSolution instance from MMG 3DOF simulation. t_events (list of ndarray or None): Contains for each event type a list of arrays at which an event of that type event was detected. None if events was None. y_events (list of ndarray or None): For each value of t_events, the corresponding value of the solution. None if events was None. nfev (int): Number of evaluations of the right-hand side. njev (int): Number of evaluations of the jacobian. nlu (int): Number of LU decomposition. 
status (int): Reason for algorithm termination: - -1: Integration step failed. - 0: The solver successfully reached the end of `tspan`. - 1: A termination event occurred. message (string): Human-readable description of the termination reason. success (bool): True if the solver reached the interval end or a termination event occurred (`status >= 0`). Examples: >>> duration = 200 # [s] >>> sampling = 2000 >>> time_list = np.linspace(0.00, duration, sampling) >>> δ_list = np.full(len(time_list), 35.0 * np.pi / 180.0) >>> npm_list = np.full(len(time_list), 20.338) >>> basic_params = Mmg3DofBasicParams( >>> L_pp=7.00, >>> B=1.27, >>> d=0.46, >>> x_G=0.25, >>> D_p=0.216, >>> m=3.27*1.025, >>> I_zG=m*((0.25 * L_pp) ** 2), >>> A_R=0.0539, >>> η=D_p/0.345, >>> m_x=0.022*(0.5 * ρ * (L_pp ** 2) * d), >>> m_y=0.223*(0.5 * ρ * (L_pp ** 2) * d), >>> J_z=0.011*(0.5 * ρ * (L_pp ** 4) * d), >>> f_α=2.747, >>> ϵ=1.09, >>> t_R=0.387, >>> a_H=0.312, >>> x_H=-0.464*L_pp, >>> γ_R_minus=0.395, >>> γ_R_plus=0.640, >>> l_R=-0.710, >>> κ=0.50, >>> t_P=0.220, >>> w_P0=0.40, >>> x_P=-0.650, >>> ) >>> maneuvering_params = Mmg3DofManeuveringParams( >>> k_0 = 0.2931, >>> k_1 = -0.2753, >>> k_2 = -0.1385, >>> R_0_dash = 0.022, >>> X_vv_dash = -0.040, >>> X_vr_dash = 0.002, >>> X_rr_dash = 0.011, >>> X_vvvv_dash = 0.771, >>> Y_v_dash = -0.315, >>> Y_r_dash = 0.083, >>> Y_vvv_dash = -1.607, >>> Y_vvr_dash = 0.379, >>> Y_vrr_dash = -0.391, >>> Y_rrr_dash = 0.008, >>> N_v_dash = -0.137, >>> N_r_dash = -0.049, >>> N_vvv_dash = -0.030, >>> N_vvr_dash = -0.294, >>> N_vrr_dash = 0.055, >>> N_rrr_dash = -0.013, >>> ) >>> sol = simulate_mmg_3dof( >>> basic_params, >>> maneuvering_params, >>> time_list, >>> δ_rad_list, >>> npm_list, >>> u0=2.29 * 0.512, >>> ) >>> result = sol.sol(time_list) Note: For more information, please see the following articles. - <NAME>., <NAME>. (2015) Introduction of MMG standard method for ship maneuvering predictions. 
J Mar Sci Technol 20, 37–52 https://doi.org/10.1007/s00773-014-0293-y """ return simulate( L_pp=basic_params.L_pp, B=basic_params.B, d=basic_params.d, x_G=basic_params.x_G, D_p=basic_params.D_p, m=basic_params.m, I_zG=basic_params.I_zG, A_R=basic_params.A_R, η=basic_params.η, m_x=basic_params.m_x, m_y=basic_params.m_y, J_z=basic_params.J_z, f_α=basic_params.f_α, ϵ=basic_params.ϵ, t_R=basic_params.t_R, a_H=basic_params.a_H, x_H=basic_params.x_H, γ_R_minus=basic_params.γ_R_minus, γ_R_plus=basic_params.γ_R_plus, l_R=basic_params.l_R, κ=basic_params.κ, t_P=basic_params.t_P, w_P0=basic_params.w_P0, x_P=basic_params.x_P, k_0=maneuvering_params.k_0, k_1=maneuvering_params.k_1, k_2=maneuvering_params.k_2, R_0_dash=maneuvering_params.R_0_dash, X_vv_dash=maneuvering_params.X_vv_dash, X_vr_dash=maneuvering_params.X_vr_dash, X_rr_dash=maneuvering_params.X_rr_dash, X_vvvv_dash=maneuvering_params.X_vvvv_dash, Y_v_dash=maneuvering_params.Y_v_dash, Y_r_dash=maneuvering_params.Y_r_dash, Y_vvv_dash=maneuvering_params.Y_vvv_dash, Y_vvr_dash=maneuvering_params.Y_vvr_dash, Y_vrr_dash=maneuvering_params.Y_vrr_dash, Y_rrr_dash=maneuvering_params.Y_rrr_dash, N_v_dash=maneuvering_params.N_v_dash, N_r_dash=maneuvering_params.N_r_dash, N_vvv_dash=maneuvering_params.N_vvv_dash, N_vvr_dash=maneuvering_params.N_vvr_dash, N_vrr_dash=maneuvering_params.N_vrr_dash, N_rrr_dash=maneuvering_params.N_rrr_dash, time_list=time_list, δ_list=δ_list, npm_list=npm_list, u0=u0, v0=v0, r0=r0, ρ=ρ, method=method, t_eval=t_eval, events=events, vectorized=vectorized, **options ) def simulate( L_pp: float, B: float, d: float, x_G: float, D_p: float, m: float, I_zG: float, A_R: float, η: float, m_x: float, m_y: float, J_z: float, f_α: float, ϵ: float, t_R: float, a_H: float, x_H: float, γ_R_minus: float, γ_R_plus: float, l_R: float, κ: float, t_P: float, w_P0: float, x_P: float, k_0: float, k_1: float, k_2: float, R_0_dash: float, X_vv_dash: float, X_vr_dash: float, X_rr_dash: float, X_vvvv_dash: float, Y_v_dash: float, Y_r_dash: float, Y_vvv_dash: float, Y_vvr_dash: float, Y_vrr_dash: float, Y_rrr_dash: float, N_v_dash: float, N_r_dash: float, N_vvv_dash: float, N_vvr_dash: float, N_vrr_dash: float, N_rrr_dash: float, time_list: List[float], δ_list: List[float], npm_list: List[float], u0: float = 0.0, v0: float = 0.0, r0: float = 0.0, ρ: float = 1.025, method: str = "RK45", t_eval=None, events=None, vectorized=False, **options ): """MMG 3DOF simulation MMG 3DOF simulation by follwoing equation of motion. .. 
math:: m (\\dot{u}-vr)&=-m_x\\dot{u}+m_yvr+X_H+X_P+X_R m (\\dot{v}+ur)&=-m_y\\dot{v}+m_xur+Y_H+Y_R I_{zG}\\dot{r}&=-J_Z\\dot{r}+N_H+N_R Args: L_pp (float): Ship length between perpendiculars [m] B (float): Ship breadth [m] d (float): Ship draft [m] x_G (float): Longitudinal coordinate of center of gravity of ship D_p (float): Propeller diameter [m] m (float): Ship mass [kg] I_zG (float): Moment of inertia of ship around center of gravity A_R (float): Profile area of movable part of mariner rudder [m^2] η (float): Ratio of propeller diameter to rudder span (=D_p/HR) m_x (float): Added masses of x axis direction [kg] m_y (float): Added masses of y axis direction [kg] J_z (float): Added moment of inertia f_α (float): Rudder lift gradient coefficient ϵ (float): Ratio of wake fraction at propeller and rudder positions t_R (float): Steering resistance deduction factor a_H (float): Rudder force increase factor x_H (float): Longitudinal coordinate of acting point of the additional lateral force component induced by steering γ_R_minus (float): Flow straightening coefficient if βR < 0 γ_R_plus (float): Flow straightening coefficient if βR > 0 l_R (float): Effective longitudinal coordinate of rudder position in formula of βR κ (float): An experimental constant for expressing uR t_P (float): Thrust deduction factor w_P0 (float): Wake coefficient at propeller position in straight moving x_P (float): Effective Longitudinal coordinate of propeller position in formula of βP k_0 (float): One of manuevering parameters of coefficients representing K_T k_1 (float): One of manuevering parameters of coefficients representing K_T k_2 (float): One of manuevering parameters of coefficients representing K_T R_0_dash (float): One of manuevering parameters of MMG 3DOF X_vv_dash (float): One of manuevering parameters of MMG 3DOF X_vr_dash (float): One of manuevering parameters of MMG 3DOF X_rr_dash (float): One of manuevering parameters of MMG 3DOF X_vvvv_dash (float): One of manuevering parameters of MMG 3DOF Y_v_dash (float): One of manuevering parameters of MMG 3DOF Y_r_dash (float): One of manuevering parameters of MMG 3DOF Y_vvv_dash (float): One of manuevering parameters of MMG 3DOF Y_vvr_dash (float): One of manuevering parameters of MMG 3DOF Y_vrr_dash (float): One of manuevering parameters of MMG 3DOF Y_rrr_dash (float): One of manuevering parameters of MMG 3DOF N_v_dash (float): One of manuevering parameters of MMG 3DOF N_r_dash (float): One of manuevering parameters of MMG 3DOF N_vvv_dash (float): One of manuevering parameters of MMG 3DOF N_vvr_dash (float): One of manuevering parameters of MMG 3DOF N_vrr_dash (float): One of manuevering parameters of MMG 3DOF N_rrr_dash (float): One of manuevering parameters of MMG 3DOF time_list (list[float]): time list of simulation. δ_list (list[float]): rudder angle list of simulation. npm_list (List[float]): npm list of simulation. u0 (float, optional): axial velocity [m/s] in initial condition (`time_list[0]`). Defaults to 0.0. v0 (float, optional): lateral velocity [m/s] in initial condition (`time_list[0]`). Defaults to 0.0. r0 (float, optional): rate of turn [rad/s] in initial condition (`time_list[0]`). Defaults to 0.0. ρ (float, optional): seawater density [kg/m^3] Defaults to 1.025. method (str, optional): Integration method to use in `scipy.integrate.solve_ivp() <https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.solve_ivp.html>`_: "RK45" (default): Explicit Runge-Kutta method of order 5(4). 
The error is controlled assuming accuracy of the fourth-order method, but steps are taken using the fifth-order accurate formula (local extrapolation is done). A quartic interpolation polynomial is used for the dense output. Can be applied in the complex domain. "RK23": Explicit Runge-Kutta method of order 3(2). The error is controlled assuming accuracy of the second-order method, but steps are taken using the third-order accurate formula (local extrapolation is done). A cubic Hermite polynomial is used for the dense output. Can be applied in the complex domain. "DOP853": Explicit Runge-Kutta method of order 8. Python implementation of the “DOP853” algorithm originally written in Fortran. A 7-th order interpolation polynomial accurate to 7-th order is used for the dense output. Can be applied in the complex domain. "Radau": Implicit Runge-Kutta method of the Radau IIA family of order 5. The error is controlled with a third-order accurate embedded formula. A cubic polynomial which satisfies the collocation conditions is used for the dense output. "BDF": Implicit multi-step variable-order (1 to 5) method based on a backward differentiation formula for the derivative approximation. A quasi-constant step scheme is used and accuracy is enhanced using the NDF modification. Can be applied in the complex domain. "LSODA": Adams/BDF method with automatic stiffness detection and switching. This is a wrapper of the Fortran solver from ODEPACK. t_eval (array_like or None, optional): Times at which to store the computed solution, must be sorted and lie within t_span. If None (default), use points selected by the solver. events (callable, or list of callables, optional): Events to track. If None (default), no events will be tracked. Each event occurs at the zeros of a continuous function of time and state. Each function must have the signature event(t, y) and return a float. The solver will find an accurate value of t at which event(t, y(t)) = 0 using a root-finding algorithm. By default, all zeros will be found. The solver looks for a sign change over each step, so if multiple zero crossings occur within one step, events may be missed. Additionally each event function might have the following attributes: terminal (bool, optional): Whether to terminate integration if this event occurs. Implicitly False if not assigned. direction (float, optional): Direction of a zero crossing. If direction is positive, event will only trigger when going from negative to positive, and vice versa if direction is negative. If 0, then either direction will trigger event. Implicitly 0 if not assigned. You can assign attributes like `event.terminal = True` to any function in Python. vectorized (bool, optional): Whether `fun` is implemented in a vectorized fashion. Default is False. options: Options passed to a chosen solver. All options available for already implemented solvers are listed in `scipy.integrate.solve_ivp() <https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.solve_ivp.html>`_: Returns: Bunch object with the following fields defined: t (ndarray, shape (`n_points`,)): Time points. y (ndarray, shape (`n_points`,)): Values of the solution at t. sol (OdeSolution): Found solution as OdeSolution instance from MMG 3DOF simulation. t_events (list of ndarray or None): Contains for each event type a list of arrays at which an event of that type event was detected. None if events was None. y_events (list of ndarray or None): For each value of t_events, the corresponding value of the solution. 
None if events was None. nfev (int): Number of evaluations of the right-hand side. njev (int): Number of evaluations of the jacobian. nlu (int): Number of LU decomposition. status (int): Reason for algorithm termination: - -1: Integration step failed. - 0: The solver successfully reached the end of `tspan`. - 1: A termination event occurred. message (string): Human-readable description of the termination reason. success (bool): True if the solver reached the interval end or a termination event occurred (`status >= 0`). Examples: >>> duration = 200 # [s] >>> sampling = 2000 >>> time_list = np.linspace(0.00, duration, sampling) >>> δ_list = np.full(len(time_list), 35.0 * np.pi / 180.0) >>> npm_list = np.full(len(time_list), 17.95) >>> L_pp=7.00 >>> B=1.27 >>> d=0.46 >>> x_G=0.25 >>> D_p=0.216 >>> m=3.27*1.025 >>> I_zG=m*((0.25 * L_pp) ** 2) >>> A_R=0.0539 >>> η=D_p/0.345 >>> m_x=0.022*(0.5 * ρ * (L_pp ** 2) * d) >>> m_y=0.223*(0.5 * ρ * (L_pp ** 2) * d) >>> J_z=0.011*(0.5 * ρ * (L_pp ** 4) * d) >>> f_α=2.747 >>> ϵ=1.09 >>> t_R=0.387 >>> a_H=0.312 >>> x_H=-0.464*L_pp >>> γ_R=0.395 >>> l_R=-0.710 >>> κ=0.50 >>> t_P=0.220 >>> w_P0=0.40 >>> x_P=-0.650 >>> k_0 = 0.2931 >>> k_1 = -0.2753 >>> k_2 = -0.1385 >>> R_0_dash = 0.022 >>> X_vv_dash = -0.040 >>> X_vr_dash = 0.002 >>> X_rr_dash = 0.011 >>> X_vvvv_dash = 0.771 >>> Y_v_dash = -0.315 >>> Y_r_dash = 0.083 >>> Y_vvv_dash = -1.607 >>> Y_vvr_dash = 0.379 >>> Y_vrr_dash = -0.391 >>> Y_rrr_dash = 0.008 >>> N_v_dash = -0.137 >>> N_r_dash = -0.049 >>> N_vvv_dash = -0.030 >>> N_vvr_dash = -0.294 >>> N_vrr_dash = 0.055 >>> N_rrr_dash = -0.013 >>> sol = simulate_mmg_3dof( >>> L_pp=L_pp, >>> B=B, >>> d=d, >>> x_G=x_G, >>> D_p=D_p, >>> m=m, >>> I_zG=I_zG, >>> A_R=A_R, >>> η=η, >>> m_x=m_x, >>> m_y=m_y, >>> J_z=J_z, >>> f_α=f_α, >>> ϵ=ϵ, >>> t_R=t_R, >>> a_H=a_H, >>> x_H=x_H, >>> γ_R=γ_R, >>> l_R=l_R, >>> κ=κ, >>> t_P=t_P, >>> w_P0=w_P0, >>> x_P=x_P, >>> k_0=k_0, >>> k_1=k_1, >>> k_2=k_2, >>> X_0=X_0, >>> X_ββ=X_ββ, >>> X_βγ=X_βγ, >>> X_γγ=X_γγ, >>> X_vvvv_dash=X_vvvv_dash, >>> Y_β=Y_β, >>> Y_γ=Y_γ, >>> Y_βββ=Y_βββ, >>> Y_vvr_dash=Y_vvr_dash, >>> Y_vrr_dash=Y_vrr_dash, >>> Y_rrr_dash=Y_rrr_dash, >>> N_β=N_β, >>> N_γ=N_γ, >>> N_vvv_dash=N_vvv_dash, >>> N_vvr_dash=N_vvr_dash, >>> N_vrr_dash=N_vrr_dash, >>> N_rrr_dash=N_rrr_dash, >>> time_list, >>> δ_rad_list, >>> npm_list, >>> u0=2.29 * 0.512, >>> ) >>> result = sol.sol(time_list) Note: For more information, please see the following articles. - <NAME>., <NAME>. (2015) Introduction of MMG standard method for ship maneuvering predictions. 
J Mar Sci Technol 20, 37–52 https://doi.org/10.1007/s00773-014-0293-y """ spl_δ = interp1d(time_list, δ_list, "cubic", fill_value="extrapolate") spl_npm = interp1d(time_list, npm_list, "cubic", fill_value="extrapolate") def mmg_3dof_eom_solve_ivp(t, X): u, v, r, δ, npm = X U = np.sqrt(u ** 2 + (v - r * x_G) ** 2) β = 0.0 if U == 0.0 else np.arcsin(-(v - r * x_G) / U) v_dash = 0.0 if U == 0.0 else v / U r_dash = 0.0 if U == 0.0 else r * L_pp / U # w_P = w_P0 w_P = w_P0 * np.exp(-4.0 * (β - x_P * r_dash) ** 2) J = 0.0 if npm == 0.0 else (1 - w_P) * u / (npm * D_p) K_T = k_0 + k_1 * J + k_2 * J ** 2 β_R = β - l_R * r_dash γ_R = γ_R_minus if β_R < 0.0 else γ_R_plus v_R = U * γ_R * β_R u_R = ( np.sqrt(η * (κ * ϵ * 8.0 * k_0 * npm ** 2 * D_p ** 4 / np.pi) ** 2) if J == 0.0 else u * (1 - w_P) * ϵ * np.sqrt( η * (1.0 + κ * (np.sqrt(1.0 + 8.0 * K_T / (np.pi * J ** 2)) - 1)) ** 2 + (1 - η) ) ) U_R = np.sqrt(u_R ** 2 + v_R ** 2) α_R = δ - np.arctan2(v_R, u_R) F_N = 0.5 * A_R * ρ * f_α * (U_R ** 2) * np.sin(α_R) X_H = ( 0.5 * ρ * L_pp * d * (U ** 2) * ( -R_0_dash + X_vv_dash * (v_dash ** 2) + X_vr_dash * v_dash * r_dash + X_rr_dash * (r_dash ** 2) + X_vvvv_dash * (v_dash ** 4) ) ) X_R = -(1 - t_R) * F_N * np.sin(δ) X_P = (1 - t_P) * ρ * K_T * npm ** 2 * D_p ** 4 Y_H = ( 0.5 * ρ * L_pp * d * (U ** 2) * ( Y_v_dash * v_dash + Y_r_dash * r_dash + Y_vvv_dash * (v_dash ** 3) + Y_vvr_dash * (v_dash ** 2) * r_dash + Y_vrr_dash * v_dash * (r_dash ** 2) + Y_rrr_dash * (r_dash ** 3) ) ) Y_R = -(1 + a_H) * F_N * np.cos(δ) N_H = ( 0.5 * ρ * (L_pp ** 2) * d * (U ** 2) * ( N_v_dash * v_dash + N_r_dash * r_dash + N_vvv_dash * (v_dash ** 3) + N_vvr_dash * (v_dash ** 2) * r_dash + N_vrr_dash * v_dash * (r_dash ** 2) + N_rrr_dash * (r_dash ** 3) ) ) N_R = -(-0.5 + a_H * x_H) * F_N * np.cos(δ) d_u = ((X_H + X_R + X_P) + (m + m_y) * v * r + x_G * m * (r ** 2)) / (m + m_x) d_v = ( (x_G ** 2) * (m ** 2) * u * r - (N_H + N_R) * x_G * m + ((Y_H + Y_R) - (m + m_x) * u * r) * (I_zG + J_z + (x_G ** 2) * m) ) / ((I_zG + J_z + (x_G ** 2) * m) * (m + m_y) - (x_G ** 2) * (m ** 2)) d_r = (N_H + N_R - x_G * m * (d_v + u * r)) / (I_zG + J_z + (x_G ** 2) * m) d_δ = derivative(spl_δ, t) d_npm = derivative(spl_npm, t) return [d_u, d_v, d_r, d_δ, d_npm] sol = solve_ivp( mmg_3dof_eom_solve_ivp, [time_list[0], time_list[-1]], [u0, v0, r0, δ_list[0], npm_list[0]], dense_output=True, method=method, t_eval=t_eval, events=events, vectorized=vectorized, **options ) return sol def get_sub_values_from_simulation_result( u_list: List[float], v_list: List[float], r_list: List[float], δ_list: List[float], npm_list: List[float], basic_params: Mmg3DofBasicParams, maneuvering_params: Mmg3DofManeuveringParams, ρ: float = 1.025, return_all_vals: bool = False, ): """Get sub values of MMG calculation from simulation result. Args: u_list (List[float]): u list of MMG simulation result. v_list (List[float]): v list of MMG simulation result. r_list (List[float]): r list of MMG simulation result. δ_list (List[float]): δ list of MMG simulation result. npm_list (List[float]): npm list of MMG simulation result. basic_params (Mmg3DofBasicParams): u of MMG simulation result. maneuvering_params (Mmg3DofManeuveringParams): u of MMG simulation result. ρ (float, optional): seawater density [kg/m^3] Defaults to 1.025. return_all_vals (bool, optional): Whether all sub values are returned or not. Defaults to false. 
Returns: X_H_list (List[float]): List of X_H X_R_list (List[float]): List of X_R X_P_list (List[float]): List of X_P Y_H_list (List[float]): List of Y_H Y_R_list (List[float]): List of Y_R N_H_list (List[float]): List of N_H N_R_list (List[float]): List of N_R U_list (List[float], optional): List of U if return_all_vals is True β_list (List[float], optional): List of β if return_all_vals is True v_dash_list (List[float], optional): List of v_dash if return_all_vals is True r_dash_list (List[float], optional): List of r_dash if return_all_vals is True w_P_list (List[float], optional): List of w_P if return_all_vals is True J_list (List[float], optional): List of J if return_all_vals is True K_T_list (List[float], optional): List of K_T if return_all_vals is True v_R_list (List[float], optional): List of v_R if return_all_vals is True u_R_list (List[float], optional): List of u_R if return_all_vals is True U_R_list (List[float], optional): List of U_R if return_all_vals is True α_R_list (List[float], optional): List of α_R if return_all_vals is True F_N_list (List[float], optional): List of F_N if return_all_vals is True """ U_list = list( map( lambda u, v, r: np.sqrt(u ** 2 + (v - r * basic_params.x_G) ** 2), u_list, v_list, r_list, ) ) β_list = list( map( lambda U, v, r: 0.0 if U == 0.0 else np.arcsin(-(v - r * basic_params.x_G) / U), U_list, v_list, r_list, ) ) v_dash_list = list(map(lambda U, v: 0.0 if U == 0.0 else v / U, U_list, v_list)) r_dash_list = list( map(lambda U, r: 0.0 if U == 0.0 else r * basic_params.L_pp / U, U_list, r_list) ) β_P_list = list( map( lambda β, r_dash: β - basic_params.x_P * r_dash, β_list, r_dash_list, ) ) # w_P_list = [basic_params.w_P0 for i in range(len(r_dash_list))] w_P_list = list( map(lambda β_P: basic_params.w_P0 * np.exp(-4.0 * β_P ** 2), β_P_list) ) J_list = list( map( lambda w_P, u, npm: 0.0 if npm == 0.0 else (1 - w_P) * u / (npm * basic_params.D_p), w_P_list, u_list, npm_list, ) ) K_T_list = list( map( lambda J: maneuvering_params.k_0 + maneuvering_params.k_1 * J + maneuvering_params.k_2 * J ** 2, J_list, ) ) β_R_list = list( map( lambda β, r_dash: β - basic_params.l_R * r_dash, β_list, r_dash_list, ) ) γ_R_list = list( map( lambda β_R: basic_params.γ_R_minus if β_R < 0.0 else basic_params.γ_R_plus, β_R_list, ) ) v_R_list = list( map( lambda U, γ_R, β_R: U * γ_R * β_R, U_list, γ_R_list, β_R_list, ) ) u_R_list = list( map( lambda u, J, npm, K_T, w_P: np.sqrt( basic_params.η * ( basic_params.κ * basic_params.ϵ * 8.0 * maneuvering_params.k_0 * npm ** 2 * basic_params.D_p ** 4 / np.pi ) ** 2 ) if J == 0.0 else u * (1 - w_P) * basic_params.ϵ * np.sqrt( basic_params.η * ( 1.0 + basic_params.κ * (np.sqrt(1.0 + 8.0 * K_T / (np.pi * J ** 2)) - 1) ) ** 2 + (1 - basic_params.η) ), u_list, J_list, npm_list, K_T_list, w_P_list, ) ) U_R_list = list( map(lambda u_R, v_R: np.sqrt(u_R ** 2 + v_R ** 2), u_R_list, v_R_list) ) α_R_list = list( map(lambda δ, u_R, v_R: δ - np.arctan2(v_R, u_R), δ_list, u_R_list, v_R_list) ) F_N_list = list( map( lambda U_R, α_R: 0.5 * basic_params.A_R * ρ * basic_params.f_α * (U_R ** 2) *
np.sin(α_R)
numpy.sin
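Worked reference for this row: the cut falls inside the rudder normal-force lambda of get_sub_values_from_simulation_result, F_N = 0.5 * A_R * ρ * f_α * U_R^2 * sin(α_R). A small numeric check; A_R, ρ, and f_α follow the example block in the prompt, while U_R and α_R are hypothetical stand-ins for values computed earlier in that function:

import numpy as np

A_R, rho, f_alpha = 0.0539, 1.025, 2.747  # from the prompt's example parameters
U_R, alpha_R = 1.2, 0.15                  # hypothetical rudder inflow speed and angle
F_N = 0.5 * A_R * rho * f_alpha * (U_R ** 2) * np.sin(alpha_R)  # the completion supplies np.sin(α_R)
print(F_N)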
"""Tests of cleverhans.attacks_tf """ # pylint: disable=missing-docstring from functools import partial import unittest import numpy as np import tensorflow as tf from cleverhans.devtools.checks import CleverHansTest from cleverhans.attacks_tf import ( fgm, pgd_attack, UnrolledAdam, UnrolledGradientDescent, parallel_apply_transformations, ) from cleverhans.devtools.mocks import random_feed_dict from cleverhans.model import Model class SimpleModel(Model): """ A very simple neural network """ def __init__(self, scope="simple", nb_classes=2, **kwargs): del kwargs Model.__init__(self, scope, nb_classes, locals()) def fprop(self, x, **kwargs): del kwargs with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE): w1 = tf.constant([[1.5, 0.3], [-2, 0.3]], dtype=tf.as_dtype(x.dtype)) w2 = tf.constant([[-2.4, 1.2], [0.5, -2.3]], dtype=tf.as_dtype(x.dtype)) h1 = tf.nn.sigmoid(tf.matmul(x, w1)) res = tf.matmul(h1, w2) return {self.O_LOGITS: res, self.O_PROBS: tf.nn.softmax(res)} class TestAttackTF(CleverHansTest): def setUp(self): super(TestAttackTF, self).setUp() self.sess = tf.Session() self.model = SimpleModel() def test_fgm_gradient_max(self): input_dim = 2 nb_classes = 3 batch_size = 4 rng = np.random.RandomState([2017, 8, 23]) x = tf.placeholder(tf.float32, [batch_size, input_dim]) weights = tf.placeholder(tf.float32, [input_dim, nb_classes]) logits = tf.matmul(x, weights) probs = tf.nn.softmax(logits) adv_x = fgm(x, probs) random_example = rng.randint(batch_size) random_feature = rng.randint(input_dim) output = tf.slice(adv_x, [random_example, random_feature], [1, 1]) (dx,) = tf.gradients(output, x) # The following line catches GitHub issue #243 self.assertIsNotNone(dx) dx = self.sess.run(dx, feed_dict=random_feed_dict(rng, [x, weights])) ground_truth = np.zeros((batch_size, input_dim)) ground_truth[random_example, random_feature] = 1.0 self.assertClose(dx, ground_truth) def helper_pgd_attack( self, unrolled_optimizer, targeted, nb_iters=20, epsilon=0.5, clip_min=-5.0, clip_max=5.0, assert_threshold=0.5, ): def loss_fn(input_image, label, targeted): res = self.model.fprop(input_image) logits = res[self.model.O_LOGITS] multiplier = 1.0 if targeted else -1.0 return multiplier * tf.nn.sparse_softmax_cross_entropy_with_logits( labels=label, logits=logits ) x_val_ph = tf.placeholder(tf.float32, shape=[100, 2]) x_val = np.random.randn(100, 2).astype(np.float32) init_model_output = self.model.fprop(x_val_ph) init_model_logits = init_model_output[self.model.O_LOGITS] if targeted: labels = np.random.random_integers(0, 1, size=(100,)) else: labels = tf.stop_gradient(tf.argmax(init_model_logits, axis=1)) def _project_perturbation( perturbation, epsilon, input_image, clip_min, clip_max ): clipped_perturbation = tf.clip_by_value(perturbation, -epsilon, epsilon) new_image = tf.clip_by_value( input_image + clipped_perturbation, clip_min, clip_max ) return new_image - input_image x_adv = pgd_attack( loss_fn=partial(loss_fn, targeted=targeted), input_image=x_val_ph, label=labels, epsilon=epsilon, num_steps=nb_iters, optimizer=unrolled_optimizer, project_perturbation=_project_perturbation, clip_min=clip_min, clip_max=clip_max, ) final_model_output = self.model.fprop(x_adv) final_model_logits = final_model_output[self.model.O_LOGITS] if not targeted: logits1, logits2 = self.sess.run( [init_model_logits, final_model_logits], feed_dict={x_val_ph: x_val} ) preds1 = np.argmax(logits1, axis=1) preds2 = np.argmax(logits2, axis=1) self.assertTrue( np.mean(preds1 == preds2) < assert_threshold, np.mean(preds1 == preds2) ) 
else: logits_adv = self.sess.run(final_model_logits, feed_dict={x_val_ph: x_val}) preds_adv =
np.argmax(logits_adv, axis=1)
numpy.argmax
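Worked reference for this row: in the targeted branch of helper_pgd_attack, the completion turns the adversarial logits into class predictions. A stand-alone check with made-up logits:

import numpy as np

logits_adv = np.array([[0.2, 1.5], [2.0, -0.3], [-1.0, 0.1]])  # hypothetical (batch, nb_classes) logits
preds_adv = np.argmax(logits_adv, axis=1)  # the completion: argmax over the class axis
print(preds_adv)  # [1 0 1]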
# coding: utf-8 from __future__ import print_function from __future__ import division import torch import torch.nn as nn import torch.nn.functional as F from libcity.model.abstract_model import AbstractModel from math import sin, cos, sqrt, atan2, radians import numpy as np def identity_loss(y_true, y_pred): return torch.mean(y_pred - 0 * y_true) class CARA1(nn.Module): def hard_sigmoid(self, x): x = torch.tensor(x / 6 + 0.5) x = F.threshold(-x, -1, -1) x = F.threshold(-x, 0, 0) return x def __init__(self, output_dim, input_dim, init='glorot_uniform', inner_init='orthogonal', **kwargs): super(CARA1, self).__init__() self.output_dim = output_dim self.init = init self.inner_init = inner_init self.activation = self.hard_sigmoid self.inner_activation = nn.Tanh() self.build(input_dim) def add_weight(self, shape, initializer): ts = torch.zeros(shape) if initializer == 'glorot_uniform': ts = nn.init.xavier_normal_(ts) elif initializer == 'orthogonal': ts = nn.init.orthogonal_(ts) return nn.Parameter(ts) def build(self, input_shape): # self.input_spec = [InputSpec(shape=input_shape)] self.input_dim = input_shape self.W_z = self.add_weight((self.input_dim, self.output_dim), initializer=self.init) self.U_z = self.add_weight((self.output_dim, self.output_dim), initializer=self.init) self.b_z = self.add_weight((self.output_dim,), initializer='zero') self.W_r = self.add_weight((self.input_dim, self.output_dim), initializer=self.init) self.U_r = self.add_weight((self.output_dim, self.output_dim), initializer=self.init) self.b_r = self.add_weight((self.output_dim,), initializer='zero') self.W_h = self.add_weight((self.input_dim, self.output_dim), initializer=self.init) self.U_h = self.add_weight((self.output_dim, self.output_dim), initializer=self.init) self.b_h = self.add_weight((self.output_dim,), initializer='zero') self.A_h = self.add_weight((self.output_dim, self.output_dim), initializer=self.init) self.A_u = self.add_weight((self.output_dim, self.output_dim), initializer=self.init) self.b_a_h = self.add_weight((self.output_dim,), initializer='zero') self.b_a_u = self.add_weight((self.output_dim,), initializer='zero') self.W_t = self.add_weight((self.input_dim, self.output_dim), initializer=self.init) self.U_t = self.add_weight((1, self.output_dim), initializer=self.init) self.b_t = self.add_weight((self.output_dim,), initializer='zero') self.W_g = self.add_weight((self.input_dim, self.output_dim), initializer=self.init) self.U_g = self.add_weight((1, self.output_dim), initializer=self.init) self.b_g = self.add_weight((self.output_dim,), initializer='zero') def preprocess_input(self, x): return x def forward(self, x): """ X : batch * timeLen * dims(有拓展) """ tlen = x.shape[1] output = torch.zeros((x.shape[0], self.output_dim)) for i in range(tlen): output = self.step(x[:, i, :], output) return output def step(self, x, states): """ 用于多批次同一时间 states为上一次多批次统一时间数据 """ h_tm1 = states # phi_t u = x[:, self.output_dim: 2 * self.output_dim] # delta_t t = x[:, 2 * self.output_dim: (2 * self.output_dim) + 1] # delta_g g = x[:, (2 * self.output_dim) + 1:] # phi_v x = x[:, :self.output_dim] t = self.inner_activation(torch.matmul(t, self.U_t)) g = self.inner_activation(torch.matmul(g, self.U_g)) # Time-based gate t1 = self.inner_activation(torch.matmul(x, self.W_t) + t + self.b_t) # Geo-based gate g1 = self.inner_activation(torch.matmul(x, self.W_g) + g + self.b_g) # Contextual Attention Gate a = self.inner_activation( torch.matmul(h_tm1, self.A_h) + torch.matmul(u, self.A_u) + self.b_a_h + self.b_a_u) x_z = 
torch.matmul(x, self.W_z) + self.b_z x_r = torch.matmul(x, self.W_r) + self.b_r x_h = torch.matmul(x, self.W_h) + self.b_h u_z_ = torch.matmul((1 - a) * u, self.W_z) + self.b_z u_r_ = torch.matmul((1 - a) * u, self.W_r) + self.b_r u_h_ = torch.matmul((1 - a) * u, self.W_h) + self.b_h u_z = torch.matmul(a * u, self.W_z) + self.b_z u_r = torch.matmul(a * u, self.W_r) + self.b_r u_h = torch.matmul(a * u, self.W_h) + self.b_h # update gate z = self.inner_activation(x_z + torch.matmul(h_tm1, self.U_z) + u_z) # reset gate r = self.inner_activation(x_r + torch.matmul(h_tm1, self.U_r) + u_r) # hidden state hh = self.activation(x_h + torch.matmul(r * t1 * g1 * h_tm1, self.U_h) + u_h) h = z * h_tm1 + (1 - z) * hh h = (1 + u_z_ + u_r_ + u_h_) * h return h # return h def bpr_triplet_loss(x): positive_item_latent, negative_item_latent = x reg = 0 loss = 1 - torch.log(torch.sigmoid( torch.sum(positive_item_latent, dim=-1, keepdim=True) - torch.sum(negative_item_latent, dim=-1, keepdim=True))) - reg return loss class Recommender(nn.Module): def __init__(self, num_users, num_items, num_times, latent_dim, maxvenue=5): super(Recommender, self).__init__() self.maxVenue = maxvenue self.latent_dim = latent_dim # num * maxVenue * dim self.U_Embedding = nn.Embedding(num_users, latent_dim) self.V_Embedding = nn.Embedding(num_items, latent_dim) self.T_Embedding = nn.Embedding(num_times, latent_dim) torch.nn.init.uniform_(self.U_Embedding.weight) torch.nn.init.uniform_(self.V_Embedding.weight) torch.nn.init.uniform_(self.T_Embedding.weight) self.rnn = nn.Sequential( CARA1(latent_dim, latent_dim, input_shape=(self.maxVenue, (self.latent_dim * 2) + 2,), unroll=True)) # latent_dim * 2 + 2 = v_embedding + t_embedding + time_gap + distance def forward(self, x): # INPUT = [self.user_input, self.time_input, self.gap_time_input, self.pos_distance_input, # self.neg_distance_input, self.checkins_input, # self.neg_checkins_input] # pass # User latent factor user_input = torch.tensor(x[0]) time_input = torch.tensor(x[1]) gap_time_input = torch.tensor(x[2], dtype=torch.float32) pos_distance_input = torch.tensor(x[3], dtype=torch.float32) neg_distance_input = torch.tensor(x[4], dtype=torch.float32) checkins_input = torch.tensor(x[5]) neg_checkins_input = torch.tensor(x[6]) self.u_latent = self.U_Embedding(user_input) self.t_latent = self.T_Embedding(time_input) h, w = gap_time_input.shape gap_time_input = gap_time_input.view(h, w, 1) rnn_input = torch.cat([self.V_Embedding(checkins_input), self.T_Embedding(time_input), gap_time_input], -1) neg_rnn_input = torch.cat([self.V_Embedding(neg_checkins_input), self.T_Embedding(time_input), gap_time_input], -1) h, w = pos_distance_input.shape pos_distance_input = pos_distance_input.view(h, w, 1) h, w = neg_distance_input.shape neg_distance_input = neg_distance_input.view(h, w, 1) rnn_input = torch.cat([rnn_input, pos_distance_input], -1) neg_rnn_input = torch.cat([neg_rnn_input, neg_distance_input], -1) self.checkins_emb = self.rnn(rnn_input) self.neg_checkins_emb = self.rnn(neg_rnn_input) pred = (self.checkins_emb * self.u_latent).sum(dim=1) neg_pred = (self.neg_checkins_emb * self.u_latent).sum(dim=1) return bpr_triplet_loss([pred, neg_pred]) def rank(self, uid, hist_venues, hist_times, hist_time_gap, hist_distances): # hist_venues = hist_venues + [candidate_venue] # hist_times = hist_times + [time] # hist_time_gap = hist_time_gap + [time_gap] # hist_distances = hist_distances + [distance] # u_latent = self.U_Embedding(torch.tensor(uid)) # v_latent = 
self.V_Embedding(torch.tensor(hist_venues)) # t_latent = self.T_Embedding(torch.tensor(hist_times)) u_latent = self.U_Embedding.weight[uid] v_latent = self.V_Embedding.weight[hist_venues.reshape(-1)].view(hist_venues.shape[0], hist_venues.shape[1], -1) t_latent = self.T_Embedding.weight[hist_times.reshape(-1)].view(hist_times.shape[0], hist_times.shape[1], -1) h, w = hist_time_gap.shape hist_time_gap = hist_time_gap.reshape(h, w, 1) h, w = hist_distances.shape hist_distances = hist_distances.reshape(h, w, 1) rnn_input = torch.cat([t_latent, torch.tensor(hist_time_gap, dtype=torch.float32)], dim=-1) rnn_input = torch.cat([rnn_input, torch.tensor(hist_distances, dtype=torch.float32)], dim=-1) rnn_input = torch.cat([v_latent, rnn_input], dim=-1) dynamic_latent = self.rnn(rnn_input) scores = torch.mul(dynamic_latent, u_latent).sum(1) # scores = np.dot(dynamic_latent, u_latent) return scores class CARA(AbstractModel): """rnn model with long-term history attention""" def __init__(self, config, data_feature): super(CARA, self).__init__(config, data_feature) self.loc_size = data_feature['loc_size'] self.tim_size = data_feature['tim_size'] self.uid_size = data_feature['uid_size'] self.poi_profile = data_feature['poi_profile'] self.id2locid = data_feature['id2locid'] self.id2loc = [] for i in range(self.loc_size - 1): self.id2loc.append(self.id2locid[str(i)]) self.id2loc.append(self.loc_size) self.id2loc = np.array(self.id2loc) self.coor = self.poi_profile['coordinates'].apply(eval) self.rec = Recommender(self.uid_size, self.loc_size, self.tim_size, 10) def get_time_interval(self, x): y = x[:, :-1] y = np.concatenate([x[:, 0, None], y], axis=1) return x - y def get_time_interval2(self, x): y = x[:-1] y = np.concatenate([x[0, None], y], axis=0) return x - y def get_pos_distance(self, x): x = np.array(x.tolist()) y = np.concatenate([x[:, 0, None, :], x[:, :-1, :]], axis=1) r = 6373.0 rx = np.radians(x) ry = np.radians(y) d = x - y a = np.sin(d[:, :, 0] / 2) ** 2 + np.cos(rx[:, :, 0]) * np.cos(ry[:, :, 0]) * np.sin(d[:, :, 1] / 2) ** 2 c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a)) return r * c def get_pos_distance2(self, x): x = np.array(x.tolist()) y = np.concatenate([x[0, None, :], x[:-1, :]], axis=0) r = 6373.0 rx = np.radians(x) ry = np.radians(y) d = x - y a = np.sin(d[:, 0] / 2) ** 2 + np.cos(rx[:, 0]) * np.cos(ry[:, 0]) * np.sin(d[:, 1] / 2) ** 2 c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1-a)) return r * c def get_distance(self, lat1, lng1, lat2, lng2): r = 6373.0 lat1 = radians(lat1) lon1 = radians(lng1) lat2 = radians(lat2) lon2 = radians(lng2) dlon = lon2 - lon1 dlat = lat2 - lat1 a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2 c = 2 * atan2(sqrt(a), sqrt(1 - a)) distance = int(r * c) return distance def get_neg_checkins(self, vis, x, y): len1, len2 = x.shape x_res = [] x_res_distance = y[:].copy() for i in range(len1): visits = x[i] j = np.random.randint(self.loc_size - 1) while j in vis[i]: j = np.random.randint(self.loc_size - 1) tmp = visits[:].copy() tmp[-1] = j x_res.append(tmp) j1 = self.coor[self.id2loc[visits[-1]]] j = self.coor[self.id2loc[j]] x_res_distance[i, -1] = self.get_distance(j1[0], j1[1], j[0], j[1]) return x_res, x_res_distance def forward(self, batch): hloc = np.array(batch['current_loc'])[:, :5] target = np.array(batch['target']) h = target.shape target = target.reshape((*h, 1)) hloc = np.concatenate([hloc, target], axis=1) hloc1 = self.id2loc[hloc] tloc =
np.array(batch['current_tim'])
numpy.array
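Worked reference for this row: inside CARA.forward, the completion converts the batch's current_tim field to an ndarray, mirroring how current_loc is handled a few lines earlier. A minimal sketch with a hypothetical batch dict:

import numpy as np

batch = {"current_tim": [[3, 7, 11, 15, 19], [2, 6, 10, 14, 18]]}  # hypothetical batch field
tloc = np.array(batch['current_tim'])  # the completion
print(tloc.shape)  # (2, 5)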
import numpy as np import matplotlib.pyplot as plt from cvxopt import matrix from cvxopt import solvers from utils import * ## Define kernel RBF def rbfKernel(x,y,k_params=0.5): x = np.expand_dims(x,axis=1) y = np.expand_dims(y,axis=1) val = np.exp(-(np.linalg.norm(x-y)**2)/k_params**2) val = np.array(val) val = np.reshape(val,(1,1)) return val def polyKernel(x,y,k_params=2): x = np.expand_dims(x,axis=1) y = np.expand_dims(y,axis=1) val = (np.matmul(x.T, y) + 1) ** k_params return val def linKernel(x,y): x = np.expand_dims(x,axis=1) y = np.expand_dims(y,axis=1) val = (np.matmul(x.T, y)) return val class SVM(object): def __init__(self, C_reg=0.5, thresh=1e-5, kernel='poly', k_params=3): super(SVM,self).__init__() self.C_reg = C_reg self.thresh = thresh self.alpha = None self.intercept = None self.sv = None self.sv_l = None self.kernel = kernel self.k_params = k_params def fit(self, X_train,y_train, print_options=False): P = np.zeros((X_train.shape[0],X_train.shape[0])) for i in range(X_train.shape[0]): for j in range(X_train.shape[0]): if self.kernel=='rbf': P[i,j] = 1*y_train[i]*y_train[j]*rbfKernel(X_train[i],X_train[j],k_params=self.k_params) if self.kernel=='poly': P[i,j] = 1*y_train[i]*y_train[j]*polyKernel(X_train[i],X_train[j],k_params=self.k_params) if self.kernel=='lin': P[i,j] = 1*y_train[i]*y_train[j]*linKernel(X_train[i],X_train[j]) q = -1*np.ones((X_train.shape[0],1)) G = np.vstack((-1*np.eye(X_train.shape[0]),np.eye(X_train.shape[0]))) h = np.vstack((
np.zeros((X_train.shape[0],1))
numpy.zeros
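Worked reference for this row: G stacks -I on top of I, so h must stack a zero block (lower bound 0 <= alpha) on top of a C_reg block (upper bound alpha <= C_reg). The completion supplies the zero block; the C_reg block is an assumption based on the standard soft-margin dual and is not visible in the prompt:

import numpy as np

n, C_reg = 4, 0.5  # hypothetical number of training points; C_reg matches the class default
h = np.vstack((np.zeros((n, 1)),          # the completion: lower-bound side of the box constraint
               C_reg * np.ones((n, 1))))  # assumed upper-bound side (alpha <= C_reg)
print(h.ravel())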
import numpy as np import pandas as pd ots = pd.read_csv("data/ot.csv",sep=',') ots=ots.sort_values('Pedido') xlsx_file = "data/layout.xlsx" layout = pd.read_excel(xlsx_file, sheet_name="layout") adyacencia=pd.read_excel(xlsx_file, sheet_name="adyacencia") cant_ordenes=len(ots['Pedido'].unique()) pasillos=layout['pasillo'].unique() tiempo_pickeo=20 velocidad=20 lista=[] for x in range(cant_ordenes): obj=list(ots.loc[ots["Pedido"]==x+1]["Cod.Prod"]) lista.append(obj) ordenes=
np.array(lista)
numpy.array
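Worked reference for this row: lista holds one Cod.Prod list per order, and the completion wraps it in an ndarray. If orders contain different numbers of products the lists are ragged, and recent NumPy then requires an explicit object dtype; the sketch below uses hypothetical product codes and adds dtype=object for that case:

import numpy as np

lista = [[101, 102], [103], [104, 105, 106]]  # hypothetical per-order product lists
ordenes = np.array(lista, dtype=object)       # the completion is np.array(lista); dtype=object handles ragged rows
print(ordenes.shape)  # (3,)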
import numpy as np import cv2 from PIL import Image import torch.utils.data as D from tqdm import tqdm from torchvision import transforms from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True trans = transforms.Compose([ transforms.Resize((256,256)), transforms.ToTensor() ]) def get_img(root): img_ = [] mask_ = [] for idx in tqdm(range(1, 1001)): img = Image.open(f'{root}/{idx}.jpg') i1 = img.crop((0,0,256,256)) i2 = img.crop((235,235,491,491)) i3 = img.crop((235,0,491,256)) i4 = img.crop((0,235,256,491)) mask = Image.open(f'{root}/{idx}_gt.png') m1 = mask.crop((0,0,256,256)) m2 = mask.crop((235,235,491,491)) m3 = mask.crop((235,0,491,256)) m4 = mask.crop((0,235,256,491)) # img_.append(img) img_.append(i1) img_.append(i2) img_.append(i3) img_.append(i4) # mask_.append(mask) mask_.append(m1) mask_.append(m2) mask_.append(m3) mask_.append(m4) img_ = img_[:3600] mask_ = mask_[:3600] return img_, mask_ def gamma_transform(img, gamma): gamma_table = [np.power(x / 255.0, gamma) * 255.0 for x in range(256)] gamma_table = np.round(np.array(gamma_table)).astype(np.uint8) return cv2.LUT(img, gamma_table) def random_gamma_transform(img, gamma_vari): log_gamma_vari = np.log(gamma_vari) alpha =
np.random.uniform(-log_gamma_vari, log_gamma_vari)
numpy.random.uniform
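Worked reference for this row: random_gamma_transform draws a symmetric random exponent so the applied gamma lands in [1/gamma_vari, gamma_vari]. A minimal sketch of the sampling step; the final exponentiation and call into gamma_transform are assumptions about what follows the cut point:

import numpy as np

gamma_vari = 2.0
log_gamma_vari = np.log(gamma_vari)
alpha = np.random.uniform(-log_gamma_vari, log_gamma_vari)  # the completion
gamma = np.exp(alpha)  # assumed next step: gamma in [1/gamma_vari, gamma_vari], passed to gamma_transform(img, gamma)
print(gamma)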
"""Implementations of Feature the model scattering objects. Provides some basic implementations of scattering objects that are frequently used. Classes -------- Scatterer Abstract base class for scatterers PointParticle Generates point particles Ellipse Generetes 2-d elliptical particles Sphere Generates 3-d spheres Ellipsoid Generates 3-d ellipsoids """ from typing import Callable, Tuple import numpy as np from . import backend as D from .features import Feature, MERGE_STRATEGY_APPEND from .image import Image from . import image from .types import PropertyLike, ArrayLike import warnings class Scatterer(Feature): """Base abstract class for scatterers. A scatterer is defined by a 3-dimensional volume of voxels. To each voxel corresponds an occupancy factor, i.e., how much of that voxel does the scatterer occupy. However, this number is not necessarily limited to the [0, 1] range. It can be any number, and its interpretation is left to the optical device that images the scatterer. This abstract class implements the `_process_properties` method to convert the position to voxel units, as well as the `_process_and_get` method to upsample the calculation and crop empty slices. Parameters ---------- position : array_like of length 2 or 3 The position of the particle. Third index is optional, and represents the position in the direction normal to the camera plane. z : float The position in the direction normal to the camera plane. Used if `position` is of length 2. value : float A default value of the characteristic of the particle. Used by optics unless a more direct property is set (eg. `refractive_index` for `Brightfield` and `intensity` for `Fluorescence`). position_unit : "meter" or "pixel" The unit of the provided position property. Other Parameters ---------------- upsample_axes : tuple of ints Sets the axes along which the calculation is upsampled (default is None, which implies all axes are upsampled). crop_zeros : bool Whether to remove slices in which all elements are zero. """ __list_merge_strategy__ = MERGE_STRATEGY_APPEND __distributed__ = False def __init__( self, position: PropertyLike[ArrayLike[float]] = (32, 32), z: PropertyLike[float] = 0.0, value: PropertyLike[float] = 1.0, position_unit: PropertyLike[str] = "pixel", upsample: PropertyLike[int] = 1, **kwargs ): self._processed_properties = False super().__init__( position=position, z=z, value=value, position_unit=position_unit, upsample=upsample, **kwargs ) def _process_properties(self, properties: dict) -> dict: # Rescales the position property self._processed_properties = True if "position" in properties: if properties["position_unit"] == "meter": properties["position"] = ( np.array(properties["position"]) / np.array(properties["voxel_size"])[: len(properties["position"])] / properties.get("upscale", 1) ) properties["z"] = ( np.array(properties["z"]) / np.array(properties["voxel_size"])[: len(properties["position"])] / properties.get("upscale", 1) ) return properties def _process_and_get( self, *args, voxel_size, upsample, upsample_axes=None, crop_empty=True, **kwargs ): # Post processes the created object to handle upsampling, # as well as cropping empty slices. if not self._processed_properties: warnings.warn( "Overridden _process_properties method does not call super. " + "This is likely to result in errors if used with " + "Optics.upscale != 1." 
) # Calculates upsampled voxel_size if upsample_axes is None: upsample_axes = range(3) voxel_size = np.array(voxel_size) for axis in upsample_axes: voxel_size[axis] /= upsample # calls parent _process_and_get new_image = super()._process_and_get( *args, voxel_size=voxel_size, upsample=upsample, **kwargs ) new_image = new_image[0] if new_image.size == 0: warnings.warn( "Scatterer created that is smaller than a pixel. " + "This may yield inconsistent results." + " Consider using upsample on the scatterer," + " or upscale on the optics.", Warning, ) # Downsamples the image along the axes it was upsampled if upsample != 1 and upsample_axes: # Pad image to ensure it is divisible by upsample increase = np.array(new_image.shape) for axis in upsample_axes: increase[axis] = upsample - (new_image.shape[axis] % upsample) pad_width = [(0, inc) for inc in increase] new_image = np.pad(new_image, pad_width, mode="constant") # Finds reshape size for downsampling new_shape = [] for axis in range(new_image.ndim): if axis in upsample_axes: new_shape += [new_image.shape[axis] // upsample, upsample] else: new_shape += [new_image.shape[axis]] # Downsamples new_image = np.reshape(new_image, new_shape).mean( axis=tuple(np.array(upsample_axes, dtype=np.int32) * 2 + 1) ) # Crops empty slices if crop_empty: new_image = new_image[~np.all(new_image == 0, axis=(1, 2))] new_image = new_image[:, ~np.all(new_image == 0, axis=(0, 2))] new_image = new_image[:, :, ~np.all(new_image == 0, axis=(0, 1))] return [Image(new_image)] class PointParticle(Scatterer): """Generates a point particle A point particle is approximated by the size of a pixel. For subpixel positioning, the position is interpolated linearly. Parameters ---------- position : array_like of length 2 or 3 The position of the particle. Third index is optional, and represents the position in the direction normal to the camera plane. z : float The position in the direction normal to the camera plane. Used if `position` is of length 2. value : float A default value of the characteristic of the particle. Used by optics unless a more direct property is set: (eg. `refractive_index` for `Brightfield` and `intensity` for `Fluorescence`). """ def __init__(self, **kwargs): kwargs.pop("upsample", False) kwargs.pop("upsample_axes", False) super().__init__(upsample=1, upsample_axes=(), **kwargs) def get(self, image, **kwargs): return np.ones((1, 1, 1)) class Ellipse(Scatterer): """Generates an elliptical disk scatterer Parameters ---------- radius : float or array_like [float (, float)] Radius of the ellipse in meters. If only one value, assume circular. rotation : float Orientation angle of the ellipse in the camera plane in radians. position : array_like[float, float (, float)] The position of the particle. Third index is optional, and represents the position in the direction normal to the camera plane. z : float The position in the direction normal to the camera plane. Used if `position` is of length 2. value : float A default value of the characteristic of the particle. Used by optics unless a more direct property is set: (eg. `refractive_index` for `Brightfield` and `intensity` for `Fluorescence`). upsample : int Upsamples the calculations of the pixel occupancy fraction. 
""" def __init__( self, radius: PropertyLike[float] = 1e-6, rotation: PropertyLike[float] = 0, **kwargs ): super().__init__( radius=radius, rotation=rotation, upsample_axes=(0, 1), **kwargs ) def _process_properties(self, properties: dict) -> dict: """Preprocess the input to the method .get() Ensures that the radius is an array of length 2. If the radius is a single value, the particle is made circular """ properties = super()._process_properties(properties) # Ensure radius is of length 2 radius = np.array(properties["radius"]) if radius.ndim == 0: radius = np.array((properties["radius"], properties["radius"])) elif radius.size == 1: radius = np.array((*radius,) * 2) else: radius = radius[:2] properties["radius"] = radius return properties def get(self, *ignore, radius, rotation, voxel_size, **kwargs): # Create a grid to calculate on rad = radius[:2] / voxel_size[:2] ceil = int(np.max(np.ceil(rad))) X, Y = np.meshgrid(np.arange(-ceil, ceil), np.arange(-ceil, ceil)) # Rotate the grid if rotation != 0: Xt = X * np.cos(-rotation) + Y * np.sin(-rotation) Yt = -X * np.sin(-rotation) + Y * np.cos(-rotation) X = Xt Y = Yt # Evaluate ellipse mask = ((X * X) / (rad[0] * rad[0]) + (Y * Y) / (rad[1] * rad[1]) < 1) * 1.0 mask = np.expand_dims(mask, axis=-1) return mask class Sphere(Scatterer): """Generates a spherical scatterer Parameters ---------- radius : float Radius of the sphere in meters. position : array_like[float, float (, float)] The position of the particle. Third index is optional, and represents the position in the direction normal to the camera plane. z : float The position in the direction normal to the camera plane. Used if `position` is of length 2. value : float A default value of the characteristic of the particle. Used by optics unless a more direct property is set: (eg. `refractive_index` for `Brightfield` and `intensity` for `Fluorescence`). upsample : int Upsamples the calculations of the pixel occupancy fraction. """ def __init__(self, radius: PropertyLike[float] = 1e-6, **kwargs): super().__init__(radius=radius, **kwargs) def get(self, image, radius, voxel_size, **kwargs): # Create a grid to calculate on rad = radius / voxel_size rad_ceil = np.ceil(rad) x = np.arange(-rad_ceil[0], rad_ceil[0]) y = np.arange(-rad_ceil[1], rad_ceil[1]) z = np.arange(-rad_ceil[2], rad_ceil[2]) X, Y, Z = np.meshgrid((x / rad[0]) ** 2, (y / rad[1]) ** 2, (z / rad[2]) ** 2) mask = (X + Y + Z <= 1) * 1.0 return mask class Ellipsoid(Scatterer): """Generates an ellipsoidal scatterer Parameters ---------- radius : float or array_like[float (, float, float)] Radius of the ellipsoid in meters. If only one value, assume spherical. rotation : float Rotation of the ellipsoid in about the x, y and z axis. position : array_like[float, float (, float)] The position of the particle. Third index is optional, and represents the position in the direction normal to the camera plane. z : float The position in the direction normal to the camera plane. Used if `position` is of length 2. value : float A default value of the characteristic of the particle. Used by optics unless a more direct property is set: (eg. `refractive_index` for `Brightfield` and `intensity` for `Fluorescence`). upsample : int Upsamples the calculations of the pixel occupancy fraction. 
""" def __init__( self, radius: PropertyLike[float] = 1e-6, rotation: PropertyLike[float] = 0, **kwargs ): super().__init__(radius=radius, rotation=rotation, **kwargs) def _process_properties(self, propertydict): """Preprocess the input to the method .get() Ensures that the radius and the rotation properties both are arrays of length 3. If the radius is a single value, the particle is made a sphere If the radius are two values, the smallest value is appended as the third value The rotation vector is padded with zeros until it is of length 3 """ propertydict = super()._process_properties(propertydict) # Ensure radius has three values radius = np.array(propertydict["radius"]) if radius.ndim == 0: radius = np.array([radius]) if radius.size == 1: # If only one value, assume sphere radius = (*radius,) * 3 elif radius.size == 2: # If two values, duplicate the minor axis radius = (*radius, np.min(radius[-1])) elif radius.size == 3: # If three values, convert to tuple for consistency radius = (*radius,) propertydict["radius"] = radius # Ensure rotation has three values rotation = np.array(propertydict["rotation"]) if rotation.ndim == 0: rotation = np.array([rotation]) if rotation.size == 1: # If only one value, pad with two zeros rotation = (*rotation, 0, 0) elif rotation.size == 2: # If two values, pad with one zero rotation = (*rotation, 0) elif rotation.size == 3: # If three values, convert to tuple for consistency rotation = (*rotation,) propertydict["rotation"] = rotation return propertydict def get(self, image, radius, rotation, voxel_size, **kwargs): radius_in_pixels = radius / voxel_size max_rad = np.max(radius) / voxel_size rad_ceil = np.ceil(max_rad) # Create grid to calculate on x = np.arange(-rad_ceil[0], rad_ceil[0]) y = np.arange(-rad_ceil[1], rad_ceil[1]) z = np.arange(-rad_ceil[2], rad_ceil[2]) X, Y, Z = np.meshgrid(x, y, z) # Rotate the grid cos = np.cos(rotation) sin = np.sin(rotation) XR = ( (cos[0] * cos[1] * X) + (cos[0] * sin[1] * sin[2] - sin[0] * cos[2]) * Y + (cos[0] * sin[1] * cos[2] + sin[0] * sin[2]) * Z ) YR = ( (sin[0] * cos[1] * X) + (sin[0] * sin[1] * sin[2] + cos[0] * cos[2]) * Y + (sin[0] * sin[1] * cos[2] - cos[0] * sin[2]) * Z ) ZR = (-sin[1] * X) + cos[1] * sin[2] * Y + cos[1] * cos[2] * Z mask = ( (XR / radius_in_pixels[0]) ** 2 + (YR / radius_in_pixels[1]) ** 2 + (ZR / radius_in_pixels[2]) ** 2 < 1 ) * 1.0 return mask class MieScatterer(Scatterer): """Base implementation of a Mie particle. New Mie-theory scatterers can be implemented by extending this class, and passing a function that calculates the coefficients of the harmonics up to order `L`. To beprecise, the feature expects a wrapper function that takes the current values of the properties, as well as a inner function that takes an integer as the only parameter, and calculates the coefficients up to that integer. The return format is expected to be a tuple with two values, corresponding to `an` and `bn`. See `deeptrack.backend.mie_coefficients` for an example. Parameters ---------- coefficients : Callable[int] -> Tuple[ndarray, ndarray] Function that returns the harmonics coefficients. offset_z : "auto" or float Distance from the particle in the z direction the field is evaluated. If "auto", this is calculated from the pixel size and `collection_angle` collection_angle : "auto" or float The maximum collection angle in radians. If "auto", this is calculated from the objective NA (which is true if the objective is the limiting aperature). 
polarization_angle : float Angle of the polarization of the incoming light relative to the x-axis. L : int or str The number of terms used to evaluate the mie theory. If `"auto"`, it determines the number of terms automatically. position : array_like[float, float (, float)] The position of the particle. Third index is optional, and represents the position in the direction normal to the camera plane. z : float The position in the direction normal to the camera plane. Used if `position` is of length 2. """ def __init__( self, coefficients: Callable[..., Callable[[int], Tuple[ArrayLike, ArrayLike]]], offset_z: PropertyLike[str] = "auto", polarization_angle: PropertyLike[float] = 0, collection_angle: PropertyLike[str] = "auto", L: PropertyLike[str] = "auto", **kwargs ): kwargs.pop("is_field", None) kwargs.pop("crop_empty", None) super().__init__( is_field=True, crop_empty=False, L=L, offset_z=offset_z, polarization_angle=polarization_angle, collection_angle=collection_angle, coefficients=coefficients, **kwargs ) def _process_properties(self, properties): properties = super()._process_properties(properties) if properties["L"] == "auto": try: v = 2 * np.pi * np.max(properties["radius"]) / properties["wavelength"] properties["L"] = int(np.ceil(v + 4 * (v ** (1 / 3)) + 2)) except (ValueError, TypeError): pass if properties["collection_angle"] == "auto": properties["collection_angle"] = np.arcsin( properties["NA"] / properties["refractive_index_medium"] ) if properties["offset_z"] == "auto": properties["offset_z"] = ( 32 * min(properties["voxel_size"][:2]) / np.sin(properties["collection_angle"]) * properties["upscale"] ) return properties def get( self, inp, position, upscaled_output_region, voxel_size, padding, wavelength, refractive_index_medium, L, offset_z, collection_angle, polarization_angle, coefficients, upscale=1, **kwargs ): xSize = ( padding[2] + upscaled_output_region[2] - upscaled_output_region[0] + padding[0] ) ySize = ( padding[3] + upscaled_output_region[3] - upscaled_output_region[1] + padding[1] ) arr = image.pad_image_to_fft(np.zeros((xSize, ySize))) # Evluation grid x = np.arange(-padding[0], arr.shape[0] - padding[0]) - (position[0]) * upscale y = np.arange(-padding[1], arr.shape[1] - padding[1]) - (position[1]) * upscale X, Y = np.meshgrid(x * voxel_size[0], y * voxel_size[1], indexing="ij") R2 =
np.sqrt(X ** 2 + Y ** 2)
numpy.sqrt
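A small sketch of the element-wise numpy.sqrt usage this row completes: a radial-distance grid built from a meshgrid, with an illustrative grid size rather than the padded FFT grid in the source.

import numpy as np

x = np.linspace(-1.0, 1.0, 5)            # illustrative 1-D coordinates
X, Y = np.meshgrid(x, x, indexing="ij")
R2 = np.sqrt(X ** 2 + Y ** 2)            # element-wise, same shape as X
print(R2.shape)                          # (5, 5)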
""" Test functions for UMFPACK wrappers """ from __future__ import division, print_function, absolute_import import random import unittest import warnings from numpy.testing import assert_array_almost_equal, run_module_suite, dec from scipy import rand, matrix, diag, eye from scipy.sparse import csc_matrix, linalg, spdiags, SparseEfficiencyWarning import numpy as np import scikits.umfpack as um _is_32bit_platform = np.intp(0).itemsize < 8 # Force int64 index dtype even when indices fit into int32. def _to_int64(x): y = csc_matrix(x).copy() y.indptr = y.indptr.astype(np.int64) y.indices = y.indices.astype(np.int64) return y class _DeprecationAccept(unittest.TestCase): def setUp(self): self.mgr = warnings.catch_warnings() self.mgr.__enter__() warnings.simplefilter('ignore', SparseEfficiencyWarning) def tearDown(self): self.mgr.__exit__() class TestScipySolvers(_DeprecationAccept): """Tests inverting a sparse linear system""" def test_solve_complex_umfpack(self): # Solve with UMFPACK: double precision complex linalg.use_solver(useUmfpack=True) a = self.a.astype('D') b = self.b x = linalg.spsolve(a, b) assert_array_almost_equal(a*x, b) @dec.skipif(_is_32bit_platform) def test_solve_complex_long_umfpack(self): # Solve with UMFPACK: double precision complex, long indices linalg.use_solver(useUmfpack=True) a = _to_int64(self.a.astype('D')) b = self.b x = linalg.spsolve(a, b) assert_array_almost_equal(a*x, b) def test_solve_umfpack(self): # Solve with UMFPACK: double precision linalg.use_solver(useUmfpack=True) a = self.a.astype('d') b = self.b x = linalg.spsolve(a, b) assert_array_almost_equal(a*x, b) @dec.skipif(_is_32bit_platform) def test_solve_long_umfpack(self): # Solve with UMFPACK: double precision linalg.use_solver(useUmfpack=True) a = _to_int64(self.a.astype('d')) b = self.b x = linalg.spsolve(a, b) assert_array_almost_equal(a*x, b) def test_solve_sparse_rhs(self): # Solve with UMFPACK: double precision, sparse rhs linalg.use_solver(useUmfpack=True) a = self.a.astype('d') b = csc_matrix(self.b).T x = linalg.spsolve(a, b) assert_array_almost_equal(a*x, self.b) def test_factorized_umfpack(self): # Prefactorize (with UMFPACK) matrix for solving with multiple rhs linalg.use_solver(useUmfpack=True) a = self.a.astype('d') solve = linalg.factorized(a) x1 = solve(self.b) assert_array_almost_equal(a*x1, self.b) x2 = solve(self.b2)
assert_array_almost_equal(a*x2, self.b2)
numpy.testing.assert_array_almost_equal
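A self-contained sketch of numpy.testing.assert_array_almost_equal, the assertion this row completes; the arrays here are illustrative, not the sparse-solve results from the test.

import numpy as np
from numpy.testing import assert_array_almost_equal

# Passes: agreement to 6 decimal places (the default) is enough
assert_array_almost_equal(np.array([1.0, 2.0]), np.array([1.0, 2.0 + 1e-9]))

# Fails: the difference is far above the default tolerance
try:
    assert_array_almost_equal(np.array([1.0]), np.array([1.1]))
except AssertionError:
    print("mismatch detected")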
import numpy as np
import torch
import torch.nn as nn
import warnings
from typing import Iterable
from datetime import datetime, timedelta

import ptan
import ptan.ignite as ptan_ignite
from ignite.engine import Engine
from ignite.metrics import RunningAverage
from ignite.contrib.handlers import tensorboard_logger as tb_logger


@torch.no_grad()
def calc_values_of_states(states, net, device="cpu"):
    mean_vals = []
    for batch in
np.array_split(states, 64)
numpy.array_split
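A short sketch of the numpy.array_split batching pattern this row completes, with a toy states array; unlike np.split, array_split accepts sizes that do not divide evenly.

import numpy as np

states = np.arange(10).reshape(10, 1)    # stand-in for a stack of states
for batch in np.array_split(states, 3):  # 10 rows split as 4 + 3 + 3
    print(batch.shape)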
import numpy as np
import pandas as pds
import argparse
import warnings

np.random.seed(12349)


# cost functions available
def cost_function_misclassification_rate(labels):
    # labels = labels.astype(int)
    class_prob_arr = np.bincount(labels) / len(labels)
    return 1 - np.max(class_prob_arr)


def cost_function_entropy(labels):
    # labels = labels.astype(int)
    class_prob_arr = np.bincount(labels) / len(labels)
    class_prob_arr = class_prob_arr[class_prob_arr != 0]
    return np.sum(class_prob_arr * np.log2(1./class_prob_arr))


def cost_function_gini_index(labels):
    # labels = labels.astype(int)
    class_prob_arr = np.bincount(labels) / len(labels)
    return 1 - np.sum(
np.square(class_prob_arr)
numpy.square
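A worked sketch of the Gini-index computation this row completes with numpy.square; the label vector is illustrative.

import numpy as np

labels = np.array([0, 0, 1, 1, 1, 2])               # illustrative class labels
class_prob_arr = np.bincount(labels) / len(labels)  # [1/3, 1/2, 1/6]
gini = 1 - np.sum(np.square(class_prob_arr))        # 1 - sum(p_k ** 2)
print(round(gini, 4))                               # 0.6111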
import numpy as np import galsim import batoid from test_helpers import timer, init_gpu @timer def test_zernikeGQ(): if __name__ == '__main__': nx=1024 rings=10 tol=1e-4 else: nx=128 rings=5 tol=1e-3 telescope = batoid.Optic.fromYaml("LSST_r.yaml") telescope.clearObscuration() telescope['LSST.M1'].obscuration = batoid.ObscNegation( batoid.ObscCircle(4.18) ) zSquare = batoid.analysis.zernike( telescope, 0.0, 0.0, 625e-9, nx=nx, jmax=28, reference='chief' ) zGQ = batoid.analysis.zernikeGQ( telescope, 0.0, 0.0, 625e-9, rings=rings, jmax=28, reference='chief' ) np.testing.assert_allclose( zSquare, zGQ, rtol=0, atol=tol ) # Repeat with annular Zernikes telescope['LSST.M1'].obscuration = batoid.ObscNegation( batoid.ObscAnnulus(0.61*4.18, 4.18) ) zSquare = batoid.analysis.zernike( telescope, 0.0, 0.0, 625e-9, nx=nx, jmax=28, reference='chief', eps=0.61 ) zGQ = batoid.analysis.zernikeGQ( telescope, 0.0, 0.0, 625e-9, rings=rings, jmax=28, reference='chief', eps=0.61 ) np.testing.assert_allclose( zSquare, zGQ, rtol=0, atol=tol ) # Try off-axis zSquare = batoid.analysis.zernike( telescope, np.deg2rad(0.2), np.deg2rad(0.1), 625e-9, nx=nx, jmax=28, reference='chief', eps=0.61 ) zGQ = batoid.analysis.zernikeGQ( telescope, np.deg2rad(0.2), np.deg2rad(0.1), 625e-9, rings=rings, jmax=28, reference='chief', eps=0.61 ) np.testing.assert_allclose( zSquare, zGQ, rtol=0, atol=tol ) # Try reference == mean # Try off-axis zSquare = batoid.analysis.zernike( telescope, np.deg2rad(0.2), np.deg2rad(0.1), 625e-9, nx=nx, jmax=28, reference='mean', eps=0.61 ) zGQ = batoid.analysis.zernikeGQ( telescope, np.deg2rad(0.2), np.deg2rad(0.1), 625e-9, rings=rings, jmax=28, reference='mean', eps=0.61 ) # Z1-3 less reliable, but mostly uninteresting anyway... np.testing.assert_allclose( zSquare[4:], zGQ[4:], rtol=0, atol=tol ) @timer def test_huygensPSF(): telescope = batoid.Optic.fromYaml("LSST_r.yaml") # Test that we can infer dy from dx properly psf1 = batoid.analysis.huygensPSF( telescope, np.deg2rad(0.1), np.deg2rad(0.1), 620e-9, nx=64, nxOut=32, dx=10e-6, ) psf2 = batoid.analysis.huygensPSF( telescope, np.deg2rad(0.1), np.deg2rad(0.1), 620e-9, nx=64, nxOut=32, dx=10e-6, dy=10e-6 ) assert np.array_equal(psf1.primitiveVectors, psf2.primitiveVectors) np.testing.assert_allclose(psf1.array, psf2.array, rtol=1e-14, atol=1e-15) # Test vector vs scalar dx,dy psf1 = batoid.analysis.huygensPSF( telescope, np.deg2rad(0.1), np.deg2rad(0.1), 620e-9, nx=64, nxOut=32, dx=[10e-6, 0], dy=[0, 11e-6] ) psf2 = batoid.analysis.huygensPSF( telescope,
np.deg2rad(0.1)
numpy.deg2rad
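A minimal sketch of the numpy.deg2rad conversion this row completes; the 0.1-degree field angle mirrors the call site, and the round-trip check is only for illustration.

import numpy as np

theta_x = np.deg2rad(0.1)                    # degrees -> radians
print(theta_x)                               # ~0.001745
print(np.isclose(np.rad2deg(theta_x), 0.1))  # True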
import numpy as np
import pkg_resources

from seekr import kmer_counts


class TestBasicCounter:

    def _create_basic_counter_with_data(self, **kwargs):
        infasta = 'tests/data/example.fa'
        infasta = pkg_resources.resource_filename('seekr', infasta)
        counter = kmer_counts.BasicCounter(infasta=infasta, silent=True,
                                           log2=kmer_counts.Log2.post, **kwargs)
        return counter

    def test_counter_init(self):
        counter = self._create_basic_counter_with_data()
        assert len(counter.seqs) == 5
        assert counter.seqs[0] == 'AAAAAA'

    def test_occurrences_k1(self):
        counter = self._create_basic_counter_with_data(k=1)
        row = np.zeros(4)
        expected = row.copy()
        expected[0] = 1000
        row = counter.occurrences(row, counter.seqs[0])
        assert
np.allclose(row, expected)
numpy.allclose
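A tiny sketch of the numpy.allclose assertion this row completes, using arrays shaped like the expected counts in the k=1 test above.

import numpy as np

row = np.array([1000.0, 0.0, 0.0, 0.0])
expected = np.zeros(4)
expected[0] = 1000
# True: element-wise agreement within rtol=1e-05, atol=1e-08 (the defaults)
print(np.allclose(row, expected))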
# -*- coding: utf-8 -*- # Copyright © 2018 PyHelp Project Contributors # https://github.com/cgq-qgc/pyhelp # # This file is part of PyHelp. # Licensed under the terms of the GNU General Public License. # ---- Standard Library Imports import os import os.path as osp from multiprocessing import Pool import multiprocessing as mp import time import csv import calendar # ---- Third Party imports import numpy as np # ---- Local Libraries Imports from pyhelp import HELP3O DEL_TEMPFILES = True # ---- Run HELP def run_help_singlecell(item): """Run HELP for a single cell.""" cellname, outparam = item HELP3O.run_simulation(*outparam) results = read_monthly_help_output(outparam[5]) if DEL_TEMPFILES: os.remove(outparam[5]) return (cellname, results) def run_help_allcells(cellparams, ncore=None): """Run HELP in batch for multiple cells.""" output = {} ncore = max(mp.cpu_count() if ncore is None else ncore, 1) tstart = time.perf_counter() calcul_progress = 0 N = len(cellparams) pool = Pool(ncore) for cell in pool.imap_unordered(run_help_singlecell, cellparams.items()): output[cell[0]] = cell[1] calcul_progress += 1 progress_pct = calcul_progress/N*100 tpassed = time.perf_counter() - tstart tremain = (100-progress_pct)*tpassed/progress_pct/60 print(('\rHELP simulation in progress: %3.1f%% (%0.1f min remaining)' " ") % (progress_pct, tremain), end='') calcul_time = (time.perf_counter() - tstart) print('\nTask completed in %0.2f sec' % calcul_time) return output # ---- Read HELP output def read_monthly_help_output(filename): """ Read the monthly output from .OUT HELP file and return the data as numpy arrays stored in a dictionary. Support the output format that was modified from HELP 3.07 (see PR#2). """ with open(filename, 'r') as csvfile: csvread = list(csv.reader(csvfile)) arr_years = [] vstack_precip = [] vstack_runoff = [] vstack_evapo = [] vstack_subrun1 = [] vstack_subrun2 = [] vstack_percol = [] vstack_rechg = [] year = None i = 0 while True: if i+1 >= len(csvread): break if len(csvread[i]) == 0: i += 1 continue line = csvread[i][0] if 'MONTHLY TOTALS' in line: year = int(line.split()[-1]) arr_years.append(year) subrun1 = None subrun2 = np.zeros(12).astype('float32') percol = None while True: i += 1 if len(csvread[i]) == 0: continue line = csvread[i][0] if '**********' in line: break elif 'PRECIPITATION' in line: precip = np.array(line.split()[-12:]).astype('float32') elif 'RUNOFF' in line: runoff = np.array(line.split()[-12:]).astype('float32') elif 'EVAPOTRANSPIRATION' in line: evapo = np.array(line.split()[-12:]).astype('float32') elif 'LAT. 
DRAINAGE' in line: if subrun1 is None: subrun1 = np.array( line.split()[-12:]).astype('float32') else: subrun2 += np.array( line.split()[-12:]).astype('float32') elif 'PERCOLATION' in line: if percol is None: percol = np.array(line.split()[-12:]).astype('float32') rechg = np.array(line.split()[-12:]).astype('float32') vstack_precip.append(precip) vstack_runoff.append(runoff) vstack_evapo.append(np.array(evapo).astype('float32')) vstack_rechg.append(np.array(rechg).astype('float32')) vstack_percol.append(np.array(percol).astype('float32')) if subrun1 is None: vstack_subrun1.append(np.zeros(12).astype('float32')) else: vstack_subrun1.append(subrun1) vstack_subrun2.append(subrun2) elif 'FINAL WATER STORAGE' in line: break i += 1 data = {'years': np.array(arr_years).astype('uint16'), 'precip': np.vstack(vstack_precip), 'runoff': np.vstack(vstack_runoff), 'evapo': np.vstack(vstack_evapo), 'subrun1': np.vstack(vstack_subrun1), 'subrun2': np.vstack(vstack_subrun2), 'perco': np.vstack(vstack_percol), 'rechg':
np.vstack(vstack_rechg)
numpy.vstack
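A small sketch of the numpy.vstack pattern this row completes: a list of per-year, length-12 monthly arrays stacked into an (nyears, 12) matrix; the values are illustrative.

import numpy as np

vstack_rechg = [np.arange(12, dtype='float32') for _ in range(3)]
rechg = np.vstack(vstack_rechg)   # rows = years, columns = months
print(rechg.shape)                # (3, 12)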
from six import with_metaclass from abc import ABCMeta, abstractmethod from collections import defaultdict import numpy try: from collections.abc import Set, Mapping except ImportError: from collections import Set, Mapping class AccumulatorABC(with_metaclass(ABCMeta)): ''' ABC for an accumulator. Derived must implement: identity: returns a new object of same type as self, such that self + self.identity() == self add(other): adds an object of same type as self to self Concrete implementations are provided for __add__, __radd__, __iadd__ ''' @abstractmethod def identity(self): pass @abstractmethod def add(self, other): pass def __add__(self, other): ret = self.identity() ret.add(self) ret.add(other) return ret def __radd__(self, other): ret = self.identity() ret.add(other) ret.add(self) return ret def __iadd__(self, other): self.add(other) return self class value_accumulator(AccumulatorABC): ''' Holds a value of arbitrary type, with identity as constructed by default_factory ''' def __init__(self, default_factory, initial=None): self.value = default_factory() if initial is None else initial self.default_factory = default_factory def __repr__(self): if type(self.default_factory) is type: defrepr = self.default_factory.__name__ else: defrepr = repr(self.default_factory) return "value_accumulator(%s, %r)" % (defrepr, self.value) def identity(self): return value_accumulator(self.default_factory) def add(self, other): if isinstance(other, value_accumulator): self.value = self.value + other.value else: self.value = self.value + other class set_accumulator(set, AccumulatorABC): ''' A set with accumulator semantics ''' def identity(self): return set_accumulator() def add(self, other): if isinstance(other, Set): set.update(self, other) else: set.add(self, other) class dict_accumulator(dict, AccumulatorABC): ''' Like a dict but also has accumulator semantics It is assumed that the contents of the dict have accumulator semantics ''' def identity(self): ret = dict_accumulator() for key, value in self.items(): ret[key] = value.identity() return ret def add(self, other): if isinstance(other, Mapping): for key, value in other.items(): if key not in self: if isinstance(value, AccumulatorABC): self[key] = value.identity() else: raise ValueError self[key] += value else: raise ValueError class defaultdict_accumulator(defaultdict, AccumulatorABC): ''' Like a defaultdict but also has accumulator semantics It is assumed that the contents of the dict have accumulator semantics ''' def identity(self): return defaultdict_accumulator(self.default_factory) def add(self, other): for key, value in other.items(): self[key] += value class column_accumulator(AccumulatorABC): def __init__(self, value): if not isinstance(value, numpy.ndarray): raise ValueError("column_accumulator only works with numpy arrays") self._empty =
numpy.zeros(dtype=value.dtype, shape=(0,) + value.shape[1:])
numpy.zeros
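A standalone sketch of the numpy.zeros call this row completes: a zero-length array that keeps the trailing shape and dtype of value, so later concatenation along axis 0 stays well defined; the value array here is illustrative.

import numpy

value = numpy.ones((5, 3))
empty = numpy.zeros(dtype=value.dtype, shape=(0,) + value.shape[1:])
print(empty.shape, empty.dtype)                 # (0, 3) float64
print(numpy.concatenate((empty, value)).shape)  # (5, 3)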
import logging

import numpy as np

from . import timer


def inv_logtransform(plog):
    """ Transform the power spectrum for the log field to the
    power spectrum of delta.

    Inputs
    ------
    plog - power spectrum of log field computed at points on a Fourier grid

    Outputs
    -------
    p - power spectrum of the delta field
    """
    xi_log = np.fft.ifftn(plog)
    xi = np.exp(xi_log) - 1
    p = np.fft.fftn(xi).real.astype('float')
    return p


def logtransform(p, return_corrected=False):
    """ Transform the power spectrum of delta to the power spectrum
    of the log field.

    Inputs
    ------
    p - power spectrum computed at points on a Fourier grid

    Outputs
    -------
    plog        - power spectrum of the log field
    p_corrected - corrected input power spectrum to match.
    """
    if return_corrected:
        p_corrected = p.copy()

    xi = np.fft.ifftn(p.astype('complex'))

    if not np.min(xi.real) > -1:
        logging.critical("simbox fatal error with log transform! P(k) amp is too high maybe...")
        raise ValueError

    logxi = np.log(1 + xi)
    plog = np.fft.fftn(logxi).real.astype('float')
    plog.flat[0] = 0

    # Set negative modes to 0
    plog[plog<0] = 0

    if return_corrected:
        # Do the inverse transform to compute the corrected input power spectrum
        p_corrected = inv_logtransform(plog)
        return plog, p_corrected

    return plog


def gofft(grid):
    """ Forward FFT """
    with timer.Timer("FFT shape %s time"%str(grid.shape)):
        n = np.prod(grid.shape)
        dk = 1./n*np.fft.fftn(grid)
    return dk


def gofftinv(grid):
    """ inverse FFT """
    with timer.Timer("inv FFT shape %s time"%str(grid.shape)):
        n = np.prod(grid.shape)
        d = n*np.fft.ifftn(grid)
    return d


class SimBox:
    """ """

    def __init__(self, pk_model, shape, length, lognorm=False, apply_window=False):
        """ Generate Gaussian and lognormal simulations in a box.

        Inputs
        ------
        pk_model
        shape
        length
        lognorm
        """
        self.shape = shape
        self.length = length
        self.step =
np.array(self.length)
numpy.array
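A minimal sketch of the numpy.array conversion this row completes; the division by the grid shape is an assumption added only to show why the tuple is promoted to an array (element-wise arithmetic), not the source's actual expression.

import numpy as np

length = (100.0, 100.0, 100.0)             # illustrative box side lengths
shape = (64, 64, 64)                       # illustrative grid shape
step = np.array(length) / np.array(shape)  # element-wise, unlike plain tuples
print(step)                                # [1.5625 1.5625 1.5625]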
""" Routines related to flexure, air2vac, etc. """ import inspect import numpy as np import copy from matplotlib import pyplot as plt from matplotlib import gridspec from scipy import interpolate from astropy import units from astropy.coordinates import solar_system, ICRS from astropy.coordinates import UnitSphericalRepresentation, CartesianRepresentation from astropy.time import Time from linetools.spectra import xspectrum1d from pypeit import msgs from pypeit.core import arc from pypeit.core import qa from pypeit import utils from pypeit import debugger def load_sky_spectrum(sky_file): """ Load a sky spectrum into an XSpectrum1D object Args: sky_file: str Returns: sky_spec: XSpectrum1D spectrum """ sky_spec = xspectrum1d.XSpectrum1D.from_file(sky_file) return sky_spec def flex_shift(obj_skyspec, arx_skyspec, mxshft=20): """ Calculate shift between object sky spectrum and archive sky spectrum Parameters ---------- obj_skyspec arx_skyspec Returns ------- flex_dict: dict Contains flexure info """ flex_dict = {} # Determine the brightest emission lines msgs.warn("If we use Paranal, cut down on wavelength early on") arx_amp, arx_amp_cont, arx_cent, arx_wid, _, arx_w, arx_yprep, nsig = arc.detect_lines(arx_skyspec.flux.value) obj_amp, obj_amp_cont, obj_cent, obj_wid, _, obj_w, obj_yprep, nsig_obj= arc.detect_lines(obj_skyspec.flux.value) # Keep only 5 brightest amplitude lines (xxx_keep is array of # indices within arx_w of the 5 brightest) arx_keep = np.argsort(arx_amp[arx_w])[-5:] obj_keep = np.argsort(obj_amp[obj_w])[-5:] # Calculate wavelength (Angstrom per pixel) arx_disp = np.append(arx_skyspec.wavelength.value[1]-arx_skyspec.wavelength.value[0], arx_skyspec.wavelength.value[1:]-arx_skyspec.wavelength.value[:-1]) #arx_disp = (np.amax(arx_sky.wavelength.value)-np.amin(arx_sky.wavelength.value))/arx_sky.wavelength.size obj_disp = np.append(obj_skyspec.wavelength.value[1]-obj_skyspec.wavelength.value[0], obj_skyspec.wavelength.value[1:]-obj_skyspec.wavelength.value[:-1]) #obj_disp = (np.amax(obj_sky.wavelength.value)-np.amin(obj_sky.wavelength.value))/obj_sky.wavelength.size # Calculate resolution (lambda/delta lambda_FWHM)..maybe don't need # this? can just use sigmas arx_idx = (arx_cent+0.5).astype(np.int)[arx_w][arx_keep] # The +0.5 is for rounding arx_res = arx_skyspec.wavelength.value[arx_idx]/\ (arx_disp[arx_idx]*(2*np.sqrt(2*np.log(2)))*arx_wid[arx_w][arx_keep]) obj_idx = (obj_cent+0.5).astype(np.int)[obj_w][obj_keep] # The +0.5 is for rounding obj_res = obj_skyspec.wavelength.value[obj_idx]/ \ (obj_disp[obj_idx]*(2*np.sqrt(2*np.log(2)))*obj_wid[obj_w][obj_keep]) #obj_res = (obj_sky.wavelength.value[0]+(obj_disp*obj_cent[obj_w][obj_keep]))/( # obj_disp*(2*np.sqrt(2*np.log(2)))*obj_wid[obj_w][obj_keep]) if not np.all(
np.isfinite(obj_res)
numpy.isfinite
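A short sketch of the numpy.isfinite guard this row completes; the resolution values are made up to show how NaN and inf trip the check.

import numpy as np

obj_res = np.array([1250.0, np.nan, 980.0, np.inf])  # illustrative values
print(np.isfinite(obj_res))                          # [ True False  True False]
if not np.all(np.isfinite(obj_res)):
    print("non-finite resolution values found")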
from __future__ import division import glob import numpy as NP from functools import reduce import numpy.ma as MA import progressbar as PGB import h5py import healpy as HP import warnings import copy import astropy.cosmology as CP from astropy.time import Time, TimeDelta from astropy.io import fits from astropy import units as U from astropy import constants as FCNST from scipy import interpolate from astroutils import DSP_modules as DSP from astroutils import constants as CNST from astroutils import nonmathops as NMO from astroutils import mathops as OPS from astroutils import lookup_operations as LKP import prisim from prisim import interferometry as RI from prisim import primary_beams as PB from prisim import delay_spectrum as DS try: from pyuvdata import UVBeam except ImportError: uvbeam_module_found = False else: uvbeam_module_found = True prisim_path = prisim.__path__[0]+'/' cosmoPlanck15 = CP.Planck15 # Planck 2015 cosmology cosmo100 = cosmoPlanck15.clone(name='Modified Planck 2015 cosmology with h=1.0', H0=100.0) # Modified Planck 2015 cosmology with h=1.0, H= 100 km/s/Mpc ################################################################################ def write_PRISim_bispectrum_phase_to_npz(infile_prefix, outfile_prefix, triads=None, bltriplet=None, hdf5file_prefix=None, infmt='npz', datakey='noisy', blltol=0.1): """ ---------------------------------------------------------------------------- Write closure phases computed in a PRISim simulation to a NPZ file with appropriate format for further analysis. Inputs: infile_prefix [string] HDF5 file or NPZ file created by a PRISim simulation or its replication respectively. If infmt is specified as 'hdf5', then hdf5file_prefix will be ignored and all the observing info will be read from here. If infmt is specified as 'npz', then hdf5file_prefix needs to be specified in order to read the observing parameters. triads [list or numpy array or None] Antenna triads given as a list of 3-element lists or a ntriads x 3 array. Each element in the inner list is an antenna label. They will be converted to strings internally. If set to None, then all triads determined by bltriplet will be used. If specified, then inputs in blltol and bltriplet will be ignored. bltriplet [numpy array or None] 3x3 numpy array containing the 3 baseline vectors. The first axis denotes the three baselines, the second axis denotes the East, North, Up coordinates of the baseline vector. Units are in m. Will be used only if triads is set to None. outfile_prefix [string] Prefix of the NPZ file. It will be appended by '_noiseless', '_noisy', and '_noise' and further by extension '.npz' infmt [string] Format of the input file containing visibilities. Accepted values are 'npz' (default), and 'hdf5'. If infmt is specified as 'npz', then hdf5file_prefix also needs to be specified for reading the observing parameters datakey [string] Specifies which -- 'noiseless', 'noisy' (default), or 'noise' -- visibilities are to be written to the output. If set to None, and infmt is 'hdf5', then all three sets of visibilities are written. The datakey string will also be added as a suffix in the output file. blltol [scalar] Baseline length tolerance (in m) for matching baseline vectors in triads. It must be a scalar. Default = 0.1 m. Will be used only if triads is set to None and bltriplet is to be used. 
---------------------------------------------------------------------------- """ if not isinstance(infile_prefix, str): raise TypeError('Input infile_prefix must be a string') if not isinstance(outfile_prefix, str): raise TypeError('Input outfile_prefix must be a string') if (triads is None) and (bltriplet is None): raise ValueError('One of triads or bltriplet must be set') if triads is None: if not isinstance(bltriplet, NP.ndarray): raise TypeError('Input bltriplet must be a numpy array') if not isinstance(blltol, (int,float)): raise TypeError('Input blltol must be a scalar') if bltriplet.ndim != 2: raise ValueError('Input bltriplet must be a 2D numpy array') if bltriplet.shape[0] != 3: raise ValueError('Input bltriplet must contain three baseline vectors') if bltriplet.shape[1] != 3: raise ValueError('Input bltriplet must contain baseline vectors along three corrdinates in the ENU frame') else: if not isinstance(triads, (list, NP.ndarray)): raise TypeError('Input triads must be a list or numpy array') triads = NP.asarray(triads).astype(str) if not isinstance(infmt, str): raise TypeError('Input infmt must be a string') if infmt.lower() not in ['npz', 'hdf5']: raise ValueError('Input file format must be npz or hdf5') if infmt.lower() == 'npz': if not isinstance(hdf5file_prefix, str): raise TypeError('If infmt is npz, then hdf5file_prefix needs to be specified for observing parameters information') if datakey is None: datakey = ['noisy'] if isinstance(datakey, str): datakey = [datakey] elif not isinstance(datakey, list): raise TypeError('Input datakey must be a list') for dkey in datakey: if dkey.lower() not in ['noiseless', 'noisy', 'noise']: raise ValueError('Invalid input found in datakey') if infmt.lower() == 'hdf5': fullfnames_with_extension = glob.glob(infile_prefix + '*' + infmt.lower()) fullfnames_without_extension = [fname.split('.hdf5')[0] for fname in fullfnames_with_extension] else: fullfnames_without_extension = [infile_prefix] if len(fullfnames_without_extension) == 0: raise IOError('No input files found with pattern {0}'.format(infile_prefix)) try: if infmt.lower() == 'hdf5': simvis = RI.InterferometerArray(None, None, None, init_file=fullfnames_without_extension[0]) else: simvis = RI.InterferometerArray(None, None, None, init_file=hdf5file_prefix) except: raise IOError('Input PRISim file does not contain a valid PRISim output') latitude = simvis.latitude longitude = simvis.longitude location = ('{0:.5f}d'.format(longitude), '{0:.5f}d'.format(latitude)) last = simvis.lst / 15.0 / 24.0 # from degrees to fraction of day last = last.reshape(-1,1) daydata = NP.asarray(simvis.timestamp[0]).ravel() if infmt.lower() == 'npz': simvisinfo = NP.load(fullfnames_without_extension[0]+'.'+infmt.lower()) skyvis = simvisinfo['noiseless'][0,...] vis = simvisinfo['noisy'] noise = simvisinfo['noise'] n_realize = vis.shape[0] else: n_realize = len(fullfnames_without_extension) cpdata = {} outfile = {} for fileind in range(n_realize): if infmt.lower() == 'npz': simvis.vis_freq = vis[fileind,...] simvis.vis_noise_freq = noise[fileind,...] 
else: simvis = RI.InterferometerArray(None, None, None, init_file=fullfnames_without_extension[fileind]) if fileind == 0: if triads is None: triads, bltriplets = simvis.getThreePointCombinations(unique=False) # triads = NP.asarray(prisim_BSP_info['antenna_triplets']).reshape(-1,3) # bltriplets = NP.asarray(prisim_BSP_info['baseline_triplets']) triads = NP.asarray(triads).reshape(-1,3) bltriplets = NP.asarray(bltriplets) blinds = [] matchinfo = LKP.find_NN(bltriplet, bltriplets.reshape(-1,3), distance_ULIM=blltol) revind = [] for blnum in NP.arange(bltriplet.shape[0]): if len(matchinfo[0][blnum]) == 0: revind += [blnum] if len(revind) > 0: flip_factor = NP.ones(3, dtype=NP.float) flip_factor[NP.array(revind)] = -1 rev_bltriplet = bltriplet * flip_factor.reshape(-1,1) matchinfo = LKP.find_NN(rev_bltriplet, bltriplets.reshape(-1,3), distance_ULIM=blltol) for blnum in NP.arange(bltriplet.shape[0]): if len(matchinfo[0][blnum]) == 0: raise ValueError('Some baselines in the triplet are not found in the model triads') triadinds = [] for blnum in NP.arange(bltriplet.shape[0]): triadind, blind = NP.unravel_index(NP.asarray(matchinfo[0][blnum]), (bltriplets.shape[0], bltriplets.shape[1])) triadinds += [triadind] triadind_intersection = NP.intersect1d(triadinds[0], NP.intersect1d(triadinds[1], triadinds[2])) if triadind_intersection.size == 0: raise ValueError('Specified triad not found in the PRISim model. Try other permutations of the baseline vectors and/or reverse individual baseline vectors in the triad before giving up.') triads = triads[triadind_intersection,:] selected_bltriplets = bltriplets[triadind_intersection,:,:].reshape(-1,3,3) prisim_BSP_info = simvis.getClosurePhase(antenna_triplets=triads.tolist(), delay_filter_info=None, specsmooth_info=None, spectral_window_info=None, unique=False) if fileind == 0: triads = NP.asarray(prisim_BSP_info['antenna_triplets']).reshape(-1,3) # Re-establish the triads returned after the first iteration (to accunt for any order flips) for outkey in datakey: if fileind == 0: outfile[outkey] = outfile_prefix + '_{0}.npz'.format(outkey) if outkey == 'noiseless': if fileind == 0: # cpdata = prisim_BSP_info['closure_phase_skyvis'][triadind_intersection,:,:][NP.newaxis,...] cpdata[outkey] = prisim_BSP_info['closure_phase_skyvis'][NP.newaxis,...] else: # cpdata = NP.concatenate((cpdata, prisim_BSP_info['closure_phase_skyvis'][triadind_intersection,:,:][NP.newaxis,...]), axis=0) cpdata[outkey] = NP.concatenate((cpdata[outkey], prisim_BSP_info['closure_phase_skyvis'][NP.newaxis,...]), axis=0) if outkey == 'noisy': if fileind == 0: # cpdata = prisim_BSP_info['closure_phase_vis'][triadind_intersection,:,:][NP.newaxis,...] cpdata[outkey] = prisim_BSP_info['closure_phase_vis'][NP.newaxis,...] 
else: # cpdata = NP.concatenate((cpdata, prisim_BSP_info['closure_phase_vis'][triadind_intersection,:,:][NP.newaxis,...]), axis=0) cpdata[outkey] = NP.concatenate((cpdata[outkey], prisim_BSP_info['closure_phase_vis'][NP.newaxis,...]), axis=0) if outkey == 'noise': if fileind == 0: # cpdata = prisim_BSP_info['closure_phase_noise'][triadind_intersection,:,:] cpdata[outkey] = prisim_BSP_info['closure_phase_noise'][NP.newaxis,:,:] else: # cpdata = NP.concatenate((cpdata, prisim_BSP_info['closure_phase_noise'][triadind_intersection,:,:][NP.newaxis,...]), axis=0) cpdata[outkey] = NP.concatenate((cpdata[outkey], prisim_BSP_info['closure_phase_noise'][NP.newaxis,...]), axis=0) for outkey in datakey: cpdata[outkey] = NP.rollaxis(cpdata[outkey], 3, start=0) flagsdata = NP.zeros(cpdata[outkey].shape, dtype=NP.bool) NP.savez_compressed(outfile[outkey], closures=cpdata[outkey], flags=flagsdata, triads=triads, last=last+NP.zeros((1,n_realize)), days=daydata+NP.arange(n_realize)) ################################################################################ def loadnpz(npzfile, longitude=0.0, latitude=0.0, lst_format='fracday'): """ ---------------------------------------------------------------------------- Read an input NPZ file containing closure phase data output from CASA and return a dictionary Inputs: npzfile [string] Input NPZ file including full path containing closure phase data. It must have the following files/keys inside: 'closures' [numpy array] Closure phase (radians). It is of shape (nlst,ndays,ntriads,nchan) 'triads' [numpy array] Array of triad tuples, of shape (ntriads,3) 'flags' [numpy array] Array of flags (boolean), of shape (nlst,ndays,ntriads,nchan) 'last' [numpy array] Array of LST for each day (CASA units which is MJD+6713). Shape is (nlst,ndays) 'days' [numpy array] Array of days, shape is (ndays,) 'averaged_closures' [numpy array] optional array of closure phases averaged across days. Shape is (nlst,ntriads,nchan) 'std_dev_lst' [numpy array] optional array of standard deviation of closure phases across days. Shape is (nlst,ntriads,nchan) 'std_dev_triads' [numpy array] optional array of standard deviation of closure phases across triads. Shape is (nlst,ndays,nchan) latitude [scalar int or float] Latitude of site (in degrees). Default=0.0 deg. longitude [scalar int or float] Longitude of site (in degrees). Default=0.0 deg. lst_format [string] Specifies the format/units in which the 'last' key is to be interpreted. If set to 'hourangle', the LST is in units of hour angle. If set to 'fracday', the fractional portion of the 'last' value is the LST in units of days. 
Output: cpinfo [dictionary] Contains one top level keys, namely, 'raw' Under key 'raw' which holds a dictionary, the subkeys include 'cphase' (nlst,ndays,ntriads,nchan), 'triads' (ntriads,3), 'lst' (nlst,ndays), and 'flags' (nlst,ndays,ntriads,nchan), and some other optional keys ---------------------------------------------------------------------------- """ npzdata = NP.load(npzfile) cpdata = npzdata['closures'] triadsdata = npzdata['triads'] flagsdata = npzdata['flags'] location = ('{0:.5f}d'.format(longitude), '{0:.5f}d'.format(latitude)) daydata = Time(npzdata['days'].astype(NP.float64), scale='utc', format='jd', location=location) # lstdata = Time(npzdata['last'].astype(NP.float64) - 6713.0, scale='utc', format='mjd', location=('+21.4278d', '-30.7224d')).sidereal_time('apparent') # Subtract 6713 based on CASA convention to obtain MJD if lst_format.lower() == 'hourangle': lstHA = npzdata['last'] lstday = daydata.reshape(1,-1) + TimeDelta(NP.zeros(lstHA.shape[0]).reshape(-1,1)*U.s) elif lst_format.lower() == 'fracday': lstfrac, lstint = NP.modf(npzdata['last']) lstday = Time(lstint.astype(NP.float64) - 6713.0, scale='utc', format='mjd', location=location) # Subtract 6713 based on CASA convention to obtain MJD lstHA = lstfrac * 24.0 # in hours else: raise ValueError('Input lst_format invalid') cp = cpdata.astype(NP.float64) flags = flagsdata.astype(NP.bool) cpinfo = {} datapool = ['raw'] for dpool in datapool: cpinfo[dpool] = {} if dpool == 'raw': qtys = ['cphase', 'triads', 'flags', 'lst', 'lst-day', 'days', 'dayavg', 'std_triads', 'std_lst'] for qty in qtys: if qty == 'cphase': cpinfo[dpool][qty] = NP.copy(cp) elif qty == 'triads': cpinfo[dpool][qty] = NP.copy(triadsdata) elif qty == 'flags': cpinfo[dpool][qty] = NP.copy(flags) elif qty == 'lst': cpinfo[dpool][qty] = NP.copy(lstHA) elif qty == 'lst-day': cpinfo[dpool][qty] = NP.copy(lstday.jd) elif qty == 'days': cpinfo[dpool][qty] = NP.copy(daydata.jd) elif qty == 'dayavg': if 'averaged_closures' in npzdata: cpinfo[dpool][qty] = NP.copy(cp_dayavg) elif qty == 'std_triads': if 'std_dev_triad' in npzdata: cpinfo[dpool][qty] = NP.copy(cp_std_triads) elif qty == 'std_lst': if 'std_dev_lst' in npzdata: cpinfo[dpool][qty] = NP.copy(cp_std_lst) return cpinfo ################################################################################ def npz2hdf5(npzfile, hdf5file, longitude=0.0, latitude=0.0, lst_format='fracday'): """ ---------------------------------------------------------------------------- Read an input NPZ file containing closure phase data output from CASA and save it to HDF5 format Inputs: npzfile [string] Input NPZ file including full path containing closure phase data. It must have the following files/keys inside: 'closures' [numpy array] Closure phase (radians). It is of shape (nlst,ndays,ntriads,nchan) 'triads' [numpy array] Array of triad tuples, of shape (ntriads,3) 'flags' [numpy array] Array of flags (boolean), of shape (nlst,ndays,ntriads,nchan) 'last' [numpy array] Array of LST for each day (CASA units ehich is MJD+6713). Shape is (nlst,ndays) 'days' [numpy array] Array of days, shape is (ndays,) 'averaged_closures' [numpy array] optional array of closure phases averaged across days. Shape is (nlst,ntriads,nchan) 'std_dev_lst' [numpy array] optional array of standard deviation of closure phases across days. Shape is (nlst,ntriads,nchan) 'std_dev_triads' [numpy array] optional array of standard deviation of closure phases across triads. Shape is (nlst,ndays,nchan) hdf5file [string] Output HDF5 file including full path. 
latitude [scalar int or float] Latitude of site (in degrees). Default=0.0 deg. longitude [scalar int or float] Longitude of site (in degrees). Default=0.0 deg. lst_format [string] Specifies the format/units in which the 'last' key is to be interpreted. If set to 'hourangle', the LST is in units of hour angle. If set to 'fracday', the fractional portion of the 'last' value is the LST in units of days. ---------------------------------------------------------------------------- """ npzdata = NP.load(npzfile) cpdata = npzdata['closures'] triadsdata = npzdata['triads'] flagsdata = npzdata['flags'] location = ('{0:.5f}d'.format(longitude), '{0:.5f}d'.format(latitude)) daydata = Time(npzdata['days'].astype(NP.float64), scale='utc', format='jd', location=location) # lstdata = Time(npzdata['last'].astype(NP.float64) - 6713.0, scale='utc', format='mjd', location=('+21.4278d', '-30.7224d')).sidereal_time('apparent') # Subtract 6713 based on CASA convention to obtain MJD if lst_format.lower() == 'hourangle': lstHA = npzdata['last'] lstday = daydata.reshape(1,-1) + TimeDelta(NP.zeros(lstHA.shape[0]).reshape(-1,1)*U.s) elif lst_format.lower() == 'fracday': lstfrac, lstint = NP.modf(npzdata['last']) lstday = Time(lstint.astype(NP.float64) - 6713.0, scale='utc', format='mjd', location=location) # Subtract 6713 based on CASA convention to obtain MJD lstHA = lstfrac * 24.0 # in hours else: raise ValueError('Input lst_format invalid') cp = cpdata.astype(NP.float64) flags = flagsdata.astype(NP.bool) if 'averaged_closures' in npzdata: day_avg_cpdata = npzdata['averaged_closures'] cp_dayavg = day_avg_cpdata.astype(NP.float64) if 'std_dev_triad' in npzdata: std_triads_cpdata = npzdata['std_dev_triad'] cp_std_triads = std_triads_cpdata.astype(NP.float64) if 'std_dev_lst' in npzdata: std_lst_cpdata = npzdata['std_dev_lst'] cp_std_lst = std_lst_cpdata.astype(NP.float64) with h5py.File(hdf5file, 'w') as fobj: datapool = ['raw'] for dpool in datapool: if dpool == 'raw': qtys = ['cphase', 'triads', 'flags', 'lst', 'lst-day', 'days', 'dayavg', 'std_triads', 'std_lst'] for qty in qtys: data = None if qty == 'cphase': data = NP.copy(cp) elif qty == 'triads': data = NP.copy(triadsdata) elif qty == 'flags': data = NP.copy(flags) elif qty == 'lst': data = NP.copy(lstHA) elif qty == 'lst-day': data = NP.copy(lstday.jd) elif qty == 'days': data = NP.copy(daydata.jd) elif qty == 'dayavg': if 'averaged_closures' in npzdata: data = NP.copy(cp_dayavg) elif qty == 'std_triads': if 'std_dev_triad' in npzdata: data = NP.copy(cp_std_triads) elif qty == 'std_lst': if 'std_dev_lst' in npzdata: data = NP.copy(cp_std_lst) if data is not None: dset = fobj.create_dataset('{0}/{1}'.format(dpool, qty), data=data, compression='gzip', compression_opts=9) ################################################################################ def save_CPhase_cross_power_spectrum(xcpdps, outfile): """ ---------------------------------------------------------------------------- Save cross-power spectrum information in a dictionary to a HDF5 file Inputs: xcpdps [dictionary] This dictionary is essentially an output of the member function compute_power_spectrum() of class ClosurePhaseDelaySpectrum. 
It has the following key-value structure: 'triads' ((ntriads,3) array), 'triads_ind', ((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst' ((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,) array), 'days' ((ndays,) array), 'day_ind' ((ndays,) array), 'dday' ((ndays,) array), 'oversampled' and 'resampled' corresponding to whether resample was set to False or True in call to member function FT(). Values under keys 'triads_ind' and 'lst_ind' are numpy array corresponding to triad and time indices used in selecting the data. Values under keys 'oversampled' and 'resampled' each contain a dictionary with the following keys and values: 'z' [numpy array] Redshifts corresponding to the band centers in 'freq_center'. It has shape=(nspw,) 'lags' [numpy array] Delays (in seconds). It has shape=(nlags,) 'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding to 'lags'. It has shape=(nspw,nlags) 'freq_center' [numpy array] contains the center frequencies (in Hz) of the frequency subbands of the subband delay spectra. It is of size n_win. It is roughly equivalent to redshift(s) 'freq_wts' [numpy array] Contains frequency weights applied on each frequency sub-band during the subband delay transform. It is of size n_win x nchan. 'bw_eff' [numpy array] contains the effective bandwidths (in Hz) of the subbands being delay transformed. It is of size n_win. It is roughly equivalent to width in redshift or along line-of-sight 'shape' [string] shape of the frequency window function applied. Usual values are 'rect' (rectangular), 'bhw' (Blackman-Harris), 'bnw' (Blackman-Nuttall). 'fftpow' [scalar] the power to which the FFT of the window was raised. The value is be a positive scalar with default = 1.0 'lag_corr_length' [numpy array] It is the correlation timescale (in pixels) of the subband delay spectra. It is proportional to inverse of effective bandwidth. It is of size n_win. The unit size of a pixel is determined by the difference between adjacent pixels in lags under key 'lags' which in turn is effectively inverse of the effective bandwidth of the subband specified in bw_eff It further contains one or more of the following keys named 'whole', 'submodel', 'residual', and 'errinfo' each of which is a dictionary. 'whole' contains power spectrum info about the input closure phases. 'submodel' contains power spectrum info about the model that will have been subtracted (as closure phase) from the 'whole' model. 'residual' contains power spectrum info about the closure phases obtained as a difference between 'whole' and 'submodel'. It contains the following keys and values: 'mean' [numpy array] Delay power spectrum incoherently estimated over the axes specified in xinfo['axes'] using the 'mean' key in input cpds or attribute cPhaseDS['processed']['dspec']. It has shape that depends on the combination of input parameters. See examples below. If both collapse_axes and avgcov are not set, those axes will be replaced with square covariance matrices. If collapse_axes is provided but avgcov is False, those axes will be of shape 2*Naxis-1. 'median' [numpy array] Delay power spectrum incoherently averaged over the axes specified in incohax using the 'median' key in input cpds or attribute cPhaseDS['processed']['dspec']. It has shape that depends on the combination of input parameters. See examples below. If both collapse_axes and avgcov are not set, those axes will be replaced with square covariance matrices. If collapse_axes is provided bu avgcov is False, those axes will be of shape 2*Naxis-1. 
'diagoffsets' [dictionary] Same keys corresponding to keys under 'collapse_axes' in input containing the diagonal offsets for those axes. If 'avgcov' was set, those entries will be removed from 'diagoffsets' since all the leading diagonal elements have been collapsed (averaged) further. Value under each key is a numpy array where each element in the array corresponds to the index of that leading diagonal. This should match the size of the output along that axis in 'mean' or 'median' above. 'diagweights' [dictionary] Each key is an axis specified in collapse_axes and the value is a numpy array of weights corresponding to the diagonal offsets in that axis. 'axesmap' [dictionary] If covariance in cross-power is calculated but is not collapsed, the number of dimensions in the output will have changed. This parameter tracks where the original axis is now placed. The keys are the original axes that are involved in incoherent cross-power, and the values are the new locations of those original axes in the output. 'nsamples_incoh' [integer] Number of incoherent samples in producing the power spectrum 'nsamples_coh' [integer] Number of coherent samples in producing the power spectrum outfile [string] Full path to the external HDF5 file where the cross- power spectrum information provided in xcpdps will be saved ---------------------------------------------------------------------------- """ if not isinstance(xcpdps, dict): raise TypeError('Input xcpdps must be a dictionary') with h5py.File(outfile, 'w') as fileobj: hdrgrp = fileobj.create_group('header') hdrkeys = ['triads', 'triads_ind', 'lst', 'lst_ind', 'dlst', 'days', 'day_ind', 'dday'] for key in hdrkeys: dset = hdrgrp.create_dataset(key, data=xcpdps[key]) sampling = ['oversampled', 'resampled'] sampling_keys = ['z', 'kprll', 'lags', 'freq_center', 'bw_eff', 'shape', 'freq_wts', 'lag_corr_length'] dpool_keys = ['whole', 'submodel', 'residual', 'errinfo'] for smplng in sampling: if smplng in xcpdps: smplgrp = fileobj.create_group(smplng) for key in sampling_keys: dset = smplgrp.create_dataset(key, data=xcpdps[smplng][key]) for dpool in dpool_keys: if dpool in xcpdps[smplng]: dpoolgrp = smplgrp.create_group(dpool) keys = ['diagoffsets', 'diagweights', 'axesmap', 'nsamples_incoh', 'nsamples_coh'] for key in keys: if key in xcpdps[smplng][dpool]: if isinstance(xcpdps[smplng][dpool][key], dict): subgrp = dpoolgrp.create_group(key) for subkey in xcpdps[smplng][dpool][key]: dset = subgrp.create_dataset(str(subkey), data=xcpdps[smplng][dpool][key][subkey]) else: dset = dpoolgrp.create_dataset(key, data=xcpdps[smplng][dpool][key]) for stat in ['mean', 'median']: if stat in xcpdps[smplng][dpool]: if isinstance(xcpdps[smplng][dpool][stat], list): for ii in range(len(xcpdps[smplng][dpool][stat])): dset = dpoolgrp.create_dataset(stat+'/diagcomb_{0}'.format(ii), data=xcpdps[smplng][dpool][stat][ii].si.value) dset.attrs['units'] = str(xcpdps[smplng][dpool][stat][ii].si.unit) else: dset = dpoolgrp.create_dataset(stat, data=xcpdps[smplng][dpool][stat].si.value) dset.attrs['units'] = str(xcpdps[smplng][dpool][stat].si.unit) ################################################################################ def read_CPhase_cross_power_spectrum(infile): """ ---------------------------------------------------------------------------- Read information about cross power spectrum from an external HDF5 file into a dictionary. 
This is the counterpart to save_CPhase_corss_power_spectrum() Input: infile [string] Full path to the external HDF5 file that contains info about cross-power spectrum. Output: xcpdps [dictionary] This dictionary has structure the same as output of the member function compute_power_spectrum() of class ClosurePhaseDelaySpectrum. It has the following key-value structure: 'triads' ((ntriads,3) array), 'triads_ind', ((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst' ((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,) array), 'days' ((ndays,) array), 'day_ind' ((ndays,) array), 'dday' ((ndays,) array), 'oversampled' and 'resampled' corresponding to whether resample was set to False or True in call to member function FT(). Values under keys 'triads_ind' and 'lst_ind' are numpy array corresponding to triad and time indices used in selecting the data. Values under keys 'oversampled' and 'resampled' each contain a dictionary with the following keys and values: 'z' [numpy array] Redshifts corresponding to the band centers in 'freq_center'. It has shape=(nspw,) 'lags' [numpy array] Delays (in seconds). It has shape=(nlags,) 'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding to 'lags'. It has shape=(nspw,nlags) 'freq_center' [numpy array] contains the center frequencies (in Hz) of the frequency subbands of the subband delay spectra. It is of size n_win. It is roughly equivalent to redshift(s) 'freq_wts' [numpy array] Contains frequency weights applied on each frequency sub-band during the subband delay transform. It is of size n_win x nchan. 'bw_eff' [numpy array] contains the effective bandwidths (in Hz) of the subbands being delay transformed. It is of size n_win. It is roughly equivalent to width in redshift or along line-of-sight 'shape' [string] shape of the frequency window function applied. Usual values are 'rect' (rectangular), 'bhw' (Blackman-Harris), 'bnw' (Blackman-Nuttall). 'fftpow' [scalar] the power to which the FFT of the window was raised. The value is be a positive scalar with default = 1.0 'lag_corr_length' [numpy array] It is the correlation timescale (in pixels) of the subband delay spectra. It is proportional to inverse of effective bandwidth. It is of size n_win. The unit size of a pixel is determined by the difference between adjacent pixels in lags under key 'lags' which in turn is effectively inverse of the effective bandwidth of the subband specified in bw_eff It further contains one or more of the following keys named 'whole', 'submodel', 'residual', and 'errinfo' each of which is a dictionary. 'whole' contains power spectrum info about the input closure phases. 'submodel' contains power spectrum info about the model that will have been subtracted (as closure phase) from the 'whole' model. 'residual' contains power spectrum info about the closure phases obtained as a difference between 'whole' and 'submodel'. It contains the following keys and values: 'mean' [numpy array] Delay power spectrum incoherently estimated over the axes specified in xinfo['axes'] using the 'mean' key in input cpds or attribute cPhaseDS['processed']['dspec']. It has shape that depends on the combination of input parameters. See examples below. If both collapse_axes and avgcov are not set, those axes will be replaced with square covariance matrices. If collapse_axes is provided but avgcov is False, those axes will be of shape 2*Naxis-1. 
'median' [numpy array] Delay power spectrum incoherently averaged over the axes specified in incohax using the 'median' key in input cpds or attribute cPhaseDS['processed']['dspec']. It has shape that depends on the combination of input parameters. See examples below. If both collapse_axes and avgcov are not set, those axes will be replaced with square covariance matrices. If collapse_axes is provided bu avgcov is False, those axes will be of shape 2*Naxis-1. 'diagoffsets' [dictionary] Same keys corresponding to keys under 'collapse_axes' in input containing the diagonal offsets for those axes. If 'avgcov' was set, those entries will be removed from 'diagoffsets' since all the leading diagonal elements have been collapsed (averaged) further. Value under each key is a numpy array where each element in the array corresponds to the index of that leading diagonal. This should match the size of the output along that axis in 'mean' or 'median' above. 'diagweights' [dictionary] Each key is an axis specified in collapse_axes and the value is a numpy array of weights corresponding to the diagonal offsets in that axis. 'axesmap' [dictionary] If covariance in cross-power is calculated but is not collapsed, the number of dimensions in the output will have changed. This parameter tracks where the original axis is now placed. The keys are the original axes that are involved in incoherent cross-power, and the values are the new locations of those original axes in the output. 'nsamples_incoh' [integer] Number of incoherent samples in producing the power spectrum 'nsamples_coh' [integer] Number of coherent samples in producing the power spectrum outfile [string] Full path to the external HDF5 file where the cross- power spectrum information provided in xcpdps will be saved ---------------------------------------------------------------------------- """ if not isinstance(infile, str): raise TypeError('Input infile must be a string') xcpdps = {} with h5py.File(infile, 'r') as fileobj: hdrgrp = fileobj['header'] hdrkeys = ['triads', 'triads_ind', 'lst', 'lst_ind', 'dlst', 'days', 'day_ind', 'dday'] for key in hdrkeys: xcpdps[key] = hdrgrp[key].value sampling = ['oversampled', 'resampled'] sampling_keys = ['z', 'kprll', 'lags', 'freq_center', 'bw_eff', 'shape', 'freq_wts', 'lag_corr_length'] dpool_keys = ['whole', 'submodel', 'residual', 'errinfo'] for smplng in sampling: if smplng in fileobj: smplgrp = fileobj[smplng] xcpdps[smplng] = {} for key in sampling_keys: xcpdps[smplng][key] = smplgrp[key].value for dpool in dpool_keys: if dpool in smplgrp: xcpdps[smplng][dpool] = {} dpoolgrp = smplgrp[dpool] keys = ['diagoffsets', 'diagweights', 'axesmap', 'nsamples_incoh', 'nsamples_coh'] for key in keys: if key in dpoolgrp: if isinstance(dpoolgrp[key], h5py.Group): xcpdps[smplng][dpool][key] = {} for subkey in dpoolgrp[key]: xcpdps[smplng][dpool][key][int(subkey)] = dpoolgrp[key][subkey].value elif isinstance(dpoolgrp[key], h5py.Dataset): xcpdps[smplng][dpool][key] = dpoolgrp[key].value else: raise TypeError('Invalid h5py data type encountered') for stat in ['mean', 'median']: if stat in dpoolgrp: if isinstance(dpoolgrp[stat], h5py.Dataset): valunits = dpoolgrp[stat].attrs['units'] xcpdps[smplng][dpool][stat] = dpoolgrp[stat].value * U.Unit(valunits) elif isinstance(dpoolgrp[stat], h5py.Group): xcpdps[smplng][dpool][stat] = [] for diagcomb_ind in range(len(dpoolgrp[stat].keys())): if 'diagcomb_{0}'.format(diagcomb_ind) in dpoolgrp[stat]: valunits = dpoolgrp[stat]['diagcomb_{0}'.format(diagcomb_ind)].attrs['units'] 
xcpdps[smplng][dpool][stat] += [dpoolgrp[stat]['diagcomb_{0}'.format(diagcomb_ind)].value * U.Unit(valunits)] return xcpdps ################################################################################ def incoherent_cross_power_spectrum_average(xcpdps, excpdps=None, diagoffsets=None): """ ---------------------------------------------------------------------------- Perform incoherent averaging of cross power spectrum along specified axes Inputs: xcpdps [dictionary or list of dictionaries] If provided as a list of dictionaries, each dictionary consists of cross power spectral information coming possible from different sources, and they will be averaged be averaged incoherently. If a single dictionary is provided instead of a list of dictionaries, the said averaging does not take place. Each dictionary is essentially an output of the member function compute_power_spectrum() of class ClosurePhaseDelaySpectrum. It has the following key-value structure: 'triads' ((ntriads,3) array), 'triads_ind', ((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst' ((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,) array), 'days' ((ndays,) array), 'day_ind' ((ndays,) array), 'dday' ((ndays,) array), 'oversampled' and 'resampled' corresponding to whether resample was set to False or True in call to member function FT(). Values under keys 'triads_ind' and 'lst_ind' are numpy array corresponding to triad and time indices used in selecting the data. Values under keys 'oversampled' and 'resampled' each contain a dictionary with the following keys and values: 'z' [numpy array] Redshifts corresponding to the band centers in 'freq_center'. It has shape=(nspw,) 'lags' [numpy array] Delays (in seconds). It has shape=(nlags,) 'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding to 'lags'. It has shape=(nspw,nlags) 'freq_center' [numpy array] contains the center frequencies (in Hz) of the frequency subbands of the subband delay spectra. It is of size n_win. It is roughly equivalent to redshift(s) 'freq_wts' [numpy array] Contains frequency weights applied on each frequency sub-band during the subband delay transform. It is of size n_win x nchan. 'bw_eff' [numpy array] contains the effective bandwidths (in Hz) of the subbands being delay transformed. It is of size n_win. It is roughly equivalent to width in redshift or along line-of-sight 'shape' [string] shape of the frequency window function applied. Usual values are 'rect' (rectangular), 'bhw' (Blackman-Harris), 'bnw' (Blackman-Nuttall). 'fftpow' [scalar] the power to which the FFT of the window was raised. The value is be a positive scalar with default = 1.0 'lag_corr_length' [numpy array] It is the correlation timescale (in pixels) of the subband delay spectra. It is proportional to inverse of effective bandwidth. It is of size n_win. The unit size of a pixel is determined by the difference between adjacent pixels in lags under key 'lags' which in turn is effectively inverse of the effective bandwidth of the subband specified in bw_eff It further contains 3 keys named 'whole', 'submodel', and 'residual' each of which is a dictionary. 'whole' contains power spectrum info about the input closure phases. 'submodel' contains power spectrum info about the model that will have been subtracted (as closure phase) from the 'whole' model. 'residual' contains power spectrum info about the closure phases obtained as a difference between 'whole' and 'submodel'. 
It contains the following keys and values: 'mean' [numpy array] Delay power spectrum incoherently estimated over the axes specified in xinfo['axes'] using the 'mean' key in input cpds or attribute cPhaseDS['processed']['dspec']. It has shape that depends on the combination of input parameters. See examples below. If both collapse_axes and avgcov are not set, those axes will be replaced with square covariance matrices. If collapse_axes is provided but avgcov is False, those axes will be of shape 2*Naxis-1. 'median' [numpy array] Delay power spectrum incoherently averaged over the axes specified in incohax using the 'median' key in input cpds or attribute cPhaseDS['processed']['dspec']. It has shape that depends on the combination of input parameters. See examples below. If both collapse_axes and avgcov are not set, those axes will be replaced with square covariance matrices. If collapse_axes is provided bu avgcov is False, those axes will be of shape 2*Naxis-1. 'diagoffsets' [dictionary] Same keys corresponding to keys under 'collapse_axes' in input containing the diagonal offsets for those axes. If 'avgcov' was set, those entries will be removed from 'diagoffsets' since all the leading diagonal elements have been collapsed (averaged) further. Value under each key is a numpy array where each element in the array corresponds to the index of that leading diagonal. This should match the size of the output along that axis in 'mean' or 'median' above. 'diagweights' [dictionary] Each key is an axis specified in collapse_axes and the value is a numpy array of weights corresponding to the diagonal offsets in that axis. 'axesmap' [dictionary] If covariance in cross-power is calculated but is not collapsed, the number of dimensions in the output will have changed. This parameter tracks where the original axis is now placed. The keys are the original axes that are involved in incoherent cross-power, and the values are the new locations of those original axes in the output. 'nsamples_incoh' [integer] Number of incoherent samples in producing the power spectrum 'nsamples_coh' [integer] Number of coherent samples in producing the power spectrum excpdps [dictionary or list of dictionaries] If provided as a list of dictionaries, each dictionary consists of cross power spectral information of subsample differences coming possible from different sources, and they will be averaged be averaged incoherently. This is optional. If not set (default=None), no incoherent averaging happens. If a single dictionary is provided instead of a list of dictionaries, the said averaging does not take place. Each dictionary is essentially an output of the member function compute_power_spectrum_uncertainty() of class ClosurePhaseDelaySpectrum. It has the following key-value structure: 'triads' ((ntriads,3) array), 'triads_ind', ((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst' ((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,) array), 'days' ((ndaycomb,) array), 'day_ind' ((ndaycomb,) array), 'dday' ((ndaycomb,) array), 'oversampled' and 'resampled' corresponding to whether resample was set to False or True in call to member function FT(). Values under keys 'triads_ind' and 'lst_ind' are numpy array corresponding to triad and time indices used in selecting the data. Values under keys 'oversampled' and 'resampled' each contain a dictionary with the following keys and values: 'z' [numpy array] Redshifts corresponding to the band centers in 'freq_center'. 
It has shape=(nspw,) 'lags' [numpy array] Delays (in seconds). It has shape=(nlags,) 'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding to 'lags'. It has shape=(nspw,nlags) 'freq_center' [numpy array] contains the center frequencies (in Hz) of the frequency subbands of the subband delay spectra. It is of size n_win. It is roughly equivalent to redshift(s) 'freq_wts' [numpy array] Contains frequency weights applied on each frequency sub-band during the subband delay transform. It is of size n_win x nchan. 'bw_eff' [numpy array] contains the effective bandwidths (in Hz) of the subbands being delay transformed. It is of size n_win. It is roughly equivalent to width in redshift or along line-of-sight 'shape' [string] shape of the frequency window function applied. Usual values are 'rect' (rectangular), 'bhw' (Blackman-Harris), 'bnw' (Blackman-Nuttall). 'fftpow' [scalar] the power to which the FFT of the window was raised. The value is be a positive scalar with default = 1.0 'lag_corr_length' [numpy array] It is the correlation timescale (in pixels) of the subband delay spectra. It is proportional to inverse of effective bandwidth. It is of size n_win. The unit size of a pixel is determined by the difference between adjacent pixels in lags under key 'lags' which in turn is effectively inverse of the effective bandwidth of the subband specified in bw_eff It further contains a key named 'errinfo' which is a dictionary. It contains information about power spectrum uncertainties obtained from subsample differences. It contains the following keys and values: 'mean' [numpy array] Delay power spectrum uncertainties incoherently estimated over the axes specified in xinfo['axes'] using the 'mean' key in input cpds or attribute cPhaseDS['errinfo']['dspec']. It has shape that depends on the combination of input parameters. See examples below. If both collapse_axes and avgcov are not set, those axes will be replaced with square covariance matrices. If collapse_axes is provided but avgcov is False, those axes will be of shape 2*Naxis-1. 'median' [numpy array] Delay power spectrum uncertainties incoherently averaged over the axes specified in incohax using the 'median' key in input cpds or attribute cPhaseDS['errinfo']['dspec']. It has shape that depends on the combination of input parameters. See examples below. If both collapse_axes and avgcov are not set, those axes will be replaced with square covariance matrices. If collapse_axes is provided but avgcov is False, those axes will be of shape 2*Naxis-1. 'diagoffsets' [dictionary] Same keys corresponding to keys under 'collapse_axes' in input containing the diagonal offsets for those axes. If 'avgcov' was set, those entries will be removed from 'diagoffsets' since all the leading diagonal elements have been collapsed (averaged) further. Value under each key is a numpy array where each element in the array corresponds to the index of that leading diagonal. This should match the size of the output along that axis in 'mean' or 'median' above. 'diagweights' [dictionary] Each key is an axis specified in collapse_axes and the value is a numpy array of weights corresponding to the diagonal offsets in that axis. 'axesmap' [dictionary] If covariance in cross-power is calculated but is not collapsed, the number of dimensions in the output will have changed. This parameter tracks where the original axis is now placed. 
The keys are the original axes that are involved in incoherent cross-power, and the values are the new locations of those original axes in the output. 'nsamples_incoh' [integer] Number of incoherent samples in producing the power spectrum 'nsamples_coh' [integer] Number of coherent samples in producing the power spectrum diagoffsets [NoneType or dictionary or list of dictionaries] This info is used for incoherent averaging along specified diagonals of specified axes. This incoherent averaging is performed after incoherently averaging multiple cross-power spectra (if any). If set to None, this incoherent averaging is not performed. Many combinations of axes and diagonals can be specified as individual dictionaries in a list. If only one dictionary is specified, then it is assumed that only one combination of axes and diagonals is requested. If a list of dictionaries is given, each dictionary in the list specifies a different combination for incoherent averaging. Each dictionary should have the following key-value pairs. The key is the axis number (allowed values are 1, 2, 3) that denotes the axis type (1=LST, 2=Days, 3=Triads to be averaged), and the value under these keys is a list or numpy array of diagonals to be averaged incoherently. These axes-diagonal combinations apply to both the inputs xcpdps and excpdps, except axis=2 does not apply to excpdps (since it is made of subsample differences already) and will be skipped. Outputs: A tuple consisting of two dictionaries. The first dictionary contains the incoherent averaging of xcpdps as specified by the inputs, while the second consists of the incoherent averaging of excpdps as specified by the inputs. The structure of these dictionaries is practically the same as the dictionary inputs xcpdps and excpdps respectively. The only differences in dictionary structure are: * Under key ['oversampled'/'resampled']['whole'/'submodel'/'residual'/'errinfo']['mean'/'median'] is a list of numpy arrays, where each array in the list corresponds to the dictionary in the input diagoffsets list that defines that axes-diagonal combination.
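Illustrative usage (a minimal sketch; the file lists, axis numbers, and diagonal offsets below are placeholders and must be adapted to what is actually present in the data):

    xcpdps_list = [read_CPhase_cross_power_spectrum(fname) for fname in xcpdps_files]
    excpdps_list = [read_CPhase_cross_power_spectrum(fname) for fname in excpdps_files]
    # Average incoherently over LST (axis 1) diagonal offsets 0-3 and the zeroth triad (axis 3) diagonal
    diagoffsets = [{1: NP.arange(4), 3: NP.asarray([0])}]
    avg_xcpdps, avg_excpdps = incoherent_cross_power_spectrum_average(xcpdps_list, excpdps=excpdps_list, diagoffsets=diagoffsets)

Here, NP refers to numpy as imported in this module, and xcpdps_files / excpdps_files are hypothetical lists (of equal length) of HDF5 files written earlier with save_CPhase_cross_power_spectrum().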
---------------------------------------------------------------------------- """ if isinstance(xcpdps, dict): xcpdps = [xcpdps] if not isinstance(xcpdps, list): raise TypeError('Invalid data type provided for input xcpdps') if excpdps is not None: if isinstance(excpdps, dict): excpdps = [excpdps] if not isinstance(excpdps, list): raise TypeError('Invalid data type provided for input excpdps') if len(xcpdps) != len(excpdps): raise ValueError('Inputs xcpdps and excpdps found to have unequal number of values') out_xcpdps = {'triads': xcpdps[0]['triads'], 'triads_ind': xcpdps[0]['triads_ind'], 'lst': xcpdps[0]['lst'], 'lst_ind': xcpdps[0]['lst_ind'], 'dlst': xcpdps[0]['dlst'], 'days': xcpdps[0]['days'], 'day_ind': xcpdps[0]['day_ind'], 'dday': xcpdps[0]['dday']} out_excpdps = None if excpdps is not None: out_excpdps = {'triads': excpdps[0]['triads'], 'triads_ind': excpdps[0]['triads_ind'], 'lst': excpdps[0]['lst'], 'lst_ind': excpdps[0]['lst_ind'], 'dlst': excpdps[0]['dlst'], 'days': excpdps[0]['days'], 'day_ind': excpdps[0]['day_ind'], 'dday': excpdps[0]['dday']} for smplng in ['oversampled', 'resampled']: if smplng in xcpdps[0]: out_xcpdps[smplng] = {'z': xcpdps[0][smplng]['z'], 'kprll': xcpdps[0][smplng]['kprll'], 'lags': xcpdps[0][smplng]['lags'], 'freq_center': xcpdps[0][smplng]['freq_center'], 'bw_eff': xcpdps[0][smplng]['bw_eff'], 'shape': xcpdps[0][smplng]['shape'], 'freq_wts': xcpdps[0][smplng]['freq_wts'], 'lag_corr_length': xcpdps[0][smplng]['lag_corr_length']} if excpdps is not None: out_excpdps[smplng] = {'z': excpdps[0][smplng]['z'], 'kprll': excpdps[0][smplng]['kprll'], 'lags': excpdps[0][smplng]['lags'], 'freq_center': excpdps[0][smplng]['freq_center'], 'bw_eff': excpdps[0][smplng]['bw_eff'], 'shape': excpdps[0][smplng]['shape'], 'freq_wts': excpdps[0][smplng]['freq_wts'], 'lag_corr_length': excpdps[0][smplng]['lag_corr_length']} for dpool in ['whole', 'submodel', 'residual']: if dpool in xcpdps[0][smplng]: out_xcpdps[smplng][dpool] = {'diagoffsets': xcpdps[0][smplng][dpool]['diagoffsets'], 'axesmap': xcpdps[0][smplng][dpool]['axesmap']} for stat in ['mean', 'median']: if stat in xcpdps[0][smplng][dpool]: out_xcpdps[smplng][dpool][stat] = {} arr = [] diagweights = [] for i in range(len(xcpdps)): arr += [xcpdps[i][smplng][dpool][stat].si.value] arr_units = xcpdps[i][smplng][dpool][stat].si.unit if isinstance(xcpdps[i][smplng][dpool]['diagweights'], dict): diagwts = 1.0 diagwts_shape = NP.ones(xcpdps[i][smplng][dpool][stat].ndim, dtype=NP.int) for ax in xcpdps[i][smplng][dpool]['diagweights']: tmp_shape = NP.copy(diagwts_shape) tmp_shape[xcpdps[i][smplng][dpool]['axesmap'][ax]] = xcpdps[i][smplng][dpool]['diagweights'][ax].size diagwts = diagwts * xcpdps[i][smplng][dpool]['diagweights'][ax].reshape(tuple(tmp_shape)) elif isinstance(xcpdps[i][smplng][dpool]['diagweights'], NP.ndarray): diagwts = NP.copy(xcpdps[i][smplng][dpool]['diagweights']) else: raise TypeError('Diagonal weights in input must be a dictionary or a numpy array') diagweights += [diagwts] diagweights = NP.asarray(diagweights) arr = NP.asarray(arr) arr = NP.nansum(arr * diagweights, axis=0) / NP.nansum(diagweights, axis=0) * arr_units diagweights = NP.nansum(diagweights, axis=0) out_xcpdps[smplng][dpool][stat] = arr out_xcpdps[smplng][dpool]['diagweights'] = diagweights for dpool in ['errinfo']: if dpool in excpdps[0][smplng]: out_excpdps[smplng][dpool] = {'diagoffsets': excpdps[0][smplng][dpool]['diagoffsets'], 'axesmap': excpdps[0][smplng][dpool]['axesmap']} for stat in ['mean', 'median']: if stat in 
excpdps[0][smplng][dpool]: out_excpdps[smplng][dpool][stat] = {} arr = [] diagweights = [] for i in range(len(excpdps)): arr += [excpdps[i][smplng][dpool][stat].si.value] arr_units = excpdps[i][smplng][dpool][stat].si.unit if isinstance(excpdps[i][smplng][dpool]['diagweights'], dict): diagwts = 1.0 diagwts_shape = NP.ones(excpdps[i][smplng][dpool][stat].ndim, dtype=NP.int) for ax in excpdps[i][smplng][dpool]['diagweights']: tmp_shape = NP.copy(diagwts_shape) tmp_shape[excpdps[i][smplng][dpool]['axesmap'][ax]] = excpdps[i][smplng][dpool]['diagweights'][ax].size diagwts = diagwts * excpdps[i][smplng][dpool]['diagweights'][ax].reshape(tuple(tmp_shape)) elif isinstance(excpdps[i][smplng][dpool]['diagweights'], NP.ndarray): diagwts = NP.copy(excpdps[i][smplng][dpool]['diagweights']) else: raise TypeError('Diagonal weights in input must be a dictionary or a numpy array') diagweights += [diagwts] diagweights = NP.asarray(diagweights) arr = NP.asarray(arr) arr = NP.nansum(arr * diagweights, axis=0) / NP.nansum(diagweights, axis=0) * arr_units diagweights = NP.nansum(diagweights, axis=0) out_excpdps[smplng][dpool][stat] = arr out_excpdps[smplng][dpool]['diagweights'] = diagweights if diagoffsets is not None: if isinstance(diagoffsets, dict): diagoffsets = [diagoffsets] if not isinstance(diagoffsets, list): raise TypeError('Input diagoffsets must be a list of dictionaries') for ind in range(len(diagoffsets)): for ax in diagoffsets[ind]: if not isinstance(diagoffsets[ind][ax], (list, NP.ndarray)): raise TypeError('Values in input dictionary diagoffsets must be a list or numpy array') diagoffsets[ind][ax] = NP.asarray(diagoffsets[ind][ax]) for smplng in ['oversampled', 'resampled']: if smplng in out_xcpdps: for dpool in ['whole', 'submodel', 'residual']: if dpool in out_xcpdps[smplng]: masks = [] for ind in range(len(diagoffsets)): mask_ones = NP.ones(out_xcpdps[smplng][dpool]['diagweights'].shape, dtype=NP.bool) mask_agg = None for ax in diagoffsets[ind]: mltdim_slice = [slice(None)] * mask_ones.ndim mltdim_slice[out_xcpdps[smplng][dpool]['axesmap'][ax].squeeze()] = NP.where(NP.isin(out_xcpdps[smplng][dpool]['diagoffsets'][ax], diagoffsets[ind][ax]))[0] mask_tmp = NP.copy(mask_ones) mask_tmp[tuple(mltdim_slice)] = False if mask_agg is None: mask_agg = NP.copy(mask_tmp) else: mask_agg = NP.logical_or(mask_agg, mask_tmp) masks += [NP.copy(mask_agg)] diagwts = NP.copy(out_xcpdps[smplng][dpool]['diagweights']) out_xcpdps[smplng][dpool]['diagweights'] = [] for stat in ['mean', 'median']: if stat in out_xcpdps[smplng][dpool]: arr = NP.copy(out_xcpdps[smplng][dpool][stat].si.value) arr_units = out_xcpdps[smplng][dpool][stat].si.unit out_xcpdps[smplng][dpool][stat] = [] for ind in range(len(diagoffsets)): masked_diagwts = MA.array(diagwts, mask=masks[ind]) axes_to_avg = tuple([out_xcpdps[smplng][dpool]['axesmap'][ax][0] for ax in diagoffsets[ind]]) out_xcpdps[smplng][dpool][stat] += [MA.sum(arr * masked_diagwts, axis=axes_to_avg, keepdims=True) / MA.sum(masked_diagwts, axis=axes_to_avg, keepdims=True) * arr_units] if len(out_xcpdps[smplng][dpool]['diagweights']) < len(diagoffsets): out_xcpdps[smplng][dpool]['diagweights'] += [MA.sum(masked_diagwts, axis=axes_to_avg, keepdims=True)] if excpdps is not None: for smplng in ['oversampled', 'resampled']: if smplng in out_excpdps: for dpool in ['errinfo']: if dpool in out_excpdps[smplng]: masks = [] for ind in range(len(diagoffsets)): mask_ones = NP.ones(out_excpdps[smplng][dpool]['diagweights'].shape, dtype=NP.bool) mask_agg = None for ax in diagoffsets[ind]: if 
ax != 2: mltdim_slice = [slice(None)] * mask_ones.ndim mltdim_slice[out_excpdps[smplng][dpool]['axesmap'][ax].squeeze()] = NP.where(NP.isin(out_excpdps[smplng][dpool]['diagoffsets'][ax], diagoffsets[ind][ax]))[0] mask_tmp = NP.copy(mask_ones) mask_tmp[tuple(mltdim_slice)] = False if mask_agg is None: mask_agg = NP.copy(mask_tmp) else: mask_agg = NP.logical_or(mask_agg, mask_tmp) masks += [NP.copy(mask_agg)] diagwts = NP.copy(out_excpdps[smplng][dpool]['diagweights']) out_excpdps[smplng][dpool]['diagweights'] = [] for stat in ['mean', 'median']: if stat in out_excpdps[smplng][dpool]: arr = NP.copy(out_excpdps[smplng][dpool][stat].si.value) arr_units = out_excpdps[smplng][dpool][stat].si.unit out_excpdps[smplng][dpool][stat] = [] for ind in range(len(diagoffsets)): masked_diagwts = MA.array(diagwts, mask=masks[ind]) axes_to_avg = tuple([out_excpdps[smplng][dpool]['axesmap'][ax][0] for ax in diagoffsets[ind] if ax!=2]) out_excpdps[smplng][dpool][stat] += [MA.sum(arr * masked_diagwts, axis=axes_to_avg, keepdims=True) / MA.sum(masked_diagwts, axis=axes_to_avg, keepdims=True) * arr_units] if len(out_excpdps[smplng][dpool]['diagweights']) < len(diagoffsets): out_excpdps[smplng][dpool]['diagweights'] += [MA.sum(masked_diagwts, axis=axes_to_avg, keepdims=True)] return (out_xcpdps, out_excpdps) ################################################################################ def incoherent_kbin_averaging(xcpdps, kbins=None, num_kbins=None, kbintype='log'): """ ---------------------------------------------------------------------------- Averages the power spectrum incoherently by binning in bins of k. Returns the power spectrum in units of both standard power spectrum and \Delta^2 Inputs: xcpdps [dictionary] A dictionary that contains the incoherent averaged power spectrum along LST and/or triads axes. This dictionary is essentially the one(s) returned as the output of the function incoherent_cross_power_spectrum_average() kbins [NoneType, list or numpy array] Bins in k. If set to None (default), it will be determined automatically based on the inputs in num_kbins, and kbintype. If num_kbins is None and kbintype='linear', the negative and positive values of k are folded into a one-sided power spectrum. In this case, the bins will approximately have the same resolution as the k-values in the input power spectrum for all the spectral windows. num_kbins [NoneType or integer] Number of k-bins. Used only if kbins is set to None. If kbintype is set to 'linear', the negative and positive values of k are folded into a one-sided power spectrum. In this case, the bins will approximately have the same resolution as the k-values in the input power spectrum for all the spectral windows. kbintype [string] Specifies the type of binning, used only if kbins is set to None. Accepted values are 'linear' and 'log' for linear and logarithmic bins respectively. Outputs: Dictionary containing the power spectrum information. At the top level, it contains keys specifying the sampling to be 'oversampled' or 'resampled'. Under each of these keys is another dictionary containing the following keys: 'z' [numpy array] Redshifts corresponding to the band centers in 'freq_center'. It has shape=(nspw,) 'lags' [numpy array] Delays (in seconds). It has shape=(nlags,). 'freq_center' [numpy array] contains the center frequencies (in Hz) of the frequency subbands of the subband delay spectra. It is of size n_win. 
It is roughly equivalent to redshift(s) 'freq_wts' [numpy array] Contains frequency weights applied on each frequency sub-band during the subband delay transform. It is of size n_win x nchan. 'bw_eff' [numpy array] contains the effective bandwidths (in Hz) of the subbands being delay transformed. It is of size n_win. It is roughly equivalent to width in redshift or along line-of-sight 'shape' [string] shape of the frequency window function applied. Usual values are 'rect' (rectangular), 'bhw' (Blackman-Harris), 'bnw' (Blackman-Nuttall). 'fftpow' [scalar] the power to which the FFT of the window was raised. The value is be a positive scalar with default = 1.0 'lag_corr_length' [numpy array] It is the correlation timescale (in pixels) of the subband delay spectra. It is proportional to inverse of effective bandwidth. It is of size n_win. The unit size of a pixel is determined by the difference between adjacent pixels in lags under key 'lags' which in turn is effectively inverse of the effective bandwidth of the subband specified in bw_eff It further contains 3 keys named 'whole', 'submodel', and 'residual' or one key named 'errinfo' each of which is a dictionary. 'whole' contains power spectrum info about the input closure phases. 'submodel' contains power spectrum info about the model that will have been subtracted (as closure phase) from the 'whole' model. 'residual' contains power spectrum info about the closure phases obtained as a difference between 'whole' and 'submodel'. 'errinfo' contains power spectrum information about the subsample differences. There is also another dictionary under key 'kbininfo' that contains information about k-bins. These dictionaries contain the following keys and values: 'whole'/'submodel'/'residual'/'errinfo' [dictionary] It contains the following keys and values: 'mean' [dictionary] Delay power spectrum information under the 'mean' statistic incoherently obtained by averaging the input power spectrum in bins of k. It contains output power spectrum expressed as two quantities each of which is a dictionary with the following key-value pairs: 'PS' [list of numpy arrays] Standard power spectrum in units of 'K2 Mpc3'. Each numpy array in the list maps to a specific combination of axes and axis diagonals chosen for incoherent averaging in earlier processing such as in the function incoherent_cross_power_spectrum_average(). The numpy array has a shape similar to the input power spectrum, but that last axis (k-axis) will have a different size that depends on the k-bins that were used in the incoherent averaging along that axis. 'Del2' [list of numpy arrays] power spectrum in Delta^2 units of 'K2'. Each numpy array in the list maps to a specific combination of axes and axis diagonals chosen for incoherent averaging in earlier processing such as in the function incoherent_cross_power_spectrum_average(). The numpy array has a shape similar to the input power spectrum, but that last axis (k-axis) will have a different size that depends on the k-bins that were used in the incoherent averaging along that axis. 'median' [dictionary] Delay power spectrum information under the 'median' statistic incoherently obtained by averaging the input power spectrum in bins of k. It contains output power spectrum expressed as two quantities each of which is a dictionary with the following key-value pairs: 'PS' [list of numpy arrays] Standard power spectrum in units of 'K2 Mpc3'. 
Each numpy array in the list maps to a specific combination of axes and axis diagonals chosen for incoherent averaging in earlier processing such as in the function incoherent_cross_power_spectrum_average(). The numpy array has a shape similar to the input power spectrum, but that last axis (k-axis) will have a different size that depends on the k-bins that were used in the incoherent averaging along that axis. 'Del2' [list of numpy arrays] power spectrum in Delta^2 units of 'K2'. Each numpy array in the list maps to a specific combination of axes and axis diagonals chosen for incoherent averaging in earlier processing such as in the function incoherent_cross_power_spectrum_average(). The numpy array has a shape similar to the input power spectrum, but that last axis (k-axis) will have a different size that depends on the k-bins that were used in the incoherent averaging along that axis. 'kbininfo' [dictionary] Contains the k-bin information. It contains the following key-value pairs: 'counts' [list] List of numpy arrays where each numpy array in the stores the counts in the determined k-bins. Each numpy array in the list corresponds to a spectral window (redshift subband). The shape of each numpy array is (nkbins,) 'kbin_edges' [list] List of numpy arrays where each numpy array contains the k-bin edges. Each array in the list corresponds to a spectral window (redshift subband). The shape of each array is (nkbins+1,). 'kbinnum' [list] List of numpy arrays containing the bin number under which the k value falls. Each array in the list corresponds to a spectral window (redshift subband). The shape of each array is (nlags,). 'ri' [list] List of numpy arrays containing the reverse indices for each k-bin. Each array in the list corresponds to a spectral window (redshift subband). The shape of each array is (nlags+nkbins+1,). 'whole'/'submodel'/'residual' or 'errinfo' [dictionary] k-bin info estimated for the different datapools under different stats and PS definitions. It has the keys 'mean' and 'median' for the mean and median statistic respectively. Each of them contain a dictionary with the following key-value pairs: 'PS' [list] List of numpy arrays where each numpy array contains a standard power spectrum typically in units of 'K2 Mpc3'. Its shape is the same as input power spectrum except the k-axis which now has nkbins number of elements. 'Del2' [list] List of numpy arrays where each numpy array contains a Delta^2 power spectrum typically in units of 'K2'. Its shape is the same as input power spectrum except the k-axis which now has nkbins number of elements. 
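Illustrative usage (a minimal sketch; the number of k-bins and the 'resampled'/'whole' keys below are placeholders that depend on what is present in the input dictionary):

    pspec = incoherent_kbin_averaging(avg_xcpdps, kbins=None, num_kbins=12, kbintype='log')
    ps_k = pspec['resampled']['whole']['mean']['PS'][0]            # standard PS (K2 Mpc3), first axes-diagonal combination
    del2_k = pspec['resampled']['whole']['mean']['Del2'][0]        # Delta^2 (K2), first axes-diagonal combination
    kbin_edges = pspec['resampled']['kbininfo']['kbin_edges'][0]   # k-bin edges for the first spectral window

Here, avg_xcpdps is assumed to be an output of incoherent_cross_power_spectrum_average(), and the [0] indexing picks the first axes-diagonal combination chosen in that earlier averaging step.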
---------------------------------------------------------------------------- """ if not isinstance(xcpdps, dict): raise TypeError('Input xcpdps must be a dictionary') if kbins is not None: if not isinstance(kbins, (list,NP.ndarray)): raise TypeError('Input kbins must be a list or numpy array') else: if not isinstance(kbintype, str): raise TypeError('Input kbintype must be a string') if kbintype.lower() not in ['linear', 'log']: raise ValueError('Input kbintype must be set to "linear" or "log"') if kbintype.lower() == 'log': if num_kbins is None: num_kbins = 10 psinfo = {} keys = ['triads', 'triads_ind', 'lst', 'lst_ind', 'dlst', 'days', 'day_ind', 'dday'] for key in keys: psinfo[key] = xcpdps[key] sampling = ['oversampled', 'resampled'] sampling_keys = ['z', 'freq_center', 'bw_eff', 'shape', 'freq_wts', 'lag_corr_length'] dpool_keys = ['whole', 'submodel', 'residual', 'errinfo'] for smplng in sampling: if smplng in xcpdps: psinfo[smplng] = {} for key in sampling_keys: psinfo[smplng][key] = xcpdps[smplng][key] kprll = xcpdps[smplng]['kprll'] lags = xcpdps[smplng]['lags'] eps = 1e-10 if kbins is None: dkprll = NP.max(NP.mean(NP.diff(kprll, axis=-1), axis=-1)) if kbintype.lower() == 'linear': bins_kprll = NP.linspace(eps, NP.abs(kprll).max()+eps, num=kprll.shape[1]/2+1, endpoint=True) else: bins_kprll = NP.geomspace(eps, NP.abs(kprll).max()+eps, num=num_kbins+1, endpoint=True) bins_kprll = NP.insert(bins_kprll, 0, -eps) else: bins_kprll = NP.asarray(kbins) num_kbins = bins_kprll.size - 1 psinfo[smplng]['kbininfo'] = {'counts': [], 'kbin_edges': [], 'kbinnum': [], 'ri': []} for spw in range(kprll.shape[0]): counts, kbin_edges, kbinnum, ri = OPS.binned_statistic(NP.abs(kprll[spw,:]), statistic='count', bins=bins_kprll) counts = counts.astype(NP.int) psinfo[smplng]['kbininfo']['counts'] += [NP.copy(counts)] psinfo[smplng]['kbininfo']['kbin_edges'] += [kbin_edges / U.Mpc] psinfo[smplng]['kbininfo']['kbinnum'] += [NP.copy(kbinnum)] psinfo[smplng]['kbininfo']['ri'] += [NP.copy(ri)] for dpool in dpool_keys: if dpool in xcpdps[smplng]: psinfo[smplng][dpool] = {} psinfo[smplng]['kbininfo'][dpool] = {} keys = ['diagoffsets', 'diagweights', 'axesmap'] for key in keys: psinfo[smplng][dpool][key] = xcpdps[smplng][dpool][key] for stat in ['mean', 'median']: if stat in xcpdps[smplng][dpool]: psinfo[smplng][dpool][stat] = {'PS': [], 'Del2': []} psinfo[smplng]['kbininfo'][dpool][stat] = [] for combi in range(len(xcpdps[smplng][dpool][stat])): outshape = NP.asarray(xcpdps[smplng][dpool][stat][combi].shape) outshape[-1] = num_kbins tmp_dps = NP.full(tuple(outshape), NP.nan, dtype=NP.complex) * U.Unit(xcpdps[smplng][dpool][stat][combi].unit) tmp_Del2 = NP.full(tuple(outshape), NP.nan, dtype=NP.complex) * U.Unit(xcpdps[smplng][dpool][stat][combi].unit / U.Mpc**3) tmp_kprll = NP.full(tuple(outshape), NP.nan, dtype=NP.float) / U.Mpc for spw in range(kprll.shape[0]): counts = NP.copy(psinfo[smplng]['kbininfo']['counts'][spw]) ri = NP.copy(psinfo[smplng]['kbininfo']['ri'][spw]) print('Processing datapool={0}, stat={1}, LST-Day-Triad combination={2:0d}, spw={3:0d}...'.format(dpool, stat, combi, spw)) progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(marker='-', left=' |', right='| '), PGB.Counter(), '/{0:0d} k-bins '.format(num_kbins), PGB.ETA()], maxval=num_kbins).start() for binnum in range(num_kbins): if counts[binnum] > 0: ind_kbin = ri[ri[binnum]:ri[binnum+1]] tmp_dps[spw,...,binnum] = NP.nanmean(NP.take(xcpdps[smplng][dpool][stat][combi][spw], ind_kbin, axis=-1), axis=-1) k_shape = 
NP.ones(NP.take(xcpdps[smplng][dpool][stat][combi][spw], ind_kbin, axis=-1).ndim, dtype=NP.int) k_shape[-1] = -1 tmp_Del2[spw,...,binnum] = NP.nanmean(NP.abs(kprll[spw,ind_kbin].reshape(tuple(k_shape))/U.Mpc)**3 * NP.take(xcpdps[smplng][dpool][stat][combi][spw], ind_kbin, axis=-1), axis=-1) / (2*NP.pi**2) tmp_kprll[spw,...,binnum] = NP.nansum(NP.abs(kprll[spw,ind_kbin].reshape(tuple(k_shape))/U.Mpc) * NP.abs(NP.take(xcpdps[smplng][dpool][stat][combi][spw], ind_kbin, axis=-1)), axis=-1) / NP.nansum(NP.abs(NP.take(xcpdps[smplng][dpool][stat][combi][spw], ind_kbin, axis=-1)), axis=-1) progress.update(binnum+1) progress.finish() psinfo[smplng][dpool][stat]['PS'] += [copy.deepcopy(tmp_dps)] psinfo[smplng][dpool][stat]['Del2'] += [copy.deepcopy(tmp_Del2)] psinfo[smplng]['kbininfo'][dpool][stat] += [copy.deepcopy(tmp_kprll)] return psinfo ################################################################################ class ClosurePhase(object): """ ---------------------------------------------------------------------------- Class to hold and operate on Closure Phase information. It has the following attributes and member functions. Attributes: extfile [string] Full path to external file containing information of ClosurePhase instance. The file is in HDF5 format cpinfo [dictionary] Contains the following top level keys, namely, 'raw', 'processed', and 'errinfo' Under key 'raw' which holds a dictionary, the subkeys include 'cphase' (nlst,ndays,ntriads,nchan), 'triads' (ntriads,3), 'lst' (nlst,ndays), and 'flags' (nlst,ndays,ntriads,nchan). Under the 'processed' key are more subkeys, namely, 'native', 'prelim', and optionally 'submodel' and 'residual' each holding a dictionary. Under 'native' dictionary, the subsubkeys for further dictionaries are 'cphase' (masked array: (nlst,ndays,ntriads,nchan)), 'eicp' (complex masked array: (nlst,ndays,ntriads,nchan)), and 'wts' (masked array: (nlst,ndays,ntriads,nchan)). Under 'prelim' dictionary, the subsubkeys for further dictionaries are 'tbins' (numpy array of tbin centers after smoothing), 'dtbins' (numpy array of tbin intervals), 'wts' (masked array: (ntbins,ndays,ntriads,nchan)), 'eicp' and 'cphase'. The dictionaries under 'eicp' are indexed by keys 'mean' (complex masked array: (ntbins,ndays,ntriads,nchan)), and 'median' (complex masked array: (ntbins,ndays,ntriads,nchan)). The dictionaries under 'cphase' are indexed by keys 'mean' (masked array: (ntbins,ndays,ntriads,nchan)), 'median' (masked array: (ntbins,ndays,ntriads,nchan)), 'rms' (masked array: (ntbins,ndays,ntriads,nchan)), and 'mad' (masked array: (ntbins,ndays,ntriads,nchan)). The last one denotes Median Absolute Deviation. Under 'submodel' dictionary, the subsubkeys for further dictionaries are 'cphase' (masked array: (nlst,ndays,ntriads,nchan)), and 'eicp' (complex masked array: (nlst,ndays,ntriads,nchan)). Under 'residual' dictionary, the subsubkeys for further dictionaries are 'cphase' and 'eicp'. These are dictionaries too. The dictionaries under 'eicp' are indexed by keys 'mean' (complex masked array: (ntbins,ndays,ntriads,nchan)), and 'median' (complex masked array: (ntbins,ndays,ntriads,nchan)). The dictionaries under 'cphase' are indexed by keys 'mean' (masked array: (ntbins,ndays,ntriads,nchan)), and 'median' (masked array: (ntbins,ndays,ntriads,nchan)). Under key 'errinfo', it contains the following keys and values: 'list_of_pair_of_pairs' List of pair of pairs for which differences of complex exponentials have been computed, where the elements are bins of days. 
The number of elements in the list is ncomb. And each element is a smaller (4-element) list of pair of pairs 'eicp_diff' Difference of complex exponentials between pairs of day bins. This will be used in evaluating noise properties in power spectrum. It is a dictionary with two keys '0' and '1' where each contains the difference from a pair of subsamples. Each of these keys contains a numpy array of shape (nlstbins,ncomb,2,ntriads,nchan) 'wts' Weights in difference of complex exponentials obtained by sum of squares of weights that are associated with the pair that was used in the differencing. It is a dictionary with two keys '0' and '1' where each contains the weights associated It is of shape (nlstbins,ncomb,2,ntriads,nchan) Member functions: __init__() Initialize an instance of class ClosurePhase expicp() Compute and return complex exponential of the closure phase as a masked array smooth_in_tbins() Smooth the complex exponentials of closure phases in LST bins. Both mean and median smoothing is produced. subtract() Subtract complex exponential of the bispectrum phase from the current instance and updates the cpinfo attribute subsample_differencing() Create subsamples and differences between subsamples to evaluate noise properties from the data set. save() Save contents of attribute cpinfo in external HDF5 file ---------------------------------------------------------------------------- """ def __init__(self, infile, freqs, infmt='npz'): """ ------------------------------------------------------------------------ Initialize an instance of class ClosurePhase Inputs: infile [string] Input file including full path. It could be a NPZ with raw data, or a HDF5 file that could contain raw or processed data. The input file format is specified in the input infmt. If it is a NPZ file, it must contain the following keys/files: 'closures' [numpy array] Closure phase (radians). It is of shape (nlst,ndays,ntriads,nchan) 'triads' [numpy array] Array of triad tuples, of shape (ntriads,3) 'flags' [numpy array] Array of flags (boolean), of shape (nlst,ndays,ntriads,nchan) 'last' [numpy array] Array of LST for each day (CASA units which is MJD+6713). Shape is (nlst,ndays) 'days' [numpy array] Array of days, shape is (ndays,) 'averaged_closures' [numpy array] optional array of closure phases averaged across days. Shape is (nlst,ntriads,nchan) 'std_dev_lst' [numpy array] optional array of standard deviation of closure phases across days. Shape is (nlst,ntriads,nchan) 'std_dev_triads' [numpy array] optional array of standard deviation of closure phases across triads. Shape is (nlst,ndays,nchan) freqs [numpy array] Frequencies (in Hz) in the input. Size is nchan. infmt [string] Input file format. Accepted values are 'npz' (default) and 'hdf5'. 
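Illustrative usage (a minimal sketch; the NPZ path, the 1024-channel frequency array, and the binning parameters are placeholders to be adapted to the actual data):

    freqs = NP.linspace(100e6, 200e6, 1024, endpoint=False)   # in Hz; size must equal nchan of the closure phase data
    cpObj = ClosurePhase('/path/to/closure_phases.npz', freqs, infmt='npz')
    cpObj.smooth_in_tbins(ndaybins=4, lstbinsize=60.0)        # 4 day bins, 60-second LST bins
    cpObj.subtract(NP.zeros(freqs.size))                      # subtract a (here trivial, all-zero) model bispectrum phase
    cpObj.subsample_differencing(ndaybins=4, lstbinsize=60.0)

Here, NP refers to numpy as imported in this module.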
------------------------------------------------------------------------ """ if not isinstance(infile, str): raise TypeError('Input infile must be a string') if not isinstance(freqs, NP.ndarray): raise TypeError('Input freqs must be a numpy array') freqs = freqs.ravel() if not isinstance(infmt, str): raise TypeError('Input infmt must be a string') if infmt.lower() not in ['npz', 'hdf5']: raise ValueError('Input infmt must be "npz" or "hdf5"') if infmt.lower() == 'npz': infilesplit = infile.split('.npz') infile_noext = infilesplit[0] self.cpinfo = loadnpz(infile) # npz2hdf5(infile, infile_noext+'.hdf5') self.extfile = infile_noext + '.hdf5' else: # if not isinstance(infile, h5py.File): # raise TypeError('Input infile is not a valid HDF5 file') self.extfile = infile self.cpinfo = NMO.load_dict_from_hdf5(self.extfile) if freqs.size != self.cpinfo['raw']['cphase'].shape[-1]: raise ValueError('Input frequencies do not match with dimensions of the closure phase data') self.f = freqs self.df = freqs[1] - freqs[0] force_expicp = False if 'processed' not in self.cpinfo: force_expicp = True else: if 'native' not in self.cpinfo['processed']: force_expicp = True self.expicp(force_action=force_expicp) if 'prelim' not in self.cpinfo['processed']: self.cpinfo['processed']['prelim'] = {} self.cpinfo['errinfo'] = {} ############################################################################ def expicp(self, force_action=False): """ ------------------------------------------------------------------------ Compute the complex exponential of the closure phase as a masked array Inputs: force_action [boolean] If set to False (default), the complex exponential is computed only if it has not been done so already. Otherwise the computation is forced. ------------------------------------------------------------------------ """ if 'processed' not in self.cpinfo: self.cpinfo['processed'] = {} force_action = True if 'native' not in self.cpinfo['processed']: self.cpinfo['processed']['native'] = {} force_action = True if 'cphase' not in self.cpinfo['processed']['native']: self.cpinfo['processed']['native']['cphase'] = MA.array(self.cpinfo['raw']['cphase'].astype(NP.float64), mask=self.cpinfo['raw']['flags']) force_action = True if not force_action: if 'eicp' not in self.cpinfo['processed']['native']: self.cpinfo['processed']['native']['eicp'] = NP.exp(1j * self.cpinfo['processed']['native']['cphase']) self.cpinfo['processed']['native']['wts'] = MA.array(NP.logical_not(self.cpinfo['raw']['flags']).astype(NP.float), mask=self.cpinfo['raw']['flags']) else: self.cpinfo['processed']['native']['eicp'] = NP.exp(1j * self.cpinfo['processed']['native']['cphase']) self.cpinfo['processed']['native']['wts'] = MA.array(NP.logical_not(self.cpinfo['raw']['flags']).astype(NP.float), mask=self.cpinfo['raw']['flags']) ############################################################################ def smooth_in_tbins(self, daybinsize=None, ndaybins=None, lstbinsize=None): """ ------------------------------------------------------------------------ Smooth the complex exponentials of closure phases in time bins. Both mean and median smoothing is produced. Inputs: daybinsize [Nonetype or scalar] Day bin size (in days) over which mean and median are estimated across different days for a fixed LST bin. If set to None, it will look for value in input ndaybins. If both are None, no smoothing is performed. Only one of daybinsize or ndaybins must be set to non-None value. ndaybins [NoneType or integer] Number of bins along day axis. 
Only if daybinsize is set to None. It produces bins that roughly consist of equal number of days in each bin regardless of how much the days in each bin are separated from each other. If both are None, no smoothing is performed. Only one of daybinsize or ndaybins must be set to non-None value. lstbinsize [NoneType or scalar] LST bin size (in seconds) over which mean and median are estimated across the LST. If set to None, no smoothing is performed ------------------------------------------------------------------------ """ if (ndaybins is not None) and (daybinsize is not None): raise ValueError('Only one of daybinsize or ndaybins should be set') if (daybinsize is not None) or (ndaybins is not None): if daybinsize is not None: if not isinstance(daybinsize, (int,float)): raise TypeError('Input daybinsize must be a scalar') dres = NP.diff(self.cpinfo['raw']['days']).min() # in days dextent = self.cpinfo['raw']['days'].max() - self.cpinfo['raw']['days'].min() + dres # in days if daybinsize > dres: daybinsize = NP.clip(daybinsize, dres, dextent) eps = 1e-10 daybins = NP.arange(self.cpinfo['raw']['days'].min(), self.cpinfo['raw']['days'].max() + dres + eps, daybinsize) ndaybins = daybins.size daybins = NP.concatenate((daybins, [daybins[-1]+daybinsize+eps])) if ndaybins > 1: daybinintervals = daybins[1:] - daybins[:-1] daybincenters = daybins[:-1] + 0.5 * daybinintervals else: daybinintervals = NP.asarray(daybinsize).reshape(-1) daybincenters = daybins[0] + 0.5 * daybinintervals counts, daybin_edges, daybinnum, ri = OPS.binned_statistic(self.cpinfo['raw']['days'], statistic='count', bins=daybins) counts = counts.astype(NP.int) # if 'prelim' not in self.cpinfo['processed']: # self.cpinfo['processed']['prelim'] = {} # self.cpinfo['processed']['prelim']['eicp'] = {} # self.cpinfo['processed']['prelim']['cphase'] = {} # self.cpinfo['processed']['prelim']['daybins'] = daybincenters # self.cpinfo['processed']['prelim']['diff_dbins'] = daybinintervals wts_daybins = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3])) eicp_dmean = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]), dtype=NP.complex128) eicp_dmedian = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]), dtype=NP.complex128) cp_drms = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3])) cp_dmad = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3])) for binnum in xrange(counts.size): ind_daybin = ri[ri[binnum]:ri[binnum+1]] wts_daybins[:,binnum,:,:] = NP.sum(self.cpinfo['processed']['native']['wts'][:,ind_daybin,:,:].data, axis=1) eicp_dmean[:,binnum,:,:] = NP.exp(1j*NP.angle(MA.mean(self.cpinfo['processed']['native']['eicp'][:,ind_daybin,:,:], axis=1))) eicp_dmedian[:,binnum,:,:] = NP.exp(1j*NP.angle(MA.median(self.cpinfo['processed']['native']['eicp'][:,ind_daybin,:,:].real, axis=1) + 1j * MA.median(self.cpinfo['processed']['native']['eicp'][:,ind_daybin,:,:].imag, axis=1))) cp_drms[:,binnum,:,:] = 
MA.std(self.cpinfo['processed']['native']['cphase'][:,ind_daybin,:,:], axis=1).data cp_dmad[:,binnum,:,:] = MA.median(NP.abs(self.cpinfo['processed']['native']['cphase'][:,ind_daybin,:,:] - NP.angle(eicp_dmedian[:,binnum,:,:][:,NP.newaxis,:,:])), axis=1).data # mask = wts_daybins <= 0.0 # self.cpinfo['processed']['prelim']['wts'] = MA.array(wts_daybins, mask=mask) # self.cpinfo['processed']['prelim']['eicp']['mean'] = MA.array(eicp_dmean, mask=mask) # self.cpinfo['processed']['prelim']['eicp']['median'] = MA.array(eicp_dmedian, mask=mask) # self.cpinfo['processed']['prelim']['cphase']['mean'] = MA.array(NP.angle(eicp_dmean), mask=mask) # self.cpinfo['processed']['prelim']['cphase']['median'] = MA.array(NP.angle(eicp_dmedian), mask=mask) # self.cpinfo['processed']['prelim']['cphase']['rms'] = MA.array(cp_drms, mask=mask) # self.cpinfo['processed']['prelim']['cphase']['mad'] = MA.array(cp_dmad, mask=mask) else: if not isinstance(ndaybins, int): raise TypeError('Input ndaybins must be an integer') if ndaybins <= 0: raise ValueError('Input ndaybins must be positive') days_split = NP.array_split(self.cpinfo['raw']['days'], ndaybins) daybincenters = NP.asarray([NP.mean(days) for days in days_split]) daybinintervals = NP.asarray([days.max()-days.min() for days in days_split]) counts = NP.asarray([days.size for days in days_split]) wts_split = NP.array_split(self.cpinfo['processed']['native']['wts'].data, ndaybins, axis=1) # mask_split = NP.array_split(self.cpinfo['processed']['native']['wts'].mask, ndaybins, axis=1) wts_daybins = NP.asarray([NP.sum(wtsitem, axis=1) for wtsitem in wts_split]) # ndaybins x nlst x ntriads x nchan wts_daybins = NP.moveaxis(wts_daybins, 0, 1) # nlst x ndaybins x ntriads x nchan mask_split = NP.array_split(self.cpinfo['processed']['native']['eicp'].mask, ndaybins, axis=1) eicp_split = NP.array_split(self.cpinfo['processed']['native']['eicp'].data, ndaybins, axis=1) eicp_dmean = MA.array([MA.mean(MA.array(eicp_split[i], mask=mask_split[i]), axis=1) for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan eicp_dmean = NP.exp(1j * NP.angle(eicp_dmean)) eicp_dmean = NP.moveaxis(eicp_dmean, 0, 1) # nlst x ndaybins x ntriads x nchan eicp_dmedian = MA.array([MA.median(MA.array(eicp_split[i].real, mask=mask_split[i]), axis=1) + 1j * MA.median(MA.array(eicp_split[i].imag, mask=mask_split[i]), axis=1) for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan eicp_dmedian = NP.exp(1j * NP.angle(eicp_dmedian)) eicp_dmedian = NP.moveaxis(eicp_dmedian, 0, 1) # nlst x ndaybins x ntriads x nchan cp_split = NP.array_split(self.cpinfo['processed']['native']['cphase'].data, ndaybins, axis=1) cp_drms = NP.array([MA.std(MA.array(cp_split[i], mask=mask_split[i]), axis=1).data for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan cp_drms = NP.moveaxis(cp_drms, 0, 1) # nlst x ndaybins x ntriads x nchan cp_dmad = NP.array([MA.median(NP.abs(cp_split[i] - NP.angle(eicp_dmedian[:,[i],:,:])), axis=1).data for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan cp_dmad = NP.moveaxis(cp_dmad, 0, 1) # nlst x ndaybins x ntriads x nchan if 'prelim' not in self.cpinfo['processed']: self.cpinfo['processed']['prelim'] = {} self.cpinfo['processed']['prelim']['eicp'] = {} self.cpinfo['processed']['prelim']['cphase'] = {} self.cpinfo['processed']['prelim']['daybins'] = daybincenters self.cpinfo['processed']['prelim']['diff_dbins'] = daybinintervals mask = wts_daybins <= 0.0 self.cpinfo['processed']['prelim']['wts'] = MA.array(wts_daybins, 
mask=mask) self.cpinfo['processed']['prelim']['eicp']['mean'] = MA.array(eicp_dmean, mask=mask) self.cpinfo['processed']['prelim']['eicp']['median'] = MA.array(eicp_dmedian, mask=mask) self.cpinfo['processed']['prelim']['cphase']['mean'] = MA.array(NP.angle(eicp_dmean), mask=mask) self.cpinfo['processed']['prelim']['cphase']['median'] = MA.array(NP.angle(eicp_dmedian), mask=mask) self.cpinfo['processed']['prelim']['cphase']['rms'] = MA.array(cp_drms, mask=mask) self.cpinfo['processed']['prelim']['cphase']['mad'] = MA.array(cp_dmad, mask=mask) rawlst = NP.degrees(NP.unwrap(NP.radians(self.cpinfo['raw']['lst'] * 15.0), discont=NP.pi, axis=0)) / 15.0 # in hours but unwrapped to have no discontinuities if NP.any(rawlst > 24.0): rawlst -= 24.0 if rawlst.shape[0] > 1: # LST bin only if there are multiple LST if lstbinsize is not None: if not isinstance(lstbinsize, (int,float)): raise TypeError('Input lstbinsize must be a scalar') lstbinsize = lstbinsize / 3.6e3 # in hours tres = NP.diff(rawlst[:,0]).min() # in hours textent = rawlst[:,0].max() - rawlst[:,0].min() + tres # in hours eps = 1e-10 if 'prelim' not in self.cpinfo['processed']: self.cpinfo['processed']['prelim'] = {} no_change_in_lstbins = False if lstbinsize > tres: lstbinsize = NP.clip(lstbinsize, tres, textent) lstbins = NP.arange(rawlst[:,0].min(), rawlst[:,0].max() + tres + eps, lstbinsize) nlstbins = lstbins.size lstbins = NP.concatenate((lstbins, [lstbins[-1]+lstbinsize+eps])) if nlstbins > 1: lstbinintervals = lstbins[1:] - lstbins[:-1] lstbincenters = lstbins[:-1] + 0.5 * lstbinintervals else: lstbinintervals = NP.asarray(lstbinsize).reshape(-1) lstbincenters = lstbins[0] + 0.5 * lstbinintervals self.cpinfo['processed']['prelim']['lstbins'] = lstbincenters self.cpinfo['processed']['prelim']['dlstbins'] = lstbinintervals no_change_in_lstbins = False else: # Perform no binning and keep the current LST resolution, data and weights warnings.warn('LST bin size found to be smaller than the LST resolution in the data. 
No LST binning/averaging will be performed.') lstbinsize = tres lstbins = NP.arange(rawlst[:,0].min(), rawlst[:,0].max() + lstbinsize + eps, lstbinsize) nlstbins = lstbins.size - 1 if nlstbins > 1: lstbinintervals = lstbins[1:] - lstbins[:-1] else: lstbinintervals = NP.asarray(lstbinsize).reshape(-1) self.cpinfo['processed']['prelim']['dlstbins'] = lstbinintervals self.cpinfo['processed']['prelim']['lstbins'] = lstbins[:-1] # Ensure that the LST bins are inside the min/max envelope to # error-free interpolation later self.cpinfo['processed']['prelim']['lstbins'][0] += eps self.cpinfo['processed']['prelim']['lstbins'][-1] -= eps no_change_in_lstbins = True counts, lstbin_edges, lstbinnum, ri = OPS.binned_statistic(rawlst[:,0], statistic='count', bins=lstbins) counts = counts.astype(NP.int) if 'wts' not in self.cpinfo['processed']['prelim']: outshape = (counts.size, self.cpinfo['processed']['native']['eicp'].shape[1], self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]) else: outshape = (counts.size, self.cpinfo['processed']['prelim']['wts'].shape[1], self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]) wts_lstbins = NP.zeros(outshape) eicp_tmean = NP.zeros(outshape, dtype=NP.complex128) eicp_tmedian = NP.zeros(outshape, dtype=NP.complex128) cp_trms = NP.zeros(outshape) cp_tmad = NP.zeros(outshape) for binnum in xrange(counts.size): if no_change_in_lstbins: ind_lstbin = [binnum] else: ind_lstbin = ri[ri[binnum]:ri[binnum+1]] if 'wts' not in self.cpinfo['processed']['prelim']: indict = self.cpinfo['processed']['native'] else: indict = self.cpinfo['processed']['prelim'] wts_lstbins[binnum,:,:,:] = NP.sum(indict['wts'][ind_lstbin,:,:,:].data, axis=0) if 'wts' not in self.cpinfo['processed']['prelim']: eicp_tmean[binnum,:,:,:] = NP.exp(1j*NP.angle(MA.mean(indict['eicp'][ind_lstbin,:,:,:], axis=0))) eicp_tmedian[binnum,:,:,:] = NP.exp(1j*NP.angle(MA.median(indict['eicp'][ind_lstbin,:,:,:].real, axis=0) + 1j * MA.median(self.cpinfo['processed']['native']['eicp'][ind_lstbin,:,:,:].imag, axis=0))) cp_trms[binnum,:,:,:] = MA.std(indict['cphase'][ind_lstbin,:,:,:], axis=0).data cp_tmad[binnum,:,:,:] = MA.median(NP.abs(indict['cphase'][ind_lstbin,:,:,:] - NP.angle(eicp_tmedian[binnum,:,:,:][NP.newaxis,:,:,:])), axis=0).data else: eicp_tmean[binnum,:,:,:] = NP.exp(1j*NP.angle(MA.mean(NP.exp(1j*indict['cphase']['mean'][ind_lstbin,:,:,:]), axis=0))) eicp_tmedian[binnum,:,:,:] = NP.exp(1j*NP.angle(MA.median(NP.cos(indict['cphase']['median'][ind_lstbin,:,:,:]), axis=0) + 1j * MA.median(NP.sin(indict['cphase']['median'][ind_lstbin,:,:,:]), axis=0))) cp_trms[binnum,:,:,:] = MA.std(indict['cphase']['mean'][ind_lstbin,:,:,:], axis=0).data cp_tmad[binnum,:,:,:] = MA.median(NP.abs(indict['cphase']['median'][ind_lstbin,:,:,:] - NP.angle(eicp_tmedian[binnum,:,:,:][NP.newaxis,:,:,:])), axis=0).data mask = wts_lstbins <= 0.0 self.cpinfo['processed']['prelim']['wts'] = MA.array(wts_lstbins, mask=mask) if 'eicp' not in self.cpinfo['processed']['prelim']: self.cpinfo['processed']['prelim']['eicp'] = {} if 'cphase' not in self.cpinfo['processed']['prelim']: self.cpinfo['processed']['prelim']['cphase'] = {} self.cpinfo['processed']['prelim']['eicp']['mean'] = MA.array(eicp_tmean, mask=mask) self.cpinfo['processed']['prelim']['eicp']['median'] = MA.array(eicp_tmedian, mask=mask) self.cpinfo['processed']['prelim']['cphase']['mean'] = MA.array(NP.angle(eicp_tmean), mask=mask) 
self.cpinfo['processed']['prelim']['cphase']['median'] = MA.array(NP.angle(eicp_tmedian), mask=mask) self.cpinfo['processed']['prelim']['cphase']['rms'] = MA.array(cp_trms, mask=mask) self.cpinfo['processed']['prelim']['cphase']['mad'] = MA.array(cp_tmad, mask=mask) # else: # # Perform no binning and keep the current LST resolution, data and weights # warnings.warn('LST bin size found to be smaller than the LST resolution in the data. No LST binning/averaging will be performed.') # lstbinsize = tres # lstbins = NP.arange(rawlst[:,0].min(), rawlst[:,0].max() + lstbinsize + eps, lstbinsize) # nlstbins = lstbins.size - 1 # if nlstbins > 1: # lstbinintervals = lstbins[1:] - lstbins[:-1] # lstbincenters = lstbins[:-1] + 0.5 * lstbinintervals # else: # lstbinintervals = NP.asarray(lstbinsize).reshape(-1) # lstbincenters = lstbins[0] + 0.5 * lstbinintervals # if 'prelim' not in self.cpinfo['processed']: # self.cpinfo['processed']['prelim'] = {} # self.cpinfo['processed']['prelim']['lstbins'] = lstbincenters # self.cpinfo['processed']['prelim']['dlstbins'] = lstbinintervals if (rawlst.shape[0] <= 1) or (lstbinsize is None): nlstbins = rawlst.shape[0] lstbins = NP.mean(rawlst, axis=1) if 'prelim' not in self.cpinfo['processed']: self.cpinfo['processed']['prelim'] = {} self.cpinfo['processed']['prelim']['lstbins'] = lstbins if lstbinsize is not None: self.cpinfo['processed']['prelim']['dlstbins'] = NP.asarray(lstbinsize).reshape(-1) else: self.cpinfo['processed']['prelim']['dlstbins'] = NP.zeros(1) ############################################################################ def subtract(self, cphase): """ ------------------------------------------------------------------------ Subtract complex exponential of the bispectrum phase from the current instance and updates the cpinfo attribute Inputs: cphase [masked array] Bispectrum phase array as a maked array. It must be of same size as freqs along the axis specified in input axis. 
Action: Updates 'submodel' and 'residual' keys under attribute cpinfo under key 'processed' ------------------------------------------------------------------------ """ if not isinstance(cphase, NP.ndarray): raise TypeError('Input cphase must be a numpy array') if not isinstance(cphase, MA.MaskedArray): cphase = MA.array(cphase, mask=NP.isnan(cphase)) if not OPS.is_broadcastable(cphase.shape, self.cpinfo['processed']['prelim']['cphase']['median'].shape): raise ValueError('Input cphase has shape incompatible with that in instance attribute') else: minshape = tuple(NP.ones(self.cpinfo['processed']['prelim']['cphase']['median'].ndim - cphase.ndim, dtype=NP.int)) + cphase.shape cphase = cphase.reshape(minshape) # cphase = NP.broadcast_to(cphase, minshape) eicp = NP.exp(1j*cphase) self.cpinfo['processed']['submodel'] = {} self.cpinfo['processed']['submodel']['cphase'] = cphase self.cpinfo['processed']['submodel']['eicp'] = eicp self.cpinfo['processed']['residual'] = {'eicp': {}, 'cphase': {}} for key in ['mean', 'median']: eicpdiff = self.cpinfo['processed']['prelim']['eicp'][key] - eicp eicpratio = self.cpinfo['processed']['prelim']['eicp'][key] / eicp self.cpinfo['processed']['residual']['eicp'][key] = eicpdiff self.cpinfo['processed']['residual']['cphase'][key] = MA.array(NP.angle(eicpratio.data), mask=self.cpinfo['processed']['residual']['eicp'][key].mask) ############################################################################ def subsample_differencing(self, daybinsize=None, ndaybins=4, lstbinsize=None): """ ------------------------------------------------------------------------ Create subsamples and differences between subsamples to evaluate noise properties from the data set. Inputs: daybinsize [Nonetype or scalar] Day bin size (in days) over which mean and median are estimated across different days for a fixed LST bin. If set to None, it will look for value in input ndaybins. If both are None, no smoothing is performed. Only one of daybinsize or ndaybins must be set to non-None value. Must yield greater than or equal to 4 bins ndaybins [NoneType or integer] Number of bins along day axis. Only if daybinsize is set to None. It produces bins that roughly consist of equal number of days in each bin regardless of how much the days in each bin are separated from each other. If both are None, no smoothing is performed. Only one of daybinsize or ndaybins must be set to non-None value. If set, it must be set to greater than or equal to 4 lstbinsize [NoneType or scalar] LST bin size (in seconds) over which mean and median are estimated across the LST. 
If set to None, no smoothing is performed ------------------------------------------------------------------------ """ if (ndaybins is not None) and (daybinsize is not None): raise ValueError('Only one of daybinsize or ndaybins should be set') if (daybinsize is not None) or (ndaybins is not None): if daybinsize is not None: if not isinstance(daybinsize, (int,float)): raise TypeError('Input daybinsize must be a scalar') dres = NP.diff(self.cpinfo['raw']['days']).min() # in days dextent = self.cpinfo['raw']['days'].max() - self.cpinfo['raw']['days'].min() + dres # in days if daybinsize > dres: daybinsize = NP.clip(daybinsize, dres, dextent) eps = 1e-10 daybins = NP.arange(self.cpinfo['raw']['days'].min(), self.cpinfo['raw']['days'].max() + dres + eps, daybinsize) ndaybins = daybins.size daybins = NP.concatenate((daybins, [daybins[-1]+daybinsize+eps])) if ndaybins >= 4: daybinintervals = daybins[1:] - daybins[:-1] daybincenters = daybins[:-1] + 0.5 * daybinintervals else: raise ValueError('Could not find at least 4 bins along repeating days. Adjust binning interval.') counts, daybin_edges, daybinnum, ri = OPS.binned_statistic(self.cpinfo['raw']['days'], statistic='count', bins=daybins) counts = counts.astype(NP.int) wts_daybins = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3])) eicp_dmean = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]), dtype=NP.complex128) eicp_dmedian = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]), dtype=NP.complex128) cp_drms = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3])) cp_dmad = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3])) for binnum in xrange(counts.size): ind_daybin = ri[ri[binnum]:ri[binnum+1]] wts_daybins[:,binnum,:,:] = NP.sum(self.cpinfo['processed']['native']['wts'][:,ind_daybin,:,:].data, axis=1) eicp_dmean[:,binnum,:,:] = NP.exp(1j*NP.angle(MA.mean(self.cpinfo['processed']['native']['eicp'][:,ind_daybin,:,:], axis=1))) eicp_dmedian[:,binnum,:,:] = NP.exp(1j*NP.angle(MA.median(self.cpinfo['processed']['native']['eicp'][:,ind_daybin,:,:].real, axis=1) + 1j * MA.median(self.cpinfo['processed']['native']['eicp'][:,ind_daybin,:,:].imag, axis=1))) cp_drms[:,binnum,:,:] = MA.std(self.cpinfo['processed']['native']['cphase'][:,ind_daybin,:,:], axis=1).data cp_dmad[:,binnum,:,:] = MA.median(NP.abs(self.cpinfo['processed']['native']['cphase'][:,ind_daybin,:,:] - NP.angle(eicp_dmedian[:,binnum,:,:][:,NP.newaxis,:,:])), axis=1).data else: if not isinstance(ndaybins, int): raise TypeError('Input ndaybins must be an integer') if ndaybins < 4: raise ValueError('Input ndaybins must be greater than or equal to 4') days_split = NP.array_split(self.cpinfo['raw']['days'], ndaybins) daybincenters = NP.asarray([NP.mean(days) for days in days_split]) daybinintervals = NP.asarray([days.max()-days.min() for days in days_split]) counts = NP.asarray([days.size for days in days_split]) wts_split = 
NP.array_split(self.cpinfo['processed']['native']['wts'].data, ndaybins, axis=1) # mask_split = NP.array_split(self.cpinfo['processed']['native']['wts'].mask, ndaybins, axis=1) wts_daybins = NP.asarray([NP.sum(wtsitem, axis=1) for wtsitem in wts_split]) # ndaybins x nlst x ntriads x nchan wts_daybins = NP.moveaxis(wts_daybins, 0, 1) # nlst x ndaybins x ntriads x nchan mask_split = NP.array_split(self.cpinfo['processed']['native']['eicp'].mask, ndaybins, axis=1) eicp_split = NP.array_split(self.cpinfo['processed']['native']['eicp'].data, ndaybins, axis=1) eicp_dmean = MA.array([MA.mean(MA.array(eicp_split[i], mask=mask_split[i]), axis=1) for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan eicp_dmean = NP.exp(1j * NP.angle(eicp_dmean)) eicp_dmean = NP.moveaxis(eicp_dmean, 0, 1) # nlst x ndaybins x ntriads x nchan eicp_dmedian = MA.array([MA.median(MA.array(eicp_split[i].real, mask=mask_split[i]), axis=1) + 1j * MA.median(MA.array(eicp_split[i].imag, mask=mask_split[i]), axis=1) for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan eicp_dmedian = NP.exp(1j * NP.angle(eicp_dmedian)) eicp_dmedian = NP.moveaxis(eicp_dmedian, 0, 1) # nlst x ndaybins x ntriads x nchan cp_split = NP.array_split(self.cpinfo['processed']['native']['cphase'].data, ndaybins, axis=1) cp_drms = NP.array([MA.std(MA.array(cp_split[i], mask=mask_split[i]), axis=1).data for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan cp_drms = NP.moveaxis(cp_drms, 0, 1) # nlst x ndaybins x ntriads x nchan cp_dmad = NP.array([MA.median(NP.abs(cp_split[i] - NP.angle(eicp_dmedian[:,[i],:,:])), axis=1).data for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan cp_dmad = NP.moveaxis(cp_dmad, 0, 1) # nlst x ndaybins x ntriads x nchan mask = wts_daybins <= 0.0 wts_daybins = MA.array(wts_daybins, mask=mask) cp_dmean = MA.array(NP.angle(eicp_dmean), mask=mask) cp_dmedian = MA.array(NP.angle(eicp_dmedian), mask=mask) self.cpinfo['errinfo']['daybins'] = daybincenters self.cpinfo['errinfo']['diff_dbins'] = daybinintervals self.cpinfo['errinfo']['wts'] = {'{0}'.format(ind): None for ind in range(2)} self.cpinfo['errinfo']['eicp_diff'] = {'{0}'.format(ind): {} for ind in range(2)} rawlst = NP.degrees(NP.unwrap(NP.radians(self.cpinfo['raw']['lst'] * 15.0), discont=NP.pi, axis=0)) / 15.0 # in hours but unwrapped to have no discontinuities if NP.any(rawlst > 24.0): rawlst -= 24.0 if rawlst.shape[0] > 1: # LST bin only if there are multiple LST if lstbinsize is not None: if not isinstance(lstbinsize, (int,float)): raise TypeError('Input lstbinsize must be a scalar') lstbinsize = lstbinsize / 3.6e3 # in hours tres = NP.diff(rawlst[:,0]).min() # in hours textent = rawlst[:,0].max() - rawlst[:,0].min() + tres # in hours eps = 1e-10 no_change_in_lstbins = False if lstbinsize > tres: lstbinsize = NP.clip(lstbinsize, tres, textent) lstbins = NP.arange(rawlst[:,0].min(), rawlst[:,0].max() + tres + eps, lstbinsize) nlstbins = lstbins.size lstbins = NP.concatenate((lstbins, [lstbins[-1]+lstbinsize+eps])) if nlstbins > 1: lstbinintervals = lstbins[1:] - lstbins[:-1] lstbincenters = lstbins[:-1] + 0.5 * lstbinintervals else: lstbinintervals = NP.asarray(lstbinsize).reshape(-1) lstbincenters = lstbins[0] + 0.5 * lstbinintervals self.cpinfo['errinfo']['lstbins'] = lstbincenters self.cpinfo['errinfo']['dlstbins'] = lstbinintervals no_change_in_lstbins = False else: # Perform no binning and keep the current LST resolution warnings.warn('LST bin size found to be smaller than the LST 
resolution in the data. No LST binning/averaging will be performed.') lstbinsize = tres lstbins = NP.arange(rawlst[:,0].min(), rawlst[:,0].max() + lstbinsize + eps, lstbinsize) nlstbins = lstbins.size - 1 if nlstbins > 1: lstbinintervals = lstbins[1:] - lstbins[:-1] else: lstbinintervals = NP.asarray(lstbinsize).reshape(-1) self.cpinfo['errinfo']['dlstbins'] = lstbinintervals self.cpinfo['errinfo']['lstbins'] = lstbins[:-1] # Ensure that the LST bins are inside the min/max envelope to # error-free interpolation later self.cpinfo['errinfo']['lstbins'][0] += eps self.cpinfo['errinfo']['lstbins'][-1] -= eps no_change_in_lstbins = True counts, lstbin_edges, lstbinnum, ri = OPS.binned_statistic(rawlst[:,0], statistic='count', bins=lstbins) counts = counts.astype(NP.int) outshape = (counts.size, wts_daybins.shape[1], self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]) wts_lstbins = NP.zeros(outshape) eicp_tmean = NP.zeros(outshape, dtype=NP.complex128) eicp_tmedian = NP.zeros(outshape, dtype=NP.complex128) cp_trms = NP.zeros(outshape) cp_tmad = NP.zeros(outshape) for binnum in xrange(counts.size): if no_change_in_lstbins: ind_lstbin = [binnum] else: ind_lstbin = ri[ri[binnum]:ri[binnum+1]] wts_lstbins[binnum,:,:,:] = NP.sum(wts_daybins[ind_lstbin,:,:,:].data, axis=0) eicp_tmean[binnum,:,:,:] = NP.exp(1j*NP.angle(MA.mean(NP.exp(1j*cp_dmean[ind_lstbin,:,:,:]), axis=0))) eicp_tmedian[binnum,:,:,:] = NP.exp(1j*NP.angle(MA.median(NP.cos(cp_dmedian[ind_lstbin,:,:,:]), axis=0) + 1j * MA.median(NP.sin(cp_dmedian[ind_lstbin,:,:,:]), axis=0))) mask = wts_lstbins <= 0.0 wts_lstbins = MA.array(wts_lstbins, mask=mask) eicp_tmean = MA.array(eicp_tmean, mask=mask) eicp_tmedian = MA.array(eicp_tmedian, mask=mask) else: wts_lstbins = MA.copy(wts_daybins) mask = wts_lstbins.mask eicp_tmean = MA.array(NP.exp(1j*NP.angle(NP.exp(1j*cp_dmean))), mask=mask) eicp_tmedian = MA.array(NP.exp(1j*NP.angle(NP.cos(cp_dmedian) + 1j * NP.sin(cp_dmedian))), mask=mask) if (rawlst.shape[0] <= 1) or (lstbinsize is None): nlstbins = rawlst.shape[0] lstbins = NP.mean(rawlst, axis=1) self.cpinfo['errinfo']['lstbins'] = lstbins if lstbinsize is not None: self.cpinfo['errinfo']['dlstbins'] = NP.asarray(lstbinsize).reshape(-1) else: self.cpinfo['errinfo']['dlstbins'] = NP.zeros(1) ncomb = NP.sum(NP.asarray([(ndaybins-i-1)*(ndaybins-i-2)*(ndaybins-i-3)/2 for i in range(ndaybins-3)])).astype(int) diff_outshape = (nlstbins, ncomb, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]) for diffind in range(2): self.cpinfo['errinfo']['eicp_diff']['{0}'.format(diffind)]['mean'] = MA.empty(diff_outshape, dtype=NP.complex) self.cpinfo['errinfo']['eicp_diff']['{0}'.format(diffind)]['median'] = MA.empty(diff_outshape, dtype=NP.complex) self.cpinfo['errinfo']['wts']['{0}'.format(diffind)] = MA.empty(diff_outshape, dtype=NP.float) ind = -1 self.cpinfo['errinfo']['list_of_pair_of_pairs'] = [] list_of_pair_of_pairs = [] for i in range(ndaybins-1): for j in range(i+1,ndaybins): for k in range(ndaybins-1): if (k != i) and (k != j): for m in range(k+1,ndaybins): if (m != i) and (m != j): pair_of_pairs = [set([i,j]), set([k,m])] if (pair_of_pairs not in list_of_pair_of_pairs) and (pair_of_pairs[::-1] not in list_of_pair_of_pairs): ind += 1 list_of_pair_of_pairs += [copy.deepcopy(pair_of_pairs)] self.cpinfo['errinfo']['list_of_pair_of_pairs'] += [[i,j,k,m]] for stat in ['mean', 'median']: if stat == 'mean': 
self.cpinfo['errinfo']['eicp_diff']['0'][stat][:,ind,:,:] = MA.array(0.5 * (eicp_tmean[:,j,:,:].data - eicp_tmean[:,i,:,:].data), mask=NP.logical_or(eicp_tmean[:,j,:,:].mask, eicp_tmean[:,i,:,:].mask)) self.cpinfo['errinfo']['eicp_diff']['1'][stat][:,ind,:,:] = MA.array(0.5 * (eicp_tmean[:,m,:,:].data - eicp_tmean[:,k,:,:].data), mask=NP.logical_or(eicp_tmean[:,m,:,:].mask, eicp_tmean[:,k,:,:].mask)) self.cpinfo['errinfo']['wts']['0'][:,ind,:,:] = MA.array(NP.sqrt(wts_lstbins[:,j,:,:].data**2 + wts_lstbins[:,i,:,:].data**2), mask=NP.logical_or(wts_lstbins[:,j,:,:].mask, wts_lstbins[:,i,:,:].mask)) self.cpinfo['errinfo']['wts']['1'][:,ind,:,:] = MA.array(NP.sqrt(wts_lstbins[:,m,:,:].data**2 + wts_lstbins[:,k,:,:].data**2), mask=NP.logical_or(wts_lstbins[:,m,:,:].mask, wts_lstbins[:,k,:,:].mask)) # self.cpinfo['errinfo']['eicp_diff']['0'][stat][:,ind,:,:] = 0.5 * (eicp_tmean[:,j,:,:] - eicp_tmean[:,i,:,:]) # self.cpinfo['errinfo']['eicp_diff']['1'][stat][:,ind,:,:] = 0.5 * (eicp_tmean[:,m,:,:] - eicp_tmean[:,k,:,:]) # self.cpinfo['errinfo']['wts']['0'][:,ind,:,:] = NP.sqrt(wts_lstbins[:,j,:,:]**2 + wts_lstbins[:,i,:,:]**2) # self.cpinfo['errinfo']['wts']['1'][:,ind,:,:] = NP.sqrt(wts_lstbins[:,m,:,:]**2 + wts_lstbins[:,k,:,:]**2) else: self.cpinfo['errinfo']['eicp_diff']['0'][stat][:,ind,:,:] = MA.array(0.5 * (eicp_tmedian[:,j,:,:].data - eicp_tmedian[:,i,:,:].data), mask=NP.logical_or(eicp_tmedian[:,j,:,:].mask, eicp_tmedian[:,i,:,:].mask)) self.cpinfo['errinfo']['eicp_diff']['1'][stat][:,ind,:,:] = MA.array(0.5 * (eicp_tmedian[:,m,:,:].data - eicp_tmedian[:,k,:,:].data), mask=NP.logical_or(eicp_tmedian[:,m,:,:].mask, eicp_tmedian[:,k,:,:].mask)) # self.cpinfo['errinfo']['eicp_diff']['0'][stat][:,ind,:,:] = 0.5 * (eicp_tmedian[:,j,:,:] - eicp_tmedian[:,i,:,:]) # self.cpinfo['errinfo']['eicp_diff']['1'][stat][:,ind,:,:] = 0.5 * (eicp_tmedian[:,m,:,:] - eicp_tmedian[:,k,:,:]) mask0 = self.cpinfo['errinfo']['wts']['0'] <= 0.0 mask1 = self.cpinfo['errinfo']['wts']['1'] <= 0.0 self.cpinfo['errinfo']['eicp_diff']['0'][stat] = MA.array(self.cpinfo['errinfo']['eicp_diff']['0'][stat], mask=mask0) self.cpinfo['errinfo']['eicp_diff']['1'][stat] = MA.array(self.cpinfo['errinfo']['eicp_diff']['1'][stat], mask=mask1) self.cpinfo['errinfo']['wts']['0'] = MA.array(self.cpinfo['errinfo']['wts']['0'], mask=mask0) self.cpinfo['errinfo']['wts']['1'] = MA.array(self.cpinfo['errinfo']['wts']['1'], mask=mask1) ############################################################################ def save(self, outfile=None): """ ------------------------------------------------------------------------ Save contents of attribute cpinfo in external HDF5 file Inputs: outfile [NoneType or string] Output file (HDF5) to save contents to. If set to None (default), it will be saved in the file pointed to by the extfile attribute of class ClosurePhase ------------------------------------------------------------------------ """ if outfile is None: outfile = self.extfile NMO.save_dict_to_hdf5(self.cpinfo, outfile, compressinfo={'compress_fmt': 'gzip', 'compress_opts': 9}) ################################################################################ class ClosurePhaseDelaySpectrum(object): """ ---------------------------------------------------------------------------- Class to hold and operate on Closure Phase information. It has the following attributes and member functions. 
Attributes: cPhase [instance of class ClosurePhase] Instance of class ClosurePhase f [numpy array] Frequencies (in Hz) in closure phase spectra df [float] Frequency resolution (in Hz) in closure phase spectra cPhaseDS [dictionary] Possibly oversampled Closure Phase Delay Spectrum information. cPhaseDS_resampled [dictionary] Resampled Closure Phase Delay Spectrum information. Member functions: __init__() Initialize instance of class ClosurePhaseDelaySpectrum FT() Fourier transform of complex closure phase spectra mapping from frequency axis to delay axis. subset() Return triad and time indices to select a subset of processed data compute_power_spectrum() Compute power spectrum of closure phase data. It is in units of Mpc/h. rescale_power_spectrum() Rescale power spectrum to dimensional quantity by converting the ratio given visibility amplitude information average_rescaled_power_spectrum() Average the rescaled power spectrum with physical units along certain axes with inverse variance or regular averaging beam3Dvol() Compute three-dimensional volume of the antenna power pattern along two transverse axes and one LOS axis. ---------------------------------------------------------------------------- """ def __init__(self, cPhase): """ ------------------------------------------------------------------------ Initialize instance of class ClosurePhaseDelaySpectrum Inputs: cPhase [class ClosurePhase] Instance of class ClosurePhase ------------------------------------------------------------------------ """ if not isinstance(cPhase, ClosurePhase): raise TypeError('Input cPhase must be an instance of class ClosurePhase') self.cPhase = cPhase self.f = self.cPhase.f self.df = self.cPhase.df self.cPhaseDS = None self.cPhaseDS_resampled = None ############################################################################ def FT(self, bw_eff, freq_center=None, shape=None, fftpow=None, pad=None, datapool='prelim', visscaleinfo=None, method='fft', resample=True, apply_flags=True): """ ------------------------------------------------------------------------ Fourier transform of complex closure phase spectra mapping from frequency axis to delay axis. Inputs: bw_eff [scalar or numpy array] effective bandwidths (in Hz) on the selected frequency windows for subband delay transform of closure phases. If a scalar value is provided, the same will be applied to all frequency windows freq_center [scalar, list or numpy array] frequency centers (in Hz) of the selected frequency windows for subband delay transform of closure phases. The value can be a scalar, list or numpy array. If a scalar is provided, the same will be applied to all frequency windows. Default=None uses the center frequency from the class attribute named channels shape [string] frequency window shape for subband delay transform of closure phases. Accepted values for the string are 'rect' or 'RECT' (for rectangular), 'bnw' and 'BNW' (for Blackman-Nuttall), and 'bhw' or 'BHW' (for Blackman-Harris). Default=None sets it to 'rect' (rectangular window) fftpow [scalar] the power to which the FFT of the window will be raised. The value must be a positive scalar. Default = 1.0 pad [scalar] padding fraction relative to the number of frequency channels for closure phases. Value must be a non-negative scalar. For e.g., a pad of 1.0 pads the frequency axis with zeros of the same width as the number of channels. After the delay transform, the transformed closure phases are downsampled by a factor of 1+pad. 
If a negative value is specified, delay transform will be performed with no padding. Default=None sets to padding factor to 1.0 datapool [string] Specifies which data set is to be Fourier transformed visscaleinfo [dictionary] Dictionary containing reference visibilities based on which the closure phases will be scaled to units of visibilities. It contains the following keys and values: 'vis' [numpy array or instance of class InterferometerArray] Reference visibilities from the baselines that form the triad. It can be an instance of class RI.InterferometerArray or a numpy array. If an instance of class InterferometerArray, the baseline triplet must be set in key 'bltriplet' and value in key 'lst' will be ignored. If the value under this key 'vis' is set to a numpy array, it must be of shape (nbl=3, nlst_vis, nchan). In this case the value under key 'bltriplet' will be ignored. The nearest LST will be looked up and applied after smoothing along LST based on the smoothing parameter 'smooth' 'bltriplet' [Numpy array] Will be used in searching for matches to these three baseline vectors if the value under key 'vis' is set to an instance of class InterferometerArray. However, if value under key 'vis' is a numpy array, this key 'bltriplet' will be ignored. 'lst' [numpy array] Reference LST (in hours). It is of shape (nlst_vis,). It will be used only if value under key 'vis' is a numpy array, otherwise it will be ignored and read from the instance of class InterferometerArray passed under key 'vis'. If the specified LST range does not cover the data LST range, those LST will contain NaN in the delay spectrum 'smoothinfo' [dictionary] Dictionary specifying smoothing and/or interpolation parameters. It has the following keys and values: 'op_type' [string] Specifies the interpolating operation. Must be specified (no default). Accepted values are 'interp1d' (scipy.interpolate), 'median' (skimage.filters), 'tophat' (astropy.convolution) and 'gaussian' (astropy.convolution) 'interp_kind' [string (optional)] Specifies the interpolation kind (if 'op_type' is set to 'interp1d'). For accepted values, see scipy.interpolate.interp1d() 'window_size' [integer (optional)] Specifies the size of the interpolating/smoothing kernel. Only applies when 'op_type' is set to 'median', 'tophat' or 'gaussian' The kernel is a tophat function when 'op_type' is set to 'median' or 'tophat'. If refers to FWHM when 'op_type' is set to 'gaussian' resample [boolean] If set to True (default), resample the delay spectrum axis to independent samples along delay axis. If set to False, return the results as is even if they may be be oversampled and not all samples may be independent method [string] Specifies the Fourier transform method to be used. Accepted values are 'fft' (default) for FFT and 'nufft' for non-uniform FFT apply_flags [boolean] If set to True (default), weights determined from flags will be applied. If False, no weights from flagging will be applied, and thus even flagged data will be included Outputs: A dictionary that contains the oversampled (if resample=False) or resampled (if resample=True) delay spectrum information. It has the following keys and values: 'freq_center' [numpy array] contains the center frequencies (in Hz) of the frequency subbands of the subband delay spectra. It is of size n_win. It is roughly equivalent to redshift(s) 'freq_wts' [numpy array] Contains frequency weights applied on each frequency sub-band during the subband delay transform. It is of size n_win x nchan. 
'bw_eff' [numpy array] contains the effective bandwidths (in Hz) of the subbands being delay transformed. It is of size n_win. It is roughly equivalent to width in redshift or along line-of-sight 'shape' [string] shape of the window function applied. Accepted values are 'rect' (rectangular), 'bhw' (Blackman-Harris), 'bnw' (Blackman-Nuttall). 'fftpow' [scalar] the power to which the FFT of the window was raised. The value is be a positive scalar with default = 1.0 'npad' [scalar] Numbber of zero-padded channels before performing the subband delay transform. 'lags' [numpy array] lags of the subband delay spectra after padding in frequency during the transform. It is of size nlags=nchan+npad if resample=True, where npad is the number of frequency channels padded specified under the key 'npad'. If resample=False, nlags = number of delays after resampling only independent delays. The lags roughly correspond to k_parallel. 'lag_kernel' [numpy array] delay transform of the frequency weights under the key 'freq_wts'. It is of size n_win x nlst x ndays x ntriads x nlags. nlags=nchan+npad if resample=True, where npad is the number of frequency channels padded specified under the key 'npad'. If resample=False, nlags = number of delays after resampling only independent delays. 'lag_corr_length' [numpy array] It is the correlation timescale (in pixels) of the subband delay spectra. It is proportional to inverse of effective bandwidth. It is of size n_win. The unit size of a pixel is determined by the difference between adjacent pixels in lags under key 'lags' which in turn is effectively inverse of the effective bandwidth of the subband specified in bw_eff 'whole' [dictionary] Delay spectrum results corresponding to bispectrum phase in 'prelim' key of attribute cpinfo. Contains the following keys and values: 'dspec' [dictionary] Contains the following keys and values: 'twts' [numpy array] Weights from time-based flags that went into time-averaging. Shape=(nlst,ndays,ntriads,nchan) 'mean' [numpy array] Delay spectrum of closure phases based on their mean across time intervals. Shape=(nspw,nlst,ndays,ntriads,nlags) 'median' [numpy array] Delay spectrum of closure phases based on their median across time intervals. Shape=(nspw,nlst,ndays,ntriads,nlags) 'submodel' [dictionary] Delay spectrum results corresponding to bispectrum phase in 'submodel' key of attribute cpinfo. Contains the following keys and values: 'dspec' [numpy array] Delay spectrum of closure phases Shape=(nspw,nlst,ndays,ntriads,nlags) 'residual' [dictionary] Delay spectrum results corresponding to bispectrum phase in 'residual' key of attribute cpinfo after subtracting 'submodel' bispectrum phase from that of 'prelim'. It contains the following keys and values: 'dspec' [dictionary] Contains the following keys and values: 'twts' [numpy array] Weights from time-based flags that went into time-averaging. Shape=(nlst,ndays,ntriads,nchan) 'mean' [numpy array] Delay spectrum of closure phases based on their mean across time intervals. Shape=(nspw,nlst,ndays,ntriads,nlags) 'median' [numpy array] Delay spectrum of closure phases based on their median across time intervals. Shape=(nspw,nlst,ndays,ntriads,nlags) 'errinfo' [dictionary] It has two keys 'dspec0' and 'dspec1' each of which are dictionaries with the following keys and values: 'twts' [numpy array] Weights for the subsample difference. It is of shape (nlst, ndays, ntriads, nchan) 'mean' [numpy array] Delay spectrum of the subsample difference obtained by using the mean statistic. 
It is of shape (nspw, nlst, ndays, ntriads, nlags) 'median' [numpy array] Delay spectrum of the subsample difference obtained by using the median statistic. It is of shape (nspw, nlst, ndays, ntriads, nlags) ------------------------------------------------------------------------ """ try: bw_eff except NameError: raise NameError('Effective bandwidth must be specified') else: if not isinstance(bw_eff, (int, float, list, NP.ndarray)): raise TypeError('Value of effective bandwidth must be a scalar, list or numpy array') bw_eff = NP.asarray(bw_eff).reshape(-1) if NP.any(bw_eff <= 0.0): raise ValueError('All values in effective bandwidth must be strictly positive') if freq_center is None: freq_center = NP.asarray(self.f[self.f.size/2]).reshape(-1) elif isinstance(freq_center, (int, float, list, NP.ndarray)): freq_center = NP.asarray(freq_center).reshape(-1) if NP.any((freq_center <= self.f.min()) | (freq_center >= self.f.max())): raise ValueError('Value(s) of frequency center(s) must lie strictly inside the observing band') else: raise TypeError('Values(s) of frequency center must be scalar, list or numpy array') if (bw_eff.size == 1) and (freq_center.size > 1): bw_eff = NP.repeat(bw_eff, freq_center.size) elif (bw_eff.size > 1) and (freq_center.size == 1): freq_center = NP.repeat(freq_center, bw_eff.size) elif bw_eff.size != freq_center.size: raise ValueError('Effective bandwidth(s) and frequency center(s) must have same number of elements') if shape is not None: if not isinstance(shape, str): raise TypeError('Window shape must be a string') if shape not in ['rect', 'bhw', 'bnw', 'RECT', 'BHW', 'BNW']: raise ValueError('Invalid value for window shape specified.') else: shape = 'rect' if fftpow is None: fftpow = 1.0 else: if not isinstance(fftpow, (int, float)): raise TypeError('Power to raise window FFT by must be a scalar value.') if fftpow < 0.0: raise ValueError('Power for raising FFT of window by must be positive.') if pad is None: pad = 1.0 else: if not isinstance(pad, (int, float)): raise TypeError('pad fraction must be a scalar value.') if pad < 0.0: pad = 0.0 if verbose: print('\tPad fraction found to be negative. 
Resetting to 0.0 (no padding will be applied).') if not isinstance(datapool, str): raise TypeError('Input datapool must be a string') if datapool.lower() not in ['prelim']: raise ValueError('Specified datapool not supported') if visscaleinfo is not None: if not isinstance(visscaleinfo, dict): raise TypeError('Input visscaleinfo must be a dictionary') if 'vis' not in visscaleinfo: raise KeyError('Input visscaleinfo does not contain key "vis"') if not isinstance(visscaleinfo['vis'], RI.InterferometerArray): if 'lst' not in visscaleinfo: raise KeyError('Input visscaleinfo does not contain key "lst"') lst_vis = visscaleinfo['lst'] * 15.0 if not isinstance(visscaleinfo['vis'], (NP.ndarray,MA.MaskedArray)): raise TypeError('Input visibilities must be a numpy or a masked array') if not isinstance(visscaleinfo['vis'], MA.MaskedArray): visscaleinfo['vis'] = MA.array(visscaleinfo['vis'], mask=NP.isnan(visscaleinfo['vis'])) vistriad = MA.copy(visscaleinfo['vis']) else: if 'bltriplet' not in visscaleinfo: raise KeyError('Input dictionary visscaleinfo does not contain key "bltriplet"') blind, blrefind, dbl = LKP.find_1NN(visscaleinfo['vis'].baselines, visscaleinfo['bltriplet'], distance_ULIM=0.2, remove_oob=True) if blrefind.size != 3: blind_missing = NP.setdiff1d(NP.arange(3), blind, assume_unique=True) blind_next, blrefind_next, dbl_next = LKP.find_1NN(visscaleinfo['vis'].baselines, -1*visscaleinfo['bltriplet'][blind_missing,:], distance_ULIM=0.2, remove_oob=True) if blind_next.size + blind.size != 3: raise ValueError('Exactly three baselines were not found in the reference baselines') else: blind = NP.append(blind, blind_missing[blind_next]) blrefind = NP.append(blrefind, blrefind_next) else: blind_missing = [] vistriad = NP.transpose(visscaleinfo['vis'].skyvis_freq[blrefind,:,:], (0,2,1)) if len(blind_missing) > 0: vistriad[-blrefind_next.size:,:,:] = vistriad[-blrefind_next.size:,:,:].conj() vistriad = MA.array(vistriad, mask=NP.isnan(vistriad)) lst_vis = visscaleinfo['vis'].lst viswts = MA.array(NP.ones_like(vistriad.data), mask=vistriad.mask, dtype=NP.float) lst_out = self.cPhase.cpinfo['processed']['prelim']['lstbins'] * 15.0 if lst_vis.size == 1: # Apply the visibility scaling from one reference LST to all LST vis_ref = vistriad * NP.ones(lst_out.size).reshape(1,-1,1) wts_ref = viswts * NP.ones(lst_out.size).reshape(1,-1,1) else: vis_ref, wts_ref = OPS.interpolate_masked_array_1D(vistriad, viswts, 1, visscaleinfo['smoothinfo'], inploc=lst_vis, outloc=lst_out) if not isinstance(method, str): raise TypeError('Input method must be a string') if method.lower() not in ['fft', 'nufft']: raise ValueError('Specified FFT method not supported') if not isinstance(apply_flags, bool): raise TypeError('Input apply_flags must be boolean') flagwts = 1.0 visscale = 1.0 if datapool.lower() == 'prelim': if method.lower() == 'fft': freq_wts = NP.empty((bw_eff.size, self.f.size), dtype=NP.float_) # nspw x nchan frac_width = DSP.window_N2width(n_window=None, shape=shape, fftpow=fftpow, area_normalize=False, power_normalize=True) window_loss_factor = 1 / frac_width n_window = NP.round(window_loss_factor * bw_eff / self.df).astype(NP.int) ind_freq_center, ind_channels, dfrequency = LKP.find_1NN(self.f.reshape(-1,1), freq_center.reshape(-1,1), distance_ULIM=0.51*self.df, remove_oob=True) sortind = NP.argsort(ind_channels) ind_freq_center = ind_freq_center[sortind] ind_channels = ind_channels[sortind] dfrequency = dfrequency[sortind] n_window = n_window[sortind] for i,ind_chan in enumerate(ind_channels): window = 
NP.sqrt(frac_width * n_window[i]) * DSP.window_fftpow(n_window[i], shape=shape, fftpow=fftpow, centering=True, peak=None, area_normalize=False, power_normalize=True) window_chans = self.f[ind_chan] + self.df * (NP.arange(n_window[i]) - int(n_window[i]/2)) ind_window_chans, ind_chans, dfreq = LKP.find_1NN(self.f.reshape(-1,1), window_chans.reshape(-1,1), distance_ULIM=0.51*self.df, remove_oob=True) sind = NP.argsort(ind_window_chans) ind_window_chans = ind_window_chans[sind] ind_chans = ind_chans[sind] dfreq = dfreq[sind] window = window[ind_window_chans] window = NP.pad(window, ((ind_chans.min(), self.f.size-1-ind_chans.max())), mode='constant', constant_values=((0.0,0.0))) freq_wts[i,:] = window npad = int(self.f.size * pad) lags = DSP.spectral_axis(self.f.size + npad, delx=self.df, use_real=False, shift=True) result = {'freq_center': freq_center, 'shape': shape, 'freq_wts': freq_wts, 'bw_eff': bw_eff, 'fftpow': fftpow, 'npad': npad, 'lags': lags, 'lag_corr_length': self.f.size / NP.sum(freq_wts, axis=-1), 'whole': {'dspec': {'twts': self.cPhase.cpinfo['processed'][datapool]['wts']}}, 'residual': {'dspec': {'twts': self.cPhase.cpinfo['processed'][datapool]['wts']}}, 'errinfo': {'dspec0': {'twts': self.cPhase.cpinfo['errinfo']['wts']['0']}, 'dspec1': {'twts': self.cPhase.cpinfo['errinfo']['wts']['1']}}, 'submodel': {}} if visscaleinfo is not None: visscale = NP.nansum(NP.transpose(vis_ref[NP.newaxis,NP.newaxis,:,:,:], axes=(0,3,1,2,4)) * freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:], axis=-1, keepdims=True) / NP.nansum(freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:], axis=-1, keepdims=True) # nspw x nlst x (ndays=1) x (nbl=3) x (nchan=1) visscale = NP.sqrt(1.0/NP.nansum(1/NP.abs(visscale)**2, axis=-2, keepdims=True)) # nspw x nlst x (ndays=1) x (ntriads=1) x (nchan=1) for dpool in ['errinfo', 'prelim', 'submodel', 'residual']: if dpool.lower() == 'errinfo': for diffind in range(2): if apply_flags: flagwts = NP.copy(self.cPhase.cpinfo['errinfo']['wts']['{0}'.format(diffind)].data) flagwts = flagwts[NP.newaxis,...] # nlst x ndays x ntriads x nchan --> (nspw=1) x nlst x ndays x ntriads x nchan flagwts = 1.0 * flagwts / NP.mean(flagwts, axis=-1, keepdims=True) # (nspw=1) x nlst x ndays x ntriads x nchan for stat in self.cPhase.cpinfo[dpool]['eicp_diff']['{0}'.format(diffind)]: eicp = NP.copy(self.cPhase.cpinfo[dpool]['eicp_diff']['{0}'.format(diffind)][stat].data) # Minimum shape as stored # eicp = NP.copy(self.cPhase.cpinfo[dpool]['eicp_diff']['{0}'.format(diffind)][stat].filled(0.0)) # Minimum shape as stored eicp = NP.broadcast_to(eicp, self.cPhase.cpinfo[dpool]['eicp_diff']['{0}'.format(diffind)][stat].shape) # Broadcast to final shape eicp = eicp[NP.newaxis,...] # nlst x ndayscomb x ntriads x nchan --> (nspw=1) x nlst x ndayscomb x ntriads x nchan ndim_padtuple = [(0,0)]*(eicp.ndim-1) + [(0,npad)] # [(0,0), (0,0), (0,0), (0,0), (0,npad)] result[dpool]['dspec{0}'.format(diffind)][stat] = DSP.FT1D(NP.pad(eicp*flagwts*freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:]*visscale.filled(NP.nan), ndim_padtuple, mode='constant'), ax=-1, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df else: if dpool in self.cPhase.cpinfo['processed']: if apply_flags: flagwts = NP.copy(self.cPhase.cpinfo['processed'][datapool]['wts'].data) flagwts = flagwts[NP.newaxis,...] 
# nlst x ndays x ntriads x nchan --> (nspw=1) x nlst x ndays x ntriads x nchan flagwts = 1.0 * flagwts / NP.mean(flagwts, axis=-1, keepdims=True) # (nspw=1) x nlst x ndays x ntriads x nchan if dpool == 'submodel': eicp = NP.copy(self.cPhase.cpinfo['processed'][dpool]['eicp'].data) # Minimum shape as stored # eicp = NP.copy(self.cPhase.cpinfo['processed'][dpool]['eicp'].filled(1.0)) # Minimum shape as stored eicp = NP.broadcast_to(eicp, self.cPhase.cpinfo['processed'][datapool]['eicp']['mean'].shape) # Broadcast to final shape eicp = eicp[NP.newaxis,...] # nlst x ndays x ntriads x nchan --> (nspw=1) x nlst x ndays x ntriads x nchan ndim_padtuple = [(0,0)]*(eicp.ndim-1) + [(0,npad)] # [(0,0), (0,0), (0,0), (0,0), (0,npad)] result[dpool]['dspec'] = DSP.FT1D(NP.pad(eicp*flagwts*freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:]*visscale.filled(NP.nan), ndim_padtuple, mode='constant'), ax=-1, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df else: for key in self.cPhase.cpinfo['processed'][dpool]['eicp']: eicp = NP.copy(self.cPhase.cpinfo['processed'][dpool]['eicp'][key].data) # eicp = NP.copy(self.cPhase.cpinfo['processed'][dpool]['eicp'][key].filled(1.0)) eicp = eicp[NP.newaxis,...] # nlst x ndays x ntriads x nchan --> (nspw=1) x nlst x ndays x ntriads x nchan ndim_padtuple = [(0,0)]*(eicp.ndim-1) + [(0,npad)] # [(0,0), (0,0), (0,0), (0,0), (0,npad)] if dpool == 'prelim': result['whole']['dspec'][key] = DSP.FT1D(NP.pad(eicp*flagwts*freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:]*visscale.filled(NP.nan), ndim_padtuple, mode='constant'), ax=-1, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df else: result[dpool]['dspec'][key] = DSP.FT1D(NP.pad(eicp*flagwts*freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:]*visscale.filled(NP.nan), ndim_padtuple, mode='constant'), ax=-1, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df result['lag_kernel'] = DSP.FT1D(NP.pad(flagwts*freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:], ndim_padtuple, mode='constant'), ax=-1, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df self.cPhaseDS = result if resample: result_resampled = copy.deepcopy(result) downsample_factor = NP.min((self.f.size + npad) * self.df / bw_eff) result_resampled['lags'] = DSP.downsampler(result_resampled['lags'], downsample_factor, axis=-1, method='interp', kind='linear') result_resampled['lag_kernel'] = DSP.downsampler(result_resampled['lag_kernel'], downsample_factor, axis=-1, method='interp', kind='linear') for dpool in ['errinfo', 'prelim', 'submodel', 'residual']: if dpool.lower() == 'errinfo': for diffind in self.cPhase.cpinfo[dpool]['eicp_diff']: for key in self.cPhase.cpinfo[dpool]['eicp_diff'][diffind]: result_resampled[dpool]['dspec'+diffind][key] = DSP.downsampler(result_resampled[dpool]['dspec'+diffind][key], downsample_factor, axis=-1, method='FFT') if dpool in self.cPhase.cpinfo['processed']: if dpool == 'submodel': result_resampled[dpool]['dspec'] = DSP.downsampler(result_resampled[dpool]['dspec'], downsample_factor, axis=-1, method='FFT') else: for key in self.cPhase.cpinfo['processed'][datapool]['eicp']: if dpool == 'prelim': result_resampled['whole']['dspec'][key] = DSP.downsampler(result_resampled['whole']['dspec'][key], downsample_factor, axis=-1, method='FFT') else: result_resampled[dpool]['dspec'][key] = DSP.downsampler(result_resampled[dpool]['dspec'][key], downsample_factor, axis=-1, method='FFT') self.cPhaseDS_resampled = result_resampled return result_resampled else: return result 
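    # Illustrative usage sketch for FT() (comments only, not executed; the
    # instance name cpObj and the specific subband parameters below are
    # hypothetical choices, not values taken from this module):
    #
    #     cpDSobj = ClosurePhaseDelaySpectrum(cpObj)  # cpObj: a ClosurePhase instance
    #     ds = cpDSobj.FT(bw_eff=10e6, freq_center=[150e6, 160e6], shape='bhw',
    #                     fftpow=2.0, pad=1.0, datapool='prelim', method='fft',
    #                     resample=True, apply_flags=True)
    #     # ds['lags'] holds the delays (roughly k_parallel), and
    #     # ds['whole']['dspec']['median'] has shape (nspw, nlst, ndays, ntriads, nlags)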
############################################################################ def subset(self, selection=None): """ ------------------------------------------------------------------------ Return triad and time indices to select a subset of processed data Inputs: selection [NoneType or dictionary] Selection parameters based on which triad, LST, and day indices will be returned. If set to None (default), all triad, LST, and day indices will be returned. Otherwise it must be a dictionary with the following keys and values: 'triads' [NoneType or list of 3-element tuples] If set to None (default), indices of all triads are returned. Otherwise, the specific triads must be specified such as [(1,2,3), (1,2,4), ...] and their indices will be returned 'lst' [NoneType, list or numpy array] If set to None (default), indices of all LST are returned. Otherwise must be a list or numpy array containing indices to LST. 'days' [NoneType, list or numpy array] If set to None (default), indices of all days are returned. Otherwise must be a list or numpy array containing indices to days. Outputs: Tuple (triad_ind, lst_ind, day_ind, day_ind_eicpdiff) containing the triad, LST, day, and day-pair (for subsample differences) indices, each as a numpy array ------------------------------------------------------------------------ """ if selection is None: selection = {} else: if not isinstance(selection, dict): raise TypeError('Input selection must be a dictionary') triads = map(tuple, self.cPhase.cpinfo['raw']['triads']) if 'triads' not in selection: selection['triads'] = triads if selection['triads'] is None: selection['triads'] = triads triad_ind = [triads.index(triad) for triad in selection['triads']] triad_ind = NP.asarray(triad_ind) lst_ind = None if 'lst' not in selection: if 'prelim' in self.cPhase.cpinfo['processed']: lst_ind = NP.arange(self.cPhase.cpinfo['processed']['prelim']['wts'].shape[0]) else: if selection['lst'] is None: if 'prelim' in self.cPhase.cpinfo['processed']: lst_ind = NP.arange(self.cPhase.cpinfo['processed']['prelim']['wts'].shape[0]) elif isinstance(selection['lst'], (list,NP.ndarray)): if 'prelim' in self.cPhase.cpinfo['processed']: lst_ind = selection['lst'] if NP.any(NP.logical_or(lst_ind < 0, lst_ind >= self.cPhase.cpinfo['processed']['prelim']['wts'].shape[0])): raise ValueError('Input processed lst indices out of bounds') else: raise TypeError('Wrong type for processed lst indices') if lst_ind is None: raise ValueError('LST index selection could not be performed') day_ind = None day_ind_eicpdiff = None if 'days' not in selection: if 'prelim' in self.cPhase.cpinfo['processed']: day_ind = NP.arange(self.cPhase.cpinfo['processed']['prelim']['wts'].shape[1]) if 'errinfo' in self.cPhase.cpinfo: day_ind_eicpdiff = NP.arange(len(self.cPhase.cpinfo['errinfo']['list_of_pair_of_pairs'])) else: if selection['days'] is None: if 'prelim' in self.cPhase.cpinfo['processed']: day_ind = NP.arange(self.cPhase.cpinfo['processed']['prelim']['wts'].shape[1]) if 'errinfo' in self.cPhase.cpinfo: day_ind_eicpdiff = NP.arange(len(self.cPhase.cpinfo['errinfo']['list_of_pair_of_pairs'])) elif isinstance(selection['days'], (list,NP.ndarray)): if 'prelim' in self.cPhase.cpinfo['processed']: day_ind = selection['days'] if NP.any(NP.logical_or(day_ind < 0, day_ind >= self.cPhase.cpinfo['processed']['prelim']['wts'].shape[1])): raise ValueError('Input processed day indices out of bounds') if 'errinfo' in self.cPhase.cpinfo: day_ind_eicpdiff = [i for i,item in
enumerate(self.cPhase.cpinfo['errinfo']['list_of_pair_of_pairs']) if len(set(item)-set(selection['days']))==0] else: raise TypeError('Wrong type for processed day indices') if day_ind is None: raise ValueError('Day index selection could not be performed') return (triad_ind, lst_ind, day_ind, day_ind_eicpdiff) ############################################################################ def compute_power_spectrum(self, cpds=None, selection=None, autoinfo=None, xinfo=None, cosmo=cosmo100, units='K', beamparms=None): """ ------------------------------------------------------------------------ Compute power spectrum of closure phase data. It is in units of Mpc/h Inputs: cpds [dictionary] A dictionary that contains the 'oversampled' (if resample=False) and/or 'resampled' (if resample=True) delay spectrum information. If it is not specified the attributes cPhaseDS['processed'] and cPhaseDS_resampled['processed'] are used. Under each of these keys, it holds a dictionary that has the following keys and values: 'freq_center' [numpy array] contains the center frequencies (in Hz) of the frequency subbands of the subband delay spectra. It is of size n_win. It is roughly equivalent to redshift(s) 'freq_wts' [numpy array] Contains frequency weights applied on each frequency sub-band during the subband delay transform. It is of size n_win x nchan. 'bw_eff' [numpy array] contains the effective bandwidths (in Hz) of the subbands being delay transformed. It is of size n_win. It is roughly equivalent to width in redshift or along line-of-sight 'shape' [string] shape of the window function applied. Accepted values are 'rect' (rectangular), 'bhw' (Blackman-Harris), 'bnw' (Blackman-Nuttall). 'fftpow' [scalar] the power to which the FFT of the window was raised. The value is be a positive scalar with default = 1.0 'npad' [scalar] Numbber of zero-padded channels before performing the subband delay transform. 'lags' [numpy array] lags of the subband delay spectra after padding in frequency during the transform. It is of size nlags. The lags roughly correspond to k_parallel. 'lag_kernel' [numpy array] delay transform of the frequency weights under the key 'freq_wts'. It is of size n_bl x n_win x nlags x n_t. 'lag_corr_length' [numpy array] It is the correlation timescale (in pixels) of the subband delay spectra. It is proportional to inverse of effective bandwidth. It is of size n_win. The unit size of a pixel is determined by the difference between adjacent pixels in lags under key 'lags' which in turn is effectively inverse of the effective bandwidth of the subband specified in bw_eff 'processed' [dictionary] Contains the following keys and values: 'dspec' [dictionary] Contains the following keys and values: 'twts' [numpy array] Weights from time-based flags that went into time-averaging. Shape=(ntriads,npol,nchan,nt) 'mean' [numpy array] Delay spectrum of closure phases based on their mean across time intervals. Shape=(nspw,npol,nt,ntriads,nlags) 'median' [numpy array] Delay spectrum of closure phases based on their median across time intervals. Shape=(nspw,npol,nt,ntriads,nlags) selection [NoneType or dictionary] Selection parameters based on which triad, LST, and day indices will be returned. If set to None (default), all triad, LST, and day indices will be returned. Otherwise it must be a dictionary with the following keys and values: 'triads' [NoneType or list of 3-element tuples] If set to None (default), indices of all triads are returned. 
Otherwise, the specific triads must be specified such as [(1,2,3), (1,2,4), ...] and their indices will be returned 'lst' [NoneType, list or numpy array] If set to None (default), indices of all LST are returned. Otherwise must be a list or numpy array containing indices to LST. 'days' [NoneType, list or numpy array] If set to None (default), indices of all days are returned. Otherwise must be a list or numpy array containing indices to days. autoinfo [NoneType or dictionary] Specifies parameters for processing before power spectrum in auto or cross modes. If set to None, a dictionary will be created with the default values as described below. The dictionary must have the following keys and values: 'axes' [NoneType/int/list/tuple/numpy array] Axes that will be averaged coherently before squaring (for auto) or cross-multiplying (for cross) power spectrum. If set to None (default), no axes are averaged coherently. If set to int, list, tuple or numpy array, those axes will be averaged coherently after applying the weights specified under key 'wts' along those axes. 1=lst, 2=days, 3=triads. 'wts' [NoneType/list/numpy array] If not provided (equivalent to setting it to None) or set to None (default), it is set to a one element list which is a one element numpy array of unity. Otherwise, it must be a list of same number of elements as in key 'axes' and each of these must be a numpy broadcast compatible array corresponding to each of the axis specified in 'axes' xinfo [NoneType or dictionary] Specifies parameters for processing cross power spectrum. If set to None, a dictionary will be created with the default values as described below. The dictionary must have the following keys and values: 'axes' [NoneType/int/list/tuple/numpy array] Axes over which power spectrum will be computed incoherently by cross- multiplication. If set to None (default), no cross- power spectrum is computed. If set to int, list, tuple or numpy array, cross-power over those axes will be computed incoherently by cross-multiplication. The cross-spectrum over these axes will be computed after applying the pre- and post- cross-multiplication weights specified in key 'wts'. 1=lst, 2=days, 3=triads. 'collapse_axes' [list] The axes that will be collpased after the cross-power matrix is produced by cross-multiplication. If this key is not set, it will be initialized to an empty list (default), in which case none of the axes is collapsed and the full cross-power matrix will be output. it must be a subset of values under key 'axes'. This will reduce it from a square matrix along that axis to collapsed values along each of the leading diagonals. 1=lst, 2=days, 3=triads. 'dlst' [scalar] LST interval (in mins) or difference between LST pairs which will be determined and used for cross-power spectrum. Will only apply if values under 'axes' contains the LST axis(=1). 'dlst_range' [scalar, numpy array, or NoneType] Specifies the LST difference(s) in minutes that are to be used in the computation of cross-power spectra. If a scalar, only the diagonal consisting of pairs with that LST difference will be computed. If a numpy array, those diagonals consisting of pairs with that LST difference will be computed. If set to None (default), the main diagonal (LST difference of 0) and the first off-main diagonal (LST difference of 1 unit) corresponding to pairs with 0 and 1 unit LST difference are computed. Applies only if key 'axes' contains LST axis (=1). 
'avgcov' [boolean] It specifies if the collapse of square covariance matrix is to be collapsed further to a single number after applying 'postX' weights. If not set or set to False (default), this late stage collapse will not be performed. Otherwise, it will be averaged in a weighted average sense where the 'postX' weights would have already been applied during the collapsing operation 'wts' [NoneType or Dictionary] If not set, a default dictionary (see default values below) will be created. It must have the follwoing keys and values: 'preX' [list of numpy arrays] It contains pre-cross- multiplication weights. It is a list where each element in the list is a numpy array, and the number of elements in the list must match the number of entries in key 'axes'. If 'axes' is set None, 'preX' may be set to a list with one element which is a numpy array of ones. The number of elements in each of the numpy arrays must be numpy broadcastable into the number of elements along that axis in the delay spectrum. 'preXnorm' [boolean] If False (default), no normalization is done after the application of weights. If set to True, the delay spectrum will be normalized by the sum of the weights. 'postX' [list of numpy arrays] It contains post-cross- multiplication weights. It is a list where each element in the list is a numpy array, and the number of elements in the list must match the number of entries in key 'axes'. If 'axes' is set None, 'preX' may be set to a list with one element which is a numpy array of ones. The number of elements in each of the numpy arrays must be numpy broadcastable into the number of elements along that axis in the delay spectrum. 'preXnorm' [boolean] If False (default), no normalization is done after the application of 'preX' weights. If set to True, the delay spectrum will be normalized by the sum of the weights. 'postXnorm' [boolean] If False (default), no normalization is done after the application of postX weights. If set to True, the delay cross power spectrum will be normalized by the sum of the weights. cosmo [instance of cosmology class from astropy] An instance of class FLRW or default_cosmology of astropy cosmology module. Default uses Planck 2015 cosmology, with H0=100 h km/s/Mpc units [string] Specifies the units of output power spectum. Accepted values are 'Jy' and 'K' (default)) and the power spectrum will be in corresponding squared units. Output: Dictionary with the keys 'triads' ((ntriads,3) array), 'triads_ind', ((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst' ((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,) array), 'days' ((ndays,) array), 'day_ind' ((ndays,) array), 'dday' ((ndays,) array), 'oversampled' and 'resampled' corresponding to whether resample was set to False or True in call to member function FT(). Values under keys 'triads_ind' and 'lst_ind' are numpy array corresponding to triad and time indices used in selecting the data. Values under keys 'oversampled' and 'resampled' each contain a dictionary with the following keys and values: 'z' [numpy array] Redshifts corresponding to the band centers in 'freq_center'. It has shape=(nspw,) 'lags' [numpy array] Delays (in seconds). It has shape=(nlags,). 'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding to 'lags'. It has shape=(nspw,nlags) 'freq_center' [numpy array] contains the center frequencies (in Hz) of the frequency subbands of the subband delay spectra. It is of size n_win. 
It is roughly equivalent to redshift(s) 'freq_wts' [numpy array] Contains frequency weights applied on each frequency sub-band during the subband delay transform. It is of size n_win x nchan. 'bw_eff' [numpy array] contains the effective bandwidths (in Hz) of the subbands being delay transformed. It is of size n_win. It is roughly equivalent to width in redshift or along line-of-sight 'shape' [string] shape of the frequency window function applied. Usual values are 'rect' (rectangular), 'bhw' (Blackman-Harris), 'bnw' (Blackman-Nuttall). 'fftpow' [scalar] the power to which the FFT of the window was raised. The value is be a positive scalar with default = 1.0 'lag_corr_length' [numpy array] It is the correlation timescale (in pixels) of the subband delay spectra. It is proportional to inverse of effective bandwidth. It is of size n_win. The unit size of a pixel is determined by the difference between adjacent pixels in lags under key 'lags' which in turn is effectively inverse of the effective bandwidth of the subband specified in bw_eff It further contains 3 keys named 'whole', 'submodel', and 'residual' each of which is a dictionary. 'whole' contains power spectrum info about the input closure phases. 'submodel' contains power spectrum info about the model that will have been subtracted (as closure phase) from the 'whole' model. 'residual' contains power spectrum info about the closure phases obtained as a difference between 'whole' and 'submodel'. It contains the following keys and values: 'mean' [numpy array] Delay power spectrum incoherently estiamted over the axes specified in xinfo['axes'] using the 'mean' key in input cpds or attribute cPhaseDS['processed']['dspec']. It has shape that depends on the combination of input parameters. See examples below. If both collapse_axes and avgcov are not set, those axes will be replaced with square covariance matrices. If collapse_axes is provided but avgcov is False, those axes will be of shape 2*Naxis-1. 'median' [numpy array] Delay power spectrum incoherently averaged over the axes specified in incohax using the 'median' key in input cpds or attribute cPhaseDS['processed']['dspec']. It has shape that depends on the combination of input parameters. See examples below. If both collapse_axes and avgcov are not set, those axes will be replaced with square covariance matrices. If collapse_axes is provided bu avgcov is False, those axes will be of shape 2*Naxis-1. 'diagoffsets' [dictionary] Same keys corresponding to keys under 'collapse_axes' in input containing the diagonal offsets for those axes. If 'avgcov' was set, those entries will be removed from 'diagoffsets' since all the leading diagonal elements have been collapsed (averaged) further. Value under each key is a numpy array where each element in the array corresponds to the index of that leading diagonal. This should match the size of the output along that axis in 'mean' or 'median' above. 'diagweights' [dictionary] Each key is an axis specified in collapse_axes and the value is a numpy array of weights corresponding to the diagonal offsets in that axis. 'axesmap' [dictionary] If covariance in cross-power is calculated but is not collapsed, the number of dimensions in the output will have changed. This parameter tracks where the original axis is now placed. The keys are the original axes that are involved in incoherent cross-power, and the values are the new locations of those original axes in the output. 
'nsamples_incoh' [integer] Number of incoherent samples in producing the power spectrum 'nsamples_coh' [integer] Number of coherent samples in producing the power spectrum Examples: (1) Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags) autoinfo = {'axes': 2, 'wts': None} xinfo = {'axes': None, 'avgcov': False, 'collapse_axes': [], 'wts':{'preX': None, 'preXnorm': False, 'postX': None, 'postXnorm': False}} Output delay power spectrum has shape (Nspw, Nlst, 1, Ntriads, Nlags) (2) Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags) autoinfo = {'axes': 2, 'wts': None} xinfo = {'axes': [1,3], 'avgcov': False, 'collapse_axes': [], 'wts':{'preX': None, 'preXnorm': False, 'postX': None, 'postXnorm': False}, 'dlst_range': None} Output delay power spectrum has shape (Nspw, 2, Nlst, 1, Ntriads, Ntriads, Nlags) diagoffsets = {1: NP.arange(n_dlst_range)}, axesmap = {1: [1,2], 3: [4,5]} (3) Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags) autoinfo = {'axes': 2, 'wts': None} xinfo = {'axes': [1,3], 'avgcov': False, 'collapse_axes': [3], 'dlst_range': [0.0, 1.0, 2.0]} Output delay power spectrum has shape (Nspw, 3, Nlst, 1, 2*Ntriads-1, Nlags) diagoffsets = {1: NP.arange(n_dlst_range), 3: NP.arange(-Ntriads,Ntriads)}, axesmap = {1: [1,2], 3: [4]} (4) Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags) autoinfo = {'axes': None, 'wts': None} xinfo = {'axes': [1,3], 'avgcov': False, 'collapse_axes': [1,3], 'dlst_range': [1.0, 2.0, 3.0, 4.0]} Output delay power spectrum has shape (Nspw, 4, Ndays, 2*Ntriads-1, Nlags) diagoffsets = {1: NP.arange(n_dlst_range), 3: NP.arange(-Ntriads,Ntriads)}, axesmap = {1: [1], 3: [3]} (5) Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags) autoinfo = {'axes': None, 'wts': None} xinfo = {'axes': [1,3], 'avgcov': True, 'collapse_axes': [3], 'dlst_range': None} Output delay power spectrum has shape (Nspw, 2, Nlst, Ndays, 1, Nlags) diagoffsets = {1: NP.arange(n_dlst_range)}, axesmap = {1: [1,2], 3: [4]} (6) Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags) autoinfo = {'axes': None, 'wts': None} xinfo = {'axes': [1,3], 'avgcov': True, 'collapse_axes': []} Output delay power spectrum has shape (Nspw, 1, Ndays, 1, Nlags) diagoffsets = {}, axesmap = {1: [1], 3: [3]} ------------------------------------------------------------------------ """ if not isinstance(units,str): raise TypeError('Input parameter units must be a string') if units.lower() == 'k': if not isinstance(beamparms, dict): raise TypeError('Input beamparms must be a dictionary') if 'freqs' not in beamparms: beamparms['freqs'] = self.f beamparms_orig = copy.deepcopy(beamparms) if autoinfo is None: autoinfo = {'axes': None, 'wts': [NP.ones(1, dtpye=NP.float)]} elif not isinstance(autoinfo, dict): raise TypeError('Input autoinfo must be a dictionary') if 'axes' not in autoinfo: autoinfo['axes'] = None else: if autoinfo['axes'] is not None: if not isinstance(autoinfo['axes'], (list,tuple,NP.ndarray,int)): raise TypeError('Value under key axes in input autoinfo must be an integer, list, tuple or numpy array') else: autoinfo['axes'] = NP.asarray(autoinfo['axes']).reshape(-1) if 'wts' not in autoinfo: if autoinfo['axes'] is not None: autoinfo['wts'] = [NP.ones(1, dtype=NP.float)] * len(autoinfo['axes']) else: autoinfo['wts'] = [NP.ones(1, dtype=NP.float)] else: if autoinfo['axes'] is not None: if not isinstance(autoinfo['wts'], list): raise TypeError('wts in input autoinfo must be a list of numpy arrays') else: if len(autoinfo['wts']) != 
len(autoinfo['axes']): raise ValueError('Input list of wts must be same as length of autoinfo axes') else: autoinfo['wts'] = [NP.ones(1, dtype=NP.float)] if xinfo is None: xinfo = {'axes': None, 'wts': {'preX': [NP.ones(1, dtpye=NP.float)], 'postX': [NP.ones(1, dtpye=NP.float)], 'preXnorm': False, 'postXnorm': False}} elif not isinstance(xinfo, dict): raise TypeError('Input xinfo must be a dictionary') if 'axes' not in xinfo: xinfo['axes'] = None else: if not isinstance(xinfo['axes'], (list,tuple,NP.ndarray,int)): raise TypeError('Value under key axes in input xinfo must be an integer, list, tuple or numpy array') else: xinfo['axes'] = NP.asarray(xinfo['axes']).reshape(-1) if 'wts' not in xinfo: xinfo['wts'] = {} for xkey in ['preX', 'postX']: if xinfo['axes'] is not None: xinfo['wts'][xkey] = [NP.ones(1, dtype=NP.float)] * len(xinfo['axes']) else: xinfo['wts'][xkey] = [NP.ones(1, dtype=NP.float)] xinfo['wts']['preXnorm'] = False xinfo['wts']['postXnorm'] = False else: if xinfo['axes'] is not None: if not isinstance(xinfo['wts'], dict): raise TypeError('wts in input xinfo must be a dictionary') for xkey in ['preX', 'postX']: if not isinstance(xinfo['wts'][xkey], list): raise TypeError('{0} wts in input xinfo must be a list of numpy arrays'.format(xkey)) else: if len(xinfo['wts'][xkey]) != len(xinfo['axes']): raise ValueError('Input list of {0} wts must be same as length of xinfo axes'.format(xkey)) else: for xkey in ['preX', 'postX']: xinfo['wts'][xkey] = [NP.ones(1, dtype=NP.float)] if 'preXnorm' not in xinfo['wts']: xinfo['wts']['preXnorm'] = False if 'postXnorm' not in xinfo['wts']: xinfo['wts']['postXnorm'] = False if not isinstance(xinfo['wts']['preXnorm'], NP.bool): raise TypeError('preXnorm in input xinfo must be a boolean') if not isinstance(xinfo['wts']['postXnorm'], NP.bool): raise TypeError('postXnorm in input xinfo must be a boolean') if 'avgcov' not in xinfo: xinfo['avgcov'] = False if not isinstance(xinfo['avgcov'], NP.bool): raise TypeError('avgcov under input xinfo must be boolean') if 'collapse_axes' not in xinfo: xinfo['collapse_axes'] = [] if not isinstance(xinfo['collapse_axes'], (int,list,tuple,NP.ndarray)): raise TypeError('collapse_axes under input xinfo must be an integer, tuple, list or numpy array') else: xinfo['collapse_axes'] = NP.asarray(xinfo['collapse_axes']).reshape(-1) if (autoinfo['axes'] is not None) and (xinfo['axes'] is not None): if NP.intersect1d(autoinfo['axes'], xinfo['axes']).size > 0: raise ValueError("Inputs autoinfo['axes'] and xinfo['axes'] must have no intersection") cohax = autoinfo['axes'] if cohax is None: cohax = [] incohax = xinfo['axes'] if incohax is None: incohax = [] if selection is None: selection = {'triads': None, 'lst': None, 'days': None} else: if not isinstance(selection, dict): raise TypeError('Input selection must be a dictionary') if cpds is None: cpds = {} sampling = ['oversampled', 'resampled'] for smplng in sampling: if smplng == 'oversampled': cpds[smplng] = copy.deepcopy(self.cPhaseDS) else: cpds[smplng] = copy.deepcopy(self.cPhaseDS_resampled) triad_ind, lst_ind, day_ind, day_ind_eicpdiff = self.subset(selection=selection) result = {'triads': self.cPhase.cpinfo['raw']['triads'][triad_ind], 'triads_ind': triad_ind, 'lst': self.cPhase.cpinfo['processed']['prelim']['lstbins'][lst_ind], 'lst_ind': lst_ind, 'dlst': self.cPhase.cpinfo['processed']['prelim']['dlstbins'][lst_ind], 'days': self.cPhase.cpinfo['processed']['prelim']['daybins'][day_ind], 'day_ind': day_ind, 'dday': 
self.cPhase.cpinfo['processed']['prelim']['diff_dbins'][day_ind]} dlstbin = NP.mean(self.cPhase.cpinfo['processed']['prelim']['dlstbins']) if 'dlst_range' in xinfo: if xinfo['dlst_range'] is None: dlst_range = None lstshifts = NP.arange(2) # LST index offsets of 0 and 1 are only estimated else: dlst_range = NP.asarray(xinfo['dlst_range']).ravel() / 60.0 # Difference in LST between a pair of LST (in hours) if dlst_range.size == 1: dlst_range = NP.insert(dlst_range, 0, 0.0) lstshifts = NP.arange(max([0, NP.ceil(1.0*dlst_range.min()/dlstbin).astype(NP.int)]), min([NP.ceil(1.0*dlst_range.max()/dlstbin).astype(NP.int), result['lst'].size])) else: dlst_range = None lstshifts = NP.arange(2) # LST index offsets of 0 and 1 are only estimated result['lstXoffsets'] = lstshifts * dlstbin # LST interval corresponding to diagonal offsets created by the LST covariance for smplng in sampling: result[smplng] = {} wl = FCNST.c / (cpds[smplng]['freq_center'] * U.Hz) z = CNST.rest_freq_HI / cpds[smplng]['freq_center'] - 1 dz = CNST.rest_freq_HI / cpds[smplng]['freq_center']**2 * cpds[smplng]['bw_eff'] dkprll_deta = DS.dkprll_deta(z, cosmo=cosmo) kprll = dkprll_deta.reshape(-1,1) * cpds[smplng]['lags'] rz_los = cosmo.comoving_distance(z) # in Mpc/h drz_los = FCNST.c * cpds[smplng]['bw_eff']*U.Hz * (1+z)**2 / (CNST.rest_freq_HI * U.Hz) / (cosmo.H0 * cosmo.efunc(z)) # in Mpc/h if units == 'Jy': jacobian1 = 1 / (cpds[smplng]['bw_eff'] * U.Hz) jacobian2 = drz_los / (cpds[smplng]['bw_eff'] * U.Hz) temperature_from_fluxdensity = 1.0 elif units == 'K': beamparms = copy.deepcopy(beamparms_orig) omega_bw = self.beam3Dvol(beamparms, freq_wts=cpds[smplng]['freq_wts']) jacobian1 = 1 / (omega_bw * U.Hz) # The steradian is present but not explicitly assigned jacobian2 = rz_los**2 * drz_los / (cpds[smplng]['bw_eff'] * U.Hz) temperature_from_fluxdensity = wl**2 / (2*FCNST.k_B) else: raise ValueError('Input value for units invalid') factor = jacobian1 * jacobian2 * temperature_from_fluxdensity**2 result[smplng]['z'] = z result[smplng]['kprll'] = kprll result[smplng]['lags'] = NP.copy(cpds[smplng]['lags']) result[smplng]['freq_center'] = cpds[smplng]['freq_center'] result[smplng]['bw_eff'] = cpds[smplng]['bw_eff'] result[smplng]['shape'] = cpds[smplng]['shape'] result[smplng]['freq_wts'] = cpds[smplng]['freq_wts'] result[smplng]['lag_corr_length'] = cpds[smplng]['lag_corr_length'] for dpool in ['whole', 'submodel', 'residual']: if dpool in cpds[smplng]: result[smplng][dpool] = {} inpshape = list(cpds[smplng]['whole']['dspec']['mean'].shape) inpshape[1] = lst_ind.size inpshape[2] = day_ind.size inpshape[3] = triad_ind.size if len(cohax) > 0: nsamples_coh = NP.prod(NP.asarray(inpshape)[NP.asarray(cohax)]) else: nsamples_coh = 1 if len(incohax) > 0: nsamples = NP.prod(NP.asarray(inpshape)[NP.asarray(incohax)]) nsamples_incoh = nsamples * (nsamples - 1) else: nsamples_incoh = 1 twts_multidim_idx = NP.ix_(lst_ind,day_ind,triad_ind,NP.arange(1)) # shape=(nlst,ndays,ntriads,1) dspec_multidim_idx = NP.ix_(NP.arange(wl.size),lst_ind,day_ind,triad_ind,NP.arange(inpshape[4])) # shape=(nspw,nlst,ndays,ntriads,nchan) max_wt_in_chan = NP.max(NP.sum(cpds[smplng]['whole']['dspec']['twts'].data, axis=(0,1,2))) select_chan = NP.argmax(NP.sum(cpds[smplng]['whole']['dspec']['twts'].data, axis=(0,1,2))) twts = NP.copy(cpds[smplng]['whole']['dspec']['twts'].data[:,:,:,[select_chan]]) # shape=(nlst,ndays,ntriads,nlags=1) if nsamples_coh > 1: awts_shape = tuple(NP.ones(cpds[smplng]['whole']['dspec']['mean'].ndim, dtype=NP.int)) awts = 
NP.ones(awts_shape, dtype=NP.complex) awts_shape = NP.asarray(awts_shape) for caxind,caxis in enumerate(cohax): curr_awts_shape = NP.copy(awts_shape) curr_awts_shape[caxis] = -1 awts = awts * autoinfo['wts'][caxind].reshape(tuple(curr_awts_shape)) for stat in ['mean', 'median']: if dpool == 'submodel': dspec = NP.copy(cpds[smplng][dpool]['dspec'][dspec_multidim_idx]) else: dspec = NP.copy(cpds[smplng][dpool]['dspec'][stat][dspec_multidim_idx]) if nsamples_coh > 1: if stat == 'mean': dspec = NP.sum(twts[twts_multidim_idx][NP.newaxis,...] * awts * dspec[dspec_multidim_idx], axis=cohax, keepdims=True) / NP.sum(twts[twts_multidim_idx][NP.newaxis,...] * awts, axis=cohax, keepdims=True) else: dspec = NP.median(dspec[dspec_multidim_idx], axis=cohax, keepdims=True) if nsamples_incoh > 1: expandax_map = {} wts_shape = tuple(NP.ones(dspec.ndim, dtype=NP.int)) preXwts = NP.ones(wts_shape, dtype=NP.complex) wts_shape = NP.asarray(wts_shape) for incaxind,incaxis in enumerate(xinfo['axes']): curr_wts_shape = NP.copy(wts_shape) curr_wts_shape[incaxis] = -1 preXwts = preXwts * xinfo['wts']['preX'][incaxind].reshape(tuple(curr_wts_shape)) dspec1 = NP.copy(dspec) dspec2 = NP.copy(dspec) preXwts1 = NP.copy(preXwts) preXwts2 = NP.copy(preXwts) for incax in NP.sort(incohax)[::-1]: dspec1 = NP.expand_dims(dspec1, axis=incax) preXwts1 = NP.expand_dims(preXwts1, axis=incax) if incax == 1: preXwts1_outshape = list(preXwts1.shape) preXwts1_outshape[incax+1] = dspec1.shape[incax+1] preXwts1_outshape = tuple(preXwts1_outshape) preXwts1 = NP.broadcast_to(preXwts1, preXwts1_outshape).copy() # For some strange reason the NP.broadcast_to() creates a "read-only" immutable array which is changed to writeable by copy() preXwts2_tmp = NP.expand_dims(preXwts2, axis=incax) preXwts2_shape = NP.asarray(preXwts2_tmp.shape) preXwts2_shape[incax] = lstshifts.size preXwts2_shape[incax+1] = preXwts1_outshape[incax+1] preXwts2_shape = tuple(preXwts2_shape) preXwts2 = NP.broadcast_to(preXwts2_tmp, preXwts2_shape).copy() # For some strange reason the NP.broadcast_to() creates a "read-only" immutable array which is changed to writeable by copy() dspec2_tmp = NP.expand_dims(dspec2, axis=incax) dspec2_shape = NP.asarray(dspec2_tmp.shape) dspec2_shape[incax] = lstshifts.size # dspec2_shape = NP.insert(dspec2_shape, incax, lstshifts.size) dspec2_shape = tuple(dspec2_shape) dspec2 = NP.broadcast_to(dspec2_tmp, dspec2_shape).copy() # For some strange reason the NP.broadcast_to() creates a "read-only" immutable array which is changed to writeable by copy() for lstshiftind, lstshift in enumerate(lstshifts): dspec2[:,lstshiftind,...] = NP.roll(dspec2_tmp[:,0,...], lstshift, axis=incax) dspec2[:,lstshiftind,:lstshift,...] = NP.nan preXwts2[:,lstshiftind,...] = NP.roll(preXwts2_tmp[:,0,...], lstshift, axis=incax) preXwts2[:,lstshiftind,:lstshift,...] 
= NP.nan else: dspec2 = NP.expand_dims(dspec2, axis=incax+1) preXwts2 = NP.expand_dims(preXwts2, axis=incax+1) expandax_map[incax] = incax + NP.arange(2) for ekey in expandax_map: if ekey > incax: expandax_map[ekey] += 1 result[smplng][dpool][stat] = factor.reshape((-1,)+tuple(NP.ones(dspec1.ndim-1, dtype=NP.int))) * (dspec1*U.Unit('Jy Hz') * preXwts1) * (dspec2*U.Unit('Jy Hz') * preXwts2).conj() if xinfo['wts']['preXnorm']: result[smplng][dpool][stat] = result[smplng][dpool][stat] / NP.nansum(preXwts1 * preXwts2.conj(), axis=NP.union1d(NP.where(logical_or(NP.asarray(preXwts1.shape)>1, NP.asarray(preXwts2.shape)>1))), keepdims=True) # Normalize by summing the weights over the expanded axes if (len(xinfo['collapse_axes']) > 0) or (xinfo['avgcov']): # if any one of collapsing of incoherent axes or # averaging of full covariance is requested diagoffsets = {} # Stores the correlation index difference along each axis. diagweights = {} # Stores the number of points summed in the trace along the offset diagonal for colaxind, colax in enumerate(xinfo['collapse_axes']): if colax == 1: shp = NP.ones(dspec.ndim, dtype=NP.int) shp[colax] = lst_ind.size multdim_idx = tuple([NP.arange(axdim) for axdim in shp]) diagweights[colax] = NP.sum(NP.logical_not(NP.isnan(dspec[multdim_idx]))) - lstshifts # diagweights[colax] = result[smplng][dpool][stat].shape[expandax_map[colax][-1]] - lstshifts if stat == 'mean': result[smplng][dpool][stat] = NP.nanmean(result[smplng][dpool][stat], axis=expandax_map[colax][-1]) else: result[smplng][dpool][stat] = NP.nanmedian(result[smplng][dpool][stat], axis=expandax_map[colax][-1]) diagoffsets[colax] = lstshifts else: pspec_unit = result[smplng][dpool][stat].si.unit result[smplng][dpool][stat], offsets, diagwts = OPS.array_trace(result[smplng][dpool][stat].si.value, offsets=None, axis1=expandax_map[colax][0], axis2=expandax_map[colax][1], outaxis='axis1') diagwts_shape = NP.ones(result[smplng][dpool][stat].ndim, dtype=NP.int) diagwts_shape[expandax_map[colax][0]] = diagwts.size diagoffsets[colax] = offsets diagweights[colax] = NP.copy(diagwts) result[smplng][dpool][stat] = result[smplng][dpool][stat] * pspec_unit / diagwts.reshape(diagwts_shape) for ekey in expandax_map: if ekey > colax: expandax_map[ekey] -= 1 expandax_map[colax] = NP.asarray(expandax_map[colax][0]).ravel() wts_shape = tuple(NP.ones(result[smplng][dpool][stat].ndim, dtype=NP.int)) postXwts = NP.ones(wts_shape, dtype=NP.complex) wts_shape = NP.asarray(wts_shape) for colaxind, colax in enumerate(xinfo['collapse_axes']): curr_wts_shape = NP.copy(wts_shape) curr_wts_shape[expandax_map[colax]] = -1 postXwts = postXwts * xinfo['wts']['postX'][colaxind].reshape(tuple(curr_wts_shape)) result[smplng][dpool][stat] = result[smplng][dpool][stat] * postXwts axes_to_sum = tuple(NP.asarray([expandax_map[colax] for colax in xinfo['collapse_axes']]).ravel()) # for post-X normalization and collapse of covariance matrix if xinfo['wts']['postXnorm']: result[smplng][dpool][stat] = result[smplng][dpool][stat] / NP.nansum(postXwts, axis=axes_to_sum, keepdims=True) # Normalize by summing the weights over the collapsed axes if xinfo['avgcov']: # collapse the axes further (postXwts have already # been applied) diagoffset_weights = 1.0 for colaxind in zip(*sorted(zip(NP.arange(xinfo['collapse_axes'].size), xinfo['collapse_axes']), reverse=True))[0]: # It is important to sort the collapsable axes in # reverse order before deleting elements below, # otherwise the axes ordering may be get messed up diagoffset_weights_shape = 
NP.ones(result[smplng][dpool][stat].ndim, dtype=NP.int) diagoffset_weights_shape[expandax_map[xinfo['collapse_axes'][colaxind]][0]] = diagweights[xinfo['collapse_axes'][colaxind]].size diagoffset_weights = diagoffset_weights * diagweights[xinfo['collapse_axes'][colaxind]].reshape(diagoffset_weights_shape) del diagoffsets[xinfo['collapse_axes'][colaxind]] result[smplng][dpool][stat] = NP.nansum(result[smplng][dpool][stat]*diagoffset_weights, axis=axes_to_sum, keepdims=True) / NP.nansum(diagoffset_weights, axis=axes_to_sum, keepdims=True) else: result[smplng][dpool][stat] = factor.reshape((-1,)+tuple(NP.ones(dspec.ndim-1, dtype=NP.int))) * NP.abs(dspec * U.Jy)**2 diagoffsets = {} expandax_map = {} if units == 'Jy': result[smplng][dpool][stat] = result[smplng][dpool][stat].to('Jy2 Mpc') elif units == 'K': result[smplng][dpool][stat] = result[smplng][dpool][stat].to('K2 Mpc3') else: raise ValueError('Input value for units invalid') result[smplng][dpool]['diagoffsets'] = diagoffsets result[smplng][dpool]['diagweights'] = diagweights result[smplng][dpool]['axesmap'] = expandax_map result[smplng][dpool]['nsamples_incoh'] = nsamples_incoh result[smplng][dpool]['nsamples_coh'] = nsamples_coh return result ############################################################################ def compute_power_spectrum_uncertainty(self, cpds=None, selection=None, autoinfo=None,xinfo=None, cosmo=cosmo100, units='K', beamparms=None): """ ------------------------------------------------------------------------ Compute uncertainty in the power spectrum of closure phase data. It is in units of Mpc/h Inputs: cpds [dictionary] A dictionary that contains the 'oversampled' (if resample=False) and/or 'resampled' (if resample=True) delay spectrum information on the key 'errinfo'. If it is not specified the attributes cPhaseDS['errinfo'] and cPhaseDS_resampled['errinfo'] are used. Under each of these sampling keys, it holds a dictionary that has the following keys and values: 'freq_center' [numpy array] contains the center frequencies (in Hz) of the frequency subbands of the subband delay spectra. It is of size n_win. It is roughly equivalent to redshift(s) 'freq_wts' [numpy array] Contains frequency weights applied on each frequency sub-band during the subband delay transform. It is of size n_win x nchan. 'bw_eff' [numpy array] contains the effective bandwidths (in Hz) of the subbands being delay transformed. It is of size n_win. It is roughly equivalent to width in redshift or along line-of-sight 'shape' [string] shape of the window function applied. Accepted values are 'rect' (rectangular), 'bhw' (Blackman-Harris), 'bnw' (Blackman-Nuttall). 'fftpow' [scalar] the power to which the FFT of the window was raised. The value is be a positive scalar with default = 1.0 'npad' [scalar] Numbber of zero-padded channels before performing the subband delay transform. 'lags' [numpy array] lags of the subband delay spectra after padding in frequency during the transform. It is of size nlags. The lags roughly correspond to k_parallel. 'lag_kernel' [numpy array] delay transform of the frequency weights under the key 'freq_wts'. It is of size n_bl x n_win x nlags x n_t. 'lag_corr_length' [numpy array] It is the correlation timescale (in pixels) of the subband delay spectra. It is proportional to inverse of effective bandwidth. It is of size n_win. 
The unit size of a pixel is determined by the difference between adjacent pixels in lags under key 'lags' which in turn is effectively inverse of the effective bandwidth of the subband specified in bw_eff 'errinfo' [dictionary] It has two keys 'dspec0' and 'dspec1' each of which are dictionaries with the following keys and values: 'twts' [numpy array] Weights for the subsample difference. It is of shape (nlst, ndays, ntriads, nchan) 'mean' [numpy array] Delay spectrum of the subsample difference obtained by using the mean statistic. It is of shape (nspw, nlst, ndays, ntriads, nlags) 'median' [numpy array] Delay spectrum of the subsample difference obtained by using the median statistic. It is of shape (nspw, nlst, ndays, ntriads, nlags) selection [NoneType or dictionary] Selection parameters based on which triad, LST, and day indices will be returned. If set to None (default), all triad, LST, and day indices will be returned. Otherwise it must be a dictionary with the following keys and values: 'triads' [NoneType or list of 3-element tuples] If set to None (default), indices of all triads are returned. Otherwise, the specific triads must be specified such as [(1,2,3), (1,2,4), ...] and their indices will be returned 'lst' [NoneType, list or numpy array] If set to None (default), indices of all LST are returned. Otherwise must be a list or numpy array containing indices to LST. 'days' [NoneType, list or numpy array] If set to None (default), indices of all days are returned. Otherwise must be a list or numpy array containing indices to days. autoinfo [NoneType or dictionary] Specifies parameters for processing before power spectrum in auto or cross modes. If set to None, a dictionary will be created with the default values as described below. The dictionary must have the following keys and values: 'axes' [NoneType/int/list/tuple/numpy array] Axes that will be averaged coherently before squaring (for auto) or cross-multiplying (for cross) power spectrum. If set to None (default), no axes are averaged coherently. If set to int, list, tuple or numpy array, those axes will be averaged coherently after applying the weights specified under key 'wts' along those axes. 1=lst, 3=triads. Value of 2 for axes is not allowed since that denotes repeated days and it is along this axis that cross-power is computed regardless. 'wts' [NoneType/list/numpy array] If not provided (equivalent to setting it to None) or set to None (default), it is set to a one element list which is a one element numpy array of unity. Otherwise, it must be a list of same number of elements as in key 'axes' and each of these must be a numpy broadcast compatible array corresponding to each of the axis specified in 'axes' xinfo [NoneType or dictionary] Specifies parameters for processing cross power spectrum. If set to None, a dictionary will be created with the default values as described below. The dictionary must have the following keys and values: 'axes' [NoneType/int/list/tuple/numpy array] Axes over which power spectrum will be computed incoherently by cross- multiplication. If set to None (default), no cross- power spectrum is computed. If set to int, list, tuple or numpy array, cross-power over those axes will be computed incoherently by cross-multiplication. The cross-spectrum over these axes will be computed after applying the pre- and post- cross-multiplication weights specified in key 'wts'. 1=lst, 3=triads. 
Value of 2 for axes is not allowed since that denotes repeated days and it is along this axis that cross-power is computed regardless. 'collapse_axes' [list] The axes that will be collpased after the cross-power matrix is produced by cross-multiplication. If this key is not set, it will be initialized to an empty list (default), in which case none of the axes is collapsed and the full cross-power matrix will be output. it must be a subset of values under key 'axes'. This will reduce it from a square matrix along that axis to collapsed values along each of the leading diagonals. 1=lst, 3=triads. 'dlst' [scalar] LST interval (in mins) or difference between LST pairs which will be determined and used for cross-power spectrum. Will only apply if values under 'axes' contains the LST axis(=1). 'dlst_range' [scalar, numpy array, or NoneType] Specifies the LST difference(s) in minutes that are to be used in the computation of cross-power spectra. If a scalar, only the diagonal consisting of pairs with that LST difference will be computed. If a numpy array, those diagonals consisting of pairs with that LST difference will be computed. If set to None (default), the main diagonal (LST difference of 0) and the first off-main diagonal (LST difference of 1 unit) corresponding to pairs with 0 and 1 unit LST difference are computed. Applies only if key 'axes' contains LST axis (=1). 'avgcov' [boolean] It specifies if the collapse of square covariance matrix is to be collapsed further to a single number after applying 'postX' weights. If not set or set to False (default), this late stage collapse will not be performed. Otherwise, it will be averaged in a weighted average sense where the 'postX' weights would have already been applied during the collapsing operation 'wts' [NoneType or Dictionary] If not set, a default dictionary (see default values below) will be created. It must have the follwoing keys and values: 'preX' [list of numpy arrays] It contains pre-cross- multiplication weights. It is a list where each element in the list is a numpy array, and the number of elements in the list must match the number of entries in key 'axes'. If 'axes' is set None, 'preX' may be set to a list with one element which is a numpy array of ones. The number of elements in each of the numpy arrays must be numpy broadcastable into the number of elements along that axis in the delay spectrum. 'preXnorm' [boolean] If False (default), no normalization is done after the application of weights. If set to True, the delay spectrum will be normalized by the sum of the weights. 'postX' [list of numpy arrays] It contains post-cross- multiplication weights. It is a list where each element in the list is a numpy array, and the number of elements in the list must match the number of entries in key 'axes'. If 'axes' is set None, 'preX' may be set to a list with one element which is a numpy array of ones. The number of elements in each of the numpy arrays must be numpy broadcastable into the number of elements along that axis in the delay spectrum. 'preXnorm' [boolean] If False (default), no normalization is done after the application of 'preX' weights. If set to True, the delay spectrum will be normalized by the sum of the weights. 'postXnorm' [boolean] If False (default), no normalization is done after the application of postX weights. If set to True, the delay cross power spectrum will be normalized by the sum of the weights. 
cosmo [instance of cosmology class from astropy] An instance of class FLRW or default_cosmology of astropy cosmology module. Default uses Planck 2015 cosmology, with H0=100 h km/s/Mpc units [string] Specifies the units of output power spectum. Accepted values are 'Jy' and 'K' (default)) and the power spectrum will be in corresponding squared units. Output: Dictionary with the keys 'triads' ((ntriads,3) array), 'triads_ind', ((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst' ((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,) array), 'days' ((ndaycomb,) array), 'day_ind' ((ndaycomb,) array), 'dday' ((ndaycomb,) array), 'oversampled' and 'resampled' corresponding to whether resample was set to False or True in call to member function FT(). Values under keys 'triads_ind' and 'lst_ind' are numpy array corresponding to triad and time indices used in selecting the data. Values under keys 'oversampled' and 'resampled' each contain a dictionary with the following keys and values: 'z' [numpy array] Redshifts corresponding to the band centers in 'freq_center'. It has shape=(nspw,) 'lags' [numpy array] Delays (in seconds). It has shape=(nlags,). 'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding to 'lags'. It has shape=(nspw,nlags) 'freq_center' [numpy array] contains the center frequencies (in Hz) of the frequency subbands of the subband delay spectra. It is of size n_win. It is roughly equivalent to redshift(s) 'freq_wts' [numpy array] Contains frequency weights applied on each frequency sub-band during the subband delay transform. It is of size n_win x nchan. 'bw_eff' [numpy array] contains the effective bandwidths (in Hz) of the subbands being delay transformed. It is of size n_win. It is roughly equivalent to width in redshift or along line-of-sight 'shape' [string] shape of the frequency window function applied. Usual values are 'rect' (rectangular), 'bhw' (Blackman-Harris), 'bnw' (Blackman-Nuttall). 'fftpow' [scalar] the power to which the FFT of the window was raised. The value is be a positive scalar with default = 1.0 'lag_corr_length' [numpy array] It is the correlation timescale (in pixels) of the subband delay spectra. It is proportional to inverse of effective bandwidth. It is of size n_win. The unit size of a pixel is determined by the difference between adjacent pixels in lags under key 'lags' which in turn is effectively inverse of the effective bandwidth of the subband specified in bw_eff It further contains a key named 'errinfo' which is a dictionary. It contains information about power spectrum uncertainties obtained from subsample differences. It contains the following keys and values: 'mean' [numpy array] Delay power spectrum uncertainties incoherently estimated over the axes specified in xinfo['axes'] using the 'mean' key in input cpds or attribute cPhaseDS['errinfo']['dspec']. It has shape that depends on the combination of input parameters. See examples below. If both collapse_axes and avgcov are not set, those axes will be replaced with square covariance matrices. If collapse_axes is provided but avgcov is False, those axes will be of shape 2*Naxis-1. 'median' [numpy array] Delay power spectrum uncertainties incoherently averaged over the axes specified in incohax using the 'median' key in input cpds or attribute cPhaseDS['errinfo']['dspec']. It has shape that depends on the combination of input parameters. See examples below. If both collapse_axes and avgcov are not set, those axes will be replaced with square covariance matrices. 
If collapse_axes is provided but avgcov is False, those axes will be of shape 2*Naxis-1. 'diagoffsets' [dictionary] Same keys corresponding to keys under 'collapse_axes' in input containing the diagonal offsets for those axes. If 'avgcov' was set, those entries will be removed from 'diagoffsets' since all the leading diagonal elements have been collapsed (averaged) further. Value under each key is a numpy array where each element in the array corresponds to the index of that leading diagonal. This should match the size of the output along that axis in 'mean' or 'median' above. 'diagweights' [dictionary] Each key is an axis specified in collapse_axes and the value is a numpy array of weights corresponding to the diagonal offsets in that axis. 'axesmap' [dictionary] If covariance in cross-power is calculated but is not collapsed, the number of dimensions in the output will have changed. This parameter tracks where the original axis is now placed. The keys are the original axes that are involved in incoherent cross-power, and the values are the new locations of those original axes in the output. 'nsamples_incoh' [integer] Number of incoherent samples in producing the power spectrum 'nsamples_coh' [integer] Number of coherent samples in producing the power spectrum Examples: (1) Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags) autoinfo = {'axes': 2, 'wts': None} xinfo = {'axes': None, 'avgcov': False, 'collapse_axes': [], 'wts':{'preX': None, 'preXnorm': False, 'postX': None, 'postXnorm': False}} This will not do anything because axes cannot include value 2 which denote the 'days' axis and the uncertainties are obtained through subsample differencing along days axis regardless. Output delay power spectrum has shape (Nspw, Nlst, Ndaycomb, Ntriads, Nlags) (2) Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags) autoinfo = {'axes': 2, 'wts': None} xinfo = {'axes': [1,3], 'avgcov': False, 'collapse_axes': [], 'wts':{'preX': None, 'preXnorm': False, 'postX': None, 'postXnorm': False}, 'dlst_range': None} This will not do anything about coherent averaging along axis=2 because axes cannot include value 2 which denote the 'days' axis and the uncertainties are obtained through subsample differencing along days axis regardless. Output delay power spectrum has shape (Nspw, 2, Nlst, Ndaycomb, Ntriads, Ntriads, Nlags) diagoffsets = {1: NP.arange(n_dlst_range)}, axesmap = {1: [1,2], 3: [4,5]} (3) Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags) autoinfo = {'axes': 2, 'wts': None} xinfo = {'axes': [1,3], 'avgcov': False, 'collapse_axes': [3], 'dlst_range': [0.0, 1.0, 2.0]} This will not do anything about coherent averaging along axis=2 because axes cannot include value 2 which denote the 'days' axis and the uncertainties are obtained through subsample differencing along days axis regardless. 
Output delay power spectrum has shape (Nspw, 3, Nlst, 1, 2*Ntriads-1, Nlags) diagoffsets = {1: NP.arange(n_dlst_range), 3: NP.arange(-Ntriads,Ntriads)}, axesmap = {1: [1,2], 3: [4]} (4) Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags) autoinfo = {'axes': None, 'wts': None} xinfo = {'axes': [1,3], 'avgcov': False, 'collapse_axes': [1,3], 'dlst_range': [1.0, 2.0, 3.0, 4.0]} Output delay power spectrum has shape (Nspw, 4, Ndaycomb, 2*Ntriads-1, Nlags) diagoffsets = {1: NP.arange(n_dlst_range), 3: NP.arange(-Ntriads,Ntriads)}, axesmap = {1: [1], 3: [3]} (5) Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags) autoinfo = {'axes': None, 'wts': None} xinfo = {'axes': [1,3], 'avgcov': True, 'collapse_axes': [3], 'dlst_range': None} Output delay power spectrum has shape (Nspw, 2, Nlst, Ndays, 1, Nlags) diagoffsets = {1: NP.arange(n_dlst_range)}, axesmap = {1: [1,2], 3: [4]} (6) Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags) autoinfo = {'axes': None, 'wts': None} xinfo = {'axes': [1,3], 'avgcov': True, 'collapse_axes': []} Output delay power spectrum has shape (Nspw, 1, Ndays, 1, Nlags) diagoffsets = {}, axesmap = {1: [1], 3: [3]} ------------------------------------------------------------------------ """ if not isinstance(units,str): raise TypeError('Input parameter units must be a string') if units.lower() == 'k': if not isinstance(beamparms, dict): raise TypeError('Input beamparms must be a dictionary') if 'freqs' not in beamparms: beamparms['freqs'] = self.f beamparms_orig = copy.deepcopy(beamparms) if autoinfo is None: autoinfo = {'axes': None, 'wts': [NP.ones(1, dtpye=NP.float)]} elif not isinstance(autoinfo, dict): raise TypeError('Input autoinfo must be a dictionary') if 'axes' not in autoinfo: autoinfo['axes'] = None else: if autoinfo['axes'] is not None: if not isinstance(autoinfo['axes'], (list,tuple,NP.ndarray,int)): raise TypeError('Value under key axes in input autoinfo must be an integer, list, tuple or numpy array') else: autoinfo['axes'] = NP.asarray(autoinfo['axes']).reshape(-1) if 'wts' not in autoinfo: if autoinfo['axes'] is not None: autoinfo['wts'] = [NP.ones(1, dtype=NP.float)] * len(autoinfo['axes']) else: autoinfo['wts'] = [NP.ones(1, dtype=NP.float)] else: if autoinfo['axes'] is not None: if not isinstance(autoinfo['wts'], list): raise TypeError('wts in input autoinfo must be a list of numpy arrays') else: if len(autoinfo['wts']) != len(autoinfo['axes']): raise ValueError('Input list of wts must be same as length of autoinfo axes') else: autoinfo['wts'] = [NP.ones(1, dtype=NP.float)] if xinfo is None: xinfo = {'axes': None, 'wts': {'preX': [NP.ones(1, dtpye=NP.float)], 'postX': [NP.ones(1, dtpye=NP.float)], 'preXnorm': False, 'postXnorm': False}} elif not isinstance(xinfo, dict): raise TypeError('Input xinfo must be a dictionary') if 'axes' not in xinfo: xinfo['axes'] = None else: if not isinstance(xinfo['axes'], (list,tuple,NP.ndarray,int)): raise TypeError('Value under key axes in input xinfo must be an integer, list, tuple or numpy array') else: xinfo['axes'] = NP.asarray(xinfo['axes']).reshape(-1) if 'wts' not in xinfo: xinfo['wts'] = {} for xkey in ['preX', 'postX']: if xinfo['axes'] is not None: xinfo['wts'][xkey] = [NP.ones(1, dtype=NP.float)] * len(xinfo['axes']) else: xinfo['wts'][xkey] = [NP.ones(1, dtype=NP.float)] xinfo['wts']['preXnorm'] = False xinfo['wts']['postXnorm'] = False else: if xinfo['axes'] is not None: if not isinstance(xinfo['wts'], dict): raise TypeError('wts in input xinfo must be a dictionary') 
for xkey in ['preX', 'postX']: if not isinstance(xinfo['wts'][xkey], list): raise TypeError('{0} wts in input xinfo must be a list of numpy arrays'.format(xkey)) else: if len(xinfo['wts'][xkey]) != len(xinfo['axes']): raise ValueError('Input list of {0} wts must be same as length of xinfo axes'.format(xkey)) else: for xkey in ['preX', 'postX']: xinfo['wts'][xkey] = [NP.ones(1, dtype=NP.float)] if 'preXnorm' not in xinfo['wts']: xinfo['wts']['preXnorm'] = False if 'postXnorm' not in xinfo['wts']: xinfo['wts']['postXnorm'] = False if not isinstance(xinfo['wts']['preXnorm'], NP.bool): raise TypeError('preXnorm in input xinfo must be a boolean') if not isinstance(xinfo['wts']['postXnorm'], NP.bool): raise TypeError('postXnorm in input xinfo must be a boolean') if 'avgcov' not in xinfo: xinfo['avgcov'] = False if not isinstance(xinfo['avgcov'], NP.bool): raise TypeError('avgcov under input xinfo must be boolean') if 'collapse_axes' not in xinfo: xinfo['collapse_axes'] = [] if not isinstance(xinfo['collapse_axes'], (int,list,tuple,NP.ndarray)): raise TypeError('collapse_axes under input xinfo must be an integer, tuple, list or numpy array') else: xinfo['collapse_axes'] = NP.asarray(xinfo['collapse_axes']).reshape(-1) if (autoinfo['axes'] is not None) and (xinfo['axes'] is not None): if
NP.intersect1d(autoinfo['axes'], xinfo['axes'])
numpy.intersect1d
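The completion above closes the validation step in the sample: NP.intersect1d verifies that the coherent-averaging axes in autoinfo['axes'] and the cross-multiplication axes in xinfo['axes'] are disjoint. A minimal, self-contained sketch of that check, using hypothetical axis values (1=lst, 3=triads, 2=days, following the docstring's convention):

import numpy as np

# Hypothetical selections mirroring autoinfo['axes'] and xinfo['axes'].
auto_axes = np.asarray([2]).reshape(-1)      # coherent averaging over the days axis
cross_axes = np.asarray([1, 3]).reshape(-1)  # incoherent cross-power over LST and triads

# intersect1d returns the sorted common elements; an empty result means the
# two axis sets are disjoint, which is what the original check requires.
if np.intersect1d(auto_axes, cross_axes).size > 0:
    raise ValueError("autoinfo['axes'] and xinfo['axes'] must have no intersection")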
import keras.layers as kl
from keras.models import Model
from keras import regularizers
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt


class NNModel:

    def __init__(self, input_shape):
        self.input_shape = input_shape

    def make_model(self):
        input_data = kl.Input(shape=(1, self.input_shape))
        lstm = kl.LSTM(5, input_shape=(1, self.input_shape), return_sequences=True, activity_regularizer=regularizers.l2(0.003), recurrent_regularizer=regularizers.l2(0), dropout=0.2, recurrent_dropout=0.2)(input_data)
        perc = kl.Dense(5, activation="sigmoid", activity_regularizer=regularizers.l2(0.005))(lstm)
        lstm2 = kl.LSTM(2, activity_regularizer=regularizers.l2(0.01), recurrent_regularizer=regularizers.l2(0.001), dropout=0.2, recurrent_dropout=0.2)(perc)
        out = kl.Dense(1, activation="sigmoid", activity_regularizer=regularizers.l2(0.001))(lstm2)
        model = Model(input_data, out)
        self.model = model

    def train_model(self, x, y, epochs, model_name, save_model=True):
        self.model.compile(optimizer="adam", loss="mean_squared_error", metrics=["mse", "acc"])
        # load data
        train_x = np.reshape(np.array(x), (len(x), 1, self.input_shape))
        train_y = np.array(y)
        # train_stock = np.array(pd.read_csv("train_stock.csv"))
        # train model
        self.model.fit(train_x, train_y, epochs=epochs)
        if save_model:
            self.model.save(f"models/saved_models/{model_name}.h5", overwrite=True, include_optimizer=True)

    def test_model(self, x, y):
        test_x = np.reshape(
np.array(x)
numpy.array
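In the sample above, test_model mirrors train_model: the list of feature vectors is converted with np.array and reshaped to (n_samples, 1, input_shape), the (samples, timesteps, features) layout the LSTM input layer expects. A small stand-alone sketch of that step with made-up data (the feature count and values are illustrative, not taken from the original):

import numpy as np

input_shape = 4                # features per sample (assumed for illustration)
x = [[0.1, 0.2, 0.3, 0.4],
     [0.5, 0.6, 0.7, 0.8]]     # two samples

# Add the single-timestep axis expected by the LSTM: (n_samples, 1, features).
test_x = np.reshape(np.array(x), (len(x), 1, input_shape))
print(test_x.shape)            # -> (2, 1, 4)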
# Copyright 2022 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """eval""" import os import ast import codecs import argparse import cv2 import numpy as np from sklearn.metrics import roc_auc_score from mindspore import context from mindspore.train.serialization import load_checkpoint, load_param_into_net from mindspore import ops from src.dataset import createDataset from src.stpm import STPM parser = argparse.ArgumentParser(description='test') parser.add_argument('--category', type=str, default='screw') parser.add_argument('--device_id', type=int, default=0, help='Device id') parser.add_argument('--data_url', type=str, default="/") parser.add_argument('--save_sample', type=ast.literal_eval, default=False, help='Whether to save the infer image') parser.add_argument('--save_sample_path', type=str, default="", help='The path to save infer image') parser.add_argument('--ckpt_path', type=str, default='./', help="The path to save checkpoint") parser.add_argument('--num_class', type=int, default=1000, help="The num of class") parser.add_argument('--out_size', type=int, default=256, help="out size") args = parser.parse_args() class SaveImageTool: def __init__(self, save_sample_path): self.save_sample_path = save_sample_path def cvt2heatmap(self, gray): heatmap = cv2.applyColorMap(np.uint8(gray), cv2.COLORMAP_JET) return heatmap def heatmap_on_image(self, heatmap, image): out = np.float32(heatmap) / 255 + np.float32(image) / 255 out = out / np.max(out) return np.uint8(255 * out) def min_max_norm(self, image): a_min, a_max = image.min(), image.max() return (image - a_min) / (a_max - a_min) def save_anomaly_map(self, anomaly_map, a_maps, input_img, gt_img, file_name, category): anomaly_map_norm = self.min_max_norm(anomaly_map) anomaly_map_norm_hm = self.cvt2heatmap(anomaly_map_norm * 255) # 64x64 map am64 = self.min_max_norm(a_maps[0]) am64 = self.cvt2heatmap(am64 * 255) # 32x32 map am32 = self.min_max_norm(a_maps[1]) am32 = self.cvt2heatmap(am32 * 255) # 16x16 map am16 = self.min_max_norm(a_maps[2]) am16 = self.cvt2heatmap(am16 * 255) # anomaly map on image heatmap = self.cvt2heatmap(anomaly_map_norm * 255) hm_on_img = self.heatmap_on_image(heatmap, input_img) # save images save_path = os.path.join(self.save_sample_path, f'{category}_{file_name}') cv2.imwrite(os.path.join(save_path + '.jpg'), input_img) cv2.imwrite(os.path.join(save_path + '_am64.jpg'), am64) cv2.imwrite(os.path.join(save_path + '_am32.jpg'), am32) cv2.imwrite(os.path.join(save_path + '_am16.jpg'), am16) cv2.imwrite(os.path.join(save_path + '_amap.jpg'), anomaly_map_norm_hm) cv2.imwrite(os.path.join(save_path + '_amap_on_img.jpg'), hm_on_img) cv2.imwrite(os.path.join(save_path + '_gt.jpg'), gt_img) def normalize(self, in_x): n, c, _, _ = in_x.shape if n != 1: raise ValueError(f"Only currently support batch size=1 in saving infer image. 
But got {n}.") if c != 3: raise ValueError(f"Only currently support that the channel of the input image is 3. But got {c}.") mean = [-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.255] std = [1 / 0.229, 1 / 0.224, 1 / 0.255] for i in range(c): in_x[:, i, :, :] = (in_x[:, i, :, :] - mean[i]) / std[i] return in_x def cal_anomaly_map(fs_list, ft_list, out_size=224): """cal_anomaly_map""" unsqueeze = ops.ExpandDims() Sum = ops.ReduceSum(keep_dims=False) Norm = ops.L2Normalize(axis=1) amap_mode = 'mul' if amap_mode == 'mul': anomaly_map = np.ones([out_size, out_size]) else: anomaly_map = np.zeros([out_size, out_size]) map_list = [] for i in range(len(ft_list)): fs = fs_list[i] ft = ft_list[i] fs_norm = Norm(fs) ft_norm = Norm(ft) num = fs_norm * ft_norm cos = Sum(num, 1) a_map = 1 - cos a_map = unsqueeze(a_map, 1) a_map = a_map[0, 0, :, :].asnumpy() a_map = cv2.resize(a_map, (out_size, out_size)) map_list.append(a_map) if amap_mode == 'mul': anomaly_map *= a_map else: anomaly_map += a_map return anomaly_map, map_list if __name__ == "__main__": context.set_context(mode=context.GRAPH_MODE, device_target='Ascend', save_graphs=False, device_id=args.device_id) _, ds_test = createDataset(args.data_url, args.category, save_sample=args.save_sample, out_size=args.out_size) net = STPM(args, is_train=False) param = load_checkpoint(os.path.join(args.ckpt_path)) load_param_into_net(net, param) net.set_train(False) gt_list_px_lvl = [] pred_list_px_lvl = [] gt_list_img_lvl = [] pred_list_img_lvl = [] if args.save_sample: if args.save_sample_path == "": current_path = os.path.abspath(os.path.dirname(__file__)) args.save_sample_path = os.path.join(current_path, f'scripts/eval_{args.category}/sample') print(f"The image generated by inference will be saved in this path: {args.save_sample_path}") os.makedirs(args.save_sample_path, exist_ok=True) for data in ds_test.create_dict_iterator(): gt = data['gt'] label = data['label'] x = data['img'] features_s, features_t = net(x) amap, a_map_list = cal_anomaly_map(features_s, features_t, out_size=args.out_size) gt_np = gt.asnumpy()[0, 0].astype(int) gt_list_px_lvl.extend(gt_np.ravel()) pred_list_px_lvl.extend(amap.ravel()) gt_list_img_lvl.append(label.asnumpy()[0]) pred_list_img_lvl.append(amap.max()) if args.save_sample: filename = data['filename'] filename = str(codecs.decode(filename.asnumpy().tostring()).strip(b'\x00'.decode())) x = x.asnumpy() img_tool = SaveImageTool(args.save_sample_path) input_x = img_tool.normalize(x) input_x =
np.transpose(input_x, (0, 2, 3, 1))
numpy.transpose
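The completion above converts the normalized batch from the channel-first NCHW layout used by the network to channel-last NHWC so that OpenCV can write it out as an image. A stand-alone sketch of that axis reordering with a dummy array (the 1x3x256x256 shape is an assumption consistent with the script's batch-size, channel, and out_size checks):

import numpy as np

# One RGB image in NCHW layout: (batch, channels, height, width).
input_x = np.random.rand(1, 3, 256, 256).astype(np.float32)

# Reorder to NHWC (batch, height, width, channels) as expected by cv2.imwrite.
input_x = np.transpose(input_x, (0, 2, 3, 1))
print(input_x.shape)  # -> (1, 256, 256, 3)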
# -*- coding: utf-8 -*- """ Trains a model on the source dataset Then evaluates it on three OOD datasets: - OOD instances - OOD backgrounds - OOD instances and background """ from re import I import torch import numpy as np import PIL import random import os from src import dataset, helpers, custom_dataloaders from torch.utils.data import DataLoader import matplotlib.pyplot as plt from torchvision.models import Inception3 from torchvision import transforms import torchvision from tqdm import tqdm from torch.nn import functional as F import torch.nn as nn import torch.optim as optim import numpy as np import yaml import argparse import json # Ignore warining import warnings warnings.filterwarnings("ignore") # output dictionnary output = {} # Companion functions def return_f1(precision, recall): return 2 * (np.array(precision) * np.array(recall)) / (np.array(precision) + np.array(recall)) def plot_curves(precision, recall): """ add points to compute the p/r curves """ precisions_plot = [[0]] recall_plot = [[recall[0]]] precisions_plot.append(precision) recall_plot.append(recall) precisions_plot.append([precision[-1]]) recall_plot.append([0.5]) precisions_plot = sum(precisions_plot, []) recall_plot = sum(recall_plot, []) return precisions_plot, recall_plot # Setting up the seed seed = 42 torch.backends.cudnn.deterministic = True random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) np.random.seed(seed) # Load the configuration file config = 'config.yml' with open(config, 'rb') as f: configuration = yaml.load(f, Loader=yaml.FullLoader) # Retrieve the directories models_dir = configuration.get("models_dir") files_dir = configuration.get("files_dir") figs_dir = configuration.get("figs_dir") data_dir = configuration.get("dataset_dir") # Retrieve the options for the model pretrained : configuration.get('pretrained', True) model_family = configuration.get('model', 'inception') model_weight = configuration.get('model_weight') image_size = configuration.get('image_size') # label_name = configuration.get('label_name', 'labels.csv') output_directory = configuration.get("output_directory", 'results') # If the directory does not exist, create the temporary folder # where the checkpoints will be stored. weights_dir = 'weights' checkpoints_dir = os.path.join(weights_dir, 'losses') if not os.path.isdir(weights_dir): os.mkdir(weights_dir) if not os.path.isdir(checkpoints_dir): os.mkdir(checkpoints_dir) # Arguments parser = argparse.ArgumentParser(description = 'Model training') parser.add_argument('--device', default = 'cuda:0', help = "GPU device") parser.add_argument('--n_epochs', default = 30, help = "Number of training epochs", type=int) parser.add_argument('--batch_size', default = 64, help = "Batch size", type=int) parser.add_argument('--print_every', default = 20, help = "Evaluate the model every print_every iterations.", type=int) parser.add_argument('--name', default = 'model', help = "Name of the model.") # Additional arguments with which we will edit the label files parser.add_argument('--array_type', default = None, help = "Class of arrays to consider.") parser.add_argument('--max_count', default = None, help = 'Maximal number of samples to sample from.', type = int) parser.add_argument('--source_domain', default = None, help = "The source domain. 
The target domain will be deduced.") parser.add_argument('--bootstrap', default = 50, help = "Number of bootstrap iterations to estimate the dimensionality", type = int) parser.add_argument('--n_iter', default = 10, help = "Number of train/dimensionality estimation/test iterations", type = int) args = parser.parse_args() # Transforms # If the model is resnet, then we crop the image to be 224 * 224 # If the model is custom, then the size should be specified by the user. if model_family == 'inception': transforms = torchvision.transforms.Compose([ torchvision.transforms.ToPILImage(), torchvision.transforms.RandomHorizontalFlip(), torchvision.transforms.RandomVerticalFlip(), torchvision.transforms.RandomRotation((90,90)), torchvision.transforms.RandomRotation((-90,-90)), torchvision.transforms.ColorJitter(brightness=0.1, contrast=0.2, saturation=0, hue=0), torchvision.transforms.ToTensor(), torchvision.transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]), ]) elif model_family == 'resnet': transforms = torchvision.transforms.Compose([ torchvision.transforms.ToPILImage(), torchvision.transforms.RandomHorizontalFlip(), torchvision.transforms.RandomVerticalFlip(), torchvision.transforms.RandomRotation((90,90)), torchvision.transforms.RandomRotation((-90,-90)), torchvision.transforms.ColorJitter(brightness=0.1, contrast=0.2, saturation=0, hue=0), torchvision.transforms.ToTensor(), # torchvision.transforms.CenterCrop(224), torchvision.transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]), ]) elif model_family == 'custom': transforms = torchvision.transforms.Compose([ torchvision.transforms.ToPILImage(), torchvision.transforms.RandomHorizontalFlip(), torchvision.transforms.RandomVerticalFlip(), torchvision.transforms.RandomRotation((90,90)), torchvision.transforms.RandomRotation((-90,-90)), torchvision.transforms.ColorJitter(brightness=0.1, contrast=0.2, saturation=0, hue=0), torchvision.transforms.ToTensor(), torchvision.transforms.CenterCrop(image_size), torchvision.transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]), ]) # Data # Load the data # Load the correct image size if model_family == 'resnet': dataset_dir = os.path.join(data_dir, 'PITW_224') elif model_family == 'inception': dataset_dir = os.path.join(data_dir, 'PITW_299') training_path = os.path.join(dataset_dir, 'train') annotations_train = os.path.join(training_path, "labels.csv") validation_path = os.path.join(dataset_dir, 'validation') annotations_validation = os.path.join(validation_path, 'labels.csv') test_path = os.path.join(dataset_dir, 'test') annotations_test = os.path.join(test_path, 'labels.csv') ood_background = os.path.join(dataset_dir, "test_ood_background") annotations_ood_background = os.path.join(ood_background, 'labels.csv') ood_instances = os.path.join(dataset_dir, "test_ood_instances") annotations_ood_instances = os.path.join(ood_instances, 'labels.csv') # ood dataset : custom path ood_dir = 'PITW_urban' ood = os.path.join(ood_dir, "test") annotations_ood = os.path.join(ood, 'labels.csv') training_data = dataset.BDPVClassificationDataset(annotations_train, training_path, transform = transforms, max_count = args.max_count) validation_data = dataset.BDPVClassificationDataset(annotations_validation, validation_path, transform = transforms, max_count = args.max_count) test_data = dataset.BDPVClassificationDataset(annotations_test, test_path, transform = transforms, max_count = args.max_count) ood_background_data = dataset.BDPVClassificationDataset(annotations_ood_background, 
ood_background, transform = transforms, max_count = args.max_count) ood_instances_data = dataset.BDPVClassificationDataset(annotations_ood_instances, ood_instances, transform = transforms, max_count = args.max_count) ood_data = dataset.BDPVClassificationDataset(annotations_ood, ood, transform = transforms, max_count = args.max_count) # Initialize the data train = DataLoader(training_data, batch_size = args.batch_size, shuffle = True) val = DataLoader(validation_data, batch_size = args.batch_size, shuffle = True) test = DataLoader(test_data, batch_size = args.batch_size, shuffle = True) background_ood = DataLoader(ood_background_data, batch_size = args.batch_size, shuffle = True) instances_ood = DataLoader(ood_instances_data, batch_size = args.batch_size, shuffle = True) ood_dataset = DataLoader(ood_data, batch_size = args.batch_size, shuffle = True) dataloader = {"train": train, 'val' : val, 'test' : test} ood_dataloader = {'ood_background' : background_ood, 'ood_instances' : instances_ood, 'ood' : ood_dataset} def train(dataloader): """ trains the model """ # Initialize the model if model_family == 'inception': # Load the model fine-tuned on NRW model = Inception3(num_classes = 2, aux_logits = True, transform_input = False, init_weights = True) # Load the architecture checkpoint = torch.load(model_weight, map_location = args.device) # Load the weights model.load_state_dict(checkpoint['model_state_dict']) # Upload the weights in the model model = model.to(args.device) # move the model to the device elif model_family == 'resnet': # Load the model and send it to the GPU model = torchvision.models.resnet50(pretrained = True) # Last layer should have an output shape of 2. model.fc = nn.Sequential( nn.Linear(2048, 2), nn.ReLU(inplace=True)) model = model.to(args.device) elif model_family == 'custom': # Load the model and send it to the GPU model = torch.load(model_weight) model = model.to(args.device) # Train # Initialization of the model # Layers to update for param in model.parameters(): param.requires_grad = True # Criterion criterion = nn.BCELoss() # Parameters to update and optimizer params_to_update = [] for _, param in model.named_parameters(): if param.requires_grad == True: params_to_update.append(param) optimizer = optim.Adam(params_to_update, lr = 0.0001) # Training running_loss = 0 waiting = 0 steps = 0 early_stop = False train_losses, test_losses = [], [] threshold = 0.5 # Threshold set by default. 
Will be fine tuned afterwards for epoch in range(args.n_epochs): for inputs, labels, _ in tqdm(dataloader["train"]): steps += 1 labels = labels.to(torch.float32) inputs, labels = inputs.to(args.device), labels.to(args.device) optimizer.zero_grad() if model_family == 'inception': # Accomodate for the particular architecture of inception outputs, aux_outputs = model(inputs) outputs, aux_outputs = F.softmax(outputs, dim=1), F.softmax(aux_outputs, dim=1) outputs, aux_outputs = outputs[:,1], aux_outputs[:,1] loss = criterion(outputs, labels) + 0.4 * criterion(aux_outputs, labels) else: outputs = model(inputs) outputs = F.softmax(outputs, dim=1) outputs = outputs[:,1] loss = criterion(outputs, labels) loss.backward() optimizer.step() running_loss += loss.item() # Evaluate the model if steps % args.print_every == 0: test_loss = 0 model.eval() with torch.no_grad(): true_positives, false_positives, true_negatives, false_negatives = 0, 0, 0, 0 total = 0 for inputs, labels, names in dataloader["val"]: labels = labels.to(torch.float32) inputs, labels = inputs.to(args.device), labels.to(args.device) outputs = model.forward(inputs) outputs = F.softmax(outputs, dim=1) # the model returns the unnormalized probs. Softmax it to get probs outputs = outputs[:,1] batch_loss = criterion(outputs, labels) test_loss += batch_loss.item() predicted = (outputs >= threshold).long() # return 0 or 1 if an array has been detected # compute the accuracy of the classification tp, fp, tn, fn = helpers.confusion(predicted, labels) true_positives += tp false_positives += fp true_negatives += tn false_negatives += fn train_total = 100 * running_loss / len(dataloader["train"]) test_total = 100 * test_loss / len(dataloader["val"]) train_losses.append(train_total) test_losses.append(test_total) # Add to the SummaryWriter # loss_writer.add_scalar("train", train_total, steps) # loss_writer.add_scalar("test", test_total, steps) # Compute the F1 score precision = np.divide(true_positives, (true_positives + false_positives)) recall = np.divide(true_positives, (true_positives + false_negatives)) f1 = 2 * np.divide((precision * recall), (precision + recall)) print( f"Epoch {epoch+1}/{args.n_epochs}..." f"Step {steps}...." f"Train loss : {train_total:.3f}......" f"Val loss : {test_total:.3f}......" f"F1 score : {f1:.3f}" ) running_loss = 0 model.train() # early stopping condition # if the model fails to improve for five subsequent epochs on the # validation set, we stop the training if steps == args.print_every: # first time on the validation set min_val_loss = test_total best_model = args.name + '_' + str(steps) + '.pth' # Save the model torch.save(model, os.path.join(checkpoints_dir, best_model)) else: if not test_total < min_val_loss: waiting += 1 else: # save the model, erase the former best model best_model = args.name + '_' + str(steps) + '.pth' torch.save(model, os.path.join(checkpoints_dir, best_model)) waiting = 0 # Reset the number of epochs we have to wait. min_val_loss = test_total # Uptdate the new minimum loss if waiting == 8: early_stop = True print('Model failed to improve for 5 subsequent epochs on the validation dataset.') break if early_stop : # early stop if necessary. 
print('Training interrupted.') model.eval() break # Save the best checkpoint as the best model if not os.path.isdir(models_dir): os.mkdir(models_dir) # Load the best model model = torch.load(os.path.join(checkpoints_dir, best_model)) model = model.to(args.device) # Save it best_model_name = args.name + '.pth' torch.save(model, os.path.join(models_dir, best_model_name)) # Fine tune the classification threshold print('Model trained. Now computing the precision and recall curves on the test set.') # Thresholds to be considered results_models = {} thresholds = np.linspace(0.01,.99, 99) model.eval() # Now we compute the precision/recall curve on the test set directly for # comprison with the out domain labels # Forward pass on the validation dataset and accumulate the probabilities # in a single vector. probabilities = [] all_labels = [] with torch.no_grad(): for data in tqdm(dataloader["test"]): # i +=1 images, labels, _ = data # move the images to the device images = images.to(args.device) labels = labels.detach().cpu().numpy() all_labels.append(list(labels)) # calculate outputs by running images through the network and computing the prediction using the threshold outputs = model(images) probs = F.softmax(outputs, dim=1).detach().cpu().numpy() # the model returns the unnormalized probs. Softmax it to get probs probabilities.append(list(probs[:,1])) # Convert the probabilities and labels as an array probabilities = sum(probabilities, []) probabilities =
np.array(probabilities)
numpy.array
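The record above is cut off just after the test-set probabilities are collected, before the threshold sweep announced by thresholds = np.linspace(0.01, .99, 99) is actually run. Below is a minimal, self-contained sketch of how that sweep could look, assuming all_labels is flattened the same way as probabilities and that plain NumPy counting stands in for the helpers.confusion() call used during training; the function name precision_recall_sweep and the final threshold selection are illustrative, not taken from the original script.

import numpy as np

def precision_recall_sweep(probabilities, labels, thresholds):
    """Compute precision, recall and F1 on class-1 probabilities for each candidate threshold."""
    probabilities = np.asarray(probabilities, dtype=float)
    labels = np.asarray(labels, dtype=int)
    precisions, recalls, f1_scores = [], [], []
    for threshold in thresholds:
        predicted = (probabilities >= threshold).astype(int)
        tp = int(np.sum((predicted == 1) & (labels == 1)))
        fp = int(np.sum((predicted == 1) & (labels == 0)))
        fn = int(np.sum((predicted == 0) & (labels == 1)))
        precision = tp / (tp + fp) if (tp + fp) > 0 else 0.0
        recall = tp / (tp + fn) if (tp + fn) > 0 else 0.0
        f1 = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0.0
        precisions.append(precision)
        recalls.append(recall)
        f1_scores.append(f1)
    return np.array(precisions), np.array(recalls), np.array(f1_scores)

# Usage with the names accumulated above (all_labels still needs the same flattening step):
# all_labels = sum(all_labels, [])
# precisions, recalls, f1_scores = precision_recall_sweep(probabilities, all_labels, thresholds)
# best_threshold = thresholds[np.argmax(f1_scores)]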
# Copyright 2017 Regents of the University of California # # Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with # the distribution. # # 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import os, sys, time, copy, collections, math, json import numpy as np import scipy as sp import matplotlib from matplotlib import pyplot as plt import llops as yp # Custom scale bar object from matplotlib_scalebar.scalebar import ScaleBar # Libwallerlab imports from llops import display from llops import Roi class StopAndStareAcquisition(): # Initialization def __init__(self, hardware_controller_list, system_metadata, illumination_type='bf', illumination_sequence=None, frame_spacing_mm=1, object_size_mm=(0.5, 0.5), reuse_illumination_sequence=True, max_exposure_time_s=2, exposure_time_pad_s=0.0, velocity_mm_s=None, exposure_time_s=None, debug=False, trigger_mode='software', motion_acceleration_mm_s_2=1e3, flip_pathway=False, acquisition_timeout_s=3, illumination_na_pad=0.03, illumination_color={'w': 127}, settle_time_s=0): # Parse options self.illumination_type = illumination_type self.settle_time_s = settle_time_s self.object_size_mm = object_size_mm self.frame_spacing_mm = frame_spacing_mm self.flip_pathway = flip_pathway self.exposure_time_pad_s = exposure_time_pad_s self.debug = debug self.motion_acceleration_mm_s_2 = motion_acceleration_mm_s_2 self.velocity_mm_s = velocity_mm_s self.max_exposure_time_s = max_exposure_time_s self.illumination_na_pad = illumination_na_pad self.illumination_color = illumination_color self.acquisition_timeout_s = acquisition_timeout_s # Define controller objects, which act as hardware interfaces. # These should be in an ordered dictionary because the order which they # are initialized matters when using a mix of hardware and software triggering. 
self.hardware_controller_list = collections.OrderedDict() # First add hardware triggered elements so they perform their set-up before we trigger software elements for controller in hardware_controller_list: if controller.trigger_mode is 'hardware': self.hardware_controller_list[controller.type] = controller controller.reset() controller.seq_clear() # Then, add software triggered elements for controller in hardware_controller_list: if controller.trigger_mode is 'software': self.hardware_controller_list[controller.type] = controller controller.reset() controller.seq_clear() # Check to be sure a sequence acquisition is not running assert 'camera' in self.hardware_controller_list, 'Did not find camera controller!' # Store metadata object self.metadata = system_metadata # Ensure we have all necessary metadata for basic acquisition assert self.metadata.objective.na is not None, 'Missing objective.na in metadata.' assert self.metadata.objective.mag is not None, 'Missing objective.mag in metadata.' assert self.metadata.camera.pixel_size_um is not None, 'Missing pixel size in metadata.' # Update effective pixel size (for scale bar) self.metadata.system.eff_pixel_size_um = self.metadata.camera.pixel_size_um / (self.metadata.objective.mag * self.metadata.system.mag) # Trigger Constants self.TRIG_MODE_EVERY_PATTERN = 1 self.TRIG_MODE_ITERATION = -1 self.TRIG_MODE_START = -2 # Frame state time sequence, will default to a sequence of one exposure time per frame if left as None self.time_sequence_s = None self.exposure_time_s = None self.hardware_sequence_timing = None # Turn off fast sequencing for illumination by default since this is only avaolable with certain LED arrays if 'illumination' in self.hardware_controller_list: self.hardware_controller_list['illumination'].use_fast_sequence = False # print(type(self.)) self.metadata.type = 'stop and stare' assert 'illumination' in self.hardware_controller_list, 'Stop and Stare acquisition requires programmable light source' assert 'position' in self.hardware_controller_list, 'Stop and Stare acquisition requires programmable positioning device' # Generate motion pathway self.hardware_controller_list['position'].state_sequence = self.genStopAndStarePathwayRaster( self.object_size_mm, self.frame_spacing_mm) # Generate illumination sequence illuminaiton_pattern_sequence = [self.illumination_type] * \ len(self.hardware_controller_list['position'].state_sequence) self.hardware_controller_list['illumination'].state_sequence = self.genMultiContrastSequence( illuminaiton_pattern_sequence) # Tell device not to use feedback self.hardware_controller_list['illumination'].trigger_wait_flag = False self.hardware_controller_list['illumination'].command('trs.0.500.0') self.hardware_controller_list['illumination'].command('trs.1.500.0') self.hardware_controller_list['position'].goToPosition((0,0)) self.hardware_controller_list['position'].command('ENCODER X 1') self.hardware_controller_list['position'].command('ENCODER Y 1') self.hardware_controller_list['position'].command('ENCW X 100') self.hardware_controller_list['position'].command('ENCW Y 100') def acquire(self, exposure_time_ms=50): # Allocate memory for frames if self.hardware_controller_list['camera'].isSequenceRunning(): self.hardware_controller_list['camera'].sequenceStop() self.hardware_controller_list['camera'].setBufferSizeMb( 20 * len(self.hardware_controller_list['position'].state_sequence)) # Set camera exposure self.hardware_controller_list['camera'].setExposure(exposure_time_ms / 1e3) 
self.hardware_controller_list['camera'].setTriggerMode('hardware') self.hardware_controller_list['camera'].runSequence() self.hardware_controller_list['illumination'].bf() # Snap one image to ensure all acquisitons are started self.hardware_controller_list['camera'].snap() # generate frame_list t0 = time.time() frames_acquired = 0 frame_list = [] for frame in yp.display.progressBar(self.hardware_controller_list['position'].state_sequence, name='Frames Acquired'): pos = frame['states'] x = pos[0][0]['value']['x'] y = pos[0][0]['value']['y'] self.hardware_controller_list['position'].goToPosition((x, y), blocking=True) time.sleep(self.settle_time_s) frame_list.append(self.hardware_controller_list['camera'].snap()) frames_acquired += 1 # print('Acquired %d of %d frames' % (frames_acquired, len(self.hardware_controller_list['position'].state_sequence))) t_acq_sns = time.time() - t0 print("Acquisition took %.4f seconds" % (t_acq_sns)) # Create dataset from htdeblur.mddataset import MotionDeblurDataset dataset = MotionDeblurDataset() # Assign acquisition time self.metadata.acquisition_time_s = t_acq_sns # Apply simple geometric transformations if self.metadata.camera.transpose: frame_list = frame_list.transpose(0, 2, 1) if self.metadata.camera.flip_x: frame_list = np.flip(frame_list, 2) if self.metadata.camera.flip_y: frame_list = np.flip(frame_list, 1) # Assign dataset.frame_list = [frame for frame in frame_list] # Set frame state list self.n_frames = len(self.hardware_controller_list['position'].state_sequence) frame_state_list = [] for frame_index in range(self.n_frames): single_frame_state_list = {} # Loop over hardware controllers and record their state sequences for hardware_controller_name in self.hardware_controller_list: hardware_controller = self.hardware_controller_list[hardware_controller_name] if hardware_controller.state_sequence is not None: single_frame_state_list[hardware_controller_name] = hardware_controller.state_sequence[frame_index] # Record time_sequence_s single_frame_state_list['time_sequence_s'] = [0] # Add to list of all frames frame_state_list.append(single_frame_state_list) dataset.metadata = self.metadata dataset.type = 'stop_and_stare' dataset.frame_state_list = frame_state_list return dataset def genStopAndStarePathwayRaster(self, object_size_mm, frame_spacing_mm, major_axis=1, include_minor_axis=False): # Determine major axis if major_axis is None: major_axis = np.argmax(np.asarray(object_size_mm)) if object_size_mm[0] == object_size_mm[1]: major_axis = 1 # Detemine number of measurements measurement_count = np.ceil(np.asarray(object_size_mm) / np.asarray(frame_spacing_mm) ).astype(np.int) # two components in x and y # Determine slightly smaller frame spacing for optimal coverage of object frame_spacing_mm = (object_size_mm[0] / measurement_count[0], object_size_mm[1] / measurement_count[1]) # Error checking assert np.any(measurement_count > 1), "image_size must be smaller than object_size!" 
print("Image size requires %d x %d images" % (measurement_count[0], measurement_count[1])) # This variable will be populated by the loop below raster_segments = np.zeros((measurement_count[0] * 2, 2)) # Generate raster points raster_end_point_list = [] pathway = [] linear_segment_index = 0 # This variable keeps track of linear segments, for use with path planning for row in np.arange(measurement_count[0]): if row % 2 == 0: for index, col in enumerate(range(measurement_count[1])): # Add pathway to list pathway.append({'x_start': frame_spacing_mm[1] * col, 'y_start': frame_spacing_mm[0] * row, 'x_end': frame_spacing_mm[1] * col, 'y_end': frame_spacing_mm[0] * row, 'linear_segment_index': linear_segment_index}) else: for index, col in enumerate(reversed(range(measurement_count[1]))): # Add pathway to list frame_spacing_mm[0] * row pathway.append({'x_start': frame_spacing_mm[1] * col, 'y_start': frame_spacing_mm[0] * row, 'x_end': frame_spacing_mm[1] * col, 'y_end': frame_spacing_mm[0] * row, 'linear_segment_index': linear_segment_index}) linear_segment_index += 1 # make the center the mean of the pathway path_means = [] for path in pathway: path_mean = ((path['y_start']), (path['x_start'])) path_means.append(path_mean) # mean = np.sum(np.asarray(path_means), axis=1) / len(path_means) mean = np.sum(np.asarray(path_means), axis=0) / len(path_means) for path in pathway: path['x_start'] -= mean[1] path['x_end'] -= mean[1] path['y_start'] -= mean[0] path['y_end'] -= mean[0] # return pathway state_sequence = [] for path in pathway: # Store common information about this frame common_state_dict = {} common_state_dict['frame_time'] = self.hardware_controller_list['camera'].getExposure() common_state_dict['led_update_rate_us'] = None common_state_dict['linear_segment_index'] = None common_state_dict['frame_distance'] = 0 common_state_dict['exposure_distance'] = 0 common_state_dict['velocity'] = self.velocity_mm_s common_state_dict['acceleration'] = self.motion_acceleration_mm_s_2 common_state_dict['n_blur_positions_exposure'] = 1 common_state_dict['position_delta_x_mm'] = 0 common_state_dict['position_delta_y_mm'] = 0 path_dict = {'value': {'time_index' : 0, 'x': path['x_start'], 'y': path['y_start']}} state_sequence.append({'states' : [[path_dict]], 'common' : common_state_dict}) return(state_sequence) def plotPathway(self): sequence_list = self.hardware_controller_list['position'].state_sequence point_list_start = [] point_list_end = [] for sequence in sequence_list: start_pos = (sequence['states'][0][0]['value']['x'], sequence['states'][0][0]['value']['y']) end_pos = (sequence['states'][-1][0]['value']['x'], sequence['states'][-1][0]['value']['y']) point_list_start.append(start_pos) point_list_end.append(end_pos) point_list_start = np.asarray(point_list_start) point_list_end = np.asarray(point_list_end) plt.figure() for index in range(len(point_list_start)): plt.scatter(point_list_start[index, 0], point_list_start[index, 1], c='b') plt.scatter(point_list_end[index, 0], point_list_end[index, 1], c='r') plt.plot([point_list_start[index, 0], point_list_end[index, 0]], [point_list_start[index, 1], point_list_end[index, 1]], c='y') plt.xlabel('Position X (mm)') plt.ylabel('Position Y (mm)') plt.title('Pathway (b is start, y/o is end)') plt.gca().invert_yaxis() def genMultiContrastSequence(self, illumination_pattern_sequence, n_acquisitions=1, darkfield_annulus_width_na=0.1): led_list = np.arange(self.metadata.illumination.state_list.design.shape[0]) bf_mask = 
self.metadata.illumination.state_list.design[:, 0] ** 2 \ + self.metadata.illumination.state_list.design[:, 1] ** 2 < ( self.metadata.objective.na + self.illumination_na_pad) ** 2 led_list_bf = led_list[bf_mask] led_list_df = led_list[~bf_mask] led_list_an = led_list[~bf_mask & (self.metadata.illumination.state_list.design[:, 0] ** 2 + self.metadata.illumination.state_list.design[:, 1] ** 2 < (self.metadata.objective.na + darkfield_annulus_width_na) ** 2)] illumination_sequence = [] self.pattern_type_list = [] pattern_dict = {'dpc.top': np.ndarray.tolist(led_list_bf[self.metadata.illumination.state_list.design[bf_mask, 1] > 0]), 'dpc.bottom': np.ndarray.tolist(led_list_bf[self.metadata.illumination.state_list.design[bf_mask, 1] < 0]), 'dpc.left': np.ndarray.tolist(led_list_bf[self.metadata.illumination.state_list.design[bf_mask, 0] > 0]), 'dpc.right': np.ndarray.tolist(led_list_bf[self.metadata.illumination.state_list.design[bf_mask, 0] < 0]), 'single': [0], 'bf': np.ndarray.tolist(led_list_bf), 'df': np.ndarray.tolist(led_list_df), 'an': np.ndarray.tolist(led_list_an), 'full': np.ndarray.tolist(led_list) } # DPC does not flicker patterns within frames n_time_points_per_frame = 1 illumination_state_list = [] # Write image sequence to list for acquisition_index in range(n_acquisitions): # Loop over DPC patterns (frames) for frame_index, pattern in enumerate(illumination_pattern_sequence): single_frame_state_list_illumination = [] # Loop over time points (irrelevent for dpc) for time_index in range(n_time_points_per_frame): time_point_state_list = [] # Loop over DPC patterns (which are themselves frames) for led_idx in pattern_dict[pattern]: values_dict = {} for color_name in self.illumination_color: values_dict[color_name] = self.illumination_color[color_name] led_dict = { 'index': int(led_idx), 'time_index': 0, 'value': values_dict } # Append this to list with elements for each interframe time point time_point_state_list.append(led_dict) # Append to frame_dict single_frame_state_list_illumination.append(time_point_state_list) # Define illumination sequence illumination_state_list.append({'states' : single_frame_state_list_illumination, 'common' : {}}) # Define illumination list self.state_list = self.metadata.illumination.state_list.design return illumination_state_list class MotionDeblurAcquisition(): # Initialization def __init__(self, hardware_controller_list, system_metadata, illumination_sequence=None, motion_path_type='linear', use_l1_distance_for_motion_calculations=True, blur_vector_method='pseudo_random', kernel_pulse_count=150, saturation_factor=1.0, frame_spacing_mm=1, object_size_mm=(0.5, 0.5), reuse_illumination_sequence=True, max_exposure_time_s=2, max_velocity_mm_s=40.0, max_led_update_rate_us=0.01, exposure_time_pad_s=0.0, velocity_mm_s=None, exposure_time_s=None, debug=False, motion_acceleration_mm_s_2=1e3, extra_run_up_time_s=0, flip_pathway=False, segment_delay_s=0, initial_auto_exposure=False, acquisition_timeout_s=3, illumination_sequence_count=1, illumination_na_pad=0.03, illumination_color={'w': 127}, only_store_first_and_last_position=True): # Parse options self.motion_path_type = motion_path_type self.object_size_mm = object_size_mm self.frame_spacing_mm = frame_spacing_mm self.flip_pathway = flip_pathway self.use_l1_distance_for_motion_calculations = use_l1_distance_for_motion_calculations self.velocity_mm_s = velocity_mm_s self.exposure_time_pad_s = exposure_time_pad_s self.debug = debug self.motion_acceleration_mm_s_2 = motion_acceleration_mm_s_2 
self.max_led_update_rate_us = max_led_update_rate_us self.max_exposure_time_s = max_exposure_time_s self.max_velocity_mm_s = max_velocity_mm_s self.illumination_na_pad = illumination_na_pad self.saturation_factor = saturation_factor self.reuse_illumination_sequence = reuse_illumination_sequence self.blur_vector_method = blur_vector_method self.kernel_pulse_count = kernel_pulse_count self.illumination_color = illumination_color self.extra_run_up_time_s = extra_run_up_time_s self.initial_auto_exposure = initial_auto_exposure self.acquisition_timeout_s = acquisition_timeout_s self.segment_delay_s = segment_delay_s self.only_store_first_and_last_position = only_store_first_and_last_position self.illumination_sequence = illumination_sequence self.illumination_sequence_count = illumination_sequence_count # Define controller objects, which act as hardware interfaces. # These should be in an ordered dictionary because the order which they # are initialized matters when using a mix of hardware and software triggering. self.hardware_controller_list = collections.OrderedDict() # First add hardware triggered elements so they perform their set-up before we trigger software elements for controller in hardware_controller_list: if hasattr(controller, 'trigger_mode'): if controller.trigger_mode is 'hardware': self.hardware_controller_list[controller.type] = controller controller.reset() controller.seq_clear() # Then, add software triggered elements for controller in hardware_controller_list: self.hardware_controller_list[controller.type] = controller controller.reset() controller.seq_clear() # Check to be sure a sequence acquisition is not running assert 'camera' in self.hardware_controller_list, 'Did not find camera controller!' # Store metadata object self.metadata = system_metadata # Ensure we have all necessary metadata for basic acquisition assert self.metadata.objective.na is not None, 'Missing objective.na in metadata.' assert self.metadata.objective.mag is not None, 'Missing objective.mag in metadata.' assert self.metadata.camera.pixel_size_um is not None, 'Missing pixel size in metadata.' 
# Update effective pixel size (for scale bar) self.metadata.system.eff_pixel_size_um = self.metadata.camera.pixel_size_um / (self.metadata.objective.mag * self.metadata.system.mag) # Trigger Constants self.TRIG_MODE_EVERY_PATTERN = 1 self.TRIG_MODE_ITERATION = -1 self.TRIG_MODE_START = -2 # Frame state time sequence, will default to a sequence of one exposure time per frame if left as None self.time_sequence_s = None self.exposure_time_s = None self.hardware_sequence_timing = None # Turn off fast sequencing for illumination by default since this is only avaolable with certain LED arrays if 'illumination' in self.hardware_controller_list: self.hardware_controller_list['illumination'].use_fast_sequence = False # Set metadata type self.metadata.type = 'motiondeblur' assert 'illumination' in self.hardware_controller_list, 'Motion deblur object requires programmable light source' assert 'position' in self.hardware_controller_list, 'Motion deblur object requires motion stage' # Initialize state_sequence self.state_sequence = [] # Generate position sequence self.hardware_controller_list['position'].state_sequence, self.time_sequence_s = self.genMotionPathway( pathway_type=self.motion_path_type, frame_spacing_mm=frame_spacing_mm) # Generate illumination sequence self.hardware_controller_list['illumination'].state_sequence = self.genMotionIlluminationSequenceRandom(illumination_sequence=illumination_sequence, sequence_count=self.illumination_sequence_count) # Set up subframe captures self.subframe_capture_count = len(self.hardware_controller_list['illumination'].state_sequence[0]) self.force_preload_all_frames = True self.hardware_controller_list['position'].continuous_states_between_frames = True # Configure illuination to use fast sequence updating if specified in options self.hardware_controller_list['illumination'].use_fast_sequence = True # Set bit depth self.illumination_sequence_bit_depth = 1 # Set extra options for position controller self.hardware_controller_list['position'].extra_run_up_time_s = self.extra_run_up_time_s # Calculate effective pixel size if it hasn't already been calculated self.metadata.system.eff_pixel_size_um = self.metadata.camera.pixel_size_um / \ (self.metadata.objective.mag * self.metadata.system.mag) def preAcquire(self): ''' This method sets up the camera for an acquisition ''' # Check that the length of motion, illuimination, pupil, and focal sequences are same (or None) frame_counts = [] for hardware_controller_name in list(self.hardware_controller_list): # Get controller object from dictionary hardware_controller = self.hardware_controller_list[hardware_controller_name] if hardware_controller.state_sequence is not None: # Reset Controller hardware_controller.reset() # Get number of frames in sequence. 
If there is no sequence, remove this element from hw_controller_list if hardware_controller.type is not 'camera': if hardware_controller.state_sequence is not None: frame_counts.append(len(hardware_controller.state_sequence)) else: self.hardware_controller_list.pop(hardware_controller_name) else: # Remove this controller from the list if hardware_controller_name is not 'camera': del self.hardware_controller_list[hardware_controller_name] # Turn on hardware triggering for initialization self.hardware_controller_list['camera'].setTriggerMode('hardware') # Set illumination parameters if 'illumination' in self.hardware_controller_list: # self.hardware_controller_list['illumination'].setColor(self.illumination_color) self.hardware_controller_list['illumination'].setSequenceBitDepth( self.illumination_sequence_bit_depth) # Ensure all hardware elements have the same number of frames if len(frame_counts) > 0: if not np.sum(np.mean(np.asarray(frame_counts)) == np.asarray(frame_counts)) == len(frame_counts): raise ValueError('Sequence lengths are not the same (or None).') else: self.n_frames = frame_counts[0] else: raise ValueError('No sequence provided!') # Initialize frame_list self.frame_list = np.zeros((self.n_frames, self.hardware_controller_list['camera'].getImageHeight(), self.hardware_controller_list['camera'].getImageWidth()), dtype=np.uint16) # Apply simple geometric transformations if self.metadata.camera.transpose: self.frame_list = self.frame_list.transpose(0, 2, 1) if self.metadata.camera.flip_x: self.frame_list = np.flip(self.frame_list, 2) if self.metadata.camera.flip_y: self.frame_list = np.flip(self.frame_list, 1) # Generate frame_state_list frame_state_list = [] if self.time_sequence_s is None: self.time_sequence_s = [] for _ in range(self.n_frames): self.time_sequence_s.append([0]) # Loop over frames for frame_index in range(self.n_frames): single_frame_state_list = {} # Loop over hardware controllers and record their state sequences for hardware_controller_name in self.hardware_controller_list: hardware_controller = self.hardware_controller_list[hardware_controller_name] if hardware_controller.state_sequence is not None: single_frame_state_list[hardware_controller_name] = hardware_controller.state_sequence[frame_index] # Record time_sequence_s single_frame_state_list['time_sequence_s'] = self.time_sequence_s[frame_index] # Add to list of all frames frame_state_list.append(single_frame_state_list) self.frame_state_list = frame_state_list # Perform auto-exposure if user desires if self.initial_auto_exposure: # Illuminate with first pattern if 'illumination' in self.hardware_controller_list: self.hardware_controller_list['illumination'].sequenceReset() self.hardware_controller_list['illumination'].time_sequence_s = [[0]] self.hardware_controller_list['illumination'].preloadSequence(0) self.hardware_controller_list['illumination'].sequenceStep() # Small delay to ensure illumination gets updated time.sleep(0.1) # Run Auto-Exposure self.hardware_controller_list['camera'].autoExposure() # Set camera memory footprint if (self.hardware_controller_list['camera'].getBufferTotalCapacity() < self.frame_list.shape[0]): self.frame_size_mb = int( np.ceil(float(self.frame_list.shape[0] / 1e6) * float(self.frame_list.shape[1]) * float(self.frame_list.shape[2]) * 2)) print('Allocating %dmb for frames' % self.frame_size_mb) self.hardware_controller_list['camera'].setBufferSizeMb(self.frame_size_mb) assert self.hardware_controller_list['camera'].getBufferTotalCapacity( ) >= self.frame_list.shape[0], 
'Buffer size too small!' # Store initial time (acquisition start) t0 = time.time() # Tell camera to start waiting for frames self.hardware_controller_list['camera'].runSequence() # Keep track of how many images we have acquired self.total_frame_count = 0 def acquire(self, dataset=None, reset_devices=False): ''' This is a generic acquisition class, where LEDs are updated according to the sequence variable. ''' # Call preacquire. which initializes hardware and variables self.preAcquire() # Determine which frames can be preloaded before serial acquisition. If each frame is only one state, we assume that we can preload all frames. But, if the state of any hardware element changes within any frame, we will assume we can't preload the frames frame_count = 0 linear_segment_list = [] for frame_state in self.hardware_controller_list['position'].state_sequence: if frame_state['common']['linear_segment_index'] >= 0: frame_count += 1 if frame_state['common']['linear_segment_index'] not in linear_segment_list: linear_segment_list.append(frame_state['common']['linear_segment_index']) print("Found %d segments and %d frames" % (len(linear_segment_list), frame_count)) t_start = time.time() for linear_segment_index in linear_segment_list: self.frames_to_acquire = [] # Determine which linear segments to run for frame_index, frame_state in enumerate(self.hardware_controller_list['position'].state_sequence): if frame_state['common']['linear_segment_index'] == linear_segment_index: self.frames_to_acquire += [frame_index] self.n_frames_to_acquire = len(self.frames_to_acquire) x_start = self.hardware_controller_list['position'].state_sequence[self.frames_to_acquire[0]]['states'][0][0]['value']['x'] y_start = self.hardware_controller_list['position'].state_sequence[self.frames_to_acquire[0]]['states'][0][0]['value']['y'] x_end = self.hardware_controller_list['position'].state_sequence[self.frames_to_acquire[-1]]['states'][0][0]['value']['x'] y_end = self.hardware_controller_list['position'].state_sequence[self.frames_to_acquire[-1]]['states'][0][0]['value']['y'] print('Starting linear segment %d which has %d frames moving from (%.4f, %.4f)mm to (%.4f, %.4f)mm' % (linear_segment_index, self.n_frames_to_acquire, x_start, y_start, x_end, y_end)) frame_has_multiple_states = [] for frame_index in self.frames_to_acquire: number_of_states_in_current_frame = 0 for hardware_controller_name in self.hardware_controller_list: if hardware_controller_name is not 'camera' and self.hardware_controller_list[hardware_controller_name].state_sequence is not None: # Check if this frame can be preloaded (if it has more than one state, it can't be preloaded) number_of_states_in_current_frame = max(number_of_states_in_current_frame, len( self.hardware_controller_list[hardware_controller_name].state_sequence[frame_index]['states'])) # Check that the length of time_sequence_s matches the max number of state changes within this frame if number_of_states_in_current_frame > 1: frame_has_multiple_states.append(True) assert self.time_sequence_s is not None, "time_sequence_s can not be None if any frame has multiple states!" assert len(self.time_sequence_s[frame_index]) == number_of_states_in_current_frame, "time_sequence_s for frame %d is of wrong length!" % len( self.time_sequence_s[frame_index]['states']) else: frame_has_multiple_states.append(False) # Determine if the entire multi-frame sequence can be preloaded (this will be False if ther eis only one system state (e.g. 
LED pattern) within each frame) all_frames_will_be_preloaded = (not any(frame_has_multiple_states)) or self.force_preload_all_frames # Determine optimal exposure time for all frames if self.exposure_time_s is not None: self.hardware_controller_list['camera'].setExposure(self.exposure_time_s) elif self.time_sequence_s is not None and max(self.time_sequence_s[0]) > 0: frame_exposures = [] for frame_index in range(self.n_frames_to_acquire): frame_exposures.append(max(self.time_sequence_s[frame_index])) self.exposure_time_s = sum(frame_exposures) / (self.n_frames_to_acquire) self.hardware_controller_list['camera'].setExposure(self.exposure_time_s) else: self.exposure_time_s = self.hardware_controller_list['camera'].getExposure() # Check that exposure time is correct assert abs(self.exposure_time_s - self.hardware_controller_list['camera'].getExposure( )) < 1e-3, "Desired exposure time %.2f is not equal to device exposure %.2f. This is probably a MM issue" % (self.exposure_time_s, self.hardware_controller_list['camera'].getExposure()) # print('Using exposure time %.2fs (%d ms)' % (self.exposure_time_s, int(self.exposure_time_s * 1000))) # Check that time_sequence_s for multiple frames exists if there are inter-frame state changes if (not any(frame_has_multiple_states)) or self.time_sequence_s is None: self.time_sequence_s = [self.exposure_time_s] # Configure hardware triggering trigger_output_settings = [0, 0] trigger_input_settings = [0, 0] for hardware_controller_name in self.hardware_controller_list: hardware_controller = self.hardware_controller_list[hardware_controller_name] if hasattr(hardware_controller, 'trigger_mode') and 'hardware' in hardware_controller.trigger_mode: # Check that trigger pins are configured assert hardware_controller.trigger_pin is not None, 'Trigger pin must be configured for hardware triggering!' 
# Determine if we're performing preloadable acquisitions or not if self.subframe_capture_count > 1: if self.reuse_illumination_sequence: if hardware_controller_name == 'camera': if self.illumination_sequence_count == 1: trigger_output_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_ITERATION trigger_input_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_ITERATION else: trigger_output_settings[hardware_controller.trigger_pin] = len(self.hardware_controller_list['position'].state_sequence[0]['states']) // self.illumination_sequence_count trigger_input_settings[hardware_controller.trigger_pin] = len(self.hardware_controller_list['position'].state_sequence[0]['states']) // self.illumination_sequence_count elif hardware_controller_name == 'position': trigger_output_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_START trigger_input_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_START else: if hardware_controller_name == 'camera': trigger_output_settings[hardware_controller.trigger_pin] = self.subframe_capture_count trigger_input_settings[hardware_controller.trigger_pin] = self.subframe_capture_count elif hardware_controller_name == 'position': trigger_output_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_START trigger_input_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_START # Case where there is only one system state wihtin each frame (trigger each frame) elif all_frames_will_be_preloaded: trigger_output_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_EVERY_PATTERN trigger_input_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_EVERY_PATTERN # Case where we only want to trigger on first frame. This is probably not a good default. else: trigger_output_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_ITERATION trigger_input_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_ITERATION # Check that this hardware controller is ready for a sequence, if it is sequencable. 
if hardware_controller.state_sequence is not None: # Reset controller sequence to initial state hardware_controller.sequenceReset() time.sleep(0.1) # Wait until initialization is complete initialization_wait_time = 0 for hardware_controller_name in self.hardware_controller_list: while not self.hardware_controller_list[hardware_controller_name].isReadyForSequence(): time.sleep(0.05) initialization_wait_time += 0.05 if initialization_wait_time > self.acquisition_timeout_s: raise ValueError('Pre-acquisiton isReadyForSequence timeout for %s' % hardware_controller_name) # Tell the hardware controller about the acquisition time sequence if len(hardware_controller.state_sequence) == len(self.time_sequence_s): hardware_controller.time_sequence_s = [self.time_sequence_s[i] for i in self.frames_to_acquire] else: hardware_controller.time_sequence_s = [ [self.hardware_controller_list['camera'].getExposure()]] * self.n_frames_to_acquire # Set up triggering for hardware acquision self.hardware_controller_list['illumination'].trigger_output_settings = trigger_output_settings self.hardware_controller_list['illumination'].trigger_input_settings = trigger_input_settings # Determine which sequences get preloaded if all_frames_will_be_preloaded: # One system state per acquisition frame_preload_sequence = [-1] # Preload all frames at once else: frame_preload_sequence = range(self.n_frames_to_acquire) # Preload each frame serially # Loop over frames to capture (may only execute once if we're preloading all frames) for preload_index in frame_preload_sequence: # Loop over hardware controllers, preload, and determine necessary exposure time (if using inter-frame state changes) for hardware_controller_name in self.hardware_controller_list: # If we're using the motion stage, calculate the mechanical delay if hardware_controller_name == 'position': # Get velocity and acceleration from state sequence if preload_index == -1: index = 0 else: index = preload_index velocity = self.hardware_controller_list[hardware_controller_name].state_sequence[0]['common']['velocity'] acceleration = self.hardware_controller_list[hardware_controller_name].acceleration jerk = self.hardware_controller_list[hardware_controller_name].jerk # Calculate spin-up time and distance # http://www.wolframalpha.com/input/?i=v+%3D+t+*+(a+%2B+0.5*j+*+t)+solve+for+t # http://www.wolframalpha.com/input/?i=v+%3D+t+*+(a+%2B+(1%2F8)*j+*+t)+solve+for+t # Good reference: # http://www.et.byu.edu/~ered/ME537/Notes/Ch5.pdf # Total period if False: # First period (acceleration of acceleration) t_1 = acceleration / jerk # x_1 = 1/6 * jerk * t_1 ** 3 x_1 = acceleration ** 2 / (6 * jerk) * t_1 # v_1 = 1/2 * jerk * t_1 ** 2 v_1 = acceleration ** 2 / (2 * jerk) # Second period (linear region) dv = velocity - 2 * v_1 assert dv > 0 t_2 = dv / acceleration x_2 = v_1 * t_2 + 1/2 * acceleration * t_2 ** 2 v_2 = velocity - v_1 # Third period (decelleration of acceleration) t_3 = acceleration / jerk x_3 = (v_2 + acceleration ** 2 / (3 * jerk)) * t_3 v_3 = v_1 # Calculate spin-up distance and time spin_up_time_s = t_1 + t_2 + t_3 spin_up_distance_mm = x_1 + x_2 + x_3 assert (v_1 + v_2 + v_3 - velocity) < 1e-1, "Calculated velocity is %.4f, desired is %.4f" % (v_1 + v_2 + v_3, velocity) else: spin_up_time_s = velocity / acceleration spin_up_distance_mm = 1/2 * acceleration * spin_up_time_s ** 2 # Add extra spin_up time spin_up_time_s += self.extra_run_up_time_s spin_up_distance_mm += self.extra_run_up_time_s * velocity # spin_up_distance_mm = 0 spin_up_time_s = 
max(spin_up_time_s, 0.0001) self.hardware_controller_list['illumination'].setupTriggering(self.hardware_controller_list['position'].trigger_pin, int( self.hardware_controller_list['position'].trigger_pulse_width_us), int(spin_up_time_s * 1e6)) # convert to seconds # Tell motion stage to offset it's positions by these amounts self.hardware_controller_list['position'].preload_run_up_distance_mm = spin_up_distance_mm else: # no delay for other components self.hardware_controller_list[hardware_controller_name].trigger_start_delay_s = 0 if hardware_controller_name is not 'camera' and self.hardware_controller_list[hardware_controller_name].state_sequence is not None: if hardware_controller_name is not 'illumination' or linear_segment_index == 0: if hardware_controller_name == 'illumination' and self.reuse_illumination_sequence: self.hardware_controller_list[hardware_controller_name].preloadSequence(0) else: state_sequence_used = [ self.hardware_controller_list[hardware_controller_name].state_sequence[i] for i in self.frames_to_acquire] self.hardware_controller_list[hardware_controller_name].preloadSequence( preload_index, state_sequence=state_sequence_used) if preload_index < 0 or self.reuse_illumination_sequence: frames_to_wait_for = self.n_frames_to_acquire # wait for all frames else: frames_to_wait_for = 1 # Set trigger frame time based on first pathway TODO: This is a hack if 'position' in self.hardware_controller_list: self.hardware_controller_list['illumination'].trigger_frame_time_s[self.hardware_controller_list['camera'] .trigger_pin] = self.hardware_controller_list['position'].state_sequence[0]['common']['frame_time'] * 1e6 # Tell stage to start moving self.hardware_controller_list['position'].runSequence() if linear_segment_index == 0: t_start = time.time() # Tell illumination to start moving if self.reuse_illumination_sequence: self.hardware_controller_list['illumination'].runSequence( n_acquisitions=1 * self.n_frames_to_acquire) else: self.hardware_controller_list['illumination'].runSequence(n_acquisitions=1) # Wait for frames to be captured t_frame = time.time() frame_count = 0 while frame_count < frames_to_wait_for: if self.total_frame_count + frame_count == frames_to_wait_for: break else: if self.total_frame_count + frame_count == self.hardware_controller_list['camera'].getBufferSizeFrames(): time.sleep(0.01) if (time.time() - t_frame) > self.acquisition_timeout_s: print(self.hardware_controller_list['illumination'].response()) raise ValueError('Acquisition timeout (Total frame count: %d, Buffer size: %d, preload index %d, frames to wait for: %d)' % ( self.total_frame_count, self.hardware_controller_list['camera'].getBufferSizeFrames(), preload_index, frames_to_wait_for)) else: if ((self.total_frame_count + frame_count) % int((self.n_frames) / min(10, self.n_frames_to_acquire))) == 0: print('Acquired %d of %d frames' % ( self.hardware_controller_list['camera'].getBufferSizeFrames(), self.n_frames_to_acquire)) frame_count = self.hardware_controller_list['camera'].getBufferSizeFrames( ) - self.total_frame_count self.total_frame_count = self.hardware_controller_list['camera'].getBufferSizeFrames() t_frame = time.time() # Get sequence timing information time.sleep(0.1) print(self.hardware_controller_list['illumination'].response()) # Wait for hardware to stop for hardware_controller_name in self.hardware_controller_list: while not self.hardware_controller_list[hardware_controller_name].isReadyForSequence(): time.sleep(0.05) self.sequence_timing_dict = {} # Reset sequences for 
hardware_controller_name in self.hardware_controller_list: if hardware_controller_name is not 'camera': self.hardware_controller_list[hardware_controller_name].sequenceReset() # Let user know we're finished print('Finished linear segment %d' % linear_segment_index) time.sleep(self.segment_delay_s) t_acq = time.time() - t_start self.metadata.acquisition_time_s = t_acq print("Acquisition took %.4f seconds" % (t_acq)) # Call post-acquire functions dataset = self.postAcquire(dataset=dataset, reset_devices=reset_devices) # Return return dataset def postAcquire(self, dataset=None, reset_devices=True): """Post-acquisition steps for resetting hardware and preparing dataset.""" # Stop acquisition # self.hardware_controller_list['camera'].sequenceStop() # Parse dataset if dataset is None: from htdeblur.mddataset import MotionDeblurDataset dataset = MotionDeblurDataset() # Read frames and timestamps from buffer (self.frame_list, elapsed_frame_time_ms) = self.hardware_controller_list['camera'].readFramesFromBuffer() # Apply simple geometric transformations if self.metadata.camera.transpose: self.frame_list = self.frame_list.transpose(0, 2, 1) if self.metadata.camera.flip_x: self.frame_list = np.flip(self.frame_list, 2) if self.metadata.camera.flip_y: self.frame_list = np.flip(self.frame_list, 1) # Let user know we're finished print('Read frames from buffer.') # Store camera timing in a standardized timing dict self.sequence_timing_dict = {} self.sequence_timing_dict['sequence_timing'] = [] for frame_index, frame_time in enumerate(elapsed_frame_time_ms): timing_dict = {'trigger_number' : 0, 'acquisition_number' : frame_index, 'camera_start_time_us' : frame_time * 1000} self.sequence_timing_dict['sequence_timing'].append(timing_dict) # Reset all hardware elements if reset_devices: for hardware_controller_name in self.hardware_controller_list: self.hardware_controller_list[hardware_controller_name].reset() if self.only_store_first_and_last_position: for frame_state in self.frame_state_list[1:]: frame_state['position']['states'] = [frame_state['position']['states'][0], frame_state['position']['states'][-1]] # Remove repeated illumination patterns and time_sequence_s if we used the same illumination for each pulse if self.reuse_illumination_sequence: for frame_state in self.frame_state_list[1:]: frame_state['time_sequence_s'] = 'see_frame_#1' frame_state['illumination'] = 'see_frame_#1' # Illuminate with brightfield to indicate we're Finished self.hardware_controller_list['illumination'].bf() self.hardware_controller_list['position'].goToPosition((0,0)) # Save results to an itoools.Dataset object dataset.frame_list = self.frame_list dataset.frame_state_list = self.frame_state_list dataset.metadata = self.metadata dataset.type = 'motion_deblur' # Return return dataset def genMotionPathway(self, n_acquisitions=1, pathway_type='raster', frame_spacing_mm=1.): ''' This function generates a few example motion pathways. 
''' if pathway_type is 'raster': pathway = self.genMotionPathwayRaster(self.object_size_mm, self.frame_spacing_mm) elif (pathway_type is 'linear') or (pathway_type is 'linear_x'): # predefine linear y sequence n_frames = int(math.ceil(self.object_size_mm[1] / self.frame_spacing_mm[1])) pathway = [] for frame_index in range(n_frames): pathway.append({'x_start': frame_index * self.frame_spacing_mm[1], 'x_end': (frame_index + 1) * self.frame_spacing_mm[1], 'y_start': 0, 'y_end': 0, 'linear_segment_index': 0}) elif pathway_type in ['linear_y']: # predefine linear y sequence n_frames = int(np.ceil(self.object_size_mm[0] / self.frame_spacing_mm[0])) pathway = [] for frame_index in range(n_frames): pathway.append({'y_start': -frame_index * self.frame_spacing_mm[0], 'y_end': -(frame_index + 1) * self.frame_spacing_mm[0], 'x_start': 0, 'x_end': 0, 'linear_segment_index': 0}) elif pathway_type is 'linear_diag': # predefine linear y sequence n_frames = int(np.ceil(self.object_size_mm[0] / self.frame_spacing_mm[0])) pathway = [] for frame_index in range(n_frames): pathway.append({'y_start': frame_index * self.frame_spacing_mm[0], 'y_end': (frame_index + 1) * self.frame_spacing_mm[0], 'x_start': frame_index * self.frame_spacing_mm[0], 'x_end': (frame_index + 1) * self.frame_spacing_mm[0], 'linear_segment_index': 0}) else: raise ValueError('Pathway type %s is not implemented.' % pathway_type) # make the center the mean of the pathway path_xmin = 1e8 path_ymin = 1e8 path_xmax = -1e8 path_ymax = -1e8 for path in pathway: path_mean = ((path['y_start']), (path['y_start'])) path_xmin = min(path_xmin, min([path['x_start'], path['x_end']])) path_xmax = max(path_xmax, max([path['x_start'], path['x_end']])) path_ymin = min(path_ymin, min([path['y_start'], path['y_end']])) path_ymax = max(path_ymax, max([path['y_start'], path['y_end']])) mean = ((path_ymax + path_ymin) / 2, (path_xmax + path_xmin) / 2) for path in pathway: path['x_start'] = path['x_start'] - mean[1] path['x_end'] = path['x_end'] - mean[1] path['y_start'] = path['y_start'] - mean[0] path['y_end'] = path['y_end'] - mean[0] # Flip pathway if user desired if self.flip_pathway: for path in pathway: path['x_start'] *= -1 path['x_end'] *= -1 path['y_start'] *= -1 path['y_end'] *= -1 position_state_list = [] time_sequence_s = [] # Write image sequence to list for acquisition_index in range(n_acquisitions): # Loop over DPC patterns (frames) for frame_index, position in enumerate(pathway): # define distance in terms of l1 or l2 distance distance_l2 = float(np.sqrt((position['x_end'] - position['x_start']) ** 2 + (position['y_end'] - position['y_start']) ** 2)) distance_l1 = float(abs(position['x_end'] - position['x_start']) + abs(position['y_end'] - position['y_start'])) if self.use_l1_distance_for_motion_calculations: position['frame_distance'] = int(round(distance_l1 * 1000)) / 1000 # round to nearest um else: position['frame_distance'] = int(round(distance_l2 * 1000)) / 1000 # round to nearest um # Determine number of qunatifiable positions in pathway position['n_blur_positions_frame'] = int( math.floor(position['frame_distance'] / (self.metadata.system.eff_pixel_size_um / 1000))) # Determine necessary velocity if self.velocity_mm_s is not None: position['velocity_mm_s'] = self.velocity_mm_s else: position['velocity_mm_s'] = self.max_velocity_mm_s # Use fastest speed possible # Calculate time between frames position['frame_time_s'] = position['frame_distance'] / position['velocity_mm_s'] # t = x / v # Determine camera exposure time for this frame 
position['exposure_time_s'] = int(math.floor((self.hardware_controller_list['camera'].calcExposureTimeFromBusyTime( position['frame_time_s']) - self.exposure_time_pad_s) * 1000)) / 1000 # round to nearest ms # Determine LED update rate dx_pixel = position['frame_distance'] / position['n_blur_positions_frame'] dt_pixel_raw = dx_pixel / position['velocity_mm_s'] position['led_update_rate_us'] = math.ceil(dt_pixel_raw * 1e6) # Round up to integer us # Determine new velocity (ps / update rate) new_velocity_mm_s = (self.metadata.system.eff_pixel_size_um / 1e3) / (position['led_update_rate_us'] / 1e6) if self.debug > 0: print('Reducing velocity to %.4f mm/s from %.4f mm/s to match illumination update rate of %d us' % (new_velocity_mm_s, position['velocity_mm_s'], position['led_update_rate_us'])) position['velocity_mm_s'] = new_velocity_mm_s # Update frame time based on velocity position['frame_time_s'] = position['frame_distance'] / position['velocity_mm_s'] # Determine number of pixels in exposure time position['n_blur_positions_exposure'] = math.floor(position['exposure_time_s'] / (position['led_update_rate_us'] / 1e6)) # Determine the distance traveled during the exposure time position['exposure_distance'] = position['n_blur_positions_exposure'] * position['led_update_rate_us'] / 1e6 * position['velocity_mm_s'] # Store acceleration position['acceleration_mm_s_2'] = self.motion_acceleration_mm_s_2 # Print information about this pattern if self.debug > 0: print('Segment %d, index %d will require %d blur positions per frame (%d during exposure), %.2fms exposure time (%.2fms total frame time), scan %.2fmm (%.2fmm with exposure), move at %.2fmm/s, and update speed %dus' % (position['linear_segment_index'], frame_index, position['n_blur_positions_frame'],position['n_blur_positions_exposure'], 1000. * position['exposure_time_s'], 1000. 
* position['frame_time_s'], position['frame_distance'], position['exposure_distance'], position['velocity_mm_s'], position['led_update_rate_us'])) # Check that all blur parameters are valid assert position['led_update_rate_us'] >= self.max_led_update_rate_us, "LED Array update rate (%dms) < max update rate (%dms)" % ( position['led_update_rate_us'], self.max_led_update_rate_us) assert position['exposure_time_s'] <= self.max_exposure_time_s, "Exposure time (%.3fs) > max_exposure_time_s (%.3f)" % ( position['exposure_time_s'], self.max_exposure_time_s) assert position['velocity_mm_s'] <= self.max_velocity_mm_s, "Velocity (%.3fs) > max_velocity_mm_s (%.3f)" % ( position['velocity_mm_s'], self.max_velocity_mm_s) # List for this positions single_frame_state_list_position = [] single_frame_time_sequence_s = [] # Determine movement direction direction = np.asarray((position['y_end'] - position['y_start'], position['x_end'] - position['x_start'])) direction /= np.linalg.norm(direction) # Store common information about this frame common_state_dict = {} common_state_dict['frame_time'] = position['frame_time_s'] common_state_dict['led_update_rate_us'] = position['led_update_rate_us'] common_state_dict['linear_segment_index'] = position['linear_segment_index'] common_state_dict['frame_distance'] = position['frame_distance'] common_state_dict['exposure_distance'] = position['exposure_distance'] common_state_dict['velocity'] = position['velocity_mm_s'] common_state_dict['acceleration'] = position['acceleration_mm_s_2'] common_state_dict['n_blur_positions_exposure'] = position['n_blur_positions_exposure'] common_state_dict['position_delta_x_mm'] = direction[1] * position['velocity_mm_s'] * position['led_update_rate_us'] / 1e6 common_state_dict['position_delta_y_mm'] = direction[0] * position['velocity_mm_s'] * position['led_update_rate_us'] / 1e6 # Loop over time points (irrelevent for dpc) for time_index in range(position['n_blur_positions_exposure']): time_point_state_list = [] x = position['x_start'] + direction[1] * abs(common_state_dict['position_delta_x_mm']) * time_index y = position['y_start'] + direction[0] * abs(common_state_dict['position_delta_x_mm']) * time_index # Append this to list with elements for each interframe time point time_point_state_list.append({'time_index': time_index, 'value': {'x': x, 'y': y}}) # Append to frame_dict single_frame_state_list_position.append(time_point_state_list) single_frame_time_sequence_s.append((time_index + 1) * position['led_update_rate_us'] / 1e6) # Define illumination sequence position_state_list.append({'states' : single_frame_state_list_position, 'common' : common_state_dict}) # Define time_sequence time_sequence_s.append(single_frame_time_sequence_s) # for state in position_state_list: # print(state['states'][0][0]['value']['x'] - state['states'][-1][0]['value']['x']) return (position_state_list, time_sequence_s) def genMotionPathwayRaster(self, object_size_mm, frame_spacing_mm, major_axis=None, include_minor_axis=False): # Hard-code major axis since the rest of the code doesn't respect it for now _major_axis = 1 # Detemine number of measurements measurement_count = np.ceil(np.asarray(object_size_mm) / np.asarray(frame_spacing_mm)).astype(np.int) # two components in x and y # Error checking assert np.any(measurement_count > 1), "image_size must be smaller than object_size!" 
print("Image size requires %d x %d images" % (measurement_count[0], measurement_count[1])) # If number of measurements along major axis is odd, center this row offset = [0, 0] offset[_major_axis] -= frame_spacing_mm[_major_axis] / 2 # Generate raster points raster_end_point_list = [] pathway = [] linear_segment_index = 0 # This variable keeps track of linear segments, for use with path planning for row in np.arange(measurement_count[0]): if row % 2 == 0: for index, col in enumerate(range(measurement_count[1])): # Add pathway to list pathway.append({'x_start': frame_spacing_mm[1] * col + offset[1], 'y_start': frame_spacing_mm[0] * row + offset[0], 'x_end': frame_spacing_mm[1] * (col + 1) + offset[1], 'y_end': frame_spacing_mm[0] * row + offset[0], 'linear_segment_index': linear_segment_index}) # Add minor stride if row < (measurement_count[0] - 1) and include_minor_axis: pathway.append({'x_start': frame_spacing_mm[1] * (measurement_count[1] - 1) + offset[1], 'y_start': frame_spacing_mm[0] * row + offset[0], 'x_end': frame_spacing_mm[1] * (measurement_count[1] - 1) + offset[1], 'y_end': frame_spacing_mm[0] * (row + 1) + offset[0], 'linear_segment_index': -1 * (linear_segment_index + 1)}) else: for index, col in enumerate(reversed(range(measurement_count[1]))): # Add pathway to list pathway.append({'x_start': frame_spacing_mm[1] * col - offset[1], 'y_start': frame_spacing_mm[0] * row - offset[0], 'x_end': frame_spacing_mm[1] * (col - 1) - offset[1], 'y_end': frame_spacing_mm[0] * row - offset[0], 'linear_segment_index': linear_segment_index}) # Add minor stride if row < (measurement_count[0] - 1) and include_minor_axis: pathway.append({'x_start': - offset[1], 'y_start': frame_spacing_mm[0] * row - offset[0], 'x_end': 0 - offset[1], 'y_end': frame_spacing_mm[0] * (row + 1) - offset[0], 'linear_segment_index': -1 * (linear_segment_index + 1)}) linear_segment_index += 1 print('Generated motion pathway with %d linear segments' % (linear_segment_index)) return pathway def plotPathway(self): sequence_list = self.hardware_controller_list['position'].state_sequence point_list_start = [] point_list_end = [] for sequence in sequence_list: start_pos = (sequence['states'][0][0]['value']['x'], sequence['states'][0][0]['value']['y']) end_pos = (sequence['states'][-1][0]['value']['x'], sequence['states'][-1][0]['value']['y']) point_list_start.append(start_pos) point_list_end.append(end_pos) point_list_start = np.asarray(point_list_start) point_list_end = np.asarray(point_list_end) plt.figure() for index in range(len(point_list_start)): plt.scatter(point_list_start[index, 0], point_list_start[index, 1], c='b') plt.scatter(point_list_end[index, 0], point_list_end[index, 1], c='r') plt.plot([point_list_start[index, 0], point_list_end[index, 0]], [point_list_start[index, 1], point_list_end[index, 1]], c='y') plt.xlabel('Position X (mm)') plt.ylabel('Position Y (mm)') plt.title('Pathway (b is start, y/o is end)') plt.gca().invert_yaxis() def genMotionIlluminationSequenceRandom(self, sequence_count=1, illumination_sequence=None): led_list = np.arange(self.metadata.illumination.state_list.design.shape[0]) bf_mask = self.metadata.illumination.state_list.design[:, 0] ** 2 \ + self.metadata.illumination.state_list.design[:, 1] ** 2 < ( self.metadata.objective.na + self.illumination_na_pad) ** 2 illumination_state_list = [] linear_segments_processed = {} # Loop over DPC patterns (frames) for frame_index, frame_position_dict in enumerate(self.hardware_controller_list['position'].state_sequence): frame_position_list = 
frame_position_dict['states'] # Get number of positions in blur kernel from this frame. Divide into subsequences pattern_count = len(frame_position_list) // sequence_count # Determine the number of non-zero illumination positions pattern_count_used = int(round(pattern_count * self.saturation_factor)) # Place patterns at the END of the full sequence pattern_count_start = 0 # Get linear segment index current_segment_index = frame_position_dict['common']['linear_segment_index'] if not self.reuse_illumination_sequence or frame_index == 0: blur_vector_full = [] # Generate several blur vectors for _ in range(sequence_count): # Use provided illumination seqence if given if illumination_sequence: blur_vector = illumination_sequence else: blur_vector = np.zeros(pattern_count) # Generate blur vector blur_vector = np.zeros(pattern_count) if self.blur_vector_method == 'strobe': blur_vector = np.zeros(pattern_count) blur_vector[pattern_count_start + pattern_count_used // 2] = 1 elif self.blur_vector_method == 'center': blur_vector =
np.zeros(pattern_count)
numpy.zeros
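# The row above ends mid-assignment: the prompt builds per-frame "blur vectors" with
# np.zeros and then sets a few entries to 1 depending on self.blur_vector_method
# ('strobe' places a single pulse; 'center' presumably a centered block). Below is a
# minimal, self-contained sketch of that pattern, not the original acquisition class;
# the function name and the example parameter values (pattern_count, saturation_factor,
# the 'center' behaviour) are assumptions for illustration only.
import numpy as np

def make_blur_vector(pattern_count, saturation_factor=0.5, method='strobe'):
    """Return a 0/1 illumination vector of length pattern_count."""
    pattern_count_used = int(round(pattern_count * saturation_factor))
    blur_vector = np.zeros(pattern_count)
    if method == 'strobe':
        # Single pulse in the middle of the used portion of the sequence
        blur_vector[pattern_count_used // 2] = 1
    elif method == 'center':
        # Contiguous block of ones centered in the sequence (illustrative choice)
        start = (pattern_count - pattern_count_used) // 2
        blur_vector[start:start + pattern_count_used] = 1
    return blur_vector

# Example: make_blur_vector(10, 0.5, 'strobe') -> a length-10 vector with a single 1 at index 2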
#!/usr/bin/env python
from argparse import ArgumentParser

import rospy

# For importing cv2 packages we need to remove ros python paths
import sys
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import cv2
sys.path.append('/opt/ros/kinetic/lib/python2.7/dist-packages')

from sensor_msgs.msg import Image, PointCloud2
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
from math import cos, sin, radians, sqrt
from itertools import combinations
import time

# ganav imports
from mmseg.apis import inference_segmentor, init_segmentor, show_result_pyplot
from mmseg.core.evaluation import get_palette
import torch
import mmcv
from shutil import copyfile
import os
import datetime
import matplotlib.pyplot as plt
from PIL import Image as im

# path extrapolation imports
from geometry_msgs.msg import Pose2D, Twist, PoseStamped
from nav_msgs.msg import Path, Odometry


class TerrainSeg():
    def __init__(self):
        rospy.init_node('TerrainSeg')
        self.bridge = CvBridge()

        # Subscribe to the camera image and depth topics and set the appropriate callbacks
        self.depth_sub = rospy.Subscriber("/camera/color/image_raw", Image, self.img_callback)
        # rospy.Subscriber("/jackal_velocity_controller/cmd_vel", Twist, self.vel_callback)
        rospy.Subscriber("/odometry/filtered", Odometry, self.odom_callback)

        parser = ArgumentParser()
        # parser.add_argument('img', help='Image file')
        parser.add_argument('config', help='Config file')
        parser.add_argument('checkpoint', help='Checkpoint file')
        parser.add_argument('-p', default=".", type=str)
        parser.add_argument('-s', default="./vis.png", type=str)
        parser.add_argument('-d', action='store_true')
        parser.add_argument('--device', default='cuda:0', help='Device used for inference')
        parser.add_argument('--palette', default='rugd_group', help='Color palette used for segmentation map')
        args = parser.parse_args()

        self.pal = get_palette(args.palette)
        print(self.pal)
        self.model = init_segmentor(args.config, args.checkpoint, device=args.device)
        self.outVid = cv2.VideoWriter('subset5_pspnet.avi', cv2.VideoWriter_fourcc(*'DIVX'), 15, (688, 550))
        self.numRed = 0
        print(" Finished Initializing ")

    def img_callback(self, data):
        # print("Inside callback")
        t1 = time.time()
        try:
            self.img = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)

        dim = (688, 550)
        self.resized_img = cv2.resize(self.img, dim, interpolation=cv2.INTER_AREA)
        result = inference_segmentor(self.model, self.resized_img)
        exists = 4 in result[0]
        self.numRed = self.numRed + exists
        print(self.numRed)

        # result is a 1x1 list of a numpy array, i.e. result[0] contains a 688x550 array
        # with numbers pointing to a color palette. The color palette is stored in pal.
        # This function converts result into Mat format.
        self.pred_img = self.model.show_result(self.resized_img, result, self.pal, show=False)
        # self.pred_img = self.resized_img

        if self.x_d >= 0:
            x_t = []
            y_t = []
            # Time step vector (for how long the trajectory will be estimated based on the current velocity commands)
            time_steps =
np.arange(0.2, 3.1, 0.1)
numpy.arange
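# The completion above (np.arange(0.2, 3.1, 0.1)) builds a vector of lookahead times
# used to extrapolate the robot's trajectory from its current velocity command.
# Below is a minimal, ROS-free sketch of such a forward projection; the function name
# and the assumption of a constant-velocity unicycle model (v, w held fixed over the
# horizon) are mine for illustration, not taken from the original node.
import numpy as np

def extrapolate_path(x0, y0, theta0, v, w, time_steps):
    """Project a 2D pose forward assuming constant linear (v) and angular (w) velocity."""
    x_t, y_t = [], []
    for t in time_steps:
        if abs(w) < 1e-6:
            # Straight-line motion
            x = x0 + v * t * np.cos(theta0)
            y = y0 + v * t * np.sin(theta0)
        else:
            # Arc of a circle with radius v / w
            x = x0 + (v / w) * (np.sin(theta0 + w * t) - np.sin(theta0))
            y = y0 - (v / w) * (np.cos(theta0 + w * t) - np.cos(theta0))
        x_t.append(x)
        y_t.append(y)
    return np.asarray(x_t), np.asarray(y_t)

# Example usage with the same lookahead vector as in the prompt above
time_steps = np.arange(0.2, 3.1, 0.1)
xs, ys = extrapolate_path(0.0, 0.0, 0.0, v=0.5, w=0.2, time_steps=time_steps)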
import numpy as np import numba as nb from dataclasses import dataclass from numba import types from numba.typed import Dict from numba import njit import pandas as pd import time import datetime import csv from openpyxl import load_workbook from pyModbusTCP.client import ModbusClient from pyModbusTCP import utils @dataclass class Data: Ppv: np.array Pbat: np.array Pperi: np.array soc: np.array soc0: int Pbs0: int E: dict class BatModDC(object): """Performance Simulation Class for DC-coupled PV-Battery systems :param parameter: PV battery system parameters :type parameter: dict :param d: array containing parameters :type d: numpy array :param ppv: normalized DC power output of the PV generator :type ppv: numpy array :param pl: AC load power :type pl: numpy array :param Pr: Residual power for battery charging :type Pr: numpy array :param Prpv: AC residual power :type Pr: numpy array :param Ppv: DC power output of the PV generator :type Ppv: numpy array :param ppv2ac: Normalized AC output power of the PV2AC conversion pathway to cover the AC power demand :type ppv2ac: numpy array :param Ppv2ac_out: Target AC output power of the PV2AC conversion pathway :type Ppv2ac_out: numpy array :param dt: time step width in seconds :type dt: integer """ _version = 0.1 def __init__(self, parameter, d, ppv, pl, dt): """Constructor method """ self.parameter = parameter self.d = d self.ppv = ppv self.pl = pl self.dt = dt self.th = False # Start threshold for the recharging of the battery self.spi = float() # Initialization and preallocation self.Real.Pr, self.Real.Prpv, self.Real.Ppv, self.Real.ppv2ac, self.Real.Ppv2ac_out = max_self_consumption(parameter, ppv, pl, pvmod=True) self.Real.Ppv2ac_out0 = 0 self.Real.Ppv2bat_in0 = 0 self.Real.Pbat = np.zeros_like(self.ppv) # DC power of the battery in W self.Real.soc = np.zeros_like(self.ppv) # State of charge of the battery self.Real.soc0 = 0 # State of charge of the battery in the first time step # Input power of the PV2BAT conversion pathway in W self.Real.Ppv2bat_in = np.zeros_like(self.ppv) # Output power of the BAT2AC conversion pathway in W self.Real.Pbat2ac_out = np.zeros_like(self.ppv) self.Real.Pbat2ac_out0 = 0 # AC power of the PV-battery system in W self.Real.Ppvbs = np.zeros_like(self.ppv) # Additional power consumption of other system components (e.g. 
AC power meter) in W self.Real.Pperi = np.ones(self.ppv.size) * self.parameter['P_PERI_AC'] self.Ideal.Ppv = np.maximum(0, self.ppv) * self.parameter['P_PV'] * 1000 self.Ideal.Pr = self.Ideal.Ppv - self.pl self.Ideal.Pbat = np.zeros_like(self.ppv) self.Ideal.soc = np.zeros_like(self.ppv) self.Ideal.Ppv2bat_in = np.zeros_like(self.ppv) self.Ideal.Ppv2bat_in = np.zeros_like(self.ppv) self.Ideal.Pbat2ac_out = np.zeros_like(self.ppv) self.Ideal.Ppvbs = np.zeros_like(self.ppv) @dataclass class Real(Data): Pr : np.array Prpv : np.array ppv2ac : np.array Ppv2ac_out : np.array Ppv2ac_out0 : int Ppv2bat_in : np.array Pbat2ac_out : np.array Ppvbs : np.array @dataclass class Ideal(Real): def __init__(self): super().__init__() def simulation(self, pvmod=True): """Manages the Performance Simulation Model for AC-coupled PV-Battery Systems """ self.Real.Ppv2ac_out, self.Real.Ppv2bat_in, self.Real.Ppv2bat_in0, self.Real.Pbat2ac_out, self.Real.Pbat2ac_out0, self.Real.Ppvbs, self.Real.Pbat, self.Real.soc, self.Real.soc0 = batmod_dc( self.d, self.dt, self.Real.soc0, self.Real.soc, self.Real.Pr, self.Real.Prpv, self.Real.Ppv, self.Real.Ppv2bat_in0, self.Real.Ppv2bat_in, self.Real.Pbat2ac_out0, self.Real.Pbat2ac_out, self.Real.Ppv2ac_out, self.Real.Ppvbs, self.Real.Pbat) self.Ideal.Pbat, self.Ideal.soc, self.Ideal.soc0 = batmod_dc_ideal(self.d, self.dt, self.Ideal.soc0, self.Ideal.soc, self.Ideal.Pr, self.Ideal.Pbat) # Define missing parameters self.Real.Ppv2ac = self.Real.Ppv2ac_out # AC output power of the PV2AC conversion pathway self.Real.Ppv2bat = self.Real.Ppv2bat_in # DC input power of the PV2BAT conversion pathway self.Ideal.Ppvbs = self.Ideal.Ppv - np.maximum(0, self.Ideal.Pbat) - (np.minimum(0, self.Ideal.Pbat)) # Realized AC power of the PV-battery system self.Ideal.Ppv2ac = self.Ideal.Ppv - np.maximum(0, self.Ideal.Pbat) # AC output power of the PV2AC conversion pathway self.Ideal.Ppv2bat = np.maximum(0, self.Ideal.Pbat) # DC input power of the PV2BAT conversion pathway print() def bat_mod_res(self): """Function to calculate the power flows and energy sums including curtailment of PV power """ self.Real.E = bat_res_mod(self.parameter, self.pl, self.Real.Ppv, self.Real.Pbat, self.dt, self.Real.Ppv2ac, self.Real.Ppv2bat, self.Real.Ppvbs, self.Real.Pperi) self.Ideal.E = bat_res_mod_ideal(self.parameter, self.pl, self.Ideal.Ppv, self.Ideal.Pbat, self.dt, self.Ideal.Ppv2ac, self.Ideal.Ppv2bat, self.Ideal.Ppvbs, self.Ideal.Pperi) def calculate_spi(self): self.spi = calculate_spi(_E_real=self.Real.E, _E_ideal=self.Ideal.E) def get_E(self): """Returns the energy sums of the simulation :return: Energy sums of the simulation in MWh :rtype: dict """ return self.Real.E, self.Ideal.E def get_soc(self): """Returns the state of charge of the battery :return: state of charge of the battery :rtype: numpy array """ return self.soc def get_Pbat(self): """Returns the DC power of the battery in W :return: DC power of the battery in W :rtype: numpy array """ return self.Pbat def get_SPI(self): return self.spi class BatModAC(object): """Performance Simulation Class for AC-coupled PV-Battery systems :param parameter: PV battery system parameters :type parameter: dict :param d: array containing parameters :type d: numpy array :param ppv: normalized DC power output of the PV generator :type ppv: numpy array :param pl: AC load power :type pl: numpy array :param Pr: AC residual power :type Pr: numpy array :param Ppv: DC power output of the PV generator :type Ppv: numpy array :param Ppvs: AC power output of the PV inverter 
taking into account the conversion losses and maximum output power of the PV inverter :type Ppvs: numpy array :param Pperi: Additional power consumption of other system components (e.g. AC power meter) in W :type Pperi: numpy array :param dt: time step width in seconds :type dt: integer """ _version = '0.1' def __init__(self, parameter, d, ppv, pl, dt): """Constructor method """ self.parameter = parameter self.d = d self.ppv = ppv self.pl = pl self.dt = dt self.spi = float() self.th = False # Start threshold for the recharging of the battery # Initialization and preallocation self.Real.Pr, self.Real.Ppv, self.Real.Ppvs, self.Real.Pperi = max_self_consumption(parameter, ppv, pl, pvmod=True) self.Real.Pbat = np.zeros_like(self.ppv) # DC power of the battery in W self.Real.Pbs = np.zeros_like(self.ppv) # AC power of the battery system in W self.Real.soc = np.zeros_like(self.ppv) # State of charge of the battery self.Real.soc0 = 0 # State of charge of the battery in the first time step self.Real.Pbs0 = 0 # State of the battery storage in the previous time step self.Ideal.Ppv = np.maximum(0, ppv) * parameter['P_PV'] * 1000 self.Ideal.Pr = self.Ideal.Ppv - pl self.Ideal.Pbat = np.zeros_like(self.ppv) self.Ideal.Pbs = np.zeros_like(self.ppv) self.Ideal.Pbs0 = 0 self.Ideal.soc = np.zeros_like(self.ppv) self.Ideal.soc0 = 0 self.Ideal.Ppvs = self.Ideal.Ppv self.Ideal.Pperi = np.zeros_like(self.ppv) @dataclass class Real(Data): Pr : np.array Ppvs : np.array Pbs : np.array @dataclass class Ideal(Real): def __init__(self): super().__init__() def simulation(self): """Manages the Performance Simulation Model for AC-coupled PV-Battery Systems """ self.Real.Pbat, self.Real.Pbs, self.Real.soc, self.Real.soc0, self.Real.Pbs0 = batmod_ac( self.d, self.dt, self.Real.soc0, self.Real.soc, self.Real.Pr, self.Real.Pbs0, self.Real.Pbs, self.Real.Pbat) self.Ideal.Pbs, self.Ideal.Pbat, self.Ideal.soc0, self.Ideal.soc = batmod_ac_ideal( self.d, self.dt, self.Ideal.soc0, self.Ideal.soc, self.Ideal.Pr, self.Ideal.Pbat) def bat_mod_res(self): """Function to calculate the power flows and energy sums including curtailment of PV power """ self.Real.E = bat_res_mod( self.parameter, self.pl, self.Real.Ppv, self.Real.Pbat, self.dt, self.Real.Ppvs, self.Real.Pbs, self.Real.Pperi) self.Ideal.E = bat_res_mod_ideal( self.parameter, self.pl, self.Ideal.Ppv, self.Ideal.Pbat, self.dt, self.Ideal.Ppvs, self.Ideal.Pbs, self.Ideal.Pperi) def calculate_spi(self): self.spi = calculate_spi(_E_real=self.Real.E, _E_ideal=self.Ideal.E) def get_E(self): """Returns the energy sums of the simulation :return: Energy sums of the simulation in MWh :rtype: dict """ return self.Real.E, self.Ideal.E def get_soc(self): """Returns the state of charge of the battery :return: state of charge of the battery :rtype: numpy array """ return self.soc def get_Pbat(self): """Returns the DC power of the battery in W :return: DC power of the battery in W :rtype: numpy array """ return self.Pbat def get_Pbs(self): """Returns the AC power of the battery system in W :return: AC power of the battery system in W :rtype: numpy array """ return self.Pbs def get_SPI(self): return self.spi class BatModPV(object): """Performance Simulation Class for PV-coupled PV-Battery systems :param parameter: PV battery system parameters :type parameter: dict :param d: array containing parameters :type d: numpy array :param ppv: normalized DC power output of the PV generator :type ppv: numpy array :param pl: AC load power :type pl: numpy array :param Pac: Power demand on the AC side 
:type Pac: numpy array :param Ppv: DC power output of the PV generator :type Ppv: numpy array :param Pperi: Additional power consumption of other system components (e.g. AC power meter) in W :type Pperi: numpy array :param dt: time step width in seconds :type dt: integer """ _version = '0.1' def __init__(self, parameter, d, ppv, pl, Pac, Ppv, Pperi, dt): """Constructor method """ self.parameter = parameter self.d = d self.ppv = ppv self.pl = pl self.Pac = Pac self.Ppv = Ppv self.Pperi = Pperi self.dt = dt # Initialization and preallocation self.Pbat = np.zeros_like(self.ppv) # DC power of the battery in W self.soc = np.zeros_like(self.ppv) # State of charge of the battery # Output power of the PV2AC conversion pathway in W self.Ppv2ac_out = np.zeros_like(self.ppv) # Input power of the PV2BAT conversion pathway in W self.Ppv2bat_in = np.zeros_like(self.ppv) self.Ppv2bat_in0 = 0 # Output power of the BAT2PV conversion pathway in W self.Pbat2pv_out = np.zeros_like(self.ppv) self.Pbat2pv_out0 = 0 # AC power of the PV-battery system in W self.Ppvbs = np.zeros_like(self.ppv) self.simulation() self.bat_mod_res() def simulation(self, pvmod=True): """Manages the Performance Simulation Model for AC-coupled PV-Battery Systems """ self.th = 0 # Start threshold for the recharging of the battery self.soc0 = 0 # Initial state of charge of the battery in the first time step # Simulation of the battery system #start = time.process_time() self.soc, self.soc0, self.Ppv, self.Ppvbs, self.Pbat, self.Ppv2ac_out, self.Pbat2pv_out, self.Ppv2bat_in = batmod_pv(self.d, self.dt, self.soc0, self.soc, self.Ppv, self.Pac, self.Ppv2bat_in0, self.Ppv2bat_in, self.Ppv2ac_out, self.Pbat2pv_out0, self.Pbat2pv_out, self.Ppvbs, self.Pbat) #print(time.process_time()-start) # Define missing parameters self.Ppv2ac = self.Ppv2ac_out # AC output power of the PV2AC conversion pathway self.Ppv2bat = self.Ppv2bat_in # DC input power of the PV2BAT conversion pathway def bat_mod_res(self): """Function to calculate the power flows and energy sums including curtailment of PV power """ self.E = bat_res_mod(self.parameter, self.pl, self.Ppv, self.Pbat, self.dt, self.Ppv2ac, self.Ppv2bat, self.Ppvbs, self.Pperi) def get_E(self): """Returns the energy sums of the simulation :return: Energy sums of the simulation in MWh :rtype: dict """ return self.E def get_soc(self): """Returns the state of charge of the battery :return: state of charge of the battery :rtype: numpy array """ return self.soc def get_Pbat(self): """Returns the DC power of the battery in W :return: DC power of the battery in W :rtype: numpy array """ return self.Pbat class ModBus(object): """Establishes connection to a battery system via ModBus protocol :param host: IP address of the host :type host: string :param port: Server port of the host :type port: integer :param unit_id: Unit-ID of the host :type unit_id: integer """ def __init__(self, host, port, unit_id, input_vals, dt, fname): """Constructor method """ self.host = host self.port = port self.unit_id = unit_id self.dt = dt self.input_vals = input_vals self.fname = fname self.open_connection() self.create_csv_file() self.start_loop() def open_connection(self): """Opens the connection to the host """ # Open ModBus connection try: self.c = ModbusClient(host=self.host, port=self.port, unit_id=self.unit_id, auto_open=True, auto_close=True) except ValueError: print("Error with host: {}, port: {} or unit-ID: {} params".format( self.host, self.port, self.unit_id)) def start_loop(self): """Starts the writing and reading 
process """ # Transform the array to fit the 1 minute time duration #self.set_vals = np.repeat(self.input_vals, self.dt * 60) i = 0 idx = pd.date_range(start=datetime.datetime.now(), periods=(self.input_vals.size), freq='S') while i < len(idx): if datetime.datetime.now().second == idx[i].second: # Set chrging value self.set_val = int(self.input_vals[i]) if self.set_val < 0: # Write negative value to battery charge power (AC) setpoint register self.c.write_single_register(1024, self.set_val & 0xFFFF) # Log writing time self.set_time = datetime.datetime.now() else: # Write positive value to battery charge power (AC) setpoint to register self.c.write_single_register(1024, self.set_val) # Log writing time self.set_time = datetime.datetime.now() try: # Read total AC power value from register _P_ac = self.c.read_holding_registers(172, 2) self.read_time_P_ac = datetime.datetime.now() except: print('Could not read register 172!') try: # Read actual battery charge/discharge power value from register _P_bat = self.c.read_holding_registers(582, 1) self.read_time_P_bat = datetime.datetime.now() except: print('Could not read register 582!') # Load content of two registers into a single float value zregs = utils.word_list_to_long(_P_ac, big_endian=False) # Decode and store float value of the AC-power self.P_ac = utils.decode_ieee(*zregs) # Store the DC charging power self.P_bat = np.int16(*_P_bat) # Read actual soc self.soc0 = self.read_soc(210) try: # Save the values to a csv file self.save_to_csv() except: print('Could not save to csv!') i += 1 def read_soc(self, reg): """Reads the state of charge of the battery """ # Load the actual state fo charge of the battery regs = self.c.read_holding_registers(reg, 2) # Load content of two registers into a single float value zregs = utils.word_list_to_long(regs, big_endian=False) return utils.decode_ieee(*zregs) def create_csv_file(self): """Creates a csv file from set and read values """ # Create a new csv-file with open(self.fname, 'w') as f: writer = csv.writer(f, dialect='excel') writer.writerow(['set_time', 'read_time_P_ac', 'read_time_P_bat', 'soc', 'set_value', 'P_ac', 'P_bat']) def save_to_csv(self): """Saves the set and read values to s csv file """ # Save the read values to a csv file with open(self.fname, "a") as f: wr = csv.writer(f, dialect='excel') wr.writerow([self.set_time, self.read_time_P_ac, self.read_time_P_bat, self.soc0, self.set_val, self.P_ac, self.P_bat]) def max_self_consumption(parameter, ppv, pl, pvmod=True, ideal=False): """Function for maximizing self consumption :param parameter: PV battery system parameters :type parameter: dict :param ppv: normalized DC power output of the PV generator :type ppv: numpy array :param pl: AC load power :type pl: numpy array """ # Maximize self consumption for AC-coupled systems if parameter['Top'] == 'AC': # DC power output of the PV generator if pvmod: # ppv: Normalized DC power output of the PV generator in kW/kWp if ideal: Ppv = np.maximum(0, ppv ) * parameter['P_PV'] * 1000 else: Ppv = np.minimum(ppv * parameter['P_PV'], parameter['P_PV2AC_in']) * 1000 else: # ppv: DC power output of the PV generator in W if ideal: Ppv = np.maximum(0, ppv) else: Ppv = np.minimum(ppv, parameter['P_PV2AC_in'] * 1000) # Normalized input power of the PV inverter ppvinvin = Ppv / parameter['P_PV2AC_in'] / 1000 # AC power output of the PV inverter taking into account the conversion losses and maximum # output power of the PV inverter Ppvs = np.minimum(np.maximum(0, Ppv-(parameter['PV2AC_a_in'] * ppvinvin * ppvinvin + 
parameter['PV2AC_b_in'] * ppvinvin + parameter['PV2AC_c_in'])), parameter['P_PV2AC_out'] * 1000) # 3.2 Residual power # Additional power consumption of other system components (e.g. AC power meter) in W Pperi = np.ones_like(ppv) * parameter['P_PERI_AC'] # Adding the standby consumption of the PV inverter in times without any AC power output of the PV system # to the additional power consumption Pperi[Ppvs == 0] += parameter['P_PVINV_AC'] # Residual power if ideal: Pr = Ppv - pl else: Pr = Ppvs - pl - Pperi return Pr, Ppv, Ppvs, Pperi # Maximize self consumption for DC-coupled systems elif parameter['Top'] == 'DC': # Initialization and preallocation Ppv2ac_in_ac = np.zeros_like(ppv) Ppv = np.empty_like(ppv) # DC power output of the PV generator if pvmod: # ppv: Normalized DC power output of the PV generator in kW/kWp Ppv = ppv * parameter['P_PV'] * 1000 else: Ppv = ppv # DC power output of the PV generator taking into account the maximum # DC input power of the PV2AC conversion pathway Ppv = np.minimum(Ppv, parameter['P_PV2AC_in'] * 1000) # Residual power # Power demand on the AC side Pac = pl + parameter['P_PERI_AC'] # Normalized AC output power of the PV2AC conversion pathway to cover the AC # power demand ppv2ac = np.minimum( Pac, parameter['P_PV2AC_out'] * 1000) / parameter['P_PV2AC_out'] / 1000 # Target DC input power of the PV2AC conversion pathway Ppv2ac_in_ac = np.minimum(Pac, parameter['P_PV2AC_out'] * 1000) + ( parameter['PV2AC_a_out'] * ppv2ac**2 + parameter['PV2AC_b_out'] * ppv2ac + parameter['PV2AC_c_out']) # Normalized DC input power of the PV2AC conversion pathway TODO 1 ppv2ac = Ppv / parameter['P_PV2AC_in'] / 1000 # Target AC output power of the PV2AC conversion pathway Ppv2ac_out = np.maximum( 0, Ppv - (parameter['PV2AC_a_in'] * ppv2ac**2 + parameter['PV2AC_b_in'] * ppv2ac + parameter['PV2AC_c_in'])) # Residual power for battery charging Prpv = Ppv - Ppv2ac_in_ac # Residual power for battery discharging Pr = Ppv2ac_out - Pac return Pr, Prpv, Ppv, ppv2ac, Ppv2ac_out # Maximize self consumption for PV-coupled systems elif parameter['Top'] == 'PV': # Preallocation # Pbat = np.zeros_like(ppv) # DC power of the battery in W # soc = np.zeros_like(ppv) # State of charge of the battery # Ppv2ac_out = np.zeros_like(ppv) # Output power of the PV2AC conversion pathway in W # Ppv2bat_in = np.zeros_like(ppv) # Input power of the PV2BAT conversion pathway in W # Pbat2pv_out = np.zeros_like(ppv) # Output power of the BAT2PV conversion pathway in W # Ppvbs = np.zeros_like(ppv) # AC power of the PV-battery system in W Ppv = np.empty_like(ppv) # DC power output of the PV generator # Additional power consumption of other system components (e.g. 
AC power meter) in W Pperi = np.ones_like(ppv) * parameter['P_PERI_AC'] # dt = 1 # Time increment in s # th = 0 # Start threshold for the recharging of the battery # soc0 = 0 # State of charge of the battery in the first time step # DC power output of the PV generator if pvmod: # ppv: Normalized DC power output of the PV generator in kW/kWp Ppv = ppv * parameter['P_PV'] * 1000 else: # ppv: DC power output of the PV generator in W Ppv = ppv # Power demand on the AC side Pac = pl + Pperi return Pac, Ppv, Pperi @nb.jit(nopython=True) def batmod_ac(d, _dt, _soc0, _soc, _Pr, _Pbs0, _Pbs, _Pbat): """Performance Simulation function for AC-coupled battery systems :param d: array containing parameters :type d: numpy array :param dt: time step width :type dt: integer :param soc0: state of charge in the previous time step :type soc0: float :param Pr: residual power :type Pr: numpy array :param Pbs0: AC-power of the battery system in the previous time step :type Pbs0: float :param Pbs: AC-power of the battery syste :type Pbs: numpy array :param Pbat: DC-power oof the battery :type Pbat: numpy array """ # Loading of particular variables _E_BAT = d[0] _eta_BAT = d[1] _t_CONSTANT = d[2] _P_SYS_SOC0_DC = d[3] _P_SYS_SOC0_AC = d[4] _P_SYS_SOC1_DC = d[5] _P_SYS_SOC1_AC = d[6] _AC2BAT_a_in = d[7] _AC2BAT_b_in = d[8] _AC2BAT_c_in = d[9] _BAT2AC_a_out = d[10] _BAT2AC_b_out = d[11] _BAT2AC_c_out = d[12] _P_AC2BAT_DEV = d[13] _P_BAT2AC_DEV = d[14] _P_BAT2AC_out = d[15] _P_AC2BAT_in = d[16] _t_DEAD = int(round(d[17])) _SOC_h = d[18] _P_AC2BAT_min = _AC2BAT_c_in _P_BAT2AC_min = _BAT2AC_c_out # Correction factor to avoid over charge and discharge the battery corr = 0.1 # Initialization of particular variables _tde = _t_CONSTANT > 0 # Binary variable to activate the first-order time delay element # Factor of the first-order time delay element _ftde = 1 - np.exp(-_dt / _t_CONSTANT) # First time step with regard to the dead time of the system control _tstart = np.maximum(2, 1 + _t_DEAD) _tend = int(_Pr.size) _th = 0 # Capacity of the battery, conversion from kWh to Wh _E_BAT *= 1000 # Effiency of the battery in percent _eta_BAT /= 100 # Check if the dead or settling time can be ignored and set flags accordingly if _dt >= (3 * _t_CONSTANT) or _tend == 1: _tstart = 1 T_DEAD = False else: T_DEAD = True if _dt >= _t_DEAD + 3 * _t_CONSTANT: SETTLING = False else: SETTLING = True for t in range(_tstart - 1, _tend): # Energy content of the battery in the previous time step E_b0 = _soc0 * _E_BAT # Calculate the AC power of the battery system from the residual power # with regard to the dead time of the system control if T_DEAD: P_bs = _Pr[t - _t_DEAD] else: P_bs = _Pr[t] # Check if the battery holds enough unused capacity for charging or discharging # Estimated amount of energy in Wh that is supplied to or discharged from the storage unit. 
E_bs_est = P_bs * _dt / 3600 # Reduce P_bs to avoid over charging of the battery if E_bs_est > 0 and E_bs_est > (_E_BAT - E_b0): P_bs = (_E_BAT - E_b0) * 3600 / _dt # When discharging take the correction factor into account elif E_bs_est < 0 and np.abs(E_bs_est) > (E_b0): P_bs = (E_b0 * 3600 / _dt) * (1-corr) # Adjust the AC power of the battery system due to the stationary # deviations taking the minimum charging and discharging power into # account if P_bs > _P_AC2BAT_min: P_bs = np.maximum(_P_AC2BAT_min, P_bs + _P_AC2BAT_DEV) elif P_bs < -_P_BAT2AC_min: P_bs = np.minimum(-_P_BAT2AC_min, P_bs - _P_BAT2AC_DEV) else: P_bs = 0 # Limit the AC power of the battery system to the rated power of the # battery converter P_bs = np.maximum(-_P_BAT2AC_out * 1000, np.minimum(_P_AC2BAT_in * 1000, P_bs)) # Adjust the AC power of the battery system due to the settling time # (modeled by a first-order time delay element) Hier hat der Schritt vorher eine Null? # Muss der vorherige Wert mit übergeben werden? if SETTLING: if t > 0: P_bs = _tde * _Pbs[t-1] + _tde * (P_bs - _Pbs[t-1]) * _ftde + P_bs * (not _tde) else: P_bs = _tde * _Pbs0 + _tde * (P_bs - _Pbs0) * _ftde + P_bs * (not _tde) # Decision if the battery should be charged or discharged if P_bs > 0 and _soc0 < 1 - _th * (1 - _SOC_h): # The last term th*(1-SOC_h) avoids the alternation between # charging and standby mode due to the DC power consumption of the # battery converter when the battery is fully charged. The battery # will not be recharged until the SOC falls below the SOC-threshold # (SOC_h) for recharging from PV. # Normalized AC power of the battery system p_bs = P_bs / _P_AC2BAT_in / 1000 # DC power of the battery affected by the AC2BAT conversion losses # of the battery converter P_bat = np.maximum( 0, P_bs - (_AC2BAT_a_in * p_bs * p_bs + _AC2BAT_b_in * p_bs + _AC2BAT_c_in)) elif P_bs < 0 and _soc0 > 0: # Normalized AC power of the battery system p_bs = np.abs(P_bs / _P_BAT2AC_out / 1000) # DC power of the battery affected by the BAT2AC conversion losses # of the battery converter P_bat = P_bs - (_BAT2AC_a_out * p_bs * p_bs + _BAT2AC_b_out * p_bs + _BAT2AC_c_out) else: # Neither charging nor discharging of the battery # Set the DC power of the battery to zero P_bat = 0 # Decision if the standby mode is active if P_bat == 0 and _soc0 <= 0: # Standby mode in discharged state # DC and AC power consumption of the battery converter P_bat = -np.maximum(0, _P_SYS_SOC0_DC) P_bs = _P_SYS_SOC0_AC elif P_bat == 0 and _soc0 > 0: # Standby mode in fully charged state # DC and AC power consumption of the battery converter P_bat = -np.maximum(0, _P_SYS_SOC1_DC) P_bs = _P_SYS_SOC1_AC # Transfer the realized AC power of the battery system and # the DC power of the battery _Pbs0 = P_bs _Pbs[t] = P_bs _Pbat[t] = P_bat # Change the energy content of the battery from Ws to Wh conversion if P_bat > 0: E_b = E_b0 + P_bat * np.sqrt(_eta_BAT) * _dt / 3600 elif P_bat < 0: E_b = E_b0 + P_bat / np.sqrt(_eta_BAT) * _dt / 3600 else: E_b = E_b0 # Calculate the state of charge of the battery _soc0 = E_b / (_E_BAT) _soc[t] = _soc0 # Adjust the hysteresis threshold to avoid alternation # between charging and standby mode due to the DC power # consumption of the battery converter. 
if _th and _soc[t] > _SOC_h or _soc[t] > 1: _th = True else: _th = False return _Pbat, _Pbs, _soc, _soc0, _Pbs0 @nb.jit(nopython=True) def batmod_ac_ideal(d, _dt, _soc0, _soc, _Pr, _Pbat): _E_BAT = d[0] for t in range(_Pr.size): # Energy content of the battery in the previous time step E_b0 = _soc0 * _E_BAT * 1000 # Calculate the DC power of the battery from the residual power P_bat = _Pr[t] # Decision if the battery should be charged or discharged if P_bat > 0 and _soc0 < 1: # Battery charging E_b = E_b0 + P_bat * _dt / 3600 # Change the energy content of the battery elif P_bat < 0 and _soc0 > 0: # Battery discharging # Change the energy content of the battery E_b = E_b0 + P_bat * _dt / 3600 else: # Neither charging nor discharging of the battery # Set the DC power of the battery to zero P_bat = 0 # No change in the energy content of the battery E_b = E_b0 # Transfer the realized DC power of the battery _Pbat[t] = P_bat # Calculate the state of charge of the battery _soc0 = E_b / (_E_BAT * 1000) _soc[t] = _soc0 # Define missing parameters _Pbs = _Pbat # Realized AC power of the battery system return _Pbs, _Pbat, _soc0, _soc @nb.jit(nopython=True) def batmod_dc(d, _dt, _soc0, _soc, _Pr, _Prpv, _Ppv, _Ppv2bat_in0, _Ppv2bat_in, _Pbat2ac_out0, _Pbat2ac_out, _Ppv2ac_out, _Ppvbs, _Pbat): """Performance simulation function for DC-coupled battery systems :param d: array containing parameters :type d: numpy array :param dt: time step width :type dt: integer :param soc0: state of charge in the previous time step :type soc0: float :param Pr: residual power :type Pr: numpy array :param Prpv: residual power of the PV-system :type Prpv: numpy array :param Ppv: PV-power :type Ppv: numpy array :param Ppv2bat_in0: AC input power of the battery system in the previous time step :type Ppv2bat_in0: float :param Ppv2bat_in: AC input power of the battery system :type Ppv2bat_in: numpy array :param Pbat2ac_out0: AC output power of the battery system in the previous time step :type Pbat2ac_out0: float :param Pbat2ac_out: AC output power of the battery system :type Pbat2ac_out: numpy array :param Ppv2ac_out0: AC output power of the PV inverter in the previous time step :type Ppv2ac_out0: float :param Ppv2ac_out: AC output power of the PV inverter :type Ppv2ac_out: numpy array :param Ppvbs: AC power from the PV system to the battery system :type Ppvbs: numpy array :param Pbat: DC power of the battery :type Pbat: float """ _E_BAT = d[0] _P_PV2AC_in = d[1] _P_PV2AC_out = d[2] _P_PV2BAT_in = d[3] _P_BAT2AC_out = d[4] _PV2AC_a_in = d[5] _PV2AC_b_in = d[6] _PV2AC_c_in = d[7] _PV2BAT_a_in = d[8] _PV2BAT_b_in = d[9] _BAT2AC_a_out = d[10] _BAT2AC_b_out = d[11] _BAT2AC_c_out = d[12] _eta_BAT = d[13] _SOC_h = d[14] _P_PV2BAT_DEV = d[15] _P_BAT2AC_DEV = d[16] _t_DEAD = int(round(d[17])) _t_CONSTANT = d[18] _P_SYS_SOC1_DC = d[19] _P_SYS_SOC0_AC = d[20] _P_SYS_SOC0_DC = d[21] _P_PV2AC_min = _PV2AC_c_in # Capacity of the battery, conversion from kWh to Wh _E_BAT *= 1000 # Effiency of the battery in percent _eta_BAT /= 100 # Initialization of particular variables # _P_PV2AC_min = _parameter['PV2AC_c_in'] # Minimum input power of the PV2AC conversion pathway _tde = _t_CONSTANT > 0 # Binary variable to activate the first-order time delay element # Factor of the first-order time delay element _ftde = 1 - np.exp(-_dt / _t_CONSTANT) # First time step with regard to the dead time of the system control _tstart = np.maximum(2, 1 + _t_DEAD) _tend = int(_Pr.size) _th = 0 corr = 0.1 # Check if the dead or settling time can be ignored and 
set flags accordingly if _dt >= (3 * _t_CONSTANT) or _tend == 1: _tstart = 1 T_DEAD = False else: T_DEAD = True if _dt >= _t_DEAD + 3 * _t_CONSTANT: SETTLING = False else: SETTLING = True for t in range(_tstart - 1, _tend): # Energy content of the battery in the previous time step E_b0 = _soc0 * _E_BAT # Residual power with regard to the dead time of the system control if T_DEAD: P_rpv = _Prpv[t - _t_DEAD] P_r = _Pr[t - _t_DEAD] else: P_rpv = _Prpv[t] P_r = _Pr[t] # Check if the battery holds enough unused capacity for charging or discharging # Estimated amount of energy that is supplied to or discharged from the storage unit. E_bs_rpv = P_rpv * _dt / 3600 E_bs_r = P_r * _dt / 3600 # Reduce P_bs to avoid over charging of the battery if E_bs_rpv > 0 and E_bs_rpv > (_E_BAT - E_b0): P_rpv = (_E_BAT - E_b0) * 3600 / _dt # When discharging take the correction factor into account elif E_bs_r < 0 and np.abs(E_bs_r) > (E_b0): P_r = ((E_b0) * 3600 / _dt) * (1-corr) # Decision if the battery should be charged or discharged if P_rpv > 0 and _soc0 < 1 - _th * (1 - _SOC_h): ''' The last term th*(1-SOC_h) avoids the alternation between charging and standby mode due to the DC power consumption of the battery converter when the battery is fully charged. The battery will not be recharged until the SOC falls below the SOC-threshold (SOC_h) for recharging from PV. ''' # Charging power P_pv2bat_in = P_rpv # Adjust the charging power due to the stationary deviations P_pv2bat_in = np.maximum(0, P_pv2bat_in + _P_PV2BAT_DEV) # Limit the charging power to the maximum charging power P_pv2bat_in = np.minimum(P_pv2bat_in, _P_PV2BAT_in * 1000) # Adjust the charging power due to the settling time # (modeled by a first-order time delay element) if SETTLING: if t > 0: P_pv2bat_in = _tde * _Ppv2bat_in[(t-1)] + _tde * ( P_pv2bat_in - _Ppv2bat_in[(t-1)]) * _ftde + P_pv2bat_in * (not _tde) else: P_pv2bat_in = _tde * _Ppv2bat_in0 + _tde * \ (P_pv2bat_in - _Ppv2bat_in0) * \ _ftde + P_pv2bat_in * (not _tde) # Limit the charging power to the current power output of the PV generator P_pv2bat_in = np.minimum(P_pv2bat_in, _Ppv[t]) # Normalized charging power ppv2bat = P_pv2bat_in / _P_PV2BAT_in / 1000 # DC power of the battery affected by the PV2BAT conversion losses # (the idle losses of the PV2BAT conversion pathway are not taken # into account) P_bat = np.maximum( 0, P_pv2bat_in - (_PV2BAT_a_in * ppv2bat**2 + _PV2BAT_b_in * ppv2bat)) # Realized DC input power of the PV2AC conversion pathway P_pv2ac_in = _Ppv[t] - P_pv2bat_in # Normalized DC input power of the PV2AC conversion pathway _ppv2ac = P_pv2ac_in / _P_PV2AC_in / 1000 # Realized AC power of the PV-battery system P_pv2ac_out = np.maximum( 0, P_pv2ac_in - (_PV2AC_a_in * _ppv2ac**2 + _PV2AC_b_in * _ppv2ac + _PV2AC_c_in)) P_pvbs = P_pv2ac_out # Transfer the final values _Ppv2ac_out[t] = P_pv2ac_out _Ppv2bat_in0 = P_pv2bat_in _Ppv2bat_in[t] = P_pv2bat_in elif P_rpv < 0 and _soc0 > 0: # Discharging power P_bat2ac_out = P_r * -1 # Adjust the discharging power due to the stationary deviations P_bat2ac_out = np.maximum(0, P_bat2ac_out + _P_BAT2AC_DEV) # Adjust the discharging power to the maximum discharging power P_bat2ac_out = np.minimum(P_bat2ac_out, _P_BAT2AC_out * 1000) # Adjust the discharging power due to the settling time # (modeled by a first-order time delay element) if SETTLING: if t > 0: P_bat2ac_out = _tde * _Pbat2ac_out[t-1] + _tde * ( P_bat2ac_out - _Pbat2ac_out[t-1]) * _ftde + P_bat2ac_out * (not _tde) else: P_bat2ac_out = _tde * _Pbat2ac_out0 + _tde * \ 
(P_bat2ac_out - _Pbat2ac_out0) * \ _ftde + P_bat2ac_out * (not _tde) # Limit the discharging power to the maximum AC power output of the PV-battery system P_bat2ac_out = np.minimum( _P_PV2AC_out * 1000 - _Ppv2ac_out[t], P_bat2ac_out) # Normalized discharging power ppv2bat = P_bat2ac_out / _P_BAT2AC_out / 1000 # DC power of the battery affected by the BAT2AC conversion losses # (if the idle losses of the PV2AC conversion pathway are covered by # the PV generator, the idle losses of the BAT2AC conversion pathway # are not taken into account) if _Ppv[t] > _P_PV2AC_min: P_bat = -1 * (P_bat2ac_out + (_BAT2AC_a_out * ppv2bat**2 + _BAT2AC_b_out * ppv2bat)) else: P_bat = -1 * (P_bat2ac_out + (_BAT2AC_a_out * ppv2bat ** 2 + _BAT2AC_b_out * ppv2bat + _BAT2AC_c_out)) + _Ppv[t] # Realized AC power of the PV-battery system P_pvbs = _Ppv2ac_out[t] + P_bat2ac_out # Transfer the final values _Pbat2ac_out0 = P_bat2ac_out _Pbat2ac_out[t] = P_bat2ac_out else: # Neither charging nor discharging of the battery # Set the DC power of the battery to zero P_bat = 0 # Realized AC power of the PV-battery system P_pvbs = _Ppv2ac_out[t] # Decision if the standby mode is active if P_bat == 0 and P_pvbs == 0 and _soc0 <= 0: # Standby mode in discharged state # DC and AC power consumption of the PV-battery inverter P_bat = -np.maximum(0, _P_SYS_SOC0_DC) P_pvbs = -_P_SYS_SOC0_AC elif P_bat == 0 and P_pvbs > 0 and _soc0 > 0: # Standby mode in fully charged state # DC power consumption of the PV-battery inverter P_bat = -np.maximum(0, _P_SYS_SOC1_DC) # Transfer the realized AC power of the PV-battery system and the DC power of the battery _Ppvbs[t] = P_pvbs _Pbat[t] = P_bat # Change the energy content of the battery Wx to Wh conversion if P_bat > 0: E_b = E_b0 + P_bat * np.sqrt(_eta_BAT) * _dt / 3600 elif P_bat < 0: E_b = E_b0 + P_bat / np.sqrt(_eta_BAT) * _dt / 3600 else: E_b = E_b0 # Calculate the state of charge of the battery _soc0 = E_b / _E_BAT _soc[t] = _soc0 # Adjust the hysteresis threshold to avoid alternation between charging # and standby mode due to the DC power consumption of the # PV-battery inverter if _th and _soc[t] > _SOC_h or _soc[t] > 1: _th = True else: _th = False return _Ppv2ac_out, _Ppv2bat_in, _Ppv2bat_in0, _Pbat2ac_out, _Pbat2ac_out0, _Ppvbs, _Pbat, _soc, _soc0 @nb.jit(nopython=True) def batmod_dc_ideal(d, _dt, _soc0, _soc, _Pr, _Pbat): _E_BAT = d[0] for t in range(_Pr.size): # Energy content of the battery in the previous time step E_b0 = _soc0 * _E_BAT * 1000 P_bat = _Pr[t] if P_bat > 0 and _soc0 < 1: # Battery charging # Change the energy content of the battery E_b = E_b0 + P_bat * _dt / 3600 elif P_bat < 0 and _soc0 > 0: # Battery discharging # Change the energy content of the battery E_b = E_b0 + P_bat * _dt / 3600 else: # Neither charging nor discharging of the battery P_bat = 0 E_b = E_b0 _Pbat[t] = P_bat _soc0 = E_b / (_E_BAT * 1000) _soc[t] = _soc0 return _Pbat, _soc, _soc0 @nb.jit(nopython=True) def batmod_pv(d, _dt, _soc0, _soc, _Ppv, _Pac, _Ppv2bat_in0, _Ppv2bat_in, _Ppv2ac_out, _Pbat2pv_out0, _Pbat2pv_out, _Ppvbs, _Pbat): """Performance simulation function for PV-coupled battery systems :param d: array containing parameters :type d: numpy array :param dt: time step width :type dt: integer :param soc0: state of charge of the battery in the previous time step :type soc0: float :param soc: state of charge of the battery :type soc: numpy array :param Pr: residual power :type Pr: numpy array :param Ppv: PV-power :type Ppv: numpy array :param Pac: AC output power of the PV inverter :type 
Pac: numpy array :param Ppv2bat_in: AC input power of the battery system :type Ppv2bat_in: numpy array :param Ppv2bat_in0: AC input power of the battery system in the previous time step :type Ppv2bat_in0: float :param Pbat2pv_out0: AC output power of the battery system in the previous time step :type Pbat2pv_out0: float :param Pbat2pv_out: AC output power of the battery system :type Pbat2pv_out: numpy array :param Ppvbs: AC power from the PV system to the battery system :type Ppvbs: numpy array :param Pbat: DC power of the battery :type Pbat: float """ # Initialization of particular variables _E_BAT = d[0] _P_PV2AC_in = d[1] _P_PV2AC_out = d[2] _P_PV2BAT_in = d[3] _P_BAT2PV_out = d[4] _PV2AC_a_in = d[5] _PV2AC_b_in = d[6] _PV2AC_c_in = d[7] _PV2BAT_a_in = d[8] _PV2BAT_b_in = d[9] _PV2BAT_c_in = d[10] _PV2AC_a_out = d[11] _PV2AC_b_out = d[12] _PV2AC_c_out = d[13] _BAT2PV_a_out = d[14] _BAT2PV_b_out = d[15] _BAT2PV_c_out = d[16] _eta_BAT = d[17] _SOC_h = d[18] _P_PV2BAT_DEV = d[19] _P_BAT2AC_DEV = d[20] _P_SYS_SOC1_DC = d[21] _P_SYS_SOC0_AC = d[22] _P_SYS_SOC0_DC = d[23] _t_DEAD = int(round(d[24])) _t_CONSTANT = d[25] # Correction factor to avoid over charge and discharge the battery corr = 0.1 _P_PV2BAT_min = _PV2BAT_c_in # Minimum DC charging power _P_BAT2PV_min = _BAT2PV_c_out # Minimum DC discharging power # Initialization of particular variables _tde = _t_CONSTANT > 0 # Binary variable to activate the first-order time delay element # Factor of the first-order time delay element _ftde = 1 - np.exp(-_dt / _t_CONSTANT) # First time step with regard to the dead time of the system control _tstart = np.maximum(2, 1 + _t_DEAD) _tend = int(_Ppv.size) _th = 0 _E_BAT *= 1000 # Conversion from W to kW _eta_BAT /= 100 # Check if the dead or settling time can be ignored and set flags accordingly if _dt >= (3 * _t_CONSTANT) or _tend == 1: _tstart = 1 T_DEAD = False else: T_DEAD = True if _dt >= _t_DEAD + 3 * _t_CONSTANT: SETTLING = False else: SETTLING = True for t in range(_tstart - 1, _tend): # Energy content of the battery in the previous time step E_b0 = _soc0 * _E_BAT # Target AC output power of the PV-battery system to cover the AC power demand if T_DEAD: P_pvbs = np.minimum(_Pac[t - _t_DEAD], _P_PV2AC_out * 1000) else: P_pvbs = np.minimum(_Pac[t], _P_PV2AC_out * 1000) # Normalized AC output power of the PV2AC conversion pathway ppv2ac = P_pvbs / _P_PV2AC_out / 1000 # Target DC input power of the PV2AC conversion pathway P_pv2ac_in = P_pvbs + (_PV2AC_a_out * ppv2ac ** 2 + _PV2AC_b_out * ppv2ac + _PV2AC_c_out) # Residual power if T_DEAD: P_rpv = _Ppv[t - _t_DEAD] - P_pv2ac_in else: P_rpv = _Ppv[t] - P_pv2ac_in # Check if the battery holds enough unused capacity for charging or discharging # Estimated amount of energy that is supplied to or discharged from the storage unit. E_bs_rpv = P_rpv * _dt / 3600 # Reduce P_bs to avoid over charging of the battery if E_bs_rpv > 0 and E_bs_rpv > (_E_BAT - E_b0): P_rpv = ((_E_BAT - E_b0) * 3600) / _dt # When charging take the correction factor into account elif E_bs_rpv < 0 and np.abs(E_bs_rpv) > (E_b0): P_rpv = ((E_b0) * 3600 / _dt) * (1-corr) # Decision if the battery should be charged or discharged if P_rpv > _P_PV2BAT_min and _soc0 < 1 - _th * (1 - _SOC_h): ''' The last term th*(1-SOC_h) avoids the alternation between charging and standby mode due to the DC power consumption of the battery converter when the battery is fully charged. The battery will not be recharged until the SOC falls below the SOC-threshold (SOC_h) for recharging from PV. 
''' # Charging power P_pv2bat_in = P_rpv # Adjust the charging power due to stationary deviations P_pv2bat_in = np.maximum(0, P_pv2bat_in + _P_PV2BAT_DEV) # Limit the charging power to the maximum charging power P_pv2bat_in = np.minimum(P_pv2bat_in, _P_PV2BAT_in * 1000) # Adjust the charging power due to the settling time # (modeled by a first-order time delay element) if SETTLING: if t > 0: P_pv2bat_in = _tde * _Ppv2bat_in[t-1] + _tde * ( P_pv2bat_in - _Ppv2bat_in[t-1]) * _ftde + P_pv2bat_in * (not _tde) else: P_pv2bat_in = _tde * _Ppv2bat_in0 + _tde * \ (P_pv2bat_in - _Ppv2bat_in0) * \ _ftde + P_pv2bat_in * (not _tde) # Limit the charging power to the current power output of the PV generator P_pv2bat_in = np.minimum(P_pv2bat_in, _Ppv[t]) # Normalized charging power ppv2bat = P_pv2bat_in / _P_PV2BAT_in / 1000 # DC power of the battery P_bat = np.maximum(0, P_pv2bat_in - (_PV2BAT_a_in * ppv2bat**2 + _PV2BAT_b_in * ppv2bat + _PV2BAT_c_in)) # Realized DC input power of the PV2AC conversion pathway P_pv2ac_in = _Ppv[t] - P_pv2bat_in # Limit the DC input power of the PV2AC conversion pathway P_pv2ac_in = np.minimum(P_pv2ac_in, _P_PV2AC_in * 1000) # Recalculate Ppv(t) with limited PV2AC input power _Ppv[t] = P_pv2ac_in + P_pv2bat_in # Normalized DC input power of the PV2AC conversion pathway ppv2ac = P_pv2ac_in / _P_PV2AC_in / 1000 # Realized AC power of the PV-battery system P_pv2ac_out = np.maximum( 0, P_pv2ac_in - (_PV2AC_a_in * ppv2ac**2 + _PV2AC_b_in * ppv2ac + _PV2AC_c_in)) P_pvbs = P_pv2ac_out # Transfer the final values _Ppv2ac_out[t] = P_pv2ac_out _Ppv2bat_in0 = P_pv2bat_in _Ppv2bat_in[t] = P_pv2bat_in elif P_rpv < -_P_BAT2PV_min and _soc0 > 0: # Target discharging power of the battery P_bat2pv_out = np.abs(P_rpv) # Adjust the discharging power due to the stationary deviations P_bat2pv_out = np.maximum(0, P_bat2pv_out + _P_BAT2AC_DEV) # Adjust the discharging power to the maximum discharging power P_bat2pv_out = np.minimum(P_bat2pv_out, _P_BAT2PV_out * 1000) # Adjust the discharging power due to the settling time # (modeled by a first-order time delay element) if SETTLING: if t > 0: P_bat2pv_out = _tde * _Pbat2pv_out[t-1] + _tde * (P_bat2pv_out - _Pbat2pv_out[t-1]) * _ftde + P_bat2pv_out * (not _tde) else: P_bat2pv_out = _tde * _Pbat2pv_out0 + _tde * (P_bat2pv_out - _Pbat2pv_out0) * _ftde + P_bat2pv_out * (not _tde) # Recalculate Ppv(t) with limited PV2AC input power _Ppv[t] = np.minimum(_P_PV2AC_in * 1000, _Ppv[t]) # Limit the discharging power to the maximum AC power output of the PV-battery system P_bat2pv_out = np.minimum(_P_PV2AC_in * 1000 - _Ppv[t], P_bat2pv_out) # Normalized discharging power pbat2pv = P_bat2pv_out / _P_BAT2PV_out / 1000 # DC power of the battery affected by the BAT2PV conversion losses P_bat = -1*(P_bat2pv_out+(_BAT2PV_a_out * pbat2pv**2 + _BAT2PV_b_out * pbat2pv + _BAT2PV_c_out)) # Realized DC input power of the PV2AC conversion pathway P_pv2ac_in = _Ppv[t] + P_bat2pv_out # Normalized DC input power of the PV2AC conversion pathway ppv2ac = P_pv2ac_in / _P_PV2AC_in / 1000 # AC power of the PV-battery system P_pvbs = np.maximum(0, P_pv2ac_in-(_PV2AC_a_in * ppv2ac**2 + _PV2AC_b_in * ppv2ac + _PV2AC_c_in)) P_pv2ac_out = P_pvbs # Transfer the final values _Ppv2ac_out[t] = P_pv2ac_out _Pbat2pv_out0 = P_bat2pv_out _Pbat2pv_out[t] = P_bat2pv_out else: # Neither charging nor discharging of the battery # Set the DC power of the battery to zero P_bat = 0 # Limit the power output of the PV generator to the maximum input power # of the PV inverter _Ppv[t] = 
np.minimum(_Ppv[t], _P_PV2AC_in * 1000) # Normalized DC input power of the PV2AC conversion pathway ppv2ac = _Ppv[t] / _P_PV2AC_in / 1000 # Realized AC power of the PV-battery system P_pvbs = np.maximum(0, _Ppv[t] - (_PV2AC_a_in * ppv2ac**2 + _PV2AC_b_in * ppv2ac + _PV2AC_c_in)) # Transfer the final values _Ppv2ac_out[t] = P_pvbs # Decision if the standby mode is active if P_bat == 0 and _soc0 <= 0: # Standby mode in discharged state # DC power consumption of the battery converter P_bat = -np.maximum(0, _P_SYS_SOC0_DC) if P_pvbs == 0: P_pvbs = -_P_SYS_SOC0_AC elif P_bat == 0 and P_pvbs > 0 and _soc0 > 0: # Standby mode in fully charged state # DC power consumption of the battery converter P_bat = -np.maximum(0, _P_SYS_SOC1_DC) # Transfer the realized AC power of the battery system and # the DC power of the battery _Ppvbs[t] = P_pvbs _Pbat[t] = P_bat # Change the energy content of the battery Wx to Wh conversio if P_bat > 0: E_b = E_b0 + P_bat * np.sqrt(_eta_BAT) * _dt / 3600 elif P_bat < 0: E_b = E_b0 + P_bat / np.sqrt(_eta_BAT) * _dt / 3600 else: E_b = E_b0 # Calculate the state of charge of the battery _soc0 = E_b / (_E_BAT) _soc[t] = _soc0 # Adjust the hysteresis threshold to avoid alternation # between charging and standby mode due to the DC power # consumption of the battery converter. if _th and _soc[t] > _SOC_h or _soc[t] > 1: _th = True else: _th = False return _soc, _soc0, _Ppv, _Ppvbs, _Pbat, _Ppv2ac_out, _Pbat2pv_out, _Ppv2bat_in def bat_res_mod(_parameter, _Pl, _Ppv, _Pbat, _dt, *args): """Function for calculating energy sums :param _parameter: parameter of the system :type _parameter: dict :param _Pl: load power :type _Pl: numpy array :param _Ppv: output power of the PV generator :type _Ppv: numpy array :param _Pbat: DC power of the battery :type _Pbat: numpy array :param _dt: time step width :type _dt: integer :return: energy sums :rtype: dict """ _E = dict() if _parameter['Top'] == 'AC': # AC-coupled systems _Ppvs = args[0] # AC output power of the PV system _Pbs = args[1] # AC power of the battery system # Additional power consumption of the other system components _Pperi = args[2] elif _parameter['Top'] == 'DC' or _parameter['Top'] == 'PV': # DC- and PV-coupled systems _Ppv2ac = args[0] # AC output power of the PV2AC conversion pathway _Ppv2bat_in = args[1] # Input power of the PV2BAT conversion pathway _Ppvbs = args[2] # AC power of the PV-battery system # Additional power consumption of the other system components _Pperi = args[3] _Ppv2ac_in = _Ppv - _Ppv2bat_in # Input power of the PV2AC conversion pathway # Total load including the power consumption of the other system components _Plt = _Pl + _Pperi # DC input power of the battery (charged) _Pbatin = np.maximum(0, _Pbat) # DC output power of the battery (discharged) _Pbatout = np.minimum(0, _Pbat) # Maximum PV feed-in power _P_ac2g_max = _parameter['p_ac2g_max'] * _parameter['P_PV'] * 1000 if _parameter['Top'] == 'AC': # AC-coupled systems # Residual power without curtailment _Pr = _Ppvs - _Plt # AC input power of the battery system _Pac2bs = np.maximum(0, _Pbs) # AC output power of the battery system _Pbs2ac = np.minimum(0, _Pbs) # Negative residual power (residual load demand) _Prn = np.minimum(0, _Pr) # Positive residual power (surplus PV power) _Prp = np.maximum(0, _Pr) # Direct use of PV power by the load _Ppvs2l = np.minimum(_Ppvs, _Plt) # PV charging power _Ppvs2bs = np.minimum(_Prp, _Pac2bs) # Grid charging power _Pg2bs = np.maximum(_Pac2bs - _Prp, 0) # Grid supply power of the load _Pg2l = np.minimum(_Prn - 
_Pbs2ac, 0) # Battery supply power of the load _Pbs2l = np.maximum(_Prn, _Pbs2ac) # Battery feed-in power _Pbs2g = np.minimum(_Pbs2ac - _Prn, 0) # PV feed-in power including curtailment _Ppvs2g = np.minimum(np.maximum(_Prp - _Pac2bs, 0), _P_ac2g_max) # Power demand from the grid _Pg2ac = _Pg2l - _Pg2bs # Feed-in power to the grid _Pac2g = _Ppvs2g - _Pbs2g # Grid power _Pg = _Pac2g + _Pg2ac # Curtailed PV power (AC output power) _Pct = np.maximum(_Prp - _Pac2bs, 0) - _Ppvs2g # AC output power of the PV system including curtailment _Ppvs = _Ppvs - _Pct # Residual power including curtailment _Pr = _Ppvs - _Plt # Index for PV curtailment _idx = np.where(_Pct > 0)[0] for i in range(len(_idx)): _tct = _idx[i] # Normalized output power of the PV inverter _ppvinvout = _Ppvs[_tct] / _parameter['P_PV2AC_out'] / 1000 # DC output power of the PV generator taking into account the # conversion and curtailment losses _Ppv[_tct] = _Ppvs[_tct] + (_parameter['PV2AC_a_out'] * _ppvinvout ** 2 + _parameter['PV2AC_b_out'] * _ppvinvout + _parameter['PV2AC_c_out']) elif _parameter['Top'] == 'DC' or _parameter['Top'] == 'PV': # DC- and PV-coupled systems # Grid power demand of the PV-battery system _Pg2pvbs = np.minimum(0, _Ppvbs) # AC input power of the PV-battery system _Pac2pvbs = _Pg2pvbs # AC output power of the PV-battery system _Ppvbs2ac = np.maximum(0, _Ppvbs) # Load supply power by the PV-battery system _Ppvbs2l = np.minimum(_Plt, _Ppvbs2ac) # Load supply power by the grid _Pg2l = _Plt - _Ppvbs2l # Direct use of PV power by the load _Ppv2l = np.minimum(_Plt, _Ppv2ac) # PV feed-in power including curtailment _Ppv2g = np.minimum(_Ppv2ac - _Ppv2l, _P_ac2g_max) # Curtailed PV power (AC output power) _Pct = _Ppv2ac - _Ppv2l - _Ppv2g if np.sum(_Pct) > 0: # Power of the PV-battery system including curtailment _Ppvbs = _Ppvbs - _Pct # AC output power of the PV-battery system including curtailment _Ppvbs2ac = np.maximum(0, _Ppvbs) # AC output power of the PV2AC conversion pathway including curtailment _Ppv2ac = _Ppv2ac - _Pct # Index for PV curtailment _idx = np.where(_Pct > 0)[0] for i in range(len(_idx)): _tct = _idx[i] # Specific AC output power of the PV2AC conversion pathway _ppv2ac = _Ppv2ac[_tct] / _parameter['P_PV2AC_out'] / 1000 # DC input power of the PV2AC conversion pathway including curtailment _Ppv2ac_in[_tct] = _Ppv2ac[_tct] + (_parameter['PV2AC_a_out'] * _ppv2ac ** 2 + _parameter['PV2AC_b_out'] * _ppv2ac + _parameter['PV2AC_c_out']) # DC output power of the PV generator including curtailment _Ppv = _Ppv2ac_in + _Ppv2bat_in # Grid power including curtailment _Pg = _Ppvbs-_Plt # Feed-in power to the grid including curtailment _Pac2g = np.maximum(0, _Pg) # Power demand from the grid _Pg2ac = np.minimum(0, _Pg) # Energy sums in MWH # Electrical demand including the energy consumption of the other system components _E['El'] = np.sum(np.abs(_Plt)) * _dt / 3.6e9 # DC output of the PV generator including curtailment _E['Epv'] = np.sum(np.abs(_Ppv)) * _dt / 3.6e9 # DC input of the battery (charged) _E['Ebatin'] = np.sum(np.abs(_Pbatin)) * _dt / 3.6e9 # DC output of the battery (discharged) _E['Ebatout'] = np.sum(np.abs(_Pbatout)) * _dt / 3.6e9 # Grid feed-in _E['Eac2g'] = np.sum(np.abs(_Pac2g)) * _dt / 3.6e9 # Grid demand _E['Eg2ac'] = np.sum(np.abs(_Pg2ac)) * _dt / 3.6e9 # Load supply by the grid _E['Eg2l'] = np.sum(np.abs(_Pg2l)) * _dt / 3.6e9 # Demand of the other system components _E['Eperi'] = np.sum(np.abs(_Pperi)) * _dt / 3.6e9 # Curtailed PV energy _E['Ect'] = np.sum(np.abs(_Pct)) * _dt / 3.6e9 if 
_parameter['Top'] == 'AC': # AC-coupled systems # AC output of the PV system including curtailment _E['Epvs'] = np.sum(np.abs(_Ppvs)) * _dt / 3.6e9 # AC input of the battery system _E['Eac2bs'] = np.sum(np.abs(_Pac2bs)) * _dt / 3.6e9 # AC output of the battery system _E['Ebs2ac'] = np.sum(np.abs(_Pbs2ac)) * _dt / 3.6e9 # Direct use of PV energy _E['Epvs2l'] = np.sum(np.abs(_Ppvs2l)) * _dt / 3.6e9 # PV charging _E['Epvs2bs'] = np.sum(np.abs(_Ppvs2bs)) * _dt / 3.6e9 # Grid charging _E['Eg2bs'] = np.sum(np.abs(_Pg2bs)) * _dt / 3.6e9 # PV feed-in _E['Epvs2g'] = np.sum(np.abs(_Ppvs2g)) * _dt / 3.6e9 # Load supply by the battery system _E['Ebs2l'] = np.sum(np.abs(_Pbs2l)) * _dt / 3.6e9 # Battery feed-in _E['Ebs2g'] = np.sum(np.abs(_Pbs2g)) * _dt / 3.6e9 elif _parameter['Top'] == 'DC' or _parameter['Top'] == 'PV': # DC- and PV-coupled systems # Grid demand of the PV-battery system _E['Eg2pvbs'] = np.sum(np.abs(_Pg2pvbs)) * _dt / 3.6e9 # AC input of the PV-battery system _E['Eac2pvbs'] = np.sum(np.abs(_Pac2pvbs)) * _dt / 3.6e9 # AC output of the PV-battery system _E['Epvbs2ac'] = np.sum(np.abs(_Ppvbs2ac)) * _dt / 3.6e9 # Load supply by the PV-battery system _E['Epvbs2l'] = np.sum(np.abs(_Ppvbs2l)) * _dt / 3.6e9 return _E def bat_res_mod_ideal(_parameter, _Pl, _Ppv, _Pbat, _dt, *args): E = dict() # Dictionary to store energy sums if _parameter['Top'] == 'AC': Ppvs = args[0] # AC output power of the PV system Pbs = args[1] # AC power of the battery system Pperi = args[2] # Additional power consumption of the other system components elif _parameter['Top'] == 'DC': Ppv2ac = args[0] Ppv2bat_in = args[1] Ppvbs = args[2] Pperi = args[3] Ppv2ac_in = _Ppv - Ppv2bat_in # Additional power consumption of the other system components Pperi = np.zeros_like(_Ppv) # Total load including the power consumption of the other system components Plt = _Pl # DC input power of the battery (charged) Pbatin = np.maximum(0, _Pbat) # DC output power of the battery (discharged) Pbatout = np.minimum(0, _Pbat) if _parameter['Top'] == 'AC': # Grid power Pg = Ppvs - _Pl - Pbs # Residual power Pr = Ppvs - Plt # AC input power of the battery system Pac2bs = np.maximum(0, Pbs) # AC output power of the battery system Pbs2ac = np.minimum(0, Pbs) # Negative residual power (residual load demand) Prn = np.minimum(0, Pr) # Positive residual power (surplus PV power) Prp = np.maximum(0, Pr) # Direct use of PV power by the load Ppvs2l = np.minimum(Ppvs, Plt) # PV charging power Ppvs2bs=np.minimum(Prp, Pac2bs) # Grid charging power Pg2bs=np.maximum(Pac2bs - Prp, 0) # Grid supply power of the load Pg2l=np.minimum(Prn - Pbs2ac, 0) # Battery supply power of the load Pbs2l=np.maximum(Prn, Pbs2ac) # Battery feed-in power Pbs2g=np.minimum(Pbs2ac - Prn, 0) # PV feed-in power Ppvs2g=np.maximum(Prp - Pac2bs, 0) elif _parameter['Top'] == 'DC': # Grid power Pg = Ppvbs - _Pl # Grid power demand of the PV-battery system Pg2pvbs = np.minimum(0, Ppvbs) # AC input power of the PV-battery system Pac2pvbs = Pg2pvbs # AC output power of the PV-battery system Ppvbs2ac = np.maximum(0, Ppvbs) # Load supply power by the PV-battery system Ppvbs2l = np.minimum(_Pl, Ppvbs2ac) # Load supply power by the grid Pg2l = (Plt - Ppvbs2l) # Curtailed PV power (AC output power) Pct = np.zeros_like(_Ppv) # Power demand from the grid Pg2ac = np.minimum(0, Pg) # Feed-in power to the grid Pac2g=np.maximum(0, Pg) # Energy sums # Electrical demand including the energy consumption of the other system components E['El'] = np.sum(np.abs(Plt)) / 3.6e9 # DC output of the PV generator 
including curtailment E['Epv'] = np.sum(np.abs(_Ppv)) / 3.6e9 # DC input of the battery (charged) E['Ebatin'] = np.sum(np.abs(Pbatin)) / 3.6e9 # DC output of the battery (discharged) E['Ebatout'] = np.sum(np.abs(Pbatout)) / 3.6e9 # Grid feed-in E['Eac2g'] = np.sum(np.abs(Pac2g)) / 3.6e9 # Grid demand E['Eg2ac'] = np.sum(np.abs(Pg2ac)) / 3.6e9 # Load supply by the grid E['Eg2l'] = np.sum(np.abs(Pg2l)) / 3.6e9 # Demand of the other system components E['Eperi'] = np.sum(np.abs(Pperi)) / 3.6e9 # Curtailed PV energy E['Ect'] = np.sum(np.abs(Pct)) / 3.6e9 if _parameter['Top'] == 'AC': # AC output of the PV system including curtailment E['Epvs']=np.sum(np.abs(Ppvs)) / 3.6e9 # AC input of the battery system E['Eac2bs']=np.sum(np.abs(Pac2bs)) / 3.6e9 # AC output of the battery system E['Ebs2ac']=np.sum(np.abs(Pbs2ac)) / 3.6e9 # Direct use of PV energy E['Epvs2l']=np.sum(np.abs(Ppvs2l)) / 3.6e9 # PV charging E['Epvs2bs']=np.sum(np.abs(Ppvs2bs)) / 3.6e9 # Grid charging E['Eg2bs']=np.sum(np.abs(Pg2bs)) / 3.6e9 # PV feed-in E['Epvs2g']=np.sum(np.abs(Ppvs2g)) / 3.6e9 # Load supply by the battery system E['Ebs2l']=np.sum(
np.abs(Pbs2l)
numpy.abs
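# Illustrative sketch, not from the original row above (variable names here are
# hypothetical): the energy sums in bat_res_mod follow the pattern
# E = sum(|P|) * dt / 3.6e9, i.e. integrating a power trace in W over a time
# step in seconds and converting joules to MWh (1 MWh = 3.6e9 J). A minimal,
# self-contained version of that step, including the charge/discharge split
# done with np.maximum/np.minimum in bat_res_mod_ideal:
import numpy as np

def energy_sum_mwh(power_w, dt_s=1.0):
    """Integrate a power trace [W] sampled every dt_s seconds; return MWh."""
    return np.sum(np.abs(power_w)) * dt_s / 3.6e9

# Hypothetical battery power trace: positive = charging, negative = discharging
p_bat = np.array([1500.0, 1500.0, -500.0, -2000.0, 0.0])  # W, 1 s steps
p_in = np.maximum(0, p_bat)    # charged power, analogous to Pbatin
p_out = np.minimum(0, p_bat)   # discharged power, analogous to Pbatout
print(energy_sum_mwh(p_in), energy_sum_mwh(p_out))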
#!/usr/bin/env python
"""
Interpolation of scattered data using ordinary kriging/collocation

The program uses nearest neighbors interpolation and selects data from eight
quadrants around the prediction point, and uses a third-order Gauss-Markov
covariance model with a correlation length defined by the user.

Provides the possibility of pre-cleaning the data using a spatial n-sigma
filter before interpolation.

Observations with provided noise/error estimates (for each observation) are
added to the diagonal of the covariance matrix if provided. The user can also
provide a constant rms-noise added to the diagonal.

Takes as input an hdf5 file with the needed data in geographical coordinates
and a-priori error if needed. The user provides the wanted projection using
the EPSG projection format.

Output consists of an hdf5 file containing the predictions, rmse and the
number of points used in the prediction, and the epsg number for the
projection.

Notes:
    If both the a-priori errors and a constant rms are provided, all values
    smaller than the provided rms are set to this value, providing a minimum
    error for the observations.

    To reduce the impact of highly correlated along-track measurements (seen
    as streaks in the interpolated raster) the 'rand' option can be used. This
    randomly samples N observations in each quadrant instead of using the
    closest data points.

Example:

    python interpkrig.py ifile.h5 ofile.h5 -d 10 10 -n 25 -r 50 -a 25 -p 3031 \
        -c 50 10 -v lon lat dhdt dummy -e 0.1 -m dist

    python interpkrig.py ifile.h5 ofile.h5 -d 10 10 -n 25 -r 50 -a 25 -p 3031 \
        -c 50 10 -v lon lat dhdt rmse -e 0.1 -m rand

Credits:
    captoolkit - JPL Cryosphere Altimetry Processing Toolkit

    <NAME> (<EMAIL>)
    <NAME> (<EMAIL>)
    <NAME> (<EMAIL>)

    Jet Propulsion Laboratory, California Institute of Technology
"""
import h5py
import pyproj
import argparse
import numpy as np
from scipy import stats
from scipy.spatial import cKDTree
from scipy.spatial.distance import cdist


def rand(x, n):
    """Draws random samples from array"""

    # Determine data density
    if len(x) > n:
        # Draw random samples from array
        I = np.random.choice(np.arange(len(x)), n, replace=False)
    else:
        # Output boolean vector - true
        I = np.ones(len(x), dtype=bool)

    return I


def sort_dist(d, n):
    """ Sort array by distance"""

    # Determine if sorting needed
    if len(d) >= n:
        # Sort according to distance
        I = np.argsort(d)
    else:
        # Output boolean vector - true
        I = np.ones(len(d), dtype=bool)

    return I


def transform_coord(proj1, proj2, x, y):
    """Transform coordinates from proj1 to proj2 (EPSG num)."""

    # Set full EPSG projection strings
    proj1 = pyproj.Proj("+init=EPSG:" + proj1)
    proj2 = pyproj.Proj("+init=EPSG:" + proj2)

    # Convert coordinates
    return pyproj.transform(proj1, proj2, x, y)


def make_grid(xmin, xmax, ymin, ymax, dx, dy):
    """ Construct output grid-coordinates. """

    Nn = int((np.abs(ymax - ymin)) / dy) + 1  # ny
    Ne = int((np.abs(xmax - xmin)) / dx) + 1  # nx
    xi = np.linspace(xmin, xmax, num=Ne)
    yi = np.linspace(ymin, ymax, num=Nn)

    return np.meshgrid(xi, yi)


def spatial_filter(x, y, z, dx, dy, sigma=5.0):
    """ Cleaning of spatial data """

    # Grid dimensions
    Nn = int((np.abs(y.max() - y.min())) / dy) + 1
    Ne = int((np.abs(x.max() - x.min())) / dx) + 1

    # Bin data
    f_bin = stats.binned_statistic_2d(x, y, z, bins=(Ne, Nn))

    # Get bin numbers for the data
    index = f_bin.binnumber

    # Unique indexes
    ind = np.unique(index)

    # Create output
    zo = z.copy()

    # Number of unique index
    for i in range(len(ind)):

        # index for each bin
        idx, = np.where(index == ind[i])

        # Get data
        zb = z[idx]

        # Make sure we have enough
        if len(zb[~
np.isnan(zb)
numpy.isnan
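# Illustrative sketch, not from the script above: its docstring describes
# ordinary kriging/collocation with a third-order Gauss-Markov covariance. One
# common parameterization of that covariance is
# C(r) = C0 * (1 + r/a + r^2/(3 a^2)) * exp(-r/a); the exact form and the
# quadrant-based neighbour selection used by interpkrig.py may differ. The
# prediction below is the simple-kriging/collocation form (no unbiasedness
# constraint), with the observation noise added to the diagonal as described.
import numpy as np

def markov3(r, a, c0=1.0):
    # Third-order Gauss-Markov covariance model (assumed parameterization)
    return c0 * (1.0 + r / a + (r ** 2) / (3.0 * a ** 2)) * np.exp(-r / a)

def collocate(x, y, z, xp, yp, a, rms):
    """Predict z at (xp, yp) from scattered observations."""
    # Covariance between observations, plus white noise on the diagonal
    d_obs = np.hypot(x[:, None] - x[None, :], y[:, None] - y[None, :])
    C = markov3(d_obs, a) + (rms ** 2) * np.eye(len(x))
    # Covariance between the prediction point and the observations
    d_prd = np.hypot(x - xp, y - yp)
    c = markov3(d_prd, a)
    w = np.linalg.solve(C, c)              # collocation/kriging weights
    zp = np.dot(w, z)                      # prediction
    ep = markov3(0.0, a) - np.dot(w, c)    # formal error variance
    return zp, np.sqrt(max(ep, 0.0))

rng = np.random.default_rng(0)
x, y = rng.uniform(0, 100, 50), rng.uniform(0, 100, 50)
z = np.sin(x / 20.0) + 0.1 * rng.standard_normal(50)
print(collocate(x, y, z, 50.0, 50.0, a=25.0, rms=0.1))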
import os import sys import yaml import numpy as np import torch import torch.utils.data as data import numpy as np import numpy.random as npr import cv2 import copy import glob import scipy import datasets from config.config import cfg from transforms3d.quaternions import mat2quat, quat2mat from utils.se3 import * from utils.pose_error import * from utils.cython_bbox import bbox_overlaps _SUBJECTS = [ '20200709-subject-01', '20200813-subject-02', '20200820-subject-03', '20200903-subject-04', '20200908-subject-05', '20200918-subject-06', '20200928-subject-07', '20201002-subject-08', '20201015-subject-09', '20201022-subject-10', ] _SERIALS = [ '836212060125', '839512060362', '840412060917', '841412060263', '932122060857', '932122060861', '932122061900', '932122062010', ] _YCB_CLASSES = { 1: '002_master_chef_can', 2: '003_cracker_box', 3: '004_sugar_box', 4: '005_tomato_soup_can', 5: '006_mustard_bottle', 6: '007_tuna_fish_can', 7: '008_pudding_box', 8: '009_gelatin_box', 9: '010_potted_meat_can', 10: '011_banana', 11: '019_pitcher_base', 12: '021_bleach_cleanser', 13: '024_bowl', 14: '025_mug', 15: '035_power_drill', 16: '036_wood_block', 17: '037_scissors', 18: '040_large_marker', 19: '051_large_clamp', 20: '052_extra_large_clamp', 21: '061_foam_brick', } _MANO_JOINTS = [ 'wrist', 'thumb_mcp', 'thumb_pip', 'thumb_dip', 'thumb_tip', 'index_mcp', 'index_pip', 'index_dip', 'index_tip', 'middle_mcp', 'middle_pip', 'middle_dip', 'middle_tip', 'ring_mcp', 'ring_pip', 'ring_dip', 'ring_tip', 'little_mcp', 'little_pip', 'little_dip', 'little_tip' ] _MANO_JOINT_CONNECT = [ [0, 1], [ 1, 2], [ 2, 3], [ 3, 4], [0, 5], [ 5, 6], [ 6, 7], [ 7, 8], [0, 9], [ 9, 10], [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20], ] _BOP_EVAL_SUBSAMPLING_FACTOR = 4 class dex_ycb_dataset(data.Dataset): def __init__(self, setup, split, obj_list): self._setup = setup self._split = split self._color_format = "color_{:06d}.jpg" self._depth_format = "aligned_depth_to_color_{:06d}.png" self._label_format = "labels_{:06d}.npz" self._height = 480 self._width = 640 # paths self._name = 'dex_ycb_' + setup + '_' + split self._image_set = split self._dex_ycb_path = self._get_default_path() path = os.path.join(self._dex_ycb_path, 'data') self._data_dir = path self._calib_dir = os.path.join(self._data_dir, "calibration") self._model_dir = os.path.join(self._data_dir, "models") self._obj_file = { k: os.path.join(self._model_dir, v, "textured_simple.obj") for k, v in _YCB_CLASSES.items() } # define all the classes self._classes_all = ('002_master_chef_can', '003_cracker_box', '004_sugar_box', '005_tomato_soup_can', '006_mustard_bottle', \ '007_tuna_fish_can', '008_pudding_box', '009_gelatin_box', '010_potted_meat_can', '011_banana', '019_pitcher_base', \ '021_bleach_cleanser', '024_bowl', '025_mug', '035_power_drill', '036_wood_block', '037_scissors', '040_large_marker', \ '051_large_clamp', '052_extra_large_clamp', '061_foam_brick') self._num_classes_all = len(self._classes_all) self._class_colors_all = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255), (0, 255, 255), \ (128, 0, 0), (0, 128, 0), (0, 0, 128), (128, 128, 0), (128, 0, 128), (0, 128, 128), \ (64, 0, 0), (0, 64, 0), (0, 0, 64), (64, 64, 0), (64, 0, 64), (0, 64, 64), (192, 0, 0), (0, 192, 0), (0, 0, 192)] self._extents_all = self._load_object_extents() self._posecnn_class_indexes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 20, 21] # compute class index class_index = [] for name in 
obj_list: for i in range(self._num_classes_all): if name == self._classes_all[i]: class_index.append(i) break print('class index:', class_index) self._class_index = class_index # select a subset of classes self._classes = obj_list self._num_classes = len(self._classes) self._class_colors = [self._class_colors_all[i] for i in class_index] self._extents = self._extents_all[class_index] self._points, self._points_all = self._load_object_points(self._classes, self._extents) # Seen subjects, camera views, grasped objects. if self._setup == 's0': if self._split == 'train': subject_ind = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] serial_ind = [0, 1, 2, 3, 4, 5, 6, 7] sequence_ind = [i for i in range(100) if i % 5 != 4] if self._split == 'val': subject_ind = [0, 1] serial_ind = [0, 1, 2, 3, 4, 5, 6, 7] sequence_ind = [i for i in range(100) if i % 5 == 4] if self._split == 'test': subject_ind = [2, 3, 4, 5, 6, 7, 8, 9] serial_ind = [0, 1, 2, 3, 4, 5, 6, 7] sequence_ind = [i for i in range(100) if i % 5 == 4] # Unseen subjects. if self._setup == 's1': if self._split == 'train': subject_ind = [0, 1, 2, 3, 4, 5, 9] serial_ind = [0, 1, 2, 3, 4, 5, 6, 7] sequence_ind = list(range(100)) if self._split == 'val': subject_ind = [6] serial_ind = [0, 1, 2, 3, 4, 5, 6, 7] sequence_ind = list(range(100)) if self._split == 'test': subject_ind = [7, 8] serial_ind = [0, 1, 2, 3, 4, 5, 6, 7] sequence_ind = list(range(100)) # Unseen camera views. if self._setup == 's2': if self._split == 'train': subject_ind = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] serial_ind = [0, 1, 2, 3, 4, 5] sequence_ind = list(range(100)) if self._split == 'val': subject_ind = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] serial_ind = [6] sequence_ind = list(range(100)) if self._split == 'test': subject_ind = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] serial_ind = [7] sequence_ind = list(range(100)) # Unseen grasped objects. 
if self._setup == 's3': if self._split == 'train': subject_ind = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] serial_ind = [0, 1, 2, 3, 4, 5, 6, 7] sequence_ind = [ i for i in range(100) if i // 5 not in (3, 7, 11, 15, 19) ] if self._split == 'val': subject_ind = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] serial_ind = [0, 1, 2, 3, 4, 5, 6, 7] sequence_ind = [i for i in range(100) if i // 5 in (3, 19)] if self._split == 'test': subject_ind = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] serial_ind = [0, 1, 2, 3, 4, 5, 6, 7] sequence_ind = [i for i in range(100) if i // 5 in (7, 11, 15)] self._subjects = [_SUBJECTS[i] for i in subject_ind] self._serials = [_SERIALS[i] for i in serial_ind] self._intrinsics = [] for s in self._serials: intr_file = os.path.join(self._calib_dir, "intrinsics", "{}_{}x{}.yml".format(s, self._width, self._height)) with open(intr_file, 'r') as f: intr = yaml.load(f, Loader=yaml.FullLoader) intr = intr['color'] self._intrinsics.append(intr) # build mapping self._sequences = [] self._mapping = [] self._ycb_ids = [] offset = 0 for n in self._subjects: seq = sorted(os.listdir(os.path.join(self._data_dir, n))) seq = [os.path.join(n, s) for s in seq] assert len(seq) == 100 seq = [seq[i] for i in sequence_ind] self._sequences += seq for i, q in enumerate(seq): meta_file = os.path.join(self._data_dir, q, "meta.yml") with open(meta_file, 'r') as f: meta = yaml.load(f, Loader=yaml.FullLoader) c = np.arange(len(self._serials)) f = np.arange(meta['num_frames']) f, c = np.meshgrid(f, c) c = c.ravel() f = f.ravel() s = (offset + i) * np.ones_like(c) m = np.vstack((s, c, f)).T self._mapping.append(m) self._ycb_ids.append(meta['ycb_ids']) offset += len(seq) self._mapping = np.vstack(self._mapping) # sample a subset for training if split == 'train': self._mapping = self._mapping[::10] # dataset size self._size = len(self._mapping) print('dataset %s with images %d' % (self._name, self._size)) def __len__(self): return self._size def get_bop_id_from_idx(self, idx): s, c, f = map(lambda x: x.item(), self._mapping[idx]) scene_id = s * len(self._serials) + c im_id = f return scene_id, im_id def __getitem__(self, idx): s, c, f = self._mapping[idx] is_testing = f % _BOP_EVAL_SUBSAMPLING_FACTOR == 0 if self._split == 'test' and not is_testing: sample = {'is_testing': is_testing} return sample scene_id, im_id = self.get_bop_id_from_idx(idx) video_id = '%04d' % (scene_id) image_id = '%06d' % (im_id) # posecnn result path posecnn_result_path = os.path.join(self._dex_ycb_path, 'results_posecnn', self._name, 'vgg16_dex_ycb_epoch_16.checkpoint.pth', video_id + '_' + image_id + '.mat') d = os.path.join(self._data_dir, self._sequences[s], self._serials[c]) roidb = { 'color_file': os.path.join(d, self._color_format.format(f)), 'depth_file': os.path.join(d, self._depth_format.format(f)), 'label_file': os.path.join(d, self._label_format.format(f)), 'intrinsics': self._intrinsics[c], 'ycb_ids': self._ycb_ids[s], 'posecnn': posecnn_result_path, } # Get the input image blob im_color, im_depth = self._get_image_blob(roidb['color_file'], roidb['depth_file']) # build the label blob im_label, intrinsic_matrix, poses, gt_boxes, poses_result, rois_result, labels_result \ = self._get_label_blob(roidb, self._num_classes) is_syn = 0 im_scale = 1.0 im_info = np.array([im_color.shape[1], im_color.shape[2], im_scale, is_syn], dtype=np.float32) sample = {'image_color': im_color[:, :, (2, 1, 0)], 'image_depth': im_depth, 'label': im_label, 'intrinsic_matrix': intrinsic_matrix, 'gt_poses': poses, 'gt_boxes': gt_boxes, 'poses_result': poses_result, 
'rois_result': rois_result, 'labels_result': labels_result, 'extents': self._extents, 'points': self._points_all, 'im_info': im_info, 'video_id': video_id, 'image_id': image_id} if self._split == 'test': sample['is_testing'] = is_testing return sample def _get_image_blob(self, color_file, depth_file): # rgba rgba = cv2.imread(color_file, cv2.IMREAD_UNCHANGED) if rgba.shape[2] == 4: im = np.copy(rgba[:,:,:3]) alpha = rgba[:,:,3] I = np.where(alpha == 0) im[I[0], I[1], :] = 0 else: im = rgba im_color = im.astype('float') / 255.0 # depth image im_depth = cv2.imread(depth_file, cv2.IMREAD_UNCHANGED) im_depth = im_depth.astype('float') / 1000.0 return im_color, im_depth def _get_label_blob(self, roidb, num_classes): """ build the label blob """ # parse data cls_indexes = roidb['ycb_ids'] classes = np.array(self._class_index) fx = roidb['intrinsics']['fx'] fy = roidb['intrinsics']['fy'] px = roidb['intrinsics']['ppx'] py = roidb['intrinsics']['ppy'] intrinsic_matrix = np.eye(3, dtype=np.float32) intrinsic_matrix[0, 0] = fx intrinsic_matrix[1, 1] = fy intrinsic_matrix[0, 2] = px intrinsic_matrix[1, 2] = py label = np.load(roidb['label_file']) # label image im_label = label['seg'] # poses poses = label['pose_y'] if len(poses.shape) == 2: poses = np.reshape(poses, (1, 3, 4)) num = poses.shape[0] assert num == len(cls_indexes), 'number of poses not equal to number of objects' # bounding boxes gt_boxes = np.zeros((num, 5), dtype=np.float32) for i in range(num): cls = int(cls_indexes[i]) - 1 ind = np.where(classes == cls)[0] if len(ind) > 0: R = poses[i, :, :3] T = poses[i, :, 3] # compute box x3d = np.ones((4, self._points_all.shape[1]), dtype=np.float32) x3d[0, :] = self._points_all[ind,:,0] x3d[1, :] = self._points_all[ind,:,1] x3d[2, :] = self._points_all[ind,:,2] RT = np.zeros((3, 4), dtype=np.float32) RT[:3, :3] = R RT[:, 3] = T x2d = np.matmul(intrinsic_matrix, np.matmul(RT, x3d)) x2d[0, :] = np.divide(x2d[0, :], x2d[2, :]) x2d[1, :] = np.divide(x2d[1, :], x2d[2, :]) gt_boxes[i, 0] = np.min(x2d[0, :]) gt_boxes[i, 1] = np.min(x2d[1, :]) gt_boxes[i, 2] = np.max(x2d[0, :]) gt_boxes[i, 3] = np.max(x2d[1, :]) gt_boxes[i, 4] = ind # load posecnn result if available if os.path.exists(roidb['posecnn']): result = scipy.io.loadmat(roidb['posecnn']) n = result['poses'].shape[0] poses_result = np.zeros((n, 9), dtype=np.float32) poses_result[:, 0] = 1 poses_result[:, 1] = result['rois'][:, 1] poses_result[:, 2:] = result['poses'] rois_result = result['rois'].copy() labels_result = result['labels'].copy() # select the classes, one object per class index = [] flags = np.zeros((self._num_classes, ), dtype=np.int32) for i in range(poses_result.shape[0]): cls = self._posecnn_class_indexes[int(poses_result[i, 1])] - 1 ind = np.where(classes == cls)[0] if len(ind) > 0 and flags[ind] == 0: index.append(i) poses_result[i, 1] = ind rois_result[i, 1] = ind flags[ind] = 1 poses_result = poses_result[index, :] rois_result = rois_result[index, :] else: # print('no posecnn result %s' % (roidb['posecnn'])) poses_result = np.zeros((0, 9), dtype=np.float32) rois_result = np.zeros((0, 7), dtype=np.float32) labels_result = np.zeros((0, 1), dtype=np.float32) poses = poses.transpose((1, 2, 0)) return im_label, intrinsic_matrix, poses, gt_boxes, poses_result, rois_result, labels_result def _get_default_path(self): """ Return the default path where YCB_Video is expected to be installed. 
""" return os.path.join(datasets.ROOT_DIR, 'data', 'DEX_YCB') def _load_object_extents(self): extents = np.zeros((self._num_classes_all, 3), dtype=np.float32) for i in range(self._num_classes_all): point_file = os.path.join(self._model_dir, self._classes_all[i], 'points.xyz') print(point_file) assert os.path.exists(point_file), 'Path does not exist: {}'.format(point_file) points = np.loadtxt(point_file) extents[i, :] = 2 * np.max(np.absolute(points), axis=0) return extents def _load_object_points(self, classes, extents): points = [[] for _ in range(len(classes))] num = np.inf num_classes = len(classes) for i in range(num_classes): point_file = os.path.join(self._model_dir, classes[i], 'points.xyz') print(point_file) assert os.path.exists(point_file), 'Path does not exist: {}'.format(point_file) points[i] = np.loadtxt(point_file) if points[i].shape[0] < num: num = points[i].shape[0] points_all = np.zeros((num_classes, num, 3), dtype=np.float32) for i in range(num_classes): points_all[i, :, :] = points[i][:num, :] return points, points_all def write_dop_results(self, output_dir, modality): # only write the result file filename = os.path.join(output_dir, 'poserbpf_' + self._name + '_' + modality + '.csv') f = open(filename, 'w') f.write('scene_id,im_id,obj_id,score,R,t,time\n') # list the mat file filename = os.path.join(output_dir, '*.mat') files = sorted(glob.glob(filename)) # for each image for i in range(len(files)): filename = os.path.basename(files[i]) # parse filename pos = filename.find('_') scene_id = int(filename[:pos]) im_id = int(filename[pos+1:-4]) # load result print(files[i]) result = scipy.io.loadmat(files[i]) if len(result['rois']) == 0: continue rois = result['rois'] num = rois.shape[0] for j in range(num): obj_id = self._class_index[int(rois[j, 1])] + 1 if obj_id == 0: continue score = rois[j, -1] run_time = -1 # pose from network R = quat2mat(result['poses'][j, :4].flatten()) t = result['poses'][j, 4:] * 1000 line = '{scene_id},{im_id},{obj_id},{score},{R},{t},{time}\n'.format( scene_id=scene_id, im_id=im_id, obj_id=obj_id, score=score, R=' '.join(map(str, R.flatten().tolist())), t=' '.join(map(str, t.flatten().tolist())), time=run_time) f.write(line) # close file f.close() # compute box def compute_box(self, cls, intrinsic_matrix, RT): ind = np.where(self._class_index == cls)[0] x3d = np.ones((4, self._points_all.shape[1]), dtype=np.float32) x3d[0, :] = self._points_all[ind,:,0] x3d[1, :] = self._points_all[ind,:,1] x3d[2, :] = self._points_all[ind,:,2] x2d = np.matmul(intrinsic_matrix, np.matmul(RT, x3d)) x2d[0, :] = np.divide(x2d[0, :], x2d[2, :]) x2d[1, :] = np.divide(x2d[1, :], x2d[2, :]) x1 = np.min(x2d[0, :]) y1 = np.min(x2d[1, :]) x2 = np.max(x2d[0, :]) y2 = np.max(x2d[1, :]) return [x1, y1, x2, y2] def evaluation(self, output_dir, modality): self.write_dop_results(output_dir, modality) filename = os.path.join(output_dir, 'results_poserbpf.mat') if os.path.exists(filename): results_all = scipy.io.loadmat(filename) print('load results from file') print(filename) distances_sys = results_all['distances_sys'] distances_non = results_all['distances_non'] errors_rotation = results_all['errors_rotation'] errors_translation = results_all['errors_translation'] results_seq_id = results_all['results_seq_id'].flatten() results_frame_id = results_all['results_frame_id'].flatten() results_object_id = results_all['results_object_id'].flatten() results_cls_id = results_all['results_cls_id'].flatten() else: # save results num_max = 200000 num_results = 1 distances_sys = 
np.zeros((num_max, num_results), dtype=np.float32) distances_non = np.zeros((num_max, num_results), dtype=np.float32) errors_rotation = np.zeros((num_max, num_results), dtype=np.float32) errors_translation = np.zeros((num_max, num_results), dtype=np.float32) results_seq_id = np.zeros((num_max, ), dtype=np.float32) results_frame_id = np.zeros((num_max, ), dtype=np.float32) results_object_id = np.zeros((num_max, ), dtype=np.float32) results_cls_id = np.zeros((num_max, ), dtype=np.float32) # for each image count = -1 for i in range(len(self._mapping)): s, c, f = self._mapping[i] is_testing = f % _BOP_EVAL_SUBSAMPLING_FACTOR == 0 if not is_testing: continue # intrinsics intrinsics = self._intrinsics[c] intrinsic_matrix = np.eye(3, dtype=np.float32) intrinsic_matrix[0, 0] = intrinsics['fx'] intrinsic_matrix[1, 1] = intrinsics['fy'] intrinsic_matrix[0, 2] = intrinsics['ppx'] intrinsic_matrix[1, 2] = intrinsics['ppy'] # parse keyframe name scene_id, im_id = self.get_bop_id_from_idx(i) # load result filename = os.path.join(output_dir, '%04d_%06d.mat' % (scene_id, im_id)) print(filename) result = scipy.io.loadmat(filename) # load gt d = os.path.join(self._data_dir, self._sequences[s], self._serials[c]) label_file = os.path.join(d, self._label_format.format(f)) label = np.load(label_file) cls_indexes = np.array(self._ycb_ids[s]).flatten() # poses poses = label['pose_y'] if len(poses.shape) == 2: poses = np.reshape(poses, (1, 3, 4)) num = poses.shape[0] assert num == len(cls_indexes), 'number of poses not equal to number of objects' # instance label im_label = label['seg'] instance_ids = np.unique(im_label) if instance_ids[0] == 0: instance_ids = instance_ids[1:] if instance_ids[-1] == 255: instance_ids = instance_ids[:-1] # for each gt poses for j in range(len(instance_ids)): cls = instance_ids[j] # find the number of pixels of the object pixels = np.sum(im_label == cls) if pixels < 200: continue count += 1 # find the pose object_index = np.where(cls_indexes == cls)[0][0] RT_gt = poses[object_index, :, :] box_gt = self.compute_box(cls - 1, intrinsic_matrix, RT_gt) results_seq_id[count] = scene_id results_frame_id[count] = im_id results_object_id[count] = object_index results_cls_id[count] = cls # network result roi_index = [] if len(result['rois']) > 0: for k in range(result['rois'].shape[0]): ind = int(result['rois'][k, 1]) if cls == self._class_index[ind] + 1: roi_index.append(k) # select the roi if len(roi_index) > 1: # overlaps: (rois x gt_boxes) roi_blob = result['rois'][roi_index, :] roi_blob = roi_blob[:, (0, 2, 3, 4, 5, 1)] gt_box_blob = np.zeros((1, 5), dtype=np.float32) gt_box_blob[0, 1:] = box_gt overlaps = bbox_overlaps( np.ascontiguousarray(roi_blob[:, :5], dtype=np.float), np.ascontiguousarray(gt_box_blob, dtype=np.float)).flatten() assignment = overlaps.argmax() roi_index = [roi_index[assignment]] if len(roi_index) > 0: RT = np.zeros((3, 4), dtype=np.float32) ind = int(result['rois'][roi_index, 1]) points = self._points[ind] # pose from network RT[:3, :3] = quat2mat(result['poses'][roi_index, :4].flatten()) RT[:, 3] = result['poses'][roi_index, 4:] distances_sys[count, 0] = adi(RT[:3, :3], RT[:, 3], RT_gt[:3, :3], RT_gt[:, 3], points) distances_non[count, 0] = add(RT[:3, :3], RT[:, 3], RT_gt[:3, :3], RT_gt[:, 3], points) errors_rotation[count, 0] = re(RT[:3, :3], RT_gt[:3, :3]) errors_translation[count, 0] = te(RT[:, 3], RT_gt[:, 3]) else: distances_sys[count, :] = np.inf distances_non[count, :] = np.inf errors_rotation[count, :] = np.inf errors_translation[count, :] = np.inf 
distances_sys = distances_sys[:count+1, :] distances_non = distances_non[:count+1, :] errors_rotation = errors_rotation[:count+1, :] errors_translation = errors_translation[:count+1, :] results_seq_id = results_seq_id[:count+1] results_frame_id = results_frame_id[:count+1] results_object_id = results_object_id[:count+1] results_cls_id = results_cls_id[:count+1] results_all = {'distances_sys': distances_sys, 'distances_non': distances_non, 'errors_rotation': errors_rotation, 'errors_translation': errors_translation, 'results_seq_id': results_seq_id, 'results_frame_id': results_frame_id, 'results_object_id': results_object_id, 'results_cls_id': results_cls_id } filename = os.path.join(output_dir, 'results_poserbpf.mat') scipy.io.savemat(filename, results_all) # print the results # for each class import matplotlib.pyplot as plt max_distance = 0.1 index_plot = [0] color = ['r'] leng = ['PoseRBPF'] num = len(leng) ADD = np.zeros((self._num_classes_all + 1, num), dtype=np.float32) ADDS = np.zeros((self._num_classes_all + 1, num), dtype=np.float32) TS = np.zeros((self._num_classes_all + 1, num), dtype=np.float32) classes = list(copy.copy(self._classes_all)) classes.append('all') for k in range(self._num_classes_all + 1): fig = plt.figure(figsize=(16.0, 10.0)) if k == self._num_classes_all: index = range(len(results_cls_id)) else: index = np.where(results_cls_id == k + 1)[0] if len(index) == 0: continue print('%s: %d objects' % (classes[k], len(index))) # distance symmetry ax = fig.add_subplot(2, 3, 1) lengs = [] for i in index_plot: D = distances_sys[index, i] ind = np.where(D > max_distance)[0] D[ind] = np.inf d = np.sort(D) n = len(d) accuracy = np.cumsum(np.ones((n, ), np.float32)) / n plt.plot(d, accuracy, color[i], linewidth=2) ADDS[k, i] = VOCap(d, accuracy) lengs.append('%s (%.2f)' % (leng[i], ADDS[k, i] * 100)) print('%s, %s: %d objects missed' % (classes[k], leng[i], np.sum(np.isinf(D)))) ax.legend(lengs) plt.xlabel('Average distance threshold in meter (symmetry)') plt.ylabel('accuracy') ax.set_title(classes[k]) # distance non-symmetry ax = fig.add_subplot(2, 3, 2) lengs = [] for i in index_plot: D = distances_non[index, i] ind = np.where(D > max_distance)[0] D[ind] = np.inf d = np.sort(D) n = len(d) accuracy = np.cumsum(np.ones((n, ), np.float32)) / n plt.plot(d, accuracy, color[i], linewidth=2) ADD[k, i] = VOCap(d, accuracy) lengs.append('%s (%.2f)' % (leng[i], ADD[k, i] * 100)) print('%s, %s: %d objects missed' % (classes[k], leng[i], np.sum(np.isinf(D)))) ax.legend(lengs) plt.xlabel('Average distance threshold in meter (non-symmetry)') plt.ylabel('accuracy') ax.set_title(classes[k]) # translation ax = fig.add_subplot(2, 3, 3) lengs = [] for i in index_plot: D = errors_translation[index, i] ind = np.where(D > max_distance)[0] D[ind] = np.inf d = np.sort(D) n = len(d) accuracy = np.cumsum(np.ones((n, ), np.float32)) / n plt.plot(d, accuracy, color[i], linewidth=2) TS[k, i] = VOCap(d, accuracy) lengs.append('%s (%.2f)' % (leng[i], TS[k, i] * 100)) print('%s, %s: %d objects missed' % (classes[k], leng[i], np.sum(
np.isinf(D)
numpy.isinf
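# Illustrative sketch, not from the dataset class above: _get_label_blob and
# compute_box obtain 2-D boxes by projecting the 3-D model points through the
# pinhole model x2d ~ K [R|t] X and taking min/max of the pixel coordinates.
# The intrinsics and pose below are made-up toy values, not DexYCB data.
import numpy as np

def project_points(points_xyz, K, R, t):
    """Project Nx3 object points into the image with a pinhole camera."""
    Xc = R @ points_xyz.T + t.reshape(3, 1)   # camera-frame coordinates (3, N)
    x = K @ Xc                                # homogeneous image coordinates
    return x[:2] / x[2]                       # (2, N) pixel coordinates

def bbox_from_points(points_xyz, K, R, t):
    uv = project_points(points_xyz, K, R, t)
    return [uv[0].min(), uv[1].min(), uv[0].max(), uv[1].max()]  # x1, y1, x2, y2

K = np.array([[600.0, 0.0, 320.0], [0.0, 600.0, 240.0], [0.0, 0.0, 1.0]])
R = np.eye(3)
t = np.array([0.0, 0.0, 0.5])                        # 0.5 m in front of the camera
pts = np.random.uniform(-0.05, 0.05, size=(500, 3))  # ~10 cm toy object
print(bbox_from_points(pts, K, R, t))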
""" This script contains all necessary code to extract and convert the patients data from the Sciensano hospital survey into parameters usable by the BIOMATH COVID-19 SEIRD model. You must place the super secret detailed hospitalization dataset `COVID19BE_CLINIC.csv` in the same folder as this script in order to run it. Further, you must MANUALLY replace décédé and rétabli in the file `COVID19BE_CLINIC.csv` with D and R. To load the resulting .xlsx into a pandas dataframe use: dataframe = pd.read_excel('../../data/interim/model_parameters/COVID19_SEIRD/sciensano_hospital_parameters.xlsx', sheet_name='residence_times', index_col=0, header=[0,1]) """ __author__ = "<NAME>" __copyright__ = "Copyright (c) 2020 by <NAME>, BIOMATH, Ghent University. All Rights Reserved." # ---------------------- # Load required packages # ---------------------- import os import math import numpy as np import pandas as pd from scipy.stats import mannwhitneyu, ttest_ind, gamma, exponweib, weibull_min import matplotlib.pyplot as plt import datetime from datetime import timedelta import argparse # ---------------- # Script arguments # ---------------- parser = argparse.ArgumentParser() parser.add_argument("-s", "--subset_size", help="Size of subset drawn from total population during bootstrapping", default=1000, type=int) parser.add_argument("-n", "--number_iterations", help="Total number of bootstraps", default=100, type=int) parser.add_argument("-a", "--age_stratification_size", help="Total number of age groups", default=9, type=int) # Save as dict args = parser.parse_args() # Set correct age_classes if args.age_stratification_size == 3: age_classes = pd.IntervalIndex.from_tuples([(0,20),(20,60),(60,120)], closed='left') age_path = '0_20_60/' elif args.age_stratification_size == 9: age_classes = pd.IntervalIndex.from_tuples([(0,10),(10,20),(20,30),(30,40),(40,50),(50,60),(60,70),(70,80),(80,120)], closed='left') age_path = '0_10_20_30_40_50_60_70_80/' elif args.age_stratification_size == 10: age_classes =pd.IntervalIndex.from_tuples([(0,12),(12,18),(18,25),(25,35),(35,45),(45,55),(55,65),(65,75),(75,85),(85,120)], closed='left') age_path = '0_12_18_25_35_45_55_65_75_85/' else: raise ValueError( "age_stratification_size '{0}' is not legitimate. 
Valid options are 3, 9 or 10".format(args.age_stratification_size) ) # ----- # Paths # ----- fig_path = '../../results/analysis/hospital/'+age_path data_path = '../../data/interim/model_parameters/COVID19_SEIQRD/hospitals/' + age_path # Verify that the paths exist and if not, generate them for directory in [fig_path, data_path]: if not os.path.exists(directory): os.makedirs(directory) # ----------------------------- # Helper functions and settings # ----------------------------- plot_fit=False colorscale_okabe_ito = {"orange" : "#E69F00", "light_blue" : "#56B4E9", "green" : "#009E73", "yellow" : "#F0E442", "blue" : "#0072B2", "red" : "#D55E00", "pink" : "#CC79A7", "black" : "#000000"} def adjacent_values(vals, q1, q3): upper_adjacent_value = q3 + (q3 - q1) * 1.5 upper_adjacent_value = np.clip(upper_adjacent_value, q3, vals[-1]) lower_adjacent_value = q1 - (q3 - q1) * 1.5 lower_adjacent_value = np.clip(lower_adjacent_value, vals[0], q1) return lower_adjacent_value, upper_adjacent_value def set_axis_style(ax, labels): ax.xaxis.set_tick_params(direction='out') ax.xaxis.set_ticks_position('bottom') ax.set_xticks(np.arange(1, len(labels) + 1)) ax.set_xticklabels(labels) ax.set_xlim(0.25, len(labels) + 0.75) ax.set_xlabel('Sample name') def fit_weibull(v): sample_size_lst=[] shape_lst=[] loc_lst=[] scale_lst=[] for age_group in v.index.get_level_values(0).unique().values: if isinstance(v[age_group],list): values = [x for x in v[age_group] if (math.isnan(x) == False)] shape, loc, scale = weibull_min.fit(values,floc=0) sample_size_lst.append(len(v[age_group])) else: v[age_group][v[age_group]==0] = 0.01 v = v.dropna() shape, loc, scale = weibull_min.fit(v[age_group].values,floc=0) sample_size_lst.append(len(v[age_group].values)) shape_lst.append(shape) loc_lst.append(loc) scale_lst.append(scale) return sample_size_lst, shape_lst, loc_lst, scale_lst def plot_weibull_fit(v,par,max_val): fig,axes = plt.subplots(nrows=3,ncols=3,sharex=True,figsize=(12,12)) axes = axes.flatten() for idx,age_group in enumerate(v.index.get_level_values(0).unique().values): bins = np.linspace(0, max_val, 10) axes[idx].hist(v[age_group], bins=bins, density=True) x = np.linspace (0.5, max_val, 1000) #y = gamma.pdf(x, a=residence_times[par,'shape'][age_group], loc=residence_times[par,'loc'][age_group], scale=residence_times[par,'scale'][age_group]) y = weibull_min.pdf(x, c=residence_times[par,'shape'][age_group], loc=residence_times[par,'loc'][age_group], scale=residence_times[par,'scale'][age_group]) axes[idx].plot(x,y) axes[idx].text(x=0.70,y=0.82,s='Shape: '+"{:.2f}".format(residence_times[par,'shape'][age_group]) + '\nScale: ' + "{:.2f}".format(residence_times[par,'scale'][age_group]) + '\nLoc: '+ "{:.2f}".format(residence_times[par,'loc'][age_group]), transform=axes[idx].transAxes, fontsize=8) axes[idx].set_title('Age group: ' + str(age_group), fontsize=12) axes[idx].set_xlim([0,max_val]) fig.suptitle(par,fontsize=16) plt.show() plt.close() ####################################################### ## Load and format Sciensano hospital survey dataset ## ####################################################### df = pd.read_csv('COVID19BE_CLINIC.csv') n_orig = df.shape[0] print('The original dataset contains ' + str(n_orig) + ' entries.') # Drop the columns on admission_data and discharge_data --> do this myself df=df.drop(columns=['admission_data','discharge_data']) # Drop the columns with missing age df.dropna(subset=['age'], inplace=True) n_filtering_age = df.shape[0] print(str(n_orig-n_filtering_age) + ' entries were 
removed because the age was missing.') # Only if admission data, discharge data, status of discharge and ICU transfer is known, the data can be used by our model df.dropna(subset=['dt_admission'], inplace=True) df.dropna(subset=['dt_discharge'], inplace=True) df.dropna(subset=['status_discharge'], inplace=True) df.dropna(subset=['ICU_transfer'], inplace=True) df.drop(df[df.status_discharge == 'Autre'].index, inplace=True) df.drop(df[df.status_discharge == 'Inconnu'].index, inplace=True) df.drop(df[df.status_discharge == 'Transfert'].index, inplace=True) n_filtering_dates = df.shape[0] print(str(n_filtering_age-n_filtering_dates) + ' entries were removed because the admission date, discharge date, status at discharge or ICU transfer was missing.') # Convert dates to pd.datetimes df['dt_admission'] = pd.to_datetime(df['dt_admission']) df['dt_admission'] = df['dt_admission'].dt.date df['dt_discharge'] = pd.to_datetime(df['dt_discharge']) df['dt_discharge'] = df['dt_discharge'].dt.date df['dt_onset'] = pd.to_datetime(df['dt_onset']) df['dt_onset'] = df['dt_onset'].dt.date df['dt_icu_transfer'] = pd.to_datetime(df['dt_icu_transfer']) df['dt_icu_transfer'] = df['dt_icu_transfer'].dt.date # Add column with the age classes df['age_class'] = pd.cut(df.age, bins=age_classes) # Remove the negative residence times df.drop(df[((df['dt_discharge'] - df['dt_admission'])/datetime.timedelta(days=1)) < 0].index, inplace=True) # Remove the negative admission to onset times df.drop(df[((df['dt_admission'] - df['dt_onset'])/datetime.timedelta(days=1)) < 0].index, inplace=True) # Remove all residence times larger than 180 days df.drop(df[((df['dt_discharge'] - df['dt_admission'])/datetime.timedelta(days=1)) >= 180].index, inplace=True) n_filtering_times = df.shape[0] print(str(n_filtering_dates-n_filtering_times) + ' entries were removed because the residence time or onset time were negative.') # Drop retirement home patients from dataset exclude_homes = True if exclude_homes: df.drop(df[df.Expo_retirement_home == 'Oui'].index, inplace=True) n_filtering_homes = df.shape[0] print(str(n_filtering_times-n_filtering_homes) + ' additional entries were removed because the patient came from a retirement home.') # Print a summary of the filtering print(str(n_orig-n_filtering_homes)+' entries were removed during filtering. '+str(n_filtering_homes)+' entries remained.') else: # Print a summary of the filtering print(str(n_orig-n_filtering_times)+' entries were removed during filtering. 
'+str(n_filtering_times)+' entries remained.') ################################################### ## Compute fractions: c, m0, m0_{ICU} and m0_{C} ## ################################################### quantiles = [25,75,2.5,97.5] # ------------------------------------------------------ # Initialize dataframe for results and population totals # ------------------------------------------------------ columns = [[],[]] tuples = list(zip(*columns)) columns = pd.MultiIndex.from_tuples(tuples, names=["parameter", "quantity"]) fractions = pd.DataFrame(index=age_classes, columns=columns) averages = pd.DataFrame(index=['population'],columns=columns) # ------------------------------------------- # Compute fraction parameters point estimates # ------------------------------------------- # Sample size fractions['total_sample_size','point estimate']=df.groupby(by='age_class').apply(lambda x: x.age.count()) # Hospitalization propensity fractions['admission_propensity','point estimate']=df.groupby(by='age_class').apply(lambda x: x.age.count())/df.shape[0] # Distribution cohort/icu fractions['c','point estimate'] = df.groupby(by='age_class').apply(lambda x: x[x.ICU_transfer=='Non'].age.count()/x[x.ICU_transfer.isin(['Oui', 'Non'])].age.count()) # Mortalities fractions['m0','point estimate']=df.groupby(by='age_class').apply( lambda x: x[( (x.status_discharge=='D'))].age.count()/ x[x.ICU_transfer.isin(['Oui', 'Non'])].age.count()) fractions['m0_{ICU}','point estimate']= df.groupby(by='age_class').apply( lambda x: x[((x.ICU_transfer=='Oui') & (x.status_discharge=='D'))].age.count()/ x[x.ICU_transfer.isin(['Oui'])].age.count()) fractions['m0_{C}','point estimate']= df.groupby(by='age_class').apply( lambda x: x[((x.ICU_transfer=='Non') & (x.status_discharge=='D'))].age.count()/ x[x.ICU_transfer.isin(['Non'])].age.count()) # ----------------------------- # Bootstrap fraction parameters # ----------------------------- subset_size = args.subset_size n = args.number_iterations # First initialize a numpy array for the results # First axis: parameter: c, m0, m0_C, m0_ICU # Second axis: age group # Third axis: bootstrap sample bootstrap_fractions_age = np.zeros([4, len(age_classes), n]) # Loop over parameters for idx in range(4): for jdx in range(n): smpl = df.groupby(by='age_class').apply(lambda x: x.sample(n=subset_size,replace=True)) smpl=smpl.drop(columns='age_class') if idx == 0: bootstrap_fractions_age[idx,:,jdx] = smpl.groupby(by='age_class').apply(lambda x: x[x.ICU_transfer=='Non'].age.count()/ x[x.ICU_transfer.isin(['Oui', 'Non'])].age.count()).values elif idx == 1: bootstrap_fractions_age[idx,:,jdx] = smpl.groupby(by='age_class').apply(lambda x: x[( (x.status_discharge=='D'))].age.count()/ x[x.ICU_transfer.isin(['Oui', 'Non'])].age.count()).values elif idx == 2: bootstrap_fractions_age[idx,:,jdx] = smpl.groupby(by='age_class').apply(lambda x: x[((x.ICU_transfer=='Non') & (x.status_discharge=='D'))].age.count()/ x[x.ICU_transfer.isin(['Non'])].age.count()).values elif idx == 3: bootstrap_fractions_age[idx,:,jdx] = smpl.groupby(by='age_class').apply(lambda x: x[((x.ICU_transfer=='Oui') & (x.status_discharge=='D'))].age.count()/ x[x.ICU_transfer.isin(['Oui'])].age.count()).values # Compute summary statistics for idx,par in enumerate(['c', 'm0', 'm0_{C}', 'm0_{ICU}']): fractions[par,'bootstrap mean'] = np.median(bootstrap_fractions_age[idx,:,:], axis=1) fractions[par,'bootstrap median'] = np.median(bootstrap_fractions_age[idx,:,:], axis=1) for quantile in quantiles: fractions[par,'bootstrap Q'+str(quantile)] = 
np.quantile(bootstrap_fractions_age[idx,:,:], q=quantile/100, axis=1) # Save raw samples as a .npy with open(data_path+'sciensano_bootstrap_fractions.npy', 'wb') as f: np.save(f,bootstrap_fractions_age) # Compute population average/total point estimate averages['total_sample_size','point estimate'] = fractions['total_sample_size','point estimate'].sum() averages['admission_propensity', 'point estimate'] = sum(((fractions['total_sample_size','point estimate']*fractions['admission_propensity', 'point estimate']).values)/(np.ones(len(age_classes))*fractions['total_sample_size', 'point estimate'].sum())) averages['c', 'point estimate'] = df[df.ICU_transfer=='Non'].age.count()/df[df.ICU_transfer.isin(['Oui', 'Non'])].age.count() averages['m0', 'point estimate'] = df[((df.status_discharge=='D'))].age.count()/df[df.ICU_transfer.isin(['Oui', 'Non'])].age.count() averages['m0_{ICU}', 'point estimate'] = df[((df.ICU_transfer=='Oui') & (df.status_discharge=='D'))].age.count()/df[df.ICU_transfer.isin(['Oui'])].age.count() averages['m0_{C}', 'point estimate'] = df[((df.ICU_transfer=='Non') & (df.status_discharge=='D'))].age.count()/df[df.ICU_transfer.isin(['Non'])].age.count() # Bootstrap total population bootstrap_fractions = np.zeros([4, n]) # Loop over parameters for idx in range(4): for jdx in range(n): smpl = df.sample(n=subset_size,replace=True) if idx == 0: bootstrap_fractions[idx,jdx] = smpl[smpl.ICU_transfer=='Non'].age.count()/smpl[smpl.ICU_transfer.isin(['Oui', 'Non'])].age.count() elif idx == 1: bootstrap_fractions[idx,jdx] = smpl[((smpl.status_discharge=='D'))].age.count()/smpl[smpl.ICU_transfer.isin(['Oui', 'Non'])].age.count() elif idx == 2: bootstrap_fractions[idx,jdx] = smpl[((smpl.ICU_transfer=='Non') & (smpl.status_discharge=='D'))].age.count()/smpl[smpl.ICU_transfer.isin(['Non'])].age.count() elif idx == 3: bootstrap_fractions[idx,jdx] = smpl[((smpl.ICU_transfer=='Oui') & (smpl.status_discharge=='D'))].age.count()/smpl[smpl.ICU_transfer.isin(['Oui'])].age.count() # Compute summary statistics for idx,par in enumerate(['c', 'm0', 'm0_{C}', 'm0_{ICU}']): averages[par,'bootstrap mean'] = np.median(bootstrap_fractions[idx,:]) averages[par,'bootstrap median'] = np.median(bootstrap_fractions[idx,:]) for quantile in quantiles: averages[par,'bootstrap Q'+str(quantile)] = np.quantile(bootstrap_fractions[idx,:], q=quantile/100) # ------------------------------------------- # Perform Mann-Whitney U-tests on mortalities # ------------------------------------------- # Difference in mortality, ICU vs. 
Cohort # Boxplot x = bootstrap_fractions[2,:] y = bootstrap_fractions[3,:] stat, p_tt = ttest_ind(x, y) stat, p_mwu = mannwhitneyu(x, y) fig, ax = plt.subplots(figsize=(8,6)) bp = ax.boxplot([x, y], positions=[1,2]) plt.setp(bp['medians'], color='k') ax.set_ylabel('mortality (-)') ax.set_ylim(0,1) ax.set_xticklabels(['Cohort mortality (N={}) \n median = {:.2f} \n mean = {:.2f}'.format(len(x), np.median(x), np.mean(x)), 'ICU mortality (N={}) \n median = {:.2f} \n mean = {:.2f}'.format(len(y), np.median(y), np.mean(y))]) ax.set_title('Difference in overall mortality, \ntwo-sided t-test: p={:.2e} \nMann-Withney U-test: p={:.2e}'.format(p_tt,p_mwu)) plt.savefig(fig_path+'SCIENSANO_test_mortalities.pdf', dpi=600, bbox_inches='tight',orientation='portrait', papertype='a4') plt.close() # ----------------------------------------------------------------- # Make a violin plot of mortalities in ICU and cohort per age group # ----------------------------------------------------------------- data = [] for idx,age_class in enumerate(age_classes): data.append(bootstrap_fractions_age[2,idx,:]) # Violin plot fig,ax = plt.subplots(figsize=(12,4)) parts = ax.violinplot( data, positions=range(1,len(age_classes)+1), vert=False,showmeans=False, showmedians=False, showextrema=False) for idx,pc in enumerate(parts['bodies']): pc.set_facecolor(colorscale_okabe_ito['green']) pc.set_edgecolor('black') pc.set_alpha(1) quartiles = [25, 50, 75] quartile1 = np.zeros(len(data)) medians = np.zeros(len(data)) quartile3 = np.zeros(len(data)) for i,x in enumerate(data): quartile1[i],medians[i],quartile3[i] = np.percentile(x, quartiles) whiskers = np.array([ adjacent_values(sorted_array, q1, q3) for sorted_array, q1, q3 in zip(data, quartile1, quartile3)]) whiskers_min, whiskers_max = whiskers[:, 0], whiskers[:, 1] inds = np.arange(1, len(medians)+1) ax.scatter( medians, inds, marker='o', color='white', s=30, zorder=3) ax.hlines(inds, quartile1, quartile3, color='k', linestyle='-', lw=5) ax.hlines(inds, whiskers_min, whiskers_max, color='k', linestyle='-', lw=1) data = [] for idx,age_class in enumerate(age_classes): data.append(bootstrap_fractions_age[3,idx,:]) parts = ax.violinplot( data, positions=range(1,len(age_classes)+1), vert=False,showmeans=False, showmedians=False, showextrema=False) for idx,pc in enumerate(parts['bodies']): pc.set_facecolor(colorscale_okabe_ito['red']) pc.set_edgecolor('black') pc.set_alpha(1) quartiles = [25, 50, 75] quartile1 = np.zeros(len(data)) medians = np.zeros(len(data)) quartile3 = np.zeros(len(data)) for i,x in enumerate(data): quartile1[i],medians[i],quartile3[i] =
np.percentile(x, quartiles)
numpy.percentile
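# Illustrative sketch, not from the script above: the fraction parameters
# (c, m0, m0_{C}, m0_{ICU}) are bootstrapped by resampling the patient table
# with replacement, recomputing the ratio, then summarising the samples with
# the median and quantiles. A minimal version for a single proportion; the
# outcome vector and subset size below are hypothetical.
import numpy as np

def bootstrap_fraction(outcomes, n_iter=1000, subset_size=None, rng=None):
    """Bootstrap a proportion: resample with replacement, recompute the mean."""
    rng = np.random.default_rng() if rng is None else rng
    outcomes = np.asarray(outcomes)
    m = len(outcomes) if subset_size is None else subset_size
    samples = np.empty(n_iter)
    for j in range(n_iter):
        samples[j] = rng.choice(outcomes, size=m, replace=True).mean()
    return (np.median(samples),
            np.quantile(samples, 0.025),
            np.quantile(samples, 0.975))

# Hypothetical outcomes for one age class: 1 = died in hospital, 0 = recovered
deaths = np.r_[np.ones(40), np.zeros(160)]   # 20 % point estimate
print(bootstrap_fraction(deaths, n_iter=1000, subset_size=1000))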
from typing import Union

import numpy as np

from objects.basis_functions import BasisFunctions
from tests.objects.polynomial_basis_test import PolynomialBasisFunctionsTest


class BasisFunctionsTest(PolynomialBasisFunctionsTest):

    def setUp(self):
        super().setUp()
        self.basis_fun = BasisFunctions()

    def test_eval(self):
        M = 1000
        x = np.array([1] * M)
        returns = self.basis_fun.eval(x=x)
        self.assertEqual(np.shape(returns)[1], M)
        self.assertEqual(np.shape(returns)[0], 3)

    # TODO test and improve the following 2 methods
    def compute_exp_batch(self, n: float, x: Union[float, np.ndarray], u: Union[float, np.ndarray]):
        out = np.zeros((self.K, self.M, self.U))
        for k in range(self.K):
            # TODO remove this if, should not be necessary
            if k == 0:
                out[k, :, :] = self.basis_function_expectation[k](x, u.T)
            else:
                out[k, :, :] = self.basis_function_expectation[k][0](x, u.T).T
        return out

    def compute_exp(self, n: float, x: Union[float, np.ndarray], u: Union[float, np.ndarray]):
        out =
np.zeros((self.K, self.M))
numpy.zeros
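# Illustrative sketch, not from the test module above: the real BasisFunctions
# class is not shown in this row, so the toy stand-in below only mirrors the
# shape convention that test_eval asserts - eval() returns an array of shape
# (number of basis functions, M) for a batch of M inputs.
import numpy as np

class QuadraticBasis:
    """Toy stand-in: phi(x) = [1, x, x**2], evaluated column-wise for a batch."""

    def eval(self, x):
        x = np.asarray(x, dtype=float)
        return np.vstack([np.ones_like(x), x, x ** 2])  # shape (3, M)

basis = QuadraticBasis()
out = basis.eval(np.ones(1000))
assert out.shape == (3, 1000)   # same check as test_eval above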
import astropy.units as u import numpy as np from lofti_gaia.loftitools import * from lofti_gaia.cFunctions import calcOFTI_C #from loftitools import * import pickle import time import matplotlib.pyplot as plt # Astroquery throws some warnings we can ignore: import warnings warnings.filterwarnings("ignore") '''This module obtaines measurements from Gaia EDR3 (Gaia DR2 is also available as a secondary option) and runs through the LOFTI Gaia/OFTI wide stellar binary orbit fitting technique. ''' class Fitter(object): '''Initialize the Fitter object for the binary system, and compute observational constraints to be used in the orbit fit. User must provide Gaia source ids, tuples of mass estimates for both objects, specify the number of desired orbits in posterior sample. Fit will be for object 2 relative to object 1. Attributes are tuples of (value,uncertainty) unless otherwise indicated. Attributes with astropy units are retrieved from Gaia archive, attributes without units are computed from Gaia values. All relative values are for object 2 relative to object 1. Args: sourceid1, sourceid2 (int): Gaia source ids for the two objects, fit will be for motion of \ object 2 relative to object 1 mass1, mass2 (tuple, flt): tuple os mass estimate for object 1 and 2, of the form (value, uncertainty) Norbits (int): Number of desired orbits in posterior sample. Default = 100000 results_filename (str): Filename for fit results files. If none, results will be written to files \ named FitResults.yr.mo.day.hr.min.s astrometry (dict): User-supplied astrometric measurements. Must be dictionary or table or pandas dataframe with\ column names "sep,seperr,pa,paerr,dates" or "ra,raerr,dec,decerr,dates". May be same as the rv table. \ Sep, deltaRA, and deltaDEC must be in arcseconds, PA in degrees, dates in decimal years. \ Default = None user_rv (dict): User-supplied radial velocity measurements. Must be dictionary or table or pandas dataframe with\ column names "rv,rverr,rv_dates". May be same as the astrometry table. Default = None. catalog (str): name of Gaia catalog to query. Default = 'gaiaedr3.gaia_source' ruwe1, ruwe2 (flt): RUWE value from Gaia archive ref_epoch (flt): reference epoch in decimal years. For Gaia DR2 this is 2015.5, for Gaia EDR3 it is 2016.0 plx1, plx2 (flt): parallax from Gaia in mas RA1, RA2 (flt): right ascension from Gaia; RA in deg, uncertainty in mas Dec1, Dec2 (flt): declination from Gaia; Dec in deg, uncertainty in mas pmRA1, pmRA2 (flt): proper motion in RA in mas yr^-1 from Gaia pmDec1, pmDec2 (flt): proper motion in DEC in mas yr^-1 from Gaia rv1, rv2 (flt, optional): radial velocity in km s^-1 from Gaia rv (flt, optional): relative RV of 2 relative to 1, if both are present in Gaia plx (flt): weighted mean parallax for the binary system in mas distance (flt): distance of system in pc, computed from Gaia parallax using method \ of Bailer-Jones et. al 2018. deltaRA, deltaDec (flt): relative separation in RA and Dec directions, in mas pmRA, pmDec (flt): relative proper motion in RA/Dec directions in km s^-1 sep (flt): total separation vector in mas pa (flt): postion angle of separation vector in degrees from North sep_au (flt): separation in AU sep_km (flt): separation in km total_vel (flt): total velocity vector in km s^-1. If RV is available for both, \ this is the 3d velocity vector; if not it is just the plane of sky velocity. total_planeofsky_vel (flt): total velocity in the plane of sky in km s^-1. \ In the absence of RV this is equivalent to the total velocity vector. 
deltaGmag (flt): relative contrast in Gaia G magnitude. Does not include uncertainty. inflateProperMOtionError (flt): an optional factor to mulitply default gaia proper motion error by. Written by <NAME>, 2020 ''' def __init__(self, sourceid1, sourceid2, mass1, mass2, Norbits = 100000, \ results_filename = None, astrometry = None, user_rv = None, catalog = 'gaiaedr3.gaia_source', inflateProperMotionError=1 ): self.sourceid1 = sourceid1 self.sourceid2 = sourceid2 try: self.mass1 = mass1[0] self.mass1err = mass1[1] self.mass2 = mass2[0] self.mass2err = mass2[1] self.mtot = [self.mass1 + self.mass2, np.sqrt((self.mass1err**2) + (self.mass2err**2))] except: raise ValueError('Masses must be tuples of (value,error), ex: mass1 = (1.0,0.05)') self.Norbits = Norbits if not results_filename: self.results_filename = 'FitResults.'+time.strftime("%Y.%m.%d.%H.%M.%S")+'.txt' self.stats_filename = 'FitResults.Stats.'+time.strftime("%Y.%m.%d.%H.%M.%S")+'.txt' else: self.results_filename = results_filename self.stats_filename = results_filename+'.Stats.txt' self.astrometry = False # check if user supplied astrometry: if astrometry is not None: # if so, set astrometric flag to True: self.astrometry = True # store observation dates: self.astrometric_dates = astrometry['dates'] # if in sep/pa, convert to ra/dec: if 'sep' in astrometry: try: astr_ra = [MonteCarloIt([astrometry['sep'][i],astrometry['seperr'][i]]) * \ np.sin(np.radians(MonteCarloIt([astrometry['pa'][i],astrometry['paerr'][i]]))) \ for i in range(len(astrometry['sep']))] astr_dec = [MonteCarloIt([astrometry['sep'][i],astrometry['seperr'][i]]) * \ np.cos(np.radians(MonteCarloIt([astrometry['pa'][i],astrometry['paerr'][i]]))) \ for i in range(len(astrometry['sep']))] self.astrometric_ra = np.array([ [np.mean(astr_ra[i]) for i in range(len(astrometry['sep']))], [np.std(astr_ra[i]) for i in range(len(astrometry['sep']))] ]) self.astrometric_dec = np.array([ [np.mean(astr_dec[i]) for i in range(len(astrometry['sep']))], [np.std(astr_dec[i]) for i in range(len(astrometry['sep']))] ]) except: raise ValueError('Astrometry keys not recognized. Please provide dictionary or table or pandas dataframe with\ column names "sep,seperr,pa,paerr,dates" or "ra,raerr,dec,decerr,dates"') elif 'ra' in astrometry: # else store the ra/dec as attributes: try: self.astrometric_ra = np.array([astrometry['ra'], astrometry['raerr']]) self.astrometric_dec = np.array([astrometry['dec'], astrometry['decerr']]) except: raise ValueError('Astrometry keys not recognized. Please provide dictionary or table or pandas dataframe with\ column names "sep,seperr,pa,paerr,dates" or "ra,raerr,dec,decerr,dates"') else: raise ValueError('Astrometry keys not recognized. Please provide dictionary or table or pandas dataframe with\ column names "sep,seperr,pa,paerr,dates" or "ra,raerr,dec,decerr,dates"') # Check if user supplied rv: self.use_user_rv = False if user_rv is not None: # set user rv flag to true: self.use_user_rv = True try: # set attributes; multiply rv by -1 due to difference in coordinate systems: self.user_rv = np.array([user_rv['rv']*-1,user_rv['rverr']]) self.user_rv_dates = np.array(user_rv['rv_dates']) except: raise ValueError('RV keys not recognized. Please use column names "rv,rverr,rv_dates"') self.catalog = catalog # Get Gaia measurements, compute needed constraints, and add to object: self.PrepareConstraints(catalog=self.catalog,inflateFactor=inflateProperMotionError) def edr3ToICRF(self,pmra,pmdec,ra,dec,G): ''' Corrects for biases in proper motion. 
The function is from https://arxiv.org/pdf/2103.07432.pdf Args: pmra,pmdec (float): proper motion ra, dec (float): right ascension and declination G (float): G magnitude Written by <NAME>, 2021 ''' if G>=13: return pmra , pmdec import numpy as np def sind(x): return np.sin(np.radians(x)) def cosd(x): return np.cos(np.radians(x)) table1=""" 0.0 9.0 9.0 9.5 9.5 10.0 10.0 10.5 10.5 11.0 11.0 11.5 11.5 11.75 11.75 12.0 12.0 12.25 12.25 12.5 12.5 12.75 12.75 13.0 18.4 33.8 -11.3 14.0 30.7 -19.4 12.8 31.4 -11.8 13.6 35.7 -10.5 16.2 50.0 2.1 19.4 59.9 0.2 21.8 64.2 1.0 17.7 65.6 -1.9 21.3 74.8 2.1 25.7 73.6 1.0 27.3 76.6 0.5 34.9 68.9 -2.9 """ table1 = np.fromstring(table1,sep=" ").reshape((12,5)).T Gmin = table1[0] Gmax = table1[1] #pick the appropriate omegaXYZ for the source’s magnitude: omegaX = table1[2][(Gmin<=G)&(Gmax>G)][0] omegaY = table1[3][(Gmin<=G)&(Gmax>G)][0] omegaZ = table1[4][(Gmin<=G)&(Gmax>G)][0] pmraCorr = -1*sind(dec)*cosd(ra)*omegaX -sind(dec)*sind(ra)*omegaY + cosd(dec)*omegaZ pmdecCorr = sind(ra)*omegaX -cosd(ra)*omegaY return pmra-pmraCorr/1000., pmdec-pmdecCorr/1000. def PrepareConstraints(self, rv=False, catalog='gaiaedr3.gaia_source', inflateFactor=1.): '''Retrieves parameters for both objects from Gaia EDR3 archive and computes system attriubtes, and assigns them to the Fitter object class. Args: rv (bool): flag for handling the presence or absence of RV measurements for both objects \ in EDR3. Gets set to True if both objects have Gaia RV measurements. Default = False catalog (str): name of Gaia catalog to query. Default = 'gaiaedr3.gaia_source' inflateFactor (flt): Factor by which to inflate the errors on Gaia proper motions to \ account for improper uncertainty estimates. Default = 1.0 Written by <NAME>, 2020 ''' from astroquery.gaia import Gaia deg_to_mas = 3600000. mas_to_deg = 1./3600000. # Retrieve astrometric solution from Gaia EDR3 job = Gaia.launch_job("SELECT * FROM "+catalog+" WHERE source_id = "+str(self.sourceid1)) j = job.get_results() job = Gaia.launch_job("SELECT * FROM "+catalog+" WHERE source_id = "+str(self.sourceid2)) k = job.get_results() if catalog == 'gaiadr2.gaia_source': # Retrieve RUWE from RUWE catalog for both sources and add to object state: job = Gaia.launch_job("SELECT * FROM gaiadr2.ruwe WHERE source_id = "+str(self.sourceid1)) jruwe = job.get_results() job = Gaia.launch_job("SELECT * FROM gaiadr2.ruwe WHERE source_id = "+str(self.sourceid2)) kruwe = job.get_results() self.ruwe1 = jruwe['ruwe'][0] self.ruwe2 = kruwe['ruwe'][0] else: # EDR3 contains ruwe in the main catalog: self.ruwe1 = j['ruwe'][0] self.ruwe2 = k['ruwe'][0] # Check RUWE for both objects and warn if too high: if self.ruwe1>1.2 or self.ruwe2>1.2: print('''WARNING: RUWE for one or more of your solutions is greater than 1.2. This indicates that the source might be an unresolved binary or experiencing acceleration during the observation. 
Orbit fit results may not be trustworthy.''') # reference epoch: self.ref_epoch = j['ref_epoch'][0] # parallax: self.plx1 = [j[0]['parallax']*u.mas, j[0]['parallax_error']*u.mas] self.plx2 = [k[0]['parallax']*u.mas, k[0]['parallax_error']*u.mas] # RA/DEC self.RA1 = [j[0]['ra']*u.deg, j[0]['ra_error']*mas_to_deg*u.deg] self.RA2 = [k[0]['ra']*u.deg, k[0]['ra_error']*mas_to_deg*u.deg] self.Dec1 = [j[0]['dec']*u.deg, j[0]['dec_error']*mas_to_deg*u.deg] self.Dec2 = [k[0]['dec']*u.deg, k[0]['dec_error']*mas_to_deg*u.deg] # Proper motions pmRACorrected1,pmDecCorrected1 = self.edr3ToICRF(j[0]['pmra'],j[0]['pmdec'],j[0]['ra'],j[0]['dec'],j[0]["phot_g_mean_mag"]) pmRACorrected2,pmDecCorrected2 = self.edr3ToICRF(k[0]['pmra'],k[0]['pmdec'],k[0]['ra'],k[0]['dec'],k[0]["phot_g_mean_mag"]) self.pmRA1 = [pmRACorrected1*u.mas/u.yr, j[0]['pmra_error']*u.mas/u.yr*inflateFactor] self.pmRA2 = [pmRACorrected2*u.mas/u.yr, k[0]['pmra_error']*u.mas/u.yr*inflateFactor] self.pmDec1 = [pmDecCorrected1*u.mas/u.yr, j[0]['pmdec_error']*u.mas/u.yr*inflateFactor] self.pmDec2 = [pmDecCorrected2*u.mas/u.yr, k[0]['pmdec_error']*u.mas/u.yr*inflateFactor] # See if both objects have RV's in DR2: if catalog == 'gaiaedr3.gaia_source': key = 'dr2_radial_velocity' error_key = 'dr2_radial_velocity_error' elif catalog == 'gaiadr2.gaia_source': key = 'radial_velocity' error_key = 'radial_velocity_error' if type(k[0][key]) == np.float64 and type(j[0][key]) == np.float64 or type(k[0][key]) == np.float32 and type(j[0][key]) == np.float32: rv = True self.rv1 = [j[0][key]*u.km/u.s,j[0][error_key]*u.km/u.s] self.rv2 = [k[0][key]*u.km/u.s,k[0][error_key]*u.km/u.s] rv1 = MonteCarloIt(self.rv1) rv2 = MonteCarloIt(self.rv2) self.rv = [ -np.mean(rv2-rv1) , np.std(rv2-rv1) ] # km/s # negative to relfect change in coordinate system from RV measurements to lofti # pos RV = towards observer in this coord system else: self.rv = [0,0] # weighted mean of parallax values: plx = np.average([self.plx1[0].value,self.plx2[0].value], weights = [self.plx1[1].value,self.plx2[1].value]) plxerr = np.max([self.plx1[1].value,self.plx2[1].value]) self.plx = [plx,plxerr] # mas self.distance = distance(*self.plx) # pc # Compute separations of component 2 relative to 1: r1 = MonteCarloIt(self.RA1) r2 = MonteCarloIt(self.RA2) d1 = MonteCarloIt(self.Dec1) d2 = MonteCarloIt(self.Dec2) ra = (r2*deg_to_mas - r1*deg_to_mas) * np.cos(np.radians(np.mean([self.Dec1[0].value,self.Dec2[0].value]))) dec = ((d2 - d1)*u.deg).to(u.mas).value self.deltaRA = [np.mean(ra),np.std(ra)] # mas self.deltaDec = [np.mean(dec),np.std(dec)] # mas # compute relative proper motion: pr1 = MonteCarloIt(self.pmRA1) pr2 = MonteCarloIt(self.pmRA2) pd1 = MonteCarloIt(self.pmDec1) pd2 = MonteCarloIt(self.pmDec2) pmRA = [np.mean(pr2 - pr1), np.std(pr2-pr1)] # mas/yr pmDec = [np.mean(pd2 - pd1), np.std(pd2 - pd1)] # mas/yr self.pmRA = masyr_to_kms(pmRA,self.plx) # km/s self.pmDec = masyr_to_kms(pmDec,self.plx) # km/s # Compute separation/position angle: r, p = to_polar(r1,r2,d1,d2) self.sep = tuple([np.mean(r).value, np.std(r).value]) # mas self.pa = tuple([np.mean(p).value, np.std(p).value]) # deg self.sep_au = tuple([((self.sep[0]/1000)*self.distance[0]), ((self.sep[1]/1000)*self.distance[0])]) self.sep_km = tuple([ self.sep_au[0]*u.au.to(u.km) , self.sep_au[1]*u.au.to(u.km)]) # compute total velocities: if rv: self.total_vel = [ add_in_quad([self.pmRA[0],self.pmDec[0],self.rv[0]]) , add_in_quad([self.pmRA[1],self.pmDec[1],self.rv[1]]) ] # km/s self.total_planeofsky_vel = [ 
add_in_quad([self.pmRA[0],self.pmDec[0]]) , add_in_quad([self.pmRA[1],self.pmDec[1]]) ] # km/s else: self.total_vel = [ add_in_quad([self.pmRA[0],self.pmDec[0]]) , add_in_quad([self.pmRA[1],self.pmDec[1]]) ] # km/s self.total_planeofsky_vel = self.total_vel.copy() # km/s # compute deltamag: self.deltaGmag = j[0]['phot_g_mean_mag'] - k[0]['phot_g_mean_mag'] class FitOrbit(object): ''' Object for performing an orbit fit. Takes attributes from Fitter class. ex: orbits = FitOrbit(fitterobject) Args: fitterobject (Fitter object): Fitter object initialized from the Fitter class write_stats (bool): If True, write out summary statistics of orbit sample at \ conclusion of fit. Default = True. write_results (bool): If True, write out the fit results to a pickle file \ in addition to the text file created during the fit. Default = True. deltaRA, deltaDec (flt): relative separation in RA and Dec directions, in mas pmRA, pmDec (flt): relative proper motion in RA/Dec directions in km s^-1 rv (flt, optional): relative RV of 2 relative to 1, if both are present in Gaia EDR3 mtot_init (flt): initial total system mass in Msun from user input distance (flt): distance of system in pc, computed from Gaia parallax using method of Bailer-Jones et. al 2018. sep (flt): separation vector in mas pa (flt): postion angle of separation vector in degrees from North ref_epoch (flt): epoch of the measurement, 2016.0 for Gaia EDR3 and 2015.5 for Gaia DR2. Norbits (int): number of desired orbit samples write_stats (bool): if True, write summary of sample statistics to human-readable file at end of run. Default = True write_results (bool): if True, write out current state of sample orbits in pickle file in periodic intervals during \ run, and again at the end of the run. RECOMMENDED. Default = True results_filename (str): name of file for saving pickled results to disk. If not supplied, \ defaul name is FitResults.y.mo.d.h.m.s.pkl, saved in same directory as fit was run. stats_filename (str): name of file for saving human-readable file of stats of sample results. If not supplied, \ defaul name is FitResults.Stats.y.mo.d.h.m.s.pkl, saved in same directory as fit was run. run_time (flt): run time for the last fit. 
astropy units object Written by <NAME>, 2020 ''' def __init__(self, fitterobject, write_stats = True, write_results = True, python_version=False, \ use_pm_cross_term = False, corr_coeff = None): # establish fit parameters: self.deltaRA = fitterobject.deltaRA self.deltaDec = fitterobject.deltaDec self.pmRA = fitterobject.pmRA self.pmDec = fitterobject.pmDec self.rv = fitterobject.rv self.mtot_init = fitterobject.mtot self.distance = fitterobject.distance self.sep = fitterobject.sep self.pa = fitterobject.pa self.ref_epoch = fitterobject.ref_epoch self.Norbits = fitterobject.Norbits self.write_results = write_results self.write_stats = write_stats self.results_filename = fitterobject.results_filename self.stats_filename = fitterobject.stats_filename self.astrometry = fitterobject.astrometry if self.astrometry: self.astrometric_ra = fitterobject.astrometric_ra self.astrometric_dec = fitterobject.astrometric_dec self.astrometric_dates = fitterobject.astrometric_dates self.use_user_rv = fitterobject.use_user_rv if self.use_user_rv: self.user_rv = fitterobject.user_rv self.user_rv_dates = fitterobject.user_rv_dates # run orbit fitter: self.fitorbit(python_fitOFTI=python_version, use_pm_cross_term = use_pm_cross_term, corr_coeff = corr_coeff) def fitorbit(self, save_results_every_X_loops = 100, python_fitOFTI=False, use_pm_cross_term = False, corr_coeff = None): '''Run the OFTI fitting run on the Fitter object. Called when FitOrbit object is created. Args: save_results_every_X_loops (int): on every Xth loop, save status of the \ orbit sample arrays to a pickle file, if write_results = True (Default) python_fitOFTI (bool): If True, fit using python only without using C Kepler's equation solver. Default = False use_pm_cross_term (bool): If True, include the proper motion correlation cross term in the Chi^2 computation \ Default = False Written by <NAME>, 2020 ''' # write header: print('Saving orbits in',self.results_filename) k = open(self.results_filename, 'w') output_file_header = '# sma [arcsec] period [yrs] orbit phase t_0 [yr] ecc incl [deg]\ argp [deg] lan [deg] m_tot [Msun] dist [pc] chi^2 ln(prob) ln(randn)' k.write(output_file_header + "\n") k.close() import time as tm ########### Perform initial run to get initial chi-squared: ############# # Draw random orbits: #parameters = a,T,const,to,e,i,w,O,m1,dist numSamples = 10000 parameters_init = draw_samples(numSamples, self.mtot_init, self.distance, self.ref_epoch) # Compute positions and velocities: if(python_fitOFTI): X,Y,Z,Xdot,Ydot,Zdot,Xddot,Yddot,Zddot,parameters=calc_OFTI(parameters_init,self.ref_epoch,self.sep,self.pa) else: returnArray = np.zeros((19,numSamples)) returnArray = calcOFTI_C(parameters_init,self.ref_epoch,self.sep,self.pa,returnArray.copy()) X,Y,Z,Xdot,Ydot,Zdot,Xddot,Yddot,Zddot = returnArray[0:9] parameters = returnArray[9:] # Compute chi squared: if self.rv[0] != 0: model = np.array([Y,X,Ydot,Xdot,Zdot]) data = np.array([self.deltaRA, self.deltaDec, self.pmRA, self.pmDec, self.rv]) else: model = np.array([Y,X,Ydot,Xdot]) data = np.array([self.deltaRA, self.deltaDec, self.pmRA, self.pmDec]) chi2 = ComputeChi2(data,model) if use_pm_cross_term: chi2 -= ( 2 * corr_coeff * (data[2][0] - model[2]) * (data[3][0] - model[3]) ) / (data[2][1] * data[3][1]) if self.astrometry: p = parameters.copy() a,T,const,to,e,i,w,O,m1,dist = p[0],p[1],p[2],p[3],p[4],p[5],p[6],p[7],p[8],p[9] chi2_astr = np.zeros(10000) # Calculate predicted positions at astr observation dates for each orbit: for j in range(self.astrometric_ra.shape[1]): # for 
each date, compute XYZ for each 10000 trial orbit. We can # skip scale and rotate because that was accomplished in the calc_OFTI call above. X1,Y1,Z1,E1 = calc_XYZ(a,T,to,e,i,w,O,self.astrometric_dates[j]) # Place astrometry into data array where: data[0][0]=ra obs, data[0][1]=ra err, etc: data = np.array([self.astrometric_ra[:,j], self.astrometric_dec[:,j]]) # place corresponding predicited positions at that date for each trial orbit in arcsec: model = np.array([Y1*1000,X1*1000]) # compute chi2 for trial orbits at that date and add to the total chi2 sum: chi2_astr += ComputeChi2(data,model) chi2 = chi2 + chi2_astr if self.use_user_rv: p = parameters.copy() a,T,const,to,e,i,w,O,m1,dist = p[0],p[1],p[2],p[3],p[4],p[5],p[6],p[7],p[8],p[9] chi2_rv = np.zeros(10000) for j in range(self.user_rv.shape[1]): # compute ecc anomaly at that date: X1,Y1,Z1,E1 = calc_XYZ(a,T,to,e,i,w,O,self.user_rv_dates[j]) # compute velocities at that ecc anom: Xdot,Ydot,Zdot = calc_velocities(a,T,to,e,i,w,O,dist,E1) # compute chi2: chi2_rv += ComputeChi2(np.array([self.user_rv[:,j]]),np.array([Zdot])) chi2 = chi2 + chi2_rv print('inital chi min',np.nanmin(chi2)) self.chi_min = np.nanmin(chi2) # Accept/reject: accepted, lnprob, lnrand = AcceptOrReject(chi2,self.chi_min) # count number accepted: number_orbits_accepted = np.size(accepted) # tack on chi2, log probability, log random unif number to parameters array: parameters = np.concatenate((parameters,chi2[None,:],lnprob[None,:],lnrand[None,:]), axis = 0) # transpose: parameters=np.transpose(parameters) # write results to file: k = open(self.results_filename, 'a') for params in parameters[accepted]: string = ' '.join([str(p) for p in params]) k.write(string + "\n") k.close() ###### start loop ######## # initialize: loop_count = 0 start=tm.time() while number_orbits_accepted < self.Norbits: # Draw random orbits: numSamples = 10000 parameters_init = draw_samples(numSamples, self.mtot_init, self.distance, self.ref_epoch) # Compute positions and velocities and new parameters array with scaled and rotated values: if(python_fitOFTI): X,Y,Z,Xdot,Ydot,Zdot,Xddot,Yddot,Zddot,parameters=calc_OFTI(parameters_init,self.ref_epoch,self.sep,self.pa) else: returnArray = np.zeros((19,numSamples)) returnArray = calcOFTI_C(parameters_init,self.ref_epoch,self.sep,self.pa,returnArray.copy()) X,Y,Z,Xdot,Ydot,Zdot,Xddot,Yddot,Zddot = returnArray[0:9] parameters = returnArray[9:] returnArray = None # compute chi2 for orbits using Gaia observations: if self.rv[0] != 0: model = np.array([Y,X,Ydot,Xdot,Zdot]) data = np.array([self.deltaRA, self.deltaDec, self.pmRA, self.pmDec, self.rv]) else: model = np.array([Y,X,Ydot,Xdot]) data = np.array([self.deltaRA, self.deltaDec, self.pmRA, self.pmDec]) chi2 = ComputeChi2(data,model) if use_pm_cross_term: chi2 -= ( 2 * (data[2][0] - model[2]) * (data[3][0] - model[3]) ) / (data[2][1] * data[3][1]) # add user astrometry if given: if self.astrometry: p = parameters.copy() a,T,const,to,e,i,w,O,m1,dist = p[0],p[1],p[2],p[3],p[4],p[5],p[6],p[7],p[8],p[9] chi2_astr = np.zeros(10000) # Calculate predicted positions at astr observation dates for each orbit: for j in range(self.astrometric_ra.shape[1]): # for each date, compute XYZ for each 10000 trial orbit. We can # skip scale and rotate because that was accomplished in the calc_OFTI call above. 
X1,Y1,Z1,E1 = calc_XYZ(a,T,to,e,i,w,O,self.astrometric_dates[j]) # Place astrometry into data array where: data[0][0]=ra obs, data[0][1]=ra err, etc: data = np.array([self.astrometric_ra[:,j], self.astrometric_dec[:,j]]) # place corresponding predicited positions at that date for each trial orbit: model = np.array([Y1*1000,X1*1000]) # compute chi2 for trial orbits at that date and add to the total chi2 sum: chi2_astr += ComputeChi2(data,model) chi2 = chi2 + chi2_astr # add user rv if given: if self.use_user_rv: p = parameters.copy() a,T,const,to,e,i,w,O,m1,dist = p[0],p[1],p[2],p[3],p[4],p[5],p[6],p[7],p[8],p[9] chi2_rv = np.zeros(10000) for j in range(self.user_rv.shape[1]): # compute ecc anomaly at that date: X1,Y1,Z1,E1 = calc_XYZ(a,T,to,e,i,w,O,self.user_rv_dates[j]) # compute velocities at that ecc anom: Xdot,Ydot,Zdot = calc_velocities(a,T,to,e,i,w,O,dist,E1) # compute chi2: chi2_rv += ComputeChi2(np.array([self.user_rv[:,j]]),np.array([Zdot])) chi2 = chi2 + chi2_rv # Accept/reject: accepted, lnprob, lnrand = AcceptOrReject(chi2,self.chi_min) if np.size(accepted) == 0: pass else: # count num accepted p = parameters.copy() a,T,const,to,e,i,w,O,m1,dist = p[0],p[1],p[2],p[3],p[4],p[5],p[6],p[7],p[8],p[9] sampleResults = calc_XYZ(a,T,to,e,i/180*np.pi,w/180*np.pi,O/180*np.pi,2016.0) number_orbits_accepted +=
np.size(accepted)
numpy.size
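# --- Added illustration (not part of the original snippet) ---
# A self-contained sketch of the bright-star frame-rotation correction implemented in
# edr3ToICRF above (Cantat-Gaudin & Brandt 2021, arXiv:2103.07432). The omegaXYZ triplet
# is the 9.0 <= G < 9.5 row quoted in the table above; the function name, the star's
# coordinates, proper motion, and magnitude are made-up values used only for demonstration.
import numpy as np

def rotation_correction(pmra, pmdec, ra, dec, G, omega_xyz):
    """Remove the magnitude-dependent spin (omega in micro-arcsec/yr) from an EDR3
    proper motion given in mas/yr; sources with G >= 13 are returned unchanged."""
    if G >= 13:
        return pmra, pmdec
    omega_x, omega_y, omega_z = omega_xyz
    sd, cd = np.sin(np.radians(dec)), np.cos(np.radians(dec))
    sa, ca = np.sin(np.radians(ra)), np.cos(np.radians(ra))
    pmra_corr = -sd * ca * omega_x - sd * sa * omega_y + cd * omega_z
    pmdec_corr = sa * omega_x - ca * omega_y
    # table values are in micro-arcsec/yr, proper motions in mas/yr, hence the /1000
    return pmra - pmra_corr / 1000.0, pmdec - pmdec_corr / 1000.0

print(rotation_correction(25.0, -12.0, ra=150.0, dec=-30.0, G=9.2,
                          omega_xyz=(14.0, 30.7, -19.4)))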
import numpy as np

from text_selection.kld.kld_iterator import get_minimun_indices


def test_one_entry__returns_zero():
    array = np.array([1.2], dtype=np.float64)
    min_value, min_indices = get_minimun_indices(array)

    assert min_value == 1.2
    np.testing.assert_array_equal(min_indices, np.array([0]))


def test_two_same_entries__returns_zero_and_one():
    array = np.array([1.2, 1.2], dtype=np.float64)
    min_value, min_indices = get_minimun_indices(array)

    assert min_value == 1.2
    np.testing.assert_array_equal(min_indices, np.array([0, 1]))


def test_two_different_entries__returns_min():
    array = np.array([1, 0.2], dtype=np.float64)
    min_value, min_indices = get_minimun_indices(array)

    assert min_value == 0.2
    np.testing.assert_array_equal(min_indices, np.array([1]))


def test_two_same_entries_with_one_different_entry__returns_min():
    array = np.array([0.2, 1, 0.2], dtype=np.float64)
    min_value, min_indices = get_minimun_indices(array)

    assert min_value == 0.2
    np.testing.assert_array_equal(min_indices, np.array([0, 2]))


def test_inf_and_one__returns_min():
    array = np.array([np.inf, 1.2], dtype=np.float64)
    min_value, min_indices = get_minimun_indices(array)

    assert min_value == 1.2
    np.testing.assert_array_equal(min_indices,
np.array([1])
numpy.array
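# --- Added illustration (not part of the original snippet) ---
# The tests above exercise get_minimun_indices from text_selection.kld.kld_iterator, whose
# implementation is not shown here. The function below is only a plausible reference sketch
# that satisfies these tests: it returns the minimum value together with the indices of every
# entry equal to that minimum (np.inf entries lose against any finite value, as in the last test).
import numpy as np

def get_minimum_indices_sketch(array: np.ndarray):
    min_value = array.min()
    min_indices = np.flatnonzero(array == min_value)
    return min_value, min_indices

# mirrors test_two_same_entries_with_one_different_entry__returns_min above
value, indices = get_minimum_indices_sketch(np.array([0.2, 1, 0.2], dtype=np.float64))
assert value == 0.2
np.testing.assert_array_equal(indices, np.array([0, 2]))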
import numpy as np
import sys
from random import randint


class MazeMap:
    """A Class that establishes a maze's environment"""

    def __init__(self, dimensions, wall=-1, blank=0, goal=-2):
        # Tuple argument for the length and width of the maze
        self.dims = dimensions
        # By default, maze will generate as blank
        self.maze =
np.zeros(self.dims, dtype=int)
numpy.zeros
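# --- Added illustration (not part of the original snippet) ---
# Hypothetical usage of the MazeMap fragment above, assuming its __init__ finishes with the
# np.zeros(...) completion shown, so that self.maze starts as an all-blank integer grid.
# The wall (-1) and goal (-2) markers are written in manually here purely for illustration.
maze = MazeMap((4, 6))
maze.maze[0, :] = -1        # top row becomes walls (wall default is -1)
maze.maze[3, 5] = -2        # bottom-right cell becomes the goal (goal default is -2)
print(maze.maze)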
""" Tests for smoothing and estimation of unobserved states and disturbances - Predicted states: :math:`E(\alpha_t | Y_{t-1})` - Filtered states: :math:`E(\alpha_t | Y_t)` - Smoothed states: :math:`E(\alpha_t | Y_n)` - Smoothed disturbances :math:`E(\varepsilon_t | Y_n), E(\eta_t | Y_n)` Tested against R (FKF, KalmanRun / KalmanSmooth), Stata (sspace), and MATLAB (ssm toolbox) Author: <NAME> License: Simplified-BSD """ from __future__ import division, absolute_import, print_function import numpy as np import pandas as pd import os from statsmodels import datasets from statsmodels.tsa.statespace import mlemodel, sarimax from statsmodels.tsa.statespace.tools import compatibility_mode from statsmodels.tsa.statespace.kalman_filter import ( FILTER_CONVENTIONAL, FILTER_COLLAPSED, FILTER_UNIVARIATE) from statsmodels.tsa.statespace.kalman_smoother import ( SMOOTH_CONVENTIONAL, SMOOTH_CLASSICAL, SMOOTH_ALTERNATIVE, SMOOTH_UNIVARIATE) from numpy.testing import assert_allclose, assert_almost_equal, assert_equal, assert_raises from nose.exc import SkipTest current_path = os.path.dirname(os.path.abspath(__file__)) class TestStatesAR3(object): @classmethod def setup_class(cls, alternate_timing=False, *args, **kwargs): # Dataset / Stata comparison path = current_path + os.sep + 'results/results_wpi1_ar3_stata.csv' cls.stata = pd.read_csv(path) cls.stata.index = pd.date_range(start='1960-01-01', periods=124, freq='QS') # Matlab comparison path = current_path + os.sep+'results/results_wpi1_ar3_matlab_ssm.csv' matlab_names = [ 'a1', 'a2', 'a3', 'detP', 'alphahat1', 'alphahat2', 'alphahat3', 'detV', 'eps', 'epsvar', 'eta', 'etavar' ] cls.matlab_ssm = pd.read_csv(path, header=None, names=matlab_names) cls.model = sarimax.SARIMAX( cls.stata['wpi'], order=(3, 1, 0), simple_differencing=True, hamilton_representation=True, *args, **kwargs ) if alternate_timing: cls.model.ssm.timing_init_filtered = True # Parameters from from Stata's sspace MLE estimation params = np.r_[.5270715, .0952613, .2580355, .5307459] cls.results = cls.model.smooth(params, cov_type='none') # Calculate the determinant of the covariance matrices (for easy # comparison to other languages without having to store 2-dim arrays) cls.results.det_predicted_state_cov = np.zeros((1, cls.model.nobs)) cls.results.det_smoothed_state_cov = np.zeros((1, cls.model.nobs)) for i in range(cls.model.nobs): cls.results.det_predicted_state_cov[0, i] = np.linalg.det( cls.results.filter_results.predicted_state_cov[:, :, i]) cls.results.det_smoothed_state_cov[0, i] = np.linalg.det( cls.results.smoother_results.smoothed_state_cov[:, :, i]) if not compatibility_mode: # Perform simulation smoothing n_disturbance_variates = ( (cls.model.k_endog + cls.model.ssm.k_posdef) * cls.model.nobs ) cls.sim = cls.model.simulation_smoother(filter_timing=0) cls.sim.simulate( disturbance_variates=np.zeros(n_disturbance_variates), initial_state_variates=np.zeros(cls.model.k_states) ) def test_predict_obs(self): assert_almost_equal( self.results.filter_results.predict().forecasts[0], self.stata.ix[1:, 'dep1'], 4 ) def test_standardized_residuals(self): assert_almost_equal( self.results.filter_results.standardized_forecasts_error[0], self.stata.ix[1:, 'sr1'], 4 ) def test_predicted_states(self): assert_almost_equal( self.results.filter_results.predicted_state[:, :-1].T, self.stata.ix[1:, ['sp1', 'sp2', 'sp3']], 4 ) assert_almost_equal( self.results.filter_results.predicted_state[:, :-1].T, self.matlab_ssm[['a1', 'a2', 'a3']], 4 ) def test_predicted_states_cov(self): 
assert_almost_equal( self.results.det_predicted_state_cov.T, self.matlab_ssm[['detP']], 4 ) def test_filtered_states(self): assert_almost_equal( self.results.filter_results.filtered_state.T, self.stata.ix[1:, ['sf1', 'sf2', 'sf3']], 4 ) def test_smoothed_states(self): assert_almost_equal( self.results.smoother_results.smoothed_state.T, self.stata.ix[1:, ['sm1', 'sm2', 'sm3']], 4 ) assert_almost_equal( self.results.smoother_results.smoothed_state.T, self.matlab_ssm[['alphahat1', 'alphahat2', 'alphahat3']], 4 ) def test_smoothed_states_cov(self): assert_almost_equal( self.results.det_smoothed_state_cov.T, self.matlab_ssm[['detV']], 4 ) def test_smoothed_measurement_disturbance(self): assert_almost_equal( self.results.smoother_results.smoothed_measurement_disturbance.T, self.matlab_ssm[['eps']], 4 ) def test_smoothed_measurement_disturbance_cov(self): res = self.results.smoother_results assert_almost_equal( res.smoothed_measurement_disturbance_cov[0].T, self.matlab_ssm[['epsvar']], 4 ) def test_smoothed_state_disturbance(self): assert_almost_equal( self.results.smoother_results.smoothed_state_disturbance.T, self.matlab_ssm[['eta']], 4 ) def test_smoothed_state_disturbance_cov(self): assert_almost_equal( self.results.smoother_results.smoothed_state_disturbance_cov[0].T, self.matlab_ssm[['etavar']], 4 ) class TestStatesAR3AlternateTiming(TestStatesAR3): @classmethod def setup_class(cls, *args, **kwargs): if compatibility_mode: raise SkipTest super(TestStatesAR3AlternateTiming, cls).setup_class( alternate_timing=True, *args, **kwargs) class TestStatesAR3AlternativeSmoothing(TestStatesAR3): @classmethod def setup_class(cls, *args, **kwargs): if compatibility_mode: raise SkipTest super(TestStatesAR3AlternativeSmoothing, cls).setup_class( smooth_method=SMOOTH_ALTERNATIVE, *args, **kwargs) def test_smoothed_states(self): # Initialization issues can change the first few smoothed states assert_almost_equal( self.results.smoother_results.smoothed_state.T[2:], self.stata.ix[3:, ['sm1', 'sm2', 'sm3']], 4 ) assert_almost_equal( self.results.smoother_results.smoothed_state.T[2:], self.matlab_ssm.ix[2:, ['alphahat1', 'alphahat2', 'alphahat3']], 4 ) def test_smoothed_states_cov(self): assert_almost_equal( self.results.det_smoothed_state_cov.T[1:], self.matlab_ssm.ix[1:, ['detV']], 4 ) def test_smooth_method(self): assert_equal(self.model.ssm.smooth_method, SMOOTH_ALTERNATIVE) assert_equal(self.model.ssm._kalman_smoother.smooth_method, SMOOTH_ALTERNATIVE) assert_equal(self.model.ssm._kalman_smoother._smooth_method, SMOOTH_ALTERNATIVE) class TestStatesAR3UnivariateSmoothing(TestStatesAR3): @classmethod def setup_class(cls, *args, **kwargs): if compatibility_mode: raise SkipTest super(TestStatesAR3UnivariateSmoothing, cls).setup_class( filter_method=FILTER_UNIVARIATE, *args, **kwargs) def test_smooth_method(self): assert_equal(self.model.ssm.smooth_method, 0) assert_equal(self.model.ssm._kalman_smoother.smooth_method, 0) assert_equal(self.model.ssm._kalman_smoother._smooth_method, SMOOTH_UNIVARIATE) class TestStatesMissingAR3(object): @classmethod def setup_class(cls, alternate_timing=False, *args, **kwargs): # Dataset path = current_path + os.sep + 'results/results_wpi1_ar3_stata.csv' cls.stata = pd.read_csv(path) cls.stata.index = pd.date_range(start='1960-01-01', periods=124, freq='QS') # Matlab comparison path = current_path + os.sep+'results/results_wpi1_missing_ar3_matlab_ssm.csv' matlab_names = [ 'a1','a2','a3','detP','alphahat1','alphahat2','alphahat3', 'detV','eps','epsvar','eta','etavar' ] 
cls.matlab_ssm = pd.read_csv(path, header=None, names=matlab_names) # KFAS comparison path = current_path + os.sep+'results/results_smoothing3_R.csv' cls.R_ssm = pd.read_csv(path) # Create missing observations cls.stata['dwpi'] = cls.stata['wpi'].diff() cls.stata.ix[10:21, 'dwpi'] = np.nan cls.model = sarimax.SARIMAX( cls.stata.ix[1:,'dwpi'], order=(3, 0, 0), hamilton_representation=True, *args, **kwargs ) if alternate_timing: cls.model.ssm.timing_init_filtered = True # Parameters from from Stata's sspace MLE estimation params = np.r_[.5270715, .0952613, .2580355, .5307459] cls.results = cls.model.smooth(params, return_ssm=True) # Calculate the determinant of the covariance matrices (for easy # comparison to other languages without having to store 2-dim arrays) cls.results.det_predicted_state_cov = np.zeros((1, cls.model.nobs)) cls.results.det_smoothed_state_cov = np.zeros((1, cls.model.nobs)) for i in range(cls.model.nobs): cls.results.det_predicted_state_cov[0,i] = np.linalg.det( cls.results.predicted_state_cov[:,:,i]) cls.results.det_smoothed_state_cov[0,i] = np.linalg.det( cls.results.smoothed_state_cov[:,:,i]) if not compatibility_mode: # Perform simulation smoothing n_disturbance_variates = ( (cls.model.k_endog + cls.model.k_posdef) * cls.model.nobs ) cls.sim = cls.model.simulation_smoother() cls.sim.simulate( disturbance_variates=np.zeros(n_disturbance_variates), initial_state_variates=np.zeros(cls.model.k_states) ) def test_predicted_states(self): assert_almost_equal( self.results.predicted_state[:,:-1].T, self.matlab_ssm[['a1', 'a2', 'a3']], 4 ) def test_predicted_states_cov(self): assert_almost_equal( self.results.det_predicted_state_cov.T, self.matlab_ssm[['detP']], 4 ) def test_smoothed_states(self): assert_almost_equal( self.results.smoothed_state.T, self.matlab_ssm[['alphahat1', 'alphahat2', 'alphahat3']], 4 ) def test_smoothed_states_cov(self): assert_almost_equal( self.results.det_smoothed_state_cov.T, self.matlab_ssm[['detV']], 4 ) def test_smoothed_measurement_disturbance(self): assert_almost_equal( self.results.smoothed_measurement_disturbance.T, self.matlab_ssm[['eps']], 4 ) def test_smoothed_measurement_disturbance_cov(self): assert_almost_equal( self.results.smoothed_measurement_disturbance_cov[0].T, self.matlab_ssm[['epsvar']], 4 ) # There is a discrepancy between MATLAB ssm toolbox and # statsmodels.tsa.statespace on the following variables in the case of # missing data. 
Tests against the R package KFAS confirm our results def test_smoothed_state_disturbance(self): # assert_almost_equal( # self.results.smoothed_state_disturbance.T, # self.matlab_ssm[['eta']], 4 # ) assert_almost_equal( self.results.smoothed_state_disturbance.T, self.R_ssm[['etahat']], 9 ) def test_smoothed_state_disturbance_cov(self): # assert_almost_equal( # self.results.smoothed_state_disturbance_cov[0].T, # self.matlab_ssm[['etavar']], 4 # ) assert_almost_equal( self.results.smoothed_state_disturbance_cov[0,0,:], self.R_ssm['detVeta'], 9 ) class TestStatesMissingAR3AlternateTiming(TestStatesMissingAR3): @classmethod def setup_class(cls, *args, **kwargs): if compatibility_mode: raise SkipTest super(TestStatesMissingAR3AlternateTiming, cls).setup_class(alternate_timing=True, *args, **kwargs) class TestStatesMissingAR3AlternativeSmoothing(TestStatesMissingAR3): @classmethod def setup_class(cls, *args, **kwargs): if compatibility_mode: raise SkipTest super(TestStatesMissingAR3AlternativeSmoothing, cls).setup_class( smooth_method=SMOOTH_ALTERNATIVE, *args, **kwargs) def test_smooth_method(self): assert_equal(self.model.ssm.smooth_method, SMOOTH_ALTERNATIVE) assert_equal(self.model.ssm._kalman_smoother.smooth_method, SMOOTH_ALTERNATIVE) assert_equal(self.model.ssm._kalman_smoother._smooth_method, SMOOTH_ALTERNATIVE) class TestStatesMissingAR3UnivariateSmoothing(TestStatesMissingAR3): @classmethod def setup_class(cls, *args, **kwargs): if compatibility_mode: raise SkipTest super(TestStatesMissingAR3UnivariateSmoothing, cls).setup_class( filter_method=FILTER_UNIVARIATE, *args, **kwargs) def test_smooth_method(self): assert_equal(self.model.ssm.smooth_method, 0) assert_equal(self.model.ssm._kalman_smoother.smooth_method, 0) assert_equal(self.model.ssm._kalman_smoother._smooth_method, SMOOTH_UNIVARIATE) class TestMultivariateMissing(object): """ Tests for most filtering and smoothing variables against output from the R library KFAS. Note that KFAS uses the univariate approach which generally will result in different predicted values and covariance matrices associated with the measurement equation (e.g. forecasts, etc.). In this case, although the model is multivariate, each of the series is truly independent so the values will be the same regardless of whether the univariate approach is used or not. 
""" @classmethod def setup_class(cls, **kwargs): # Results path = current_path + os.sep + 'results/results_smoothing_R.csv' cls.desired = pd.read_csv(path) # Data dta = datasets.macrodata.load_pandas().data dta.index = pd.date_range(start='1959-01-01', end='2009-7-01', freq='QS') obs = dta[['realgdp','realcons','realinv']].diff().ix[1:] obs.ix[0:50, 0] = np.nan obs.ix[19:70, 1] = np.nan obs.ix[39:90, 2] = np.nan obs.ix[119:130, 0] = np.nan obs.ix[119:130, 2] = np.nan # Create the model mod = mlemodel.MLEModel(obs, k_states=3, k_posdef=3, **kwargs) mod['design'] = np.eye(3) mod['obs_cov'] = np.eye(3) mod['transition'] = np.eye(3) mod['selection'] = np.eye(3) mod['state_cov'] = np.eye(3) mod.initialize_approximate_diffuse(1e6) cls.model = mod cls.results = mod.smooth([], return_ssm=True) # Calculate the determinant of the covariance matrices (for easy # comparison to other languages without having to store 2-dim arrays) cls.results.det_scaled_smoothed_estimator_cov = ( np.zeros((1, cls.model.nobs))) cls.results.det_predicted_state_cov = np.zeros((1, cls.model.nobs)) cls.results.det_smoothed_state_cov = np.zeros((1, cls.model.nobs)) cls.results.det_smoothed_state_disturbance_cov = ( np.zeros((1, cls.model.nobs))) for i in range(cls.model.nobs): cls.results.det_scaled_smoothed_estimator_cov[0,i] = ( np.linalg.det( cls.results.scaled_smoothed_estimator_cov[:,:,i])) cls.results.det_predicted_state_cov[0,i] = np.linalg.det( cls.results.predicted_state_cov[:,:,i+1]) cls.results.det_smoothed_state_cov[0,i] = np.linalg.det( cls.results.smoothed_state_cov[:,:,i]) cls.results.det_smoothed_state_disturbance_cov[0,i] = ( np.linalg.det( cls.results.smoothed_state_disturbance_cov[:,:,i])) def test_loglike(self): assert_allclose(np.sum(self.results.llf_obs), -205310.9767) def test_scaled_smoothed_estimator(self): assert_allclose( self.results.scaled_smoothed_estimator.T, self.desired[['r1', 'r2', 'r3']] ) def test_scaled_smoothed_estimator_cov(self): assert_allclose( self.results.det_scaled_smoothed_estimator_cov.T, self.desired[['detN']] ) def test_forecasts(self): assert_allclose( self.results.forecasts.T, self.desired[['m1', 'm2', 'm3']] ) def test_forecasts_error(self): assert_allclose( self.results.forecasts_error.T, self.desired[['v1', 'v2', 'v3']] ) def test_forecasts_error_cov(self): assert_allclose( self.results.forecasts_error_cov.diagonal(), self.desired[['F1', 'F2', 'F3']] ) def test_predicted_states(self): assert_allclose( self.results.predicted_state[:,1:].T, self.desired[['a1', 'a2', 'a3']] ) def test_predicted_states_cov(self): assert_allclose( self.results.det_predicted_state_cov.T, self.desired[['detP']] ) def test_smoothed_states(self): assert_allclose( self.results.smoothed_state.T, self.desired[['alphahat1', 'alphahat2', 'alphahat3']] ) def test_smoothed_states_cov(self): assert_allclose( self.results.det_smoothed_state_cov.T, self.desired[['detV']] ) def test_smoothed_forecasts(self): assert_allclose( self.results.smoothed_forecasts.T, self.desired[['muhat1','muhat2','muhat3']] ) def test_smoothed_state_disturbance(self): assert_allclose( self.results.smoothed_state_disturbance.T, self.desired[['etahat1','etahat2','etahat3']] ) def test_smoothed_state_disturbance_cov(self): assert_allclose( self.results.det_smoothed_state_disturbance_cov.T, self.desired[['detVeta']] ) def test_smoothed_measurement_disturbance(self): assert_allclose( self.results.smoothed_measurement_disturbance.T, self.desired[['epshat1','epshat2','epshat3']] ) def test_smoothed_measurement_disturbance_cov(self): 
assert_allclose( self.results.smoothed_measurement_disturbance_cov.diagonal(), self.desired[['Veps1','Veps2','Veps3']] ) class TestMultivariateMissingClassicalSmoothing(TestMultivariateMissing): @classmethod def setup_class(cls, *args, **kwargs): if compatibility_mode: raise SkipTest super(TestMultivariateMissingClassicalSmoothing, cls).setup_class( smooth_method=SMOOTH_CLASSICAL, *args, **kwargs) def test_smooth_method(self): assert_equal(self.model.ssm.smooth_method, SMOOTH_CLASSICAL) assert_equal(self.model.ssm._kalman_smoother.smooth_method, SMOOTH_CLASSICAL) assert_equal(self.model.ssm._kalman_smoother._smooth_method, SMOOTH_CLASSICAL) class TestMultivariateMissingAlternativeSmoothing(TestMultivariateMissing): @classmethod def setup_class(cls, *args, **kwargs): if compatibility_mode: raise SkipTest super(TestMultivariateMissingAlternativeSmoothing, cls).setup_class( smooth_method=SMOOTH_ALTERNATIVE, *args, **kwargs) def test_smooth_method(self): assert_equal(self.model.ssm.smooth_method, SMOOTH_ALTERNATIVE) assert_equal(self.model.ssm._kalman_smoother.smooth_method, SMOOTH_ALTERNATIVE) assert_equal(self.model.ssm._kalman_smoother._smooth_method, SMOOTH_ALTERNATIVE) class TestMultivariateMissingUnivariateSmoothing(TestMultivariateMissing): @classmethod def setup_class(cls, *args, **kwargs): if compatibility_mode: raise SkipTest super(TestMultivariateMissingUnivariateSmoothing, cls).setup_class( filter_method=FILTER_UNIVARIATE, *args, **kwargs) def test_smooth_method(self): assert_equal(self.model.ssm.smooth_method, 0) assert_equal(self.model.ssm._kalman_smoother.smooth_method, 0) assert_equal(self.model.ssm._kalman_smoother._smooth_method, SMOOTH_UNIVARIATE) class TestMultivariateVAR(object): """ Tests for most filtering and smoothing variables against output from the R library KFAS. Note that KFAS uses the univariate approach which generally will result in different predicted values and covariance matrices associated with the measurement equation (e.g. forecasts, etc.). In this case, although the model is multivariate, each of the series is truly independent so the values will be the same regardless of whether the univariate approach is used or not. """ @classmethod def setup_class(cls, *args, **kwargs): # Results path = current_path + os.sep + 'results/results_smoothing2_R.csv' cls.desired = pd.read_csv(path) # Data dta = datasets.macrodata.load_pandas().data dta.index = pd.date_range(start='1959-01-01', end='2009-7-01', freq='QS') obs = np.log(dta[['realgdp','realcons','realinv']]).diff().ix[1:] # Create the model mod = mlemodel.MLEModel(obs, k_states=3, k_posdef=3, **kwargs) mod['design'] = np.eye(3) mod['obs_cov'] = np.array([[ 0.0000640649, 0. , 0. ], [ 0. , 0.0000572802, 0. ], [ 0. , 0. 
, 0.0017088585]]) mod['transition'] = np.array([[-0.1119908792, 0.8441841604, 0.0238725303], [ 0.2629347724, 0.4996718412, -0.0173023305], [-3.2192369082, 4.1536028244, 0.4514379215]]) mod['selection'] = np.eye(3) mod['state_cov'] = np.array([[ 0.0000640649, 0.0000388496, 0.0002148769], [ 0.0000388496, 0.0000572802, 0.000001555 ], [ 0.0002148769, 0.000001555 , 0.0017088585]]) mod.initialize_approximate_diffuse(1e6) cls.model = mod cls.results = mod.smooth([], return_ssm=True) # Calculate the determinant of the covariance matrices (for easy # comparison to other languages without having to store 2-dim arrays) cls.results.det_scaled_smoothed_estimator_cov = ( np.zeros((1, cls.model.nobs))) cls.results.det_predicted_state_cov = np.zeros((1, cls.model.nobs)) cls.results.det_smoothed_state_cov = np.zeros((1, cls.model.nobs)) cls.results.det_smoothed_state_disturbance_cov = ( np.zeros((1, cls.model.nobs))) for i in range(cls.model.nobs): cls.results.det_scaled_smoothed_estimator_cov[0,i] = ( np.linalg.det( cls.results.scaled_smoothed_estimator_cov[:,:,i])) cls.results.det_predicted_state_cov[0,i] = np.linalg.det( cls.results.predicted_state_cov[:,:,i+1]) cls.results.det_smoothed_state_cov[0,i] = np.linalg.det( cls.results.smoothed_state_cov[:,:,i]) cls.results.det_smoothed_state_disturbance_cov[0,i] = ( np.linalg.det( cls.results.smoothed_state_disturbance_cov[:,:,i])) def test_loglike(self): assert_allclose(np.sum(self.results.llf_obs), 1695.34872) def test_scaled_smoothed_estimator(self): assert_allclose( self.results.scaled_smoothed_estimator.T, self.desired[['r1', 'r2', 'r3']], atol=1e-4 ) def test_scaled_smoothed_estimator_cov(self): # Last obs is zero, so exclude it assert_allclose( np.log(self.results.det_scaled_smoothed_estimator_cov.T[:-1]), np.log(self.desired[['detN']][:-1]), atol=1e-6 ) def test_forecasts(self): assert_allclose( self.results.forecasts.T, self.desired[['m1', 'm2', 'm3']], atol=1e-6 ) def test_forecasts_error(self): assert_allclose( self.results.forecasts_error.T[:, 0], self.desired['v1'], atol=1e-6 ) def test_forecasts_error_cov(self): assert_allclose( self.results.forecasts_error_cov.diagonal()[:, 0], self.desired['F1'], atol=1e-6 ) def test_predicted_states(self): assert_allclose( self.results.predicted_state[:,1:].T, self.desired[['a1', 'a2', 'a3']], atol=1e-6 ) def test_predicted_states_cov(self): assert_allclose( self.results.det_predicted_state_cov.T, self.desired[['detP']], atol=1e-16 ) def test_smoothed_states(self): assert_allclose( self.results.smoothed_state.T, self.desired[['alphahat1', 'alphahat2', 'alphahat3']], atol=1e-6 ) def test_smoothed_states_cov(self): assert_allclose( self.results.det_smoothed_state_cov.T, self.desired[['detV']], atol=1e-16 ) def test_smoothed_forecasts(self): assert_allclose( self.results.smoothed_forecasts.T, self.desired[['muhat1','muhat2','muhat3']], atol=1e-6 ) def test_smoothed_state_disturbance(self): assert_allclose( self.results.smoothed_state_disturbance.T, self.desired[['etahat1','etahat2','etahat3']], atol=1e-6 ) def test_smoothed_state_disturbance_cov(self): assert_allclose( self.results.det_smoothed_state_disturbance_cov.T, self.desired[['detVeta']], atol=1e-18 ) def test_smoothed_measurement_disturbance(self): assert_allclose( self.results.smoothed_measurement_disturbance.T, self.desired[['epshat1','epshat2','epshat3']], atol=1e-6 ) def test_smoothed_measurement_disturbance_cov(self): assert_allclose( self.results.smoothed_measurement_disturbance_cov.diagonal(), self.desired[['Veps1','Veps2','Veps3']], atol=1e-6 ) 
class TestMultivariateVARAlternativeSmoothing(TestMultivariateVAR): @classmethod def setup_class(cls, *args, **kwargs): if compatibility_mode: raise SkipTest super(TestMultivariateVARAlternativeSmoothing, cls).setup_class( smooth_method=SMOOTH_ALTERNATIVE, *args, **kwargs) def test_smooth_method(self):
assert_equal(self.model.ssm.smooth_method, SMOOTH_ALTERNATIVE)
numpy.testing.assert_equal
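# --- Added illustration (not part of the original snippet) ---
# The test classes above repeatedly collapse a (k x k x nobs) stack of covariance matrices
# into a single row of determinants so the result can be compared against MATLAB/R reference
# output stored as flat CSV columns. A standalone version of that pattern, using made-up
# positive-definite matrices in place of the Kalman filter/smoother covariances:
import numpy as np

rng = np.random.RandomState(0)
nobs, k = 5, 3
cov_stack = np.empty((k, k, nobs))
for i in range(nobs):
    a = rng.standard_normal((k, k))
    cov_stack[:, :, i] = a @ a.T + k * np.eye(k)   # positive definite by construction

det_cov = np.zeros((1, nobs))
for i in range(nobs):
    det_cov[0, i] = np.linalg.det(cov_stack[:, :, i])
print(det_cov)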
# -*- coding: utf-8 -*- __all__ = ['points', 'costfunction', 'station'] # todo add grid definitions of reanalysis # for plotting era5_grid = {'lon': None, 'lat': None} erai_grid = None jra55_grid = None cera_grid = None def station(lon, lat, label=None, marker='o', markersize=20, bounds=1, ax=None, data=None, **kwargs): import numpy as np import cartopy as cpy import matplotlib.pyplot as plt from ..fun import check_kw # from cartopy.feature import NaturalEarthFeature # coast = NaturalEarthFeature(category='physical', scale='10m', facecolor='none', name='coastline') # have a time component? -> plot as time series if data is not None: if lon in data: lon = data[lon].values if lat in data: lat = data[lat].values else: lon = np.asarray(lon) lat = np.asarray(lat) itx = np.isfinite(lon) & np.isfinite(lat) lon = lon[itx] lat = lat[itx] if lon.size != lat.size: raise ValueError("Lon and Lat need same size") # todo add number of points plotted / median distance ilon = np.median(lon) ilat = np.median(lat) projection = kwargs.get('projection', cpy.crs.PlateCarree()) if ax is None: ax = plt.axes(projection=projection) ax.set_extent((ilon - bounds, ilon + bounds, ilat - bounds, ilat + bounds), crs=projection) if check_kw('ocean', value=True, **kwargs): ax.add_feature(cpy.feature.OCEAN.with_scale('10m'), zorder=0) if check_kw('land', value=True, **kwargs): ax.add_feature(cpy.feature.LAND.with_scale('10m'), zorder=0) if check_kw('coastline', value=True, **kwargs): ax.add_feature(cpy.feature.COASTLINE.with_scale('10m'), zorder=0) # ax.coastlines() if check_kw('rivers', value=True, **kwargs): ax.add_feature(cpy.feature.RIVERS.with_scale('10m'), zorder=1) if check_kw('lakes', value=True, **kwargs): ax.add_feature(cpy.feature.LAKES.with_scale('10m'), zorder=1) if check_kw('borders', value=True, **kwargs): ax.add_feature(cpy.feature.BORDERS.with_scale('10m'), zorder=1) if check_kw('states', value=True, **kwargs): ax.add_feature(cpy.feature.STATES.with_scale('10m'), zorder=1) ax.scatter(lon, lat, s=markersize, c=kwargs.get('color', 'r'), transform=projection, zorder=10, edgecolor='k') # ontop try: gl = ax.gridlines(draw_labels=True, xlocs=kwargs.get('xlocs', None), ylocs=kwargs.get('ylocs', None), linewidth=0.5, linestyle='--', color='k') gl.xformatter = cpy.mpl.gridliner.LONGITUDE_FORMATTER gl.yformatter = cpy.mpl.gridliner.LATITUDE_FORMATTER gl.xlabels_top = False gl.ylabels_right = False except: ax.gridlines(draw_labels=False) ax.set_extent((ilon - bounds, ilon + bounds, ilat - bounds, ilat + bounds), crs=projection) left = 0.5 bottom = 0.13 width = 0.3 height = 0.2 rect = [left, bottom, width, height] ax2 = plt.axes(rect, projection=cpy.crs.PlateCarree()) ax2.set_extent((ilon - 30, ilon + 30, ilat - 30, ilat + 30)) # ax2.set_global() #will show the whole world as context ax2.coastlines(resolution='110m', zorder=2) ax2.add_feature(cpy.feature.LAND) ax2.add_feature(cpy.feature.OCEAN) ax2.gridlines() lon0, lon1, lat0, lat1 = ax.get_extent() box_x = [lon0, lon1, lon1, lon0, lon0] box_y = [lat0, lat0, lat1, lat1, lat0] plt.plot(box_x, box_y, color='red', transform=cpy.crs.Geodetic()) return ax def points(lon, lat, labels=None, values=None, markersize=80, ocean=True, land=True, coastlines=True, rivers=False, grid=True, posneg=False, extent=None, lloffset=0.2, showcost=False, clabel=None, cbars={}, colorlevels=None, data=None, vmin=None, vmax=None, dropna=False, figure=None, gridspecs=None, **kwargs): """ Plot stations on a map Args: lon (np.array, list): Longitudes lat (np.array, list): Latidutes labels 
(np.array, list): Labels values (np.array, list): Values for scatterplot markersize (int): markersize ocean (bool): plot ocean ? land (bool): plot land ? coastlines (bool): plot coastlines ? rivers (bool): plot river ? grid (bool): plot gridlines ? posneg (bool): different markers for positive and negative extent (str): neither (default), both, min, max lloffset (float): label offset showcost (bool): Estimate Cost function and add to title clabel (str): Colorbar Label cbars (dict): Colorbar Options colorlevels (list, str): scatterplot colorlevels data (xr.DataArray): Data vmin (float): minimum value vmax (float): maximum value dropna (bool): Remove missing values? figure (plt.figure): figure handle gridspecs (dict): gridspec options for figure **kwargs: Returns: plt.axes """ import numpy as np import cartopy as cpy from matplotlib.colors import BoundaryNorm import matplotlib.pyplot as plt from ._helpers import cost, plot_arange as pa, plot_levels as pl if data is not None: lon = data[lon] lat = data[lat] values = data lon = np.asarray(lon) lat = np.asarray(lat) if lon.size != lat.size: raise ValueError("Lon and Lat need same size") if values is not None: values = np.asarray(values, dtype=float) nn = np.size(values) if lon.size != lat.size or lon.size != values.size: raise ValueError("Lon, Lat and Values need same size", lon.size, lat.size, values.size) if vmin is not None: idx = values < vmin values[idx] = np.nan print("vmin", idx.sum(), nn) if vmax is not None: idx = values > vmax values[idx] = np.nan print("vmax", idx.sum(), nn) if dropna: idx = np.isfinite(values) values = values[idx] lon = lon[idx] lat = lat[idx] print("NA", nn - idx.sum(), nn) projection = kwargs.get('projection', cpy.crs.PlateCarree()) if figure is None: ax = plt.axes(projection=projection) else: ax = figure.add_subplot(gridspecs, projection=projection) if ocean: ax.add_feature(cpy.feature.OCEAN, zorder=0, facecolor=kwargs.get('ocean_facecolor', cpy.feature.COLORS['water'])) if land: ax.add_feature(cpy.feature.LAND, zorder=0, facecolor=kwargs.get('land_facecolor', cpy.feature.COLORS['land'])) if coastlines: ax.coastlines() if rivers: ax.add_feature(cpy.feature.LAKES, zorder=0) ax.add_feature(cpy.feature.RIVERS, zorder=1) if labels is not None: labels = np.asarray(labels) if values is None: ax.scatter(lon, lat, s=markersize, c=kwargs.get('color', 'r'), transform=cpy.crs.PlateCarree(), zorder=10, edgecolor='k') # ontop else: if posneg: kwargs['marker'] = np.where(values < 0, 'd', 'o') cmap = plt.get_cmap(kwargs.pop('cmap', None)) norm = None if colorlevels is not None: if isinstance(colorlevels, str): colorlevels = eval(colorlevels) # plot_levels, plot_arange norm = BoundaryNorm(colorlevels, cmap.N) idx = np.isfinite(values) cs = ax.scatter(lon[idx], lat[idx], s=markersize, c=values[idx], transform=cpy.crs.PlateCarree(), zorder=10, cmap=cmap, edgecolor='k', marker=kwargs.get('marker', 'o'), norm=norm) cbars['fraction'] = cbars.get('fraction', 0.01) cbars['aspect'] = cbars.get('aspect', 50) cbars['shrink'] = cbars.get('shrink', 0.8) cbars['extend'] = cbars.get('extend', 'both') cb = plt.colorbar(cs, ax=ax, **cbars) if clabel is not None: cb.set_label(clabel) if showcost: tcost = cost(lon, lat, values) if np.isfinite(values).sum() != np.size(values): itx = ~np.isfinite(values) ax.scatter(lon[itx], lat[itx], s=markersize, marker='s', c='w', transform=cpy.crs.PlateCarree(), zorder=9, edgecolor='k') if labels is not None: if not hasattr(lloffset, '__iter__'): lloffset = [lloffset] * len(labels) for i, j, l, k in zip(lon, 
lat, labels, lloffset): ax.text(i + k, j, str(l), horizontalalignment='left', verticalalignment='top', transform=cpy.crs.PlateCarree(), fontsize=kwargs.get('fontsize', 8), zorder=12, clip_on=True) if grid: try: gl = ax.gridlines(draw_labels=True, xlocs=kwargs.get('xlocs', None), ylocs=kwargs.get('ylocs', None), linewidth=0.5, linestyle='--', color='k') gl.xformatter = cpy.mpl.gridliner.LONGITUDE_FORMATTER gl.yformatter = cpy.mpl.gridliner.LATITUDE_FORMATTER gl.xlabels_top = False gl.ylabels_right = False except: ax.gridlines(draw_labels=False) if values is not None: nn = np.sum(np.isfinite(values)) title = '(# %d / %d)' % (nn, np.size(values)) # COST Summary if showcost: tscost = np.nansum(tcost) / np.sum(np.isfinite(values)) title += ' Cost: %5.2f' % tscost else: title = 'Stations # %d' % np.size(lon) ax.set_title(kwargs.get('title', '') + ' ' + title) if 'xlabel' in kwargs.keys(): ax.set_xlabel(kwargs.get('xlabel')) if 'ylabel' in kwargs.keys(): ax.set_ylabel(kwargs.get('ylabel')) if extent is not None: ax.set_extent(extent, crs=cpy.crs.PlateCarree()) return ax def costfunction(lon, lat, values, **kwargs): import numpy as np from ._helpers import cost # plot cost values instead tcost = cost(lon, lat, values) values = np.where(np.isfinite(values), tcost, np.nan) # Plot Cost Values instead of real values return points(lon, lat, values=values, **kwargs) def values_zonal_meridional(lon, lat, values, zonal=True, ax=None, label=None, lat_bands=10, lon_bands=10, func=None, fkwargs={}, **kwargs): import numpy as np import cartopy as cpy import matplotlib.pyplot as plt lon =
np.asarray(lon)
numpy.asarray
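# --- Added illustration (not part of the original snippet) ---
# A hypothetical call to the points() helper defined above. The station coordinates, labels,
# and anomaly values are invented purely for illustration; an xarray object could instead be
# passed through the data= keyword as documented above. Requires cartopy and matplotlib.
import numpy as np
import matplotlib.pyplot as plt

lons = np.array([16.37, 11.58, 2.35])
lats = np.array([48.21, 48.14, 48.86])
anomalies = np.array([0.8, -0.3, 1.1])

ax = points(lons, lats, values=anomalies, labels=["VIE", "MUC", "PAR"],
            clabel="anomaly [K]", title="Demo stations")
plt.show()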
import datetime import json import numpy as np import os import pandas as pd import pickle import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers os.environ['TF_CPP_MIN_VLOG_LEVEL'] = '0' os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0' import random from absl import logging logging.set_verbosity(logging.INFO) from itertools import chain from sklearn.linear_model import LinearRegression from statsmodels.tsa.api import VAR from statsmodels.tsa.stattools import adfuller from statsmodels.tools.eval_measures import rmse, aic import local_settings from climate_ae.models import utils from climate_ae.data_generator.datahandler import input_fn import climate_ae.models.ae.eval_utils as eval_utils import climate_ae.models.ae.climate_utils as climate_utils import climate_ae.models.ae.train as train from climate_ae.models.ae.train_linear_model import load_data, predict_latents_and_decode DEBUG = False def get_forecasts(model_fitted, annos, var_order, n_steps): forecasts_ = [] if n_steps == 0: n_steps = annos.shape[0] - var_order n_to_predict = (annos.shape[0] - var_order)//n_steps for i in range(n_to_predict): forecasts_.append( model_fitted.forecast( y=annos[(i*n_steps):((i*n_steps)+var_order),:], steps=n_steps)) forecasts_np = np.concatenate(forecasts_) residuals = annos[var_order:(var_order+n_to_predict*n_steps),:] - forecasts_np return forecasts_np, residuals def generate_bts_sample_parametric(model, reg, fitted_, resids_, block_size, dim_x, dim_out, precip): n_blocks = int(np.floor(fitted_.shape[0]/block_size)) n_blocks_dict = {} for b in range(n_blocks): n_blocks_dict[b] = list(range((b*block_size), ((b+1)*block_size))) ho_annos_bts = np.zeros([n_blocks*block_size, dim_x]) for b in range(n_blocks): # get a random block block_id = random.randint(0, n_blocks-1) # get residuals from this random block resampled_resid = resids_[n_blocks_dict[block_id],:] fitted_to_add = fitted_[(b*block_size):((b+1)*block_size),:] # add to fitted values ho_annos_bts[(b*block_size):((b+1)*block_size),:] = (fitted_to_add + resampled_resid) # get decoded predictions for bootstrap sample shape_ = [ho_annos_bts.shape[0], *dim_out[1:]] ho_xhatexp_bts = predict_latents_and_decode(model, reg, ho_annos_bts, shape_) # if precipitation: transform back to original scale if precip: ho_xhatexp_bts = ho_xhatexp_bts ** 2 return ho_xhatexp_bts def generate_bts_sample_simple(model, reg, annos, block_size, dim_out, precip): dim_x = annos.shape[1] n_blocks = int(np.floor(annos.shape[0]/block_size)) n_blocks_dict = {} for b in range(n_blocks): n_blocks_dict[b] = list(range((b*block_size), ((b+1)*block_size))) annos_bts = np.zeros([n_blocks*block_size, dim_x]) for b in range(n_blocks): # get a random block block_id = random.randint(0, n_blocks-1) annos_bts[(b*block_size):((b+1)*block_size),:] = \ annos[n_blocks_dict[block_id],:] # get decoded predictions for bootstrap sample shape_ = [annos_bts.shape[0], *dim_out[1:]] xhatexp_bts = predict_latents_and_decode(model, reg, annos_bts, shape_) # if precipitation: transform back to original scale if precip: xhatexp_bts = xhatexp_bts ** 2 return xhatexp_bts def train_linear_model_and_generate(checkpoint_path, n_bts_samples, var_order, block_size, n_steps, precip, save_nc, seed=1): # set seed np.random.seed(seed) # get configs from model with open(os.path.join(checkpoint_path, "hparams.pkl"), 'rb') as f: config = pickle.load(f) config = utils.config_to_namedtuple(config) model, _ = train.get_models(config) # input function def input_anno(params, mode, repeat, 
n_repeat=None): dataset = input_fn(params=params, mode=mode, repeat=repeat, n_repeat=n_repeat, shuffle=False) if len(params.temp_indices) == 0 and len(params.psl_indices) == 0: dataset = dataset.map(lambda x: {"inputs": x["inputs"], "anno": tf.gather(x["anno"], params.anno_indices, axis=1), "year": x["year"], "month": x["month"], "day": x["day"] }) elif len(params.temp_indices) == 0: dataset = dataset.map(lambda x: {"inputs": x["inputs"], "anno": tf.concat( (tf.gather(x["anno"], params.anno_indices, axis=1), tf.gather(x["psl_mean_ens"], params.psl_indices, axis=1)), axis=1), "year": x["year"], "month": x["month"], "day": x["day"] }) elif len(params.psl_indices) == 0: dataset = dataset.map(lambda x: {"inputs": x["inputs"], "anno": tf.concat( (tf.gather(x["anno"], params.anno_indices, axis=1), tf.gather(x["temp_mean_ens"], params.temp_indices, axis=1)), axis=1), "year": x["year"], "month": x["month"], "day": x["day"] }) else: dataset = dataset.map(lambda x: {"inputs": x["inputs"], "anno": tf.concat( (tf.gather(x["anno"], params.anno_indices, axis=1), tf.gather(x["psl_mean_ens"], params.psl_indices, axis=1), tf.gather(x["temp_mean_ens"], params.temp_indices, axis=1)), axis=1), "year": x["year"], "month": x["month"], "day": x["day"] }) return dataset global_step = tf.Variable(initial_value=0, dtype=tf.int64, trainable=False, name="global_step") train_inputs = input_anno(params=config, mode="train", repeat=False) test_inputs = input_anno(params=config, mode="test1", repeat=False) holdout_inputs = input_anno(params=config, mode="test2", repeat=False) # dummy run - otherwise, the model wouldn't be fully build show_inputs = iter(train_inputs) _ = model(next(show_inputs)["inputs"]) # restore model from checkpoint checkpoint = tf.train.Checkpoint(model=model, global_step=global_step) manager = tf.train.CheckpointManager(checkpoint, checkpoint_path, max_to_keep=3) status = checkpoint.restore(manager.latest_checkpoint) status.assert_consumed() # get training data for linear latent space model tr_inputs, _, tr_latents, tr_annos, _, _, _ = load_data( train_inputs, model, subset=True, debug=DEBUG) # fit linear model reg = LinearRegression().fit(tr_annos, tr_latents) # get test data te_inputs, te_recons, _, te_annos, _, _, _ = load_data(test_inputs, model, debug=DEBUG) # predict latents for test set and decode te_xhatexp = predict_latents_and_decode(model, reg, te_annos,
np.shape(te_inputs)
numpy.shape
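# --- Added illustration (not part of the original snippet) ---
# generate_bts_sample_parametric above keeps the fitted VAR values fixed and stitches randomly
# chosen blocks of residuals on top before decoding. Below is a stripped-down, numpy-only
# sketch of just that block-resampling step (no autoencoder or regression involved); the
# function name, array sizes, and block size are arbitrary demonstration values.
import random
import numpy as np

def block_bootstrap(fitted, residuals, block_size):
    n_blocks = fitted.shape[0] // block_size
    out = np.zeros((n_blocks * block_size, fitted.shape[1]))
    for b in range(n_blocks):
        k = random.randint(0, n_blocks - 1)               # random residual block, as above
        rows = slice(b * block_size, (b + 1) * block_size)
        out[rows] = fitted[rows] + residuals[k * block_size:(k + 1) * block_size, :]
    return out

fitted = np.arange(24, dtype=float).reshape(12, 2)
residuals = np.random.randn(12, 2) * 0.1
print(block_bootstrap(fitted, residuals, block_size=3).shape)   # -> (12, 2)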
""" Author: <NAME> Email: <EMAIL> Description: Part of the code package corresponding to my master thesis. This file implements a 2d optimal control problem example. Year: 2019/2020 """ ######################################################################################################################## # importing stuff ######################################################################################################################## import numpy as np import random import logging import re from matplotlib import pyplot as plt from collections import deque from mpl_toolkits.mplot3d import Axes3D from matplotlib import cm import time from valueFunctionApprox.myValueFunctionApproximation import MyValueFunctionApproximation from misc.myLoggingFunctions import createSetupFile from misc.myLoggingFunctions import depositeIntermediateTrainingResults import sys sys.path.append('../functionApproximation/cpd') from myALSOptimiser import MyALSRbfOptimiser ######################################################################################################################## # definition of functions ######################################################################################################################## def terminalCostFunc(xList, domainBox): """ @param[in] ### xList ### list of length two containing data points, where xList[0] corresponds to the x1 data, and xList[1] corresponds to the x2 data NOTE: > data can be given in scalar or in matrix form! @param[in] ### domainBox ### list of two tuples representing the boundary of the domain the vehicle should not leave @return ### retVal ### a scalar or a matrix modeling the cost of achieving the states given in xList at the final time """ # take care of the following: # > there are logical operators in python for boolean arrays and for boolean scalars - there is no approach to # cover both cases ... # > hence distinguish two cases: is xList a list of two scalars or is it a list of arrays .... tmp = 100.0 if np.isscalar(xList[0]) == True: if (domainBox[0][0] <= xList[0] <= domainBox[0][1]) and (domainBox[1][0] <= xList[1] <= domainBox[1][1]): return 0.0 else: return tmp else: retVal = tmp *
np.ones(xList[0].shape)
numpy.ones
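# --- Added illustration (not part of the original snippet) ---
# A compact, vectorised restatement of terminalCostFunc above: zero cost inside the admissible
# box and a flat penalty of 100 outside it, using a boolean mask instead of the scalar/array
# case split. The function name and box limits chosen here are illustrative only.
import numpy as np

def terminal_cost(x1, x2, box=((-2.0, 2.0), (-2.0, 2.0)), penalty=100.0):
    x1 = np.asarray(x1, dtype=float)
    x2 = np.asarray(x2, dtype=float)
    inside = ((box[0][0] <= x1) & (x1 <= box[0][1]) &
              (box[1][0] <= x2) & (x2 <= box[1][1]))
    return np.where(inside, 0.0, penalty)

xx, yy = np.meshgrid(np.linspace(-3.0, 3.0, 5), np.linspace(-3.0, 3.0, 5))
print(terminal_cost(xx, yy))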
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import itertools
import typing as tp
import numpy as np
from nevergrad.parametrization import parameter as p
from .base import ExperimentFunction
from . import corefuncs


class PBT(ExperimentFunction):
    """Population-Based Training, also known as Lamarckism or Meta-Optimization."""

    def __init__(
        self,
        names: tp.Tuple[str, ...] = ("sphere", "cigar", "ellipsoid"),
        dimensions: tp.Tuple[int, ...] = (7, 7, 7),
        num_workers: int = 10,
    ):
        for name in names:
            if name not in corefuncs.registry:
                available = ", ".join(sorted(corefuncs.registry))
                raise ValueError(
                    f'Unknown core function "{name}" in PBT. Available names are:\n-----\n{available}'
                )
        self._funcs = [corefuncs.registry[name] for name in names]
        self._optima = [np.random.normal(size=d) for d in dimensions]
        assert len(names) == len(dimensions)
        self._hyperparameter_dimension = len(names)
        self._dimensions = dimensions
        self._total_dimension = sum(dimensions)
        parametrization = p.Array(shape=(self._hyperparameter_dimension,)).set_name("")
        # Population of checkpoints (that are optimized by the underlying optimization method)
        # and parameters (that we do optimize).
        self._population_checkpoints: tp.List[np.ndarray] = [np.zeros(self._total_dimension)] * num_workers
        self._population_parameters: tp.List[np.ndarray] = [
np.zeros(self._hyperparameter_dimension)
numpy.zeros
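# --- Added illustration (not part of the original snippet) ---
# The population containers above are created with list multiplication, e.g.
# [np.zeros(dim)] * num_workers, which yields num_workers references to one shared ndarray.
# That is harmless as long as entries are later replaced wholesale, but it differs from a
# list comprehension if anything writes into an entry in place:
import numpy as np

shared = [np.zeros(3)] * 4
shared[0][0] = 7.0            # in-place write is visible through every reference
print(shared[1])              # -> [7. 0. 0.]

independent = [np.zeros(3) for _ in range(4)]
independent[0][0] = 7.0       # only the first array changes
print(independent[1])         # -> [0. 0. 0.]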
"""Module that handles shared information for all network objects.""" import xml.etree.ElementTree as et import numbers import numpy as np import pandas as pd import scipy.sparse as sps from paminco.utils.readin import parse_number, xml_find_root from paminco.utils.misc import Cache from paminco.utils.typing import sparse_format, is_int, is_iterable, IntEnum2 import paminco._doc as _doc ID_UNMAPPED = -9999 LBL_UNMAPPED = "Invalid" class FlowDirection(IntEnum2): """Enum defining the type flow for the graph.""" DIRECTED = 0 """All edges can only take flow >= 0.""" UNDIRECTED = 1 """All edges can take any flow.""" MIXED = 2 """Some edges may only take flow >= 0.""" class Edges: """ Class that contains the edges/links of a network. An edges object can be instantiated in several ways: Edges(e) where ``e`` is an Edges object. Data in ``e`` will be copied if specfied by parameter ``copy``. Edges(st) where ``st`` is array_like. Parameter st is converted to ndarray and is expected to me of shape (m, 2). Can be node indices or node labels specifying an edge. If labels are given, indices are mapped by ``map_labels_to_indices``, given indices are mapped by ``map_indices_to_labels``. Edge bounds are determined by the parameter ``directed``. Edges((st, bounds)) where ``st`` is array_like and ``bounds`` is tuple (lower, upper) specifying bounds used for all edges or array_like of shape (m, 2) marking individual bounds for all edges. Edges((labels, indices, bounds)) where ``labels``, ``indices`` are array_like of shape (m, 2) and ``bounds`` is tuple (lower, upper) specifying bounds used for all edges or array_like of shape (m, 2) containing individual bounds for all edges. Parameters ---------- data : ndarray, or tuple of ndarray Edge data. directed_flow : bool, default=True Controls default values for ``None`` in bounds. If ``True``, lower bounds are set to 0 and ``False`` to -inf. Missing upper bounds are set to inf. map_labels_to_indices : None, bool, dict, or callable, default=True Determines mapping of labels to indices if no indices are given. If ``None`` or ``False``, indices of edges will be set to -9999, denoting invalid edge indices. If ``dict``, labels will be mapped by this dict. If ``True``, node indices are set to 0, 1, ..., n-1. If ``callable``, use callable with signature ``indices = callable(labels)``. map_indices_to_labels : None, bool, dict, or callable, default=True Determines mapping of indices to indices if no labels are given. If ``None`` or ``False``, indices of edges will be set to 'invalid', denoting invalid edge labels. If ``dict``, indices will be mapped by this dict. If ``True``, node labels are set to node indices as str. If ``callable``, use callable with signature ``labels = callable(indices)``. dtype_float : dtype, default=numpy.float_ Datatype for edge bounds. dtype_int : dtype, default=int Datatype for edge bounds. copy : bool, default=False Whether to create a copy of the inputs in data. Attributes ---------- flow_directions : ndarray Ndarray of shape (m, ). A ``-1`` denotes an edge with lb < 0 and ub <= 0. A ``0`` denotes an edge with lb < 0 and ub > 0. A ``1`` denotes an edge with lb >=0 and ub > 0. 
""" def __init__( self, data, directed_flow: bool = True, map_labels_to_indices=True, # optional map_indices_to_labels=True, # optional dtype_float=None, dtype_int=None, copy: bool = False, ) -> None: # Collect kwargs kw = { "directed_flow": directed_flow, "map_labels_to_indices": map_labels_to_indices, "map_indices_to_labels": map_indices_to_labels, "dtype_float": dtype_float, "dtype_int": dtype_int, "copy": copy, } if isinstance(data, Edges): d = (data.labels, data.indices, data.bounds) return self.__init__(d, dtype_float=data.dtype_float, dtype_int=data.dtype_int) elif isinstance(data, tuple): if len(data) == 3: pass elif len(data) == 2: # (labels or indices, bounds) st, bounds = data st = np.array(st) if st.dtype.kind in {'U', 'S'}: # (labels, bounds) if isinstance(map_labels_to_indices, dict): st_ids = np.vectorize(map_labels_to_indices.__getitem__)(st) elif map_labels_to_indices is True: # Automap labels # Get unique labels and sort them if quasi-ints unique_lbl = np.unique(st) try: unique_lbl = sorted(unique_lbl, key=int) except ValueError: pass d = dict(zip(unique_lbl, np.arange(len(unique_lbl)))) st_ids = np.vectorize(d.__getitem__)(st) elif map_labels_to_indices is None or map_labels_to_indices is False: # Set to invalid indices st_ids = np.full(st.shape, ID_UNMAPPED, dtype=int) else: # Map labels by callable st_ids = map_labels_to_indices(st) return self.__init__((st, st_ids, bounds), **kw) elif issubclass(st.dtype.type, numbers.Integral): # (indices, bounds) unique_st = np.unique(st) if np.array_equal(np.sort(unique_st), np.arange(len(unique_st))) is False: raise ValueError(f"Indices must be all integers from 0 to {len(unique_st) - 1}.") if isinstance(map_indices_to_labels, dict): st_lbl = np.vectorize(map_indices_to_labels.__getitem__)(st) elif map_indices_to_labels is True: st_lbl = st.astype(str) elif map_indices_to_labels is None or map_indices_to_labels is False: # Set to invalid indices st_lbl = np.full(st.shape, LBL_UNMAPPED) else: st_lbl = map_indices_to_labels(st) return self.__init__((st_lbl, st, bounds), **kw) else: raise ValueError(f"Invalid edge data: {data}.") else: raise ValueError(f"Invalid edge data: {data}.") else: # Only labels or indices given -> build lower and upper bounds by directed if directed_flow is True: return self.__init__((data, (0, np.inf)), **kw) return self.__init__((data, (-np.inf, np.inf)), **kw) # Handle datatypes if dtype_float is None: dtype_float = np.float64 if dtype_int is None: dtype_int = int self._dtype_float = dtype_float self._dtype_int = dtype_int # Unpack data labels, indices, bounds = data self.labels = np.array(labels, dtype=str, copy=copy) self.indices = np.array(indices, dtype=dtype_int, copy=copy) # Broadcast bounds if lower, upper for all edges given if not isinstance(bounds, np.ndarray): bounds = np.array(bounds) if len(bounds.shape) == 1: bounds = bounds.reshape(1, -1) bounds = np.repeat(bounds, len(labels), axis=0) # Handle 'None' bounds bkw = {"posinf": np.inf, "neginf": -np.inf} self.bounds = np.array(bounds, dtype=dtype_float, copy=copy) self.bounds[:, 1] = np.nan_to_num(self.bounds[:, 1], copy=False, nan=np.inf, **bkw) if directed_flow is True: self.bounds[:, 0] = np.nan_to_num(self.bounds[:, 0], copy=False, nan=0., **bkw) else: self.bounds[:, 0] = np.nan_to_num(self.bounds[:, 0], copy=False, nan=-np.inf, **bkw) # Check consistency of labels, indices and bounds if self.labels.shape[1] != 2: raise ValueError( f"Invalid edge data, labels are of shape {self.labels.shape}." 
) if (self.labels.shape == self.indices.shape == self.bounds.shape) is False: raise ValueError( "Inconsistent shapes. " f"Labels: {self.labels.shape}, " f"indices: {self.indices.shape}, " f"bounds: {self.bounds.shape}." ) # Set edge directions and get type of graph self.flow_directions = np.zeros(len(self)) self.flow_directions[self.lb < 0] -= 1 self.flow_directions[self.ub > 0] += 1 if len(self.flow_undirected) == len(self): self.flow_dir = FlowDirection.UNDIRECTED elif len(self.flow_undirected) == 0: self.flow_dir = FlowDirection.DIRECTED else: self.flow_dir = FlowDirection.MIXED self.cache = Cache() def __eq__(self, other) -> bool: for att in ["labels", "indices", "bounds"]: if np.array_equal(getattr(self, att), getattr(other, att)) is False: return False return True def __len__(self) -> int: return len(self.indices) def __getitem__(self, idx): if is_iterable(idx): return [self[i] for i in idx] return {att: getattr(self, att)[idx] for att in ["source_lbl", "target_lbl", "s", "t", "lb", "ub"]} def to_df(self, **kwargs) -> pd.DataFrame: """Get object as DataFrame. Parameters ---------- **kwargs : keyword arguments, optional Passed to DataFrame constructor. Returns ------- df : pandas.DataFrame Edges with source/target labels, source/target ids, lower and upper bounds. """ data = np.hstack([self.labels, self.indices, self.bounds]) df = pd.DataFrame(data, **kwargs) df.columns = ["source_lbl", "target_lbl", "s", "t", "lb", "ub"] df[["source_lbl", "target_lbl"]] = df[["source_lbl", "target_lbl"]].astype(str) df[["s", "t"]] = df[["s", "t"]].astype(self._dtype_int) df[["lb", "ub"]] = df[["lb", "ub"]].astype(self._dtype_float) return df def get_flow_df( self, x, labels: bool = True, colname_flow: str = "flow" ) -> pd.DataFrame: if isinstance(x, (int, float)): x = np.full(len(self), x) if labels is True: s, t = self.source_lbl, self.target_lbl dtype = str else: s, t = self.s, self.t dtype = int df = pd.DataFrame({"source": s, "target": t, colname_flow: x}) df[["source", "target"]] = df[["source", "target"]].astype(dtype) return df def get_directed( self, w=None, backward_positive: bool = True ) -> tuple: if self.cache.is_valid("directed_elements") is False: forward = self.ub > 0 backward = self.lb < 0 s_fw, t_fw = self.indices[forward, :].T t_bw, s_bw = self.indices[backward, :].T s = np.hstack((s_fw, s_bw)) t = np.hstack((t_fw, t_bw)) self.cache["directed_elements"] = (forward, backward, s, t) else: (forward, backward, s, t) = self.cache["directed_elements"] if w is not None: w_fw = w[forward] w_bw = w[backward] if backward_positive is False: w_bw = - w_bw # Stack weight similar to source, target w = np.hstack((w_fw, w_bw)) return s, t, w return s, t def get_duplicate_edges(self) -> np.ndarray: # Dubplicates -> s/t both are the same st = pd.Series([str(a) + "-" + str(b) for (a, b) in self.indices]) return np.where(st.duplicated())[0] def map_labels(self, d: dict) -> None: """Map edge labels by d.""" self.indices = np.vectorize(d.__getitem__)(self.labels).astype(self.dtype_int) def _delete_edges( self, del_idx, return_indices: bool = False ): del_idx = np.array(del_idx) # Delete edges in all numpy arrays self.labels = np.delete(self.labels, del_idx, axis=0) self.indices = np.delete(self.indices, del_idx, axis=0) self.bounds =
np.delete(self.bounds, del_idx, axis=0)
numpy.delete
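# --- Editorial aside (not part of the dataset row above): a minimal, hedged
# sketch of numpy.delete, the API this row's completion targets. With axis=0 it
# drops whole rows, mirroring how the Edges._delete_edges prompt prunes labels,
# indices and bounds in parallel. All values below are made up for illustration.
import numpy as np

bounds = np.array([[0.0, np.inf],
                   [-np.inf, np.inf],
                   [0.0, 5.0]])
del_idx = np.array([1])                      # row indices to remove
bounds = np.delete(bounds, del_idx, axis=0)  # keeps rows 0 and 2
print(bounds.shape)                          # (2, 2)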
import numpy as np
import os
import sys
import yaml

from .fgcmUtilities import FocalPlaneProjectorFromOffsets
from .fgcmLogger import FgcmLogger


class ConfigField(object):
    """
    A validatable field with a default
    """
    def __init__(self, datatype, value=None, default=None, required=False, length=None):
        self._datatype = datatype
        self._value = value
        self._required = required
        self._length = length

        _default = default
        if self._datatype == np.ndarray:
            if default is not None:
                _default =
np.atleast_1d(default)
numpy.atleast_1d
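# --- Editorial aside (not part of the dataset row above): a small sketch of
# numpy.atleast_1d, the API this row's completion targets. It promotes scalars
# (and lists) to at-least-1-d arrays and leaves existing arrays unchanged, which
# is what a ConfigField with datatype np.ndarray needs for its default. Values
# are hypothetical.
import numpy as np

print(np.atleast_1d(3.0))                 # array([3.])
print(np.atleast_1d([1.0, 2.0]))          # array([1., 2.])
print(np.atleast_1d(np.arange(3)).shape)  # (3,) -- already 1-d, unchanged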
from __future__ import print_function import os import sys import numpy as np import torch # import networkx as nx import random from torch.autograd import Variable from torch.nn.parameter import Parameter import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from tqdm import tqdm from copy import deepcopy from pygcn.models import SGCNModel from rl.dqn import DQN import itertools as it # from .q_net_node import QNetNode, NStepQNetNode, node_greedy_actions # from .node_attack_common import load_base_model, NodeAttakEnv, init_setup # sys.path.append('%s/../common' % os.path.dirname(os.path.realpath(__file__))) # from common.cmd_args import cmd_args # sys.path.append('%s/../node_classification' % os.path.dirname(os.path.realpath(__file__))) # from node_classification.node_utils import run_test, load_raw_graph # sys.path.append('%s/../graph_attack' % os.path.dirname(os.path.realpath(__file__))) # from graph_attack.nstep_replay_mem import NstepReplayMem class Memory: def __init__(self, memory_size): self.memory_size = memory_size self.memory = [None] * memory_size self.occ = set() self.avail = set(range(memory_size)) def add(self, record): i = self.avail.pop() self.memory[i] = record self.occ.add(i) def sample(self): i = self.occ.pop() record = self.memory[i] self.memory[i] = None self.avail.add(i) return record class Agent(object): def __init__(self, args, env, target_dict, adv_start): self.mem_pool = Memory(memory_size=500000) self.env = env self.args = args self.target_dict = target_dict self.actions1 =
np.arange(args.num_adv)
numpy.arange
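# --- Editorial aside (not part of the dataset row above): numpy.arange, the API
# this row's completion targets, builds the consecutive integer index set that
# the Agent uses for its first action space. num_adv below is a made-up
# stand-in for args.num_adv.
import numpy as np

num_adv = 4
actions1 = np.arange(num_adv)
print(actions1)  # [0 1 2 3]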
import numpy as np def sqrt_gain_db(gain_db): return gain_db / 2 def design_low_shelving_filter(g_db, f, q, fs): k = np.tan((np.pi * f) / fs); v0 = np.power(10.0, g_db / 20.0); root2 = 1.0 / q if v0 < 1: v0 = 1 / v0 if g_db > 0: b0 = (1 + np.sqrt(v0) * root2 * k + v0 * k * k) / (1 + root2 * k + k * k) b1 = (2 * (v0 * k * k - 1)) / (1 + root2 * k + k * k) b2 = (1 - np.sqrt(v0) * root2 * k + v0 * k * k) / (1 + root2 * k + k * k) a0 = 1 a1 = (2 * (k * k - 1)) / (1 + root2 * k + k * k) a2 = (1 - root2 * k + k * k) / (1 + root2 * k + k * k) elif g_db < 0: b0 = (1 + root2 * k + k * k) / (1 + root2 * np.sqrt(v0) * k + v0 * k * k) b1 = (2 * (k * k - 1)) / (1 + root2 * np.sqrt(v0) * k + v0 * k * k) b2 = (1 - root2 * k + k * k) / (1 + root2 * np.sqrt(v0) * k + v0 * k * k) a0 = 1 a1 = (2 * (v0 * k * k - 1)) / (1 + root2 * np.sqrt(v0) * k + v0 * k * k) a2 = (1 - root2 * np.sqrt(v0) * k + v0 * k * k) / (1 + root2 * np.sqrt(v0) * k + v0 * k * k) else: b0 = 1 b1 = 0 b2 = 0 a0 = 1 a1 = 0 a2 = 0 return np.array([b0, b1, b2, a0, a1, a2]) def design_high_shelving_filter(g_db, f, q, fs): k = np.tan(np.pi * f / fs) v0 = np.power(10, g_db / 20) root2 = 1 / q if v0 < 1: v0 = 1 / v0 if g_db > 0: b0 = (v0 + root2 * np.sqrt(v0) * k + k * k) / (1 + root2 * k + k * k) b1 = (2 * (k * k - v0)) / (1 + root2 * k + k * k) b2 = (v0 - root2 * np.sqrt(v0) * k + k * k) / (1 + root2 * k + k * k) a0 = 1 a1 = (2 * (k * k - 1)) / (1 + root2 * k + k * k) a2 = (1 - root2 * k + k * k) / (1 + root2 * k + k * k) elif g_db < 0: b0 = (1 + root2 * k + k * k) / (v0 + root2 * np.sqrt(v0) * k + k * k) b1 = (2 * (k * k - 1)) / (v0 + root2 * np.sqrt(v0) * k + k * k) b2 = (1 - root2 * k + k * k) / (v0 + root2 * np.sqrt(v0) * k + k * k) a0 = 1 a1 = (2 * ((k * k) / v0 - 1)) / (1 + root2 / np.sqrt(v0) * k + (k * k) / v0) a2 = (1 - root2 / np.sqrt(v0) * k + (k * k) / v0) / (1 + root2 /
np.sqrt(v0)
numpy.sqrt
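# --- Editorial aside (not part of the dataset row above): the shelving-filter
# coefficients in this prompt repeatedly use sqrt(V0), where V0 = 10**(g_db/20)
# is the linear gain. A minimal sketch with a made-up gain value:
import numpy as np

g_db = 6.0
v0 = np.power(10.0, g_db / 20.0)
print(v0, np.sqrt(v0))  # linear gain (~1.995) and its square root (~1.412)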
"""Bokeh Violinplot.""" import numpy as np from bokeh.models.annotations import Title from ....stats import hdi from ....stats.density_utils import get_bins, histogram, kde from ...plot_utils import _scale_fig_size, make_label from .. import show_layout from . import backend_kwarg_defaults, create_axes_grid def plot_violin( ax, plotters, figsize, rows, cols, sharex, sharey, shade_kwargs, shade, rug, rug_kwargs, bw, textsize, circular, hdi_prob, quartiles, backend_kwargs, show, ): """Bokeh violin plot.""" if backend_kwargs is None: backend_kwargs = {} backend_kwargs = { **backend_kwarg_defaults(("dpi", "plot.bokeh.figure.dpi"),), **backend_kwargs, } (figsize, *_, linewidth, _) = _scale_fig_size(figsize, textsize, rows, cols) shade_kwargs = {} if shade_kwargs is None else shade_kwargs rug_kwargs = {} if rug_kwargs is None else rug_kwargs rug_kwargs.setdefault("fill_alpha", 0.1) rug_kwargs.setdefault("line_alpha", 0.1) if ax is None: ax = create_axes_grid( len(plotters), rows, cols, sharex=sharex, sharey=sharey, figsize=figsize, backend_kwargs=backend_kwargs, ) else: ax = np.atleast_2d(ax) for (var_name, selection, x), ax_ in zip( plotters, (item for item in ax.flatten() if item is not None) ): val = x.flatten() if val[0].dtype.kind == "i": dens = cat_hist(val, rug, shade, ax_, **shade_kwargs) else: dens = _violinplot(val, rug, shade, bw, circular, ax_, **shade_kwargs) if rug: rug_x = -np.abs(np.random.normal(scale=max(dens) / 3.5, size=len(val))) ax_.scatter(rug_x, val, **rug_kwargs) per = np.percentile(val, [25, 75, 50]) hdi_probs = hdi(val, hdi_prob, multimodal=False) if quartiles: ax_.line( [0, 0], per[:2], line_width=linewidth * 3, line_color="black", line_cap="round" ) ax_.line([0, 0], hdi_probs, line_width=linewidth, line_color="black", line_cap="round") ax_.circle( 0, per[-1], line_color="white", fill_color="white", size=linewidth * 1.5, line_width=linewidth, ) _title = Title() _title.text = make_label(var_name, selection) ax_.title = _title ax_.xaxis.major_tick_line_color = None ax_.xaxis.minor_tick_line_color = None ax_.xaxis.major_label_text_font_size = "0pt" show_layout(ax, show) return ax def _violinplot(val, rug, shade, bw, circular, ax, **shade_kwargs): """Auxiliary function to plot violinplots.""" if bw == "default": if circular: bw = "taylor" else: bw = "experimental" x, density = kde(val, circular=circular, bw=bw) if not rug: x = np.concatenate([x, x[::-1]]) density = np.concatenate([-density, density[::-1]]) ax.harea(y=x, x1=density, x2=
np.zeros_like(density)
numpy.zeros_like
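# --- Editorial aside (not part of the dataset row above): numpy.zeros_like, the
# API this row's completion targets, builds the zero baseline for the shaded
# violin area (ax.harea runs from x2=0 out to x1=density). Density values below
# are made up.
import numpy as np

density = np.array([0.1, 0.4, 0.7, 0.4, 0.1])
baseline = np.zeros_like(density)  # same shape and dtype as density
print(baseline)  # [0. 0. 0. 0. 0.]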
import math import numpy as np def split_patients(patient_admission: dict, admission_codes: dict, code_map: dict, seed=6669) -> (np.ndarray, np.ndarray, np.ndarray): print('splitting train, valid, and test pids') np.random.seed(seed) common_pids = set() for i, code in enumerate(code_map): print('\r\t%.2f%%' % ((i + 1) * 100 / len(code_map)), end='') for pid, admissions in patient_admission.items(): for admission in admissions: codes = admission_codes[admission['admission_id']] if code in codes: common_pids.add(pid) break else: continue break print('\r\t100%') max_admission_num = 0 pid_max_admission_num = 0 for pid, admissions in patient_admission.items(): if len(admissions) > max_admission_num: max_admission_num = len(admissions) pid_max_admission_num = pid common_pids.add(pid_max_admission_num) remaining_pids = np.array(list(set(patient_admission.keys()).difference(common_pids))) np.random.shuffle(remaining_pids) train_num = 6000 valid_num = 125 train_pids = np.array(list(common_pids.union(set(remaining_pids[:(train_num - len(common_pids))].tolist())))) valid_pids = remaining_pids[(train_num - len(common_pids)):(train_num + valid_num - len(common_pids))] test_pids = remaining_pids[(train_num + valid_num - len(common_pids)):] return train_pids, valid_pids, test_pids def build_code_xy(pids: np.ndarray, patient_admission: dict, admission_codes_encoded: dict, max_admission_num: int, code_num: int, max_code_num_in_a_visit: int) -> (np.ndarray, np.ndarray, np.ndarray): print('building train/valid/test codes features and labels ...') n = len(pids) x =
np.zeros((n, max_admission_num, max_code_num_in_a_visit), dtype=int)
numpy.zeros
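# --- Editorial aside (not part of the dataset row above): numpy.zeros, the API
# this row's completion targets, pre-allocates the (patients x visits x codes)
# feature tensor that build_code_xy then fills in. The sizes below are
# hypothetical, not taken from the dataset.
import numpy as np

n, max_admission_num, max_code_num_in_a_visit = 2, 3, 4
x = np.zeros((n, max_admission_num, max_code_num_in_a_visit), dtype=int)
print(x.shape)  # (2, 3, 4), filled with integer zeros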
#! /usr/bin/env python # -*- coding: utf-8 -*- """ This modules handles the CCD based images """ """ // Full usage of ccd functionalities requires: shapely, astrobject, pynverse (all pipable) // // ccd-x axis is refered to as 'i', y as "j". show: plot the ccd as imshow. show_traceindex: plot the ccd using `show()` and overplot the trace coutours. """ import warnings import numpy as np # Propobject from propobject import BaseObject # Astrobject from astrobject.photometry import Image # PyIFU from pyifu.spectroscopy import Spectrum from .utils.tools import kwargs_update """ The Idea of the CCD calibration is to first estimate the Spectral Matching. Then to set this Matching to all the ScienceCCD objects. - Tracematch holds the spaxel->pixel_area relation. """ __all__ = ["get_dome","get_ccd"] ################################## # # # Object Generators # # # ################################## def get_ccd(lampfile, ccdspec_mask=None, tracematch=None, background=None, correct_traceflexure=False, savefile_traceflexure=None, **kwargs): """ Load a SEDmachine ccd image. Parameters ----------- lampfile: [string] Location of the file containing the object tracematch: [Tracematch (pysedm object)] -optional- Tracematch object containing the Spectral property on the CCD background: [bool] -optional- which kind of background do you want (i.e. background=0) **kwargs goes to _get_default_background_() Returns ------- ScienceCCD (Child of CCD which is a Child of an astrobjec's Image) """ lamp = ScienceCCD(lampfile, background=0) if tracematch is not None: lamp.set_tracematch(tracematch) # Trace Flexure Correction (if any) if correct_traceflexure: from .flexure import get_ccd_jflexure from .sedm import TRACE_DISPERSION # Save the flexure plot j_offset = get_ccd_jflexure(lamp, ntraces=200, tracewidth=1, jscan=[-3,3,10], savefile=savefile_traceflexure, get_object=False) new_tracematch = lamp.tracematch.get_shifted_tracematch(0, j_offset) new_tracematch.set_buffer( TRACE_DISPERSION) lamp.set_tracematch(new_tracematch ) lamp.header["JFLXCORR"] = (True, "Is TraceMatch corrected for j flexure?") lamp.header["CCDJFLX"] = (j_offset, "amplitude in pixel of the j flexure Trace correction") else: lamp.header["JFLXCORR"] = (False, "Is TraceMatch corrected for j flexure?") lamp.header["CCDJFLX"] = (0, "amplitude in pixel of the j flexure Trace correction") if background is None: lamp.set_background(lamp._get_default_background_(**kwargs), force_it=True) elif not background == 0: lamp.set_background(background, force_it=True) return lamp def get_dome(domefile, tracematch=None, load_sep=False, **kwargs): """ Load a SEDmachine domeflat image. (special version of get_ccd. might be moved to get_ccd...) 
Parameters ----------- domefile: [string] Location of the file containing the object tracematch: [Tracematch (pysedm object)] -optional- Tracematch object containing the Spectral property on the CCD **kwargs goes to DomeCCD.__init__() if tracematch is None else to _get_default_background_ Returns ------- DomeCCD (Child of CCD which is a Child of an astrobjec's Image) """ if tracematch is not None: kwargs["background"] = 0 dome = DomeCCD(domefile, **kwargs) if load_sep: dome.datadet = dome.data/np.sqrt(np.abs(dome.data)) dome.sep_extract(thresh=50., on="datadet") if tracematch is not None: # = Tracematch that gonna help the background dome.set_tracematch(tracematch) dome.set_background(dome._get_default_background_(**kwargs), force_it=True) return dome ##################################### # # # Raw CCD Images for SED machine # # # ##################################### class BaseCCD( Image ): """ """ def __build__(self,bw=64, bh=64, fw=3, fh=3,**kwargs): """ build the structure of the class // Doc from SEP bw, bh : int, -optional- Size of background boxes in pixels. Default is 64 [ndlr (in SEP)]. fw, fh : int, -optional- Filter width and height in boxes. Default is 3 [ndlr (in SEP)]. """ super(BaseCCD,self).__build__(**kwargs) # -- How to read the image self._build_properties["bkgdbox"]={"bh":bh,"bw":bw,"fh":fh,"fw":fw} # ==================== # # Internal tools # # ==================== # def _get_sep_threshold_(self, thresh): """ Trick to automatically get the proper threshold for SEP extract """ if thresh is None: return np.median(self.rawdata) return thresh # ==================== # # Properties # # ==================== # @property def data_log(self): return np.log10(self.data) # ============================== # # # # Main CCD Object # # Tracing <-> CCD <-> Cube # # # # ============================== # class CCD( BaseCCD ): """ Virtual Class For CCD images that have input light """ PROPERTIES = ["tracematch"] DERIVED_PROPERTIES = ["matched_septrace_index"] # ------------------- # # Tracematch <-> CCD # # ------------------- # def set_tracematch(self, tracematch): """ Attach to this instance the Tracematch """ from .spectralmatching import TraceMatch if TraceMatch not in tracematch.__class__.__mro__: raise TypeError("The given tracematch must be a TraceMatch object") self._properties["tracematch"] = tracematch def match_trace_to_sep(self): """ matches the SEP ellipse with the current trace vertices. You must have ran sep_extract() to be able to use this method. (This method need Shapely.) You can then use the methods: - sepindex_to_traceindex() - traceindex_to_sepindex() Returns ------- Void """ # -> import and tests try: from shapely import geometry, vectorized except ImportError: raise AttributeError("Matching traces to sep requires Shapely. pip install Shapely") if not self.has_sepobjects(): raise AttributeError("sep has not been ran. Do so to be able to match sep output with traces") # -> actual code x,y,a,b,theta = self.sepobjects.get(["x","y","a","b","theta"]).T self._derived_properties["matched_septrace_index"] =\ {idx: np.argwhere(vectorized.contains( geometry.Polygon(self.tracematch.get_trace_vertices(idx)), x,y)).ravel() for idx in self.tracematch.trace_indexes} def sepindex_to_traceindex(self, sepindex): """ Give the index of an sep entry. This will give the corresponding trace index """ if not self.has_matchedindex(): raise AttributeError("spectral match traces has not been matched with sep indexes. 
Run match_tracematch_and_sep()") return [traceindex for traceindex, sepindexes in self.matchedindex.items() if sepindex in sepindexes] def traceindex_to_sepindex(self, traceindex): """ Give the index of an sep entry. This will give the corresponding trace index """ if not self.has_matchedindex(): self.match_trace_to_sep() return self.matchedindex[traceindex] def set_default_variance(self, force_it=False): """ define a default variance using the following formula: rawdata + ( median(data) - percentile(data, 16) )**2, it supposed to account for poisson noise + potential additional variance. This is a really poor's man tools... Returns ------- Void """ if self.has_var() and not force_it: raise AttributeError("Cannot reset the variance. Set force_it to True to allow overwritting of the variance.") delta_sigma = np.percentile(self.data, [16,50]) self._properties['var'] = self.rawdata+(delta_sigma[1]-delta_sigma[0])**2 # ----------- # # GETTER # # ----------- # def get_trace_cutout(self, traceindex, masked=True, on="data"): """ returns a 2D array containg the cutout around the requested trace. The trace could be either with or without tracematch mask (i.e. 0 outside the trace). Parameters ---------- traceindex: [int] index of the spaxel trace. masked: [bool] -optional- do you want the data (see `on`) to be masked out (i.e. ==0) outside the trace on: [string] -optional- On which data source do you want the trace (e.g. data, rawdata, background, variance) Returns ------- 2d-array """ xmin, xmax = self.tracematch.get_trace_xbounds(traceindex) ymin, ymax = self.tracematch.get_trace_ybounds(traceindex) if masked: return self.get_trace_mask(traceindex)[ymin:ymax,xmin:xmax]*eval("self.%s"%on)[ymin:ymax,xmin:xmax] return eval("self.%s"%on)[ymin:ymax,xmin:xmax] def get_finetuned_trace(self, traceindex, polydegree=2, width=None, trace_position=False, **kwargs): """ The builds a fine tuned trace of the given traceindex. The tuning uses detected object from sep. => You must have run match_tracematch_and_sep() Parameters ---------- traceindex: [int] Index of the trace you want to fine tune polydegree: [positive-int] -optional- Degree of the polynome that will be used to define the trace. (See 'width' for details on the width of the trace polygon) => If polydegree is higher than the number of sep object detected belonging to this trace, polydegree will the reduced to that number => If polydegree ends up being lower of equal to 0, None is returned width: [float / None] -optional- Width of the polygon (in pixels) If None, width will be estimated based on the b-values of the sep detected objects. trace_position: [bool] -optional- Get the expected trace central position instead of the vertices of the polygon containing it. **kwargs goes to spectralmatching.get_boxing_polygone() Returns ------- One of these: - None if no fit possible (one of less sep object for this trace ) - array (vertices) - array (x,y if trace_position=True) """ if not self.has_sepobjects(): warnings.warn("No SEP object loaded. run sep_extract() to enable finetuning. Original vertices returned") return self.tracematch.copy() _cannot_finetune = False try: x, y, b = self.sepobjects.get(["x","y","b"], self.traceindex_to_sepindex(traceindex)).T except: _cannot_finetune = True x,y,b = [],[],[] if len(x) < polydegree: warnings.warn("less sep-points than polynom degree. Degree reduced ") polydegree = len(x)-1 if polydegree <=0: _cannot_finetune = True if _cannot_finetune: warnings.warn("cannot build finetune tracing for %s. 
Normal vertices returned"%traceindex) return self.tracematch.get_trace_vertices(traceindex) else: if trace_position: return self.tracematch.get_finetuned_trace(traceindex, x, y, polydegree=polydegree, **kwargs) else: return self.tracematch.get_finetuned_trace_vertices(traceindex, x, y, width= np.nanmedian(b)*2. if width is None else width, polydegree=polydegree, **kwargs) def get_trace_mask(self, traceindex, finetune=False, polydegree=2, subpixelisation=5, **kwargs): """ Build a weightmask based on the trace vertices. Parameters ---------- traceindex: [int] index of the trace for which you want a mask finetune: [bool] -optional- Should the trace be remeasured based on potential detected sources? // The following options apply if finetune is True polydegree: [int] -optional- Degree of the polynome used to define the traces subpixelisation: [int] -optional- Our much should the pixel be subdivided to do the polygon-to-image interpolation? (the higher the slower) Set 1 for no subdivition (fastest) **kwargs goes to the method `get_finetuned_trace` """ if not finetune: return self.tracematch.get_trace_mask(traceindex) from .spectralmatching import polygon_mask, _HAS_SKIMAGE if not _HAS_SKIMAGE: warnings.warn("get_trace needs skimage to be able to use subpixelisation") subpixelisation = 1 verts = self.get_finetuned_trace(traceindex, polydegree=polydegree, **kwargs) mask = np.asarray(polygon_mask( verts*subpixelisation, self.shape[0]*subpixelisation, self.shape[1]*subpixelisation, get_fullcolor=False), dtype="float") if subpixelisation==1: return mask/np.nanmax(mask) from .spectralmatching import measure return measure.block_reduce(mask, (subpixelisation,subpixelisation) )/float(subpixelisation**2) def get_finetuned_tracematch(self, indexes, polydegree=2, width=None, build_masking=False, **kwargs): """ """ from .spectralmatching import TraceMatch tmap = TraceMatch() tmap.set_trace_vertices({i:self.get_finetuned_trace(i, polydegree=polydegree, width=width, **kwargs) for i in indexes}, build_masking=build_masking) return tmap # ---------------- # # Tracematch # # ---------------- # def get_spectrum(self, traceindex, on="data", finetune=False): """ Get the basic spectrum extracted for the CCD based on the TraceMatch object. Parameters ---------- traceindex: [int, list of] index(es) of the spectrum(a) to return on: [str] -optional- on which 2d image shall the spectrum be extracted. By Default 'data', but you can set e.g. rawdata, background or anything accessible as 'self.%s'%on. finetune: [bool] -optional- Should the trace masking come from finetunning of spectral trace? (Remark: The spectral match loaded might already be finetuned ones.) Returns ------- flux (or list of) as a function of pixels """ if not self.has_tracematch(): raise AttributeError("The TraceMatch has not been set. see set_tracematch() ") if hasattr(traceindex, "__iter__"): return [self.get_spectrum(id_) for id_ in traceindex] maskidx = self.get_trace_mask(traceindex, finetune=finetune) return np.sum(eval("self.%s"%on)*maskidx, axis=0) def get_xslice(self, i, on="data"): """ build a `CCDSlice` based on the ith-column. 
Returns ------- CCDSlice (child of Spectrum) """ slice_ = CCDSlice(None) if "data" in on and not self.has_var(): warnings.warn("Setting the default variance for 'get_xslice' ") self.set_default_variance() var = self.var.T[i] if 'data' in on else np.ones(np.shape("self.%s"%on))*np.nanstd("self.%s"%on) slice_.create(eval("self.%s.T[i]"%on), variance = var, lbda = np.arange(len(self.data.T[i])), logwave=False) slice_.set_tracebounds(self.tracematch.get_traces_crossing_x_ybounds(i)) return slice_ def fit_background(self, start=2, jump=10, multiprocess=True, set_it=True, smoothing=[0,5], **kwargs): """ """ from .background import get_background, fit_background self._background = get_background( fit_background(self, start=start, jump=jump, multiprocess=multiprocess, **kwargs), smoothing=smoothing ) if set_it: self.set_background(self._background.background, force_it=True) def fetch_background(self, set_it=True, build_if_needed=True, ncore=None, **kwargs): """ ncore is used only if build_background() is called. """ from .background import load_background from .io import filename_to_background_name # ---------------- # # Test it exists # # ---------------- # from glob import glob if len(glob(filename_to_background_name(self.filename)))==0: warnings.warn("No background has been found for %s"%self.filename) if not build_if_needed: raise IOError("Since build_if_needed=False, No background available.") from .background import build_background build_background(self, ncore=ncore, **kwargs) warnings.warn("A background has been built") self._background = load_background( filename_to_background_name( self.filename )) if set_it: self.set_background( self._background.background, force_it=True) def extract_spectrum(self, traceindex, wavesolution, lbda=None, kind="cubic", get_spectrum=True, pixel_shift=0.): """ Build the `traceindex` spectrum based on the given wavelength solution. The returned object could be an pyifu's Spectrum or three arrays, lbda, flux, variance. The method works as follow for the given traceindex: 1) Get the flux per pixels [using the get_spectrum() method] (Get the variance the same way if any) 2) Convert the given lbda into pixels [using the lbda_to_pixels() method from wavesolution] 3) Interpolate the flux per pixels into flux per lbda (Interpolate the variance the same way) [using interp1d from scipy.interpolate] Parameters ---------- traceindex: [int] The index of the spectrum you want to extract wavesolution: [WaveSolution] Object containing the method to go from pixels to lbda lbda: [array] -optional- Shape of the lbda array you want the spectrum to have. kind: [str or int] -optional- Specifies the kind of interpolation as a string ('linear', 'nearest', 'zero', 'slinear', 'quadratic, 'cubic' where 'slinear', 'quadratic' and 'cubic' refer to a spline interpolation of first, second or third order) or as an integer specifying the order of the spline interpolator to use. get_spectrum: [bool] -optional- Which form the returned data should have? finetune: [bool] -optional- Should the trace masking come from finetunning of spectral trace? (Remark: The spectral match loaded might already be a finetuned ones.) 
Returns ------- Spectrum or array, array, array/None (lbda, flux, variance) """ f = self.get_spectrum(traceindex, finetune=False) v = self.get_spectrum(traceindex, finetune=False, on="var") if self.has_var() else None pixs = np.arange(len(f))[::-1] minpix, maxpix = self.tracematch.get_trace_xbounds(traceindex) mask = pixs[(pixs>minpix)* (pixs<maxpix)][::-1] if lbda is not None: from scipy.interpolate import interp1d pxl_wanted = wavesolution.lbda_to_pixels(lbda, traceindex) + pixel_shift flux = interp1d(pixs[mask], f[mask], kind=kind)(pxl_wanted) var = interp1d(pixs[mask], v[mask], kind=kind)(pxl_wanted) if v is not None else v else: lbda = wavesolution.pixels_to_lbda(pixs[mask], traceindex) flux = f[mask] var = v[mask] if get_spectrum: spec = Spectrum(None) spec.create(flux,variance=var,lbda=lbda) return spec return lbda, flux, var # --------------- # # Extract Cube # # --------------- # def extract_cube(self, wavesolution, lbda, hexagrid=None, traceindexes=None, show_progress=False, pixel_shift=0., rotation=None): """ Create a cube from the ccd. ------------------------------------ | Central method of the ccd object | ------------------------------------ The method works as follow (see the extract_spectrum() method): for each trace (loop object traceindexes) 1) Get the flux per pixels [using the get_spectrum() method] (Get the variance the same way if any) 2) Convert the given lbda into pixels [using the lbda_to_pixels() method from wavesolution] 3) Interpolate the flux per pixels into flux per lbda (Interpolate the variance the same way) [using interp1d from scipy.interpolate] 4) Get the x,y position of the traceindex [using the ids_to_index() and index_to_xy() methods from hexagrid] If anything false (most likely the interpolation because of wavelength matching) the flux (or variance) per lbda will be set to an array of NaNs All the spaxels fluxes will be set to a cube (SEDMCube see .sedm) Parameters ---------- wavesolution: [WaveSolution] The object containing the pixels<->wavelength relation of the night. lbda: [float array] wavelength array of the cube to be created (in Angstrom) hexagrid: [HexagoneProjection] -optional- object containing the x,y position of the traces. If not given, this will be created based on the instance's TraceMatch. (it is advised to give the night hexagrid.) traceindexes: [list of int] -optional- Which trace should be extracted to build the cube? 
If not given (None) this will used all the traceindexes for which there is a wavelength solution (wavesolution) show_progress: [bool] -optional- Should the progress within the loop over traceindexes should be shown (using astropy's ProgressBar) Returns ------- SEDMCube (child of pyifu's Cube) """ from .sedm import SEDMSPAXELS, SEDMCube, SEDM_INVERT, SEDM_ROT if rotation is None: rotation = SEDM_ROT # - index check if traceindexes is None: traceindexes = np.sort(list(wavesolution.wavesolutions.keys())) elif np.any(~np.in1d(traceindexes, list(wavesolution.wavesolutions.keys()))): raise ValueError("At least some given indexes in `used_indexes` do not have a wavelength solution") # - Hexagonal Grid if hexagrid is None: hexagrid = self.tracematch.extract_hexgrid(traceindexes) used_indexes = [i_ for i_ in traceindexes if i_ in hexagrid.ids_index.keys()] # - data cube = SEDMCube(None) cubeflux_ = {} cubevar_ = {} if self.has_var() else None # ------------ # # MultiProcess # # ------------ # def _build_ith_flux_(i_): try: lbda_, flux_, variance_ = self.extract_spectrum(i_, wavesolution, lbda=lbda, get_spectrum=False, pixel_shift=pixel_shift) except: warnings.warn("FAILING EXTRACT_SPECTRUM for trace index %d: most likely wavesolution failed for this trace. *NaN Spectrum set*"%i_) flux_ = np.ones(len(lbda) )*np.NaN variance_ = np.ones(len(lbda) )*np.inf cubeflux_[i_] = flux_ if cubevar_ is not None: cubevar_[i_] = variance_ #except: # warnings.warn("FAILED FOR %s. Most likely, the requested wavelength are not fully covered by the trace"%i_) # cubeflux_[i_] = np.zeros(len(lbda))*np.NaN # if cubevar_ is not None: # cubevar_[i_] = np.zeros(len(lbda))*np.NaN # ------------ # # - MultiThreading to speed this up if show_progress: print("show_progress removed from extract_cube()") _ = [_build_ith_flux_(i) for i in used_indexes] cubeflux = np.asarray([cubeflux_[i] for i in used_indexes]) cubevar = np.asarray([cubevar_[i] for i in used_indexes]) if cubevar_ is not None else None # - Fill the Cube # SEDM DEPENDENT hexagrid.set_rot_degree(rotation) spaxels_position = np.asarray(hexagrid.index_to_xy( hexagrid.ids_to_index(used_indexes), invert_rotation=False, switch_axis=SEDM_INVERT)).T spaxel_map = {i:c for i,c in zip(used_indexes, spaxels_position)} cube.create(cubeflux.T,lbda=lbda, spaxel_mapping=spaxel_map, variance=cubevar.T) cube.set_spaxel_vertices(np.dot(hexagrid.grid_rotmatrix,SEDMSPAXELS.T).T) return cube # --------------- # # Extract Cube # # --------------- # def show(self, toshow="data", ax=None, logscale= False, cmap = None, show_sepobjects=False, vmin = None, vmax = None, savefile=None, show=True, **kwargs): """ Highlight the trace on top of the CCD image. This method requires that the spectral match has been loaded. Parameters ---------- idx: [int] vmin, vmax: [float /string / None] Upper and lower value for the colormap. 3 Format are available - float: Value in data unit - string: percentile. Give a float (between 0 and 100) in string format. This will be converted in float and passed to numpy.percentile - None: The default will be used (percentile 0.5 and 99.5 percent respectively). (NB: vmin and vmax are independent, i.e. 
one can be None and the other '98' for instance) Returns ------- dict ({ax, fig, imshow's output}) """ from .utils.mpl import figout, mpl if ax is None: fig = mpl.figure(figsize=[8,8]) ax = fig.add_axes([0.13,0.13,0.8,0.8]) ax.set_xlabel(r"$\mathrm{x\ [ccd]}$",fontsize = "large") ax.set_ylabel(r"$\mathrm{y\ [ccd]}$",fontsize = "large") elif "imshow" not in dir(ax): raise TypeError("The given 'ax' most likely is not a matplotlib axes. "+\ "No imshow available") else: fig = ax.figure # What To Show data_ = eval("self.%s"%toshow) if type(toshow) == str else toshow if logscale: data_ = np.log10(data_) if cmap is None: cmap=mpl.cm.viridis vmin = np.percentile(data_, 0.5) if vmin is None else \ np.percentile(data_, float(vmin)) if type(vmin) == str else\ vmin vmax = np.percentile(data_, 99.5) if vmax is None else \ np.percentile(data_, float(vmax)) if type(vmax) == str else\ vmax prop = kwargs_update(dict(origin="lower", aspect='auto', vmin=vmin, vmax=vmax), **kwargs) # Show It sc = ax.imshow(data_, **prop) # - sepobject if show_sepobjects and self.has_sepobjects(): self.sepobjects.display(ax) fig.figout(savefile=savefile, show=show) return {"ax":ax, "fig" : fig, "imshow":sc} def show_traceindex(self, traceindex, ax=None, logscale= False, toshow="data", show_finetuned_traces=False, cmap = None, facecolor = "None", edgecolor="k", vmin = None, vmax = None, savefile=None, show=True, **kwargs): """ Highlight the trace on top of the CCD image. This method requires that the spectral match has been loaded. Parameters ---------- traceindex: [int] """ from .utils.mpl import figout pl = self.show( toshow=toshow, ax=ax, logscale=logscale, cmap = cmap, vmin = vmin, vmax = vmax, savefile=None, show=False, **kwargs) ax, fig = pl["ax"], pl["fig"] pl["patch"] = self.display_traces(ax, traceindex, facecolors=facecolor, edgecolors=edgecolor) if show_finetuned_traces and self.has_matchedindex(): from matplotlib import patches traces = traceindex if hasattr(traceindex, "__iter__") else [traceindex] for idx_ in traces: self.sepobjects.display_ellipses(pl["ax"], self.traceindex_to_sepindex(idx_)) p_ = patches.Polygon(self.get_finetuned_trace(idx_), facecolor="None", edgecolor="C1") pl["ax"].add_patch(p_) # Output fig.figout(savefile=savefile, show=show) return pl def show_xslice(self, xpixel, toshow="data", savefile=None, ax=None, show=True, ls="-", color=None, lw=1, show_tracebounds=True, bandalpha=0.5, bandfacecolor="0.7", bandedgecolor="None",bandedgewidth=0, **kwargs): """ """ from .utils.mpl import figout, mpl if ax is None: fig = mpl.figure(figsize=[9,5]) ax = fig.add_subplot(111) else: fig = ax.figure # This plot ax.plot( eval("self.%s.T[xpixel]"%toshow), ls=ls, color=color, lw=lw,**kwargs) if show_tracebounds: [ax.axvspan(*y_, alpha=bandalpha, facecolor=bandfacecolor, edgecolor=bandedgecolor,linewidth=bandedgewidth) for y_ in self.tracematch.get_traces_crossing_x_ybounds(xpixel)] fig.figout(savefile=savefile, show=show) def show_as_slicer(self, traceindexes, vmin=0 , vmax="90", masked=True, toshow="rawdata"): """ """ import matplotlib.pyplot as mpl data = eval("self.%s"%toshow) if vmin is None: vmin = 0 if type(vmin) == str: vmin = np.percentile(data, vmin) if vmax is None: vmax = "95" if type(vmax) == str: vmax = np.percentile(data, vmax) # parameters ntraces = len(traceindexes) height = 0.85/ntraces # Build the figure fig = mpl.figure(figsize=[8,4]) # draw the traces for i,index_ in enumerate(traceindexes): ax = fig.add_axes([0.1,0.1+height*i, 0.8,height]) ax.imshow(self.get_trace_cutout(index_, 
masked=masked, on=toshow), origin="lower", aspect="auto", vmin=vmin, vmax=vmax) ax.set_yticks([]) if i>0: ax.set_xticks([]) else: ax.set_xlabel("pixels since trace origin") return {"fig":fig} def display_traces(self, ax, traceindex, facecolors="None", edgecolors="k", update_limits=True): """ """ pl = self.tracematch.display_traces(ax, traceindex, facecolors =facecolors, edgecolors=edgecolors) # Fancy It if update_limits: [xmin, xmax], [ymin, ymax] = np.percentile(self.tracematch.trace_vertices[traceindex] if not hasattr(traceindex,"__iter__") else\ np.concatenate([self.tracematch.trace_vertices[idx] for idx in traceindex], axis=0), [0,100], axis=0).T ax.set_xlim(xmin-20, xmax+20) ax.set_ylim(ymin-5, ymax+5) return pl # ================== # # Internal Tools # # ================== # def _get_default_background_(self, add_mask=None, cut_bright_pixels=None, exclude_edges=False, scaleup_sepmask=2, apply_sepmask=True, **kwargs): """ This Background has been optimized for SEDm Calibration Lamps """ if add_mask is None and self.has_tracematch(): add_mask = np.asarray(~self.tracematch.get_notrace_mask(), dtype="bool") if add_mask is not None and cut_bright_pixels is not None: data_ = self.rawdata.copy() data_[add_mask] = np.NaN add_mask = add_mask + (data_>np.percentile(data_[data_==data_],50)) if exclude_edges: falses = np.zeros(self.shape) # - x cuts xremove, yremove =100, 20 falses[:,(np.arange(self.shape[1])<xremove) + (np.arange(self.shape[1])>(self.shape[1]-xremove)) ] = 1. falses[(np.arange(self.shape[0])<yremove) + (np.arange(self.shape[0])>(self.shape[0]-yremove)),:] = 1. add_mask = add_mask + np.asarray(falses, dtype="bool") return self.get_sep_background(doublepass=False, update_background=False, add_mask=add_mask, apply_sepmask=apply_sepmask, scaleup_sepmask=scaleup_sepmask, **kwargs) # ================== # # Properties # # ================== # # - TraceMatching association @property def tracematch(self): """ """ return self._properties["tracematch"] def has_tracematch(self): return self.tracematch is not None @property def matchedindex(self): """ Object containing the relation between the sepindex and the trace index. see the methods sepindex_to_traceindex() and traceindex_to_sepindex() """ if self._derived_properties["matched_septrace_index"] is None: self._derived_properties["matched_septrace_index"] = {} return self._derived_properties["matched_septrace_index"] def has_matchedindex(self): """ Is the sep<-> trace index matching done? """ return self.matchedindex is not None and len(self.matchedindex.keys())>0 # - Generic properties @property def objname(self): if "Calib" in self.header.get("NAME","no-name"): return self.header["NAME"].split()[1] return self.header.get("NAME","no-name") # ============================== # # # # Childs Of CCD # # # # ============================== # class ScienceCCD( CCD ): """ Should be used to improve the trace matching. 
""" class DomeCCD( ScienceCCD ): """ Object Build to handle the CCD images of the Dome exposures""" # ================== # # Main Tools # # ================== # def get_tracematch(self, bound_pixels=None, width="optimal"): """ """ xlim, ylim = self.get_trace_position(bound_pixels=bound_pixels) if width is None: b = self.sepobjects.get("b") width = np.median(b)*2 if width=="median" else \ b*2 if width!="optimal" else np.clip(b,np.median(b)-2*np.std(b),np.median(b)+2*np.std(b))*2 return [np.concatenate(v_) for v_ in zip(np.asarray([xlim,ylim+width]).T,np.asarray([xlim[::-1],ylim[::-1]-width]).T)] def get_trace_position(self, bound_pixels=None): """ """ x, y, a, b, theta = self.sepobjects.get(["x","y","a","b","theta"]).T # This enables to remove cosmics flagout = a/b<10 x, y, theta = x[~flagout], y[~flagout], theta[~flagout] if bound_pixels is not None: print("BOUNDS") xlim = np.asarray([x-bound_pixels[0], x+bound_pixels[1]]) ylim =
np.sin(theta)
numpy.sin
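# --- Editorial aside (not part of the dataset row above): the completion begins
# a trigonometric expression; numpy.sin applied elementwise to the SEP ellipse
# angles theta gives the per-trace slope component (the exact continuation is
# not shown in this row). Angles below are made up.
import numpy as np

theta = np.array([0.0, np.pi / 6, np.pi / 2])
print(np.sin(theta))  # approximately [0. , 0.5, 1. ]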
import numpy as np import numba from src.data import Problem, Case, Matter @numba.jit('i8[:, :](i8[:, :], i8)', nopython=True) def is_rot_symmetry_point(x_arr: np.array, background: np.int) -> np.array: res_arr = np.zeros(x_arr.shape, dtype=np.int64) # center point for i0 in range(x_arr.shape[0]): for j0 in range(x_arr.shape[1]): # right-down x_arr_1 = x_arr[i0:, j0:] # right-up x_arr_2 = x_arr[i0::-1, j0:].transpose() # left-up x_arr_3 = x_arr[i0::-1, j0::-1] # left-down x_arr_4 = x_arr[i0:, j0::-1].transpose() new_shape_0 = max([x_arr_1.shape[0], x_arr_2.shape[0], x_arr_3.shape[0], x_arr_4.shape[0]]) new_shape_1 = max([x_arr_1.shape[1], x_arr_2.shape[1], x_arr_3.shape[1], x_arr_4.shape[1]]) x_arr_1_exp = background * np.ones((new_shape_0, new_shape_1), dtype=np.int64) x_arr_1_exp[:x_arr_1.shape[0], :x_arr_1.shape[1]] = x_arr_1 x_arr_2_exp = background * np.ones((new_shape_0, new_shape_1), dtype=np.int64) x_arr_2_exp[:x_arr_2.shape[0], :x_arr_2.shape[1]] = x_arr_2 x_arr_3_exp = background * np.ones((new_shape_0, new_shape_1), dtype=np.int64) x_arr_3_exp[:x_arr_3.shape[0], :x_arr_3.shape[1]] = x_arr_3 x_arr_4_exp = background * np.ones((new_shape_0, new_shape_1), dtype=np.int64) x_arr_4_exp[:x_arr_4.shape[0], :x_arr_4.shape[1]] = x_arr_4 # break if gap if ((x_arr_1_exp != x_arr_2_exp) * (x_arr_1_exp != background) * (x_arr_2_exp != background)).sum(): res_arr[i0, j0] = -2 continue # break if gap if ((x_arr_1_exp != x_arr_3_exp) * (x_arr_1_exp != background) * (x_arr_3_exp != background)).sum(): res_arr[i0, j0] = -2 continue # break if gap if ((x_arr_1_exp != x_arr_4_exp) * (x_arr_1_exp != background) * (x_arr_4_exp != background)).sum(): res_arr[i0, j0] = -2 continue # break if gap if ((x_arr_2_exp != x_arr_3_exp) * (x_arr_2_exp != background) * (x_arr_3_exp != background)).sum(): res_arr[i0, j0] = -2 continue # break if gap if ((x_arr_2_exp != x_arr_4_exp) * (x_arr_2_exp != background) * (x_arr_4_exp != background)).sum(): res_arr[i0, j0] = -2 continue # break if gap if ((x_arr_3_exp != x_arr_4_exp) * (x_arr_3_exp != background) * (x_arr_4_exp != background)).sum(): res_arr[i0, j0] = -2 continue score_arr = (x_arr_1_exp != background).astype(np.int64) + (x_arr_2_exp != background).astype(np.int64) + \ (x_arr_3_exp != background).astype(np.int64) + (x_arr_4_exp != background).astype(np.int64) res_arr[i0, j0] = (score_arr * (score_arr - 1)).sum() # center should be same if 1 <= score_arr[0, 1] <= 2: flag_todo = 1 elif 1 <= score_arr[1, 1] <= 2: flag_todo = 1 else: flag_todo = 0 if flag_todo == 1: # unless it is the center if i0 == j0 == x_arr.shape[0] // 2 and x_arr.shape[0] == x_arr.shape[1] and x_arr.shape[0] % 2 == 1: pass else: res_arr[i0, j0] = -2 return res_arr // 2 @numba.jit('i8[:, :](i8[:, :], i8)', nopython=True) def is_rot_symmetry_valley(x_arr: np.array, background: np.int) -> np.array: res_arr = np.zeros(x_arr.shape, dtype=np.int64) res_arr[-1, :] = -1 res_arr[:, -1] = -1 # center point for i0 in range(x_arr.shape[0] - 1): for j0 in range(x_arr.shape[1] - 1): # right-down x_arr_1 = x_arr[i0 + 1:, j0 + 1:] # right-up x_arr_2 = x_arr[i0::-1, j0 + 1:].transpose() # left-up x_arr_3 = x_arr[i0::-1, j0::-1] # left-down x_arr_4 = x_arr[i0 + 1:, j0::-1].transpose() new_shape_0 = max([x_arr_1.shape[0], x_arr_2.shape[0], x_arr_3.shape[0], x_arr_4.shape[0]]) new_shape_1 = max([x_arr_1.shape[1], x_arr_2.shape[1], x_arr_3.shape[1], x_arr_4.shape[1]]) x_arr_1_exp = background * np.ones((new_shape_0, new_shape_1), dtype=np.int64) x_arr_1_exp[:x_arr_1.shape[0], :x_arr_1.shape[1]] = x_arr_1 
            x_arr_2_exp = background * np.ones((new_shape_0, new_shape_1), dtype=np.int64)
            x_arr_2_exp[:x_arr_2.shape[0], :x_arr_2.shape[1]] = x_arr_2
            x_arr_3_exp = background *
np.ones((new_shape_0, new_shape_1), dtype=np.int64)
numpy.ones
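# --- Editorial aside (not part of the dataset row above): the prompt builds
# background-filled canvases with background * np.ones(shape, dtype=np.int64)
# and then copies a rotated quadrant into them. A minimal sketch with made-up
# sizes; np.full(shape, background, dtype=np.int64) is an equivalent spelling.
import numpy as np

background, new_shape_0, new_shape_1 = 5, 3, 4
canvas = background * np.ones((new_shape_0, new_shape_1), dtype=np.int64)
print(canvas)  # 3x4 array filled with 5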
# coding: utf-8 # In[20]: import numpy as np import pydensecrf.densecrf as dcrf import os import cv2 import random from tqdm import tqdm # In[21]: from skimage.color import gray2rgb from skimage.color import rgb2gray import matplotlib.pyplot as plt from sklearn.metrics import f1_score, accuracy_score from pydensecrf.utils import unary_from_labels, create_pairwise_bilateral, create_pairwise_gaussian, unary_from_softmax #from osgeo import gdal get_ipython().run_line_magic('matplotlib', 'inline') # In[22]: # Color maps for direction map COLOR_LR = [0,128,128] COLOR_UD = [128,0,128] COLOR_DIAG = [255,215,0] COLOR_ADIAG = [1,255,255] INF = 10000 # In[23]: MAX = 0 SUM = 1 VEC = 0 MAT = 1 # In[24]: def dir_to_features(dir_map): """Converts direction color map to feature used for crf kernel. The feature is obtained by computing the intersections of the x, y axis and the line determined by the position of one point and its direction. (More details in the report) Parameters ____________ dir_map: numpy.array Direction map that maps each pixel to a direction in [left_right, up_down, diagonal, anti-diagonal], each direction is represented by a color. """ (h, w, c) = dir_map.shape feature_map = np.zeros((h,w,2)) for i in range(h): for j in range(w): dir_color = dir_map[i,j] if dir_color[0] == COLOR_LR[0]: # dir = lr feature_map[i,j] = np.array([INF,i]) if dir_color[0] == COLOR_UP[0]: # dir = ud feature_map[i,j] = np.array([j,INF]) if dir_color[1] == COLOR_DIAG[0]: # dir = diag feature_map[i,j] = np.array([j-i,i-j]) if dir_color[1] == COLOR_ADIAG[0]: # dir = adiag feature_map[i,j] = np.array([i+j, i+j]) return feature_map # In[25]: def gen_dir_map(img): """Generate direction map from a rgb img Parameters ____________ img: numpy.array Rgb img with width = height """ window_size = 101 half_size = int((window_size-1)/2) sigma_1 = 2 sigma_2 = 40 (h, w, c) = img.shape assert h==w, "h and w are not equal" dir_map = np.zeros((h,w)) pos_mat = np.zeros((h,w,2)) for i in range(h): for j in range(w): pos_mat[i,j,0]=i pos_mat[i,j,1]=j padded_pos = np.pad(pos_mat, ((half_size, half_size), (half_size, half_size), (0,0))) padded_img = np.pad(img, ((half_size, half_size), (half_size, half_size), (0,0))) index_mask_lr = np.zeros((window_size, window_size)).astype("bool") index_mask_lr[half_size,:]=True index_mask_ud = np.zeros((window_size, window_size)).astype("bool") index_mask_ud[:,half_size]=True index_mask_diag = np.identity(window_size).astype("bool") index_mask_adiag = np.fliplr(np.identity(window_size)).astype("bool") mask_list = [index_mask_lr, index_mask_ud, index_mask_diag, index_mask_adiag] for i in range(h): for j in range(w): img_nbr = padded_img[i:i+window_size,j:j+window_size] pos_nbr = padded_pos[i:i+window_size,j:j+window_size] img_nbr = img_nbr - img[i,j,:] pos_nbr = pos_nbr - np.array([i,j]) dir_intensity = np.zeros(4) for dir_index, index_mask in enumerate(mask_list): img_nbr_dir = img_nbr[index_mask] pos_nbr_dir = pos_nbr[index_mask] img_nbr_dir = np.sum(img_nbr_dir**2, axis=1)/(2*sigma_1**2) pos_nbr_dir = np.sum(pos_nbr_dir**2, axis=1)/(2*sigma_2**2) k = np.exp(-img_nbr_dir-pos_nbr_dir) dir_intensity[dir_index]=np.sum(k) dir_map[i,j]=np.argmax(dir_intensity)+1 return dir_map # In[26]: def visualize_dir_map(img, dir_map, save_file=False, filename=None, vis_path=None, dir_path=None): """Visualize a direction map Parameters ____________ img: numpy.array Rgb img dir_map: numpy.array Correspongding direction map ... 
""" h = img.shape[0] w = img.shape[1] vis_dir = np.zeros(img.shape) vis_dir[dir_map==1] = np.array(COLOR_LR) vis_dir[dir_map==2] = np.array(COLOR_UD) vis_dir[dir_map==3] = np.array(COLOR_DIAG) vis_dir[dir_map==4] = np.array(COLOR_ADIAG) plt.figure(figsize=(10,5)) plt.subplot(1,2,1); plt.imshow(img); plt.title('Original Image (blurred)'); plt.axis('off'); plt.subplot(1,2,2); plt.imshow(dir_map); plt.title('Direction map'); plt.axis('off'); if save_file: plt.savefig(os.path.join(vis_path, filename),dpi=300) plt.close() cv2.imwrite(os.path.join(dir_path, filename), vis_dir) # In[27]: def gen_dir_map_and_visualize(image_path= './images/', vis_path='./vis_dir_blur_/', dir_path='./dir_map_/', process_all=True): """Generate direction color map for images in image_path Parameters ____________ image_path: string Image path vis_path: string Path to save visualization results dir_path: string Path to save direction map process_all: Bool False to generate a single visualization result without save. True to generate and save visualizaiton results for all images. """ if not os.path.exists(dir_path): os.mkdir(dir_path) if not os.path.exists(vis_path): os.mkdir(vis_path) if process_all: for file in tqdm(os.listdir(image_path)): img = cv2.imread(os.path.join(image_path, file)) img = cv2.GaussianBlur(img,(5,5),0) dir_map = gen_dir_map(img) visualize_dir_map(img, dir_map, filename=file, save_file=True, vis_path=vis_path, dir_path=dir_path) else: img = cv2.imread('./images/satImage_001.png') img = cv2.GaussianBlur(img,(5,5),0) dir_map = gen_dir_map(img) visualize_dir_map(img, dir_map, save_file=False) # In[28]: def crf_with_dir_kernel(original_img, dir_feature, prob, iter_num, compat_smooth, compat_appearance, compat_struct, w_smooth, w_appearance, w_struct, sigma_smooth, sigma_app_color, sigma_app_pos, sigma_struct_pos, sigma_struct_feat): """CRF with a Gaussian smoothing kernel, an appearance kernel and a structural kernel """ (h,w) = prob.shape y = np.zeros((h,w,2)) y[:,:,1] = prob y[:,:,0] = 1-y[:,:,1] annotated_image=y.transpose((2, 0, 1)) #Gives no of class labels in the annotated image n_labels = 2 #Setting up the CRF model d = dcrf.DenseCRF2D(original_img.shape[1], original_img.shape[0], n_labels) # get unary potentials (neg log probability) U = unary_from_softmax(annotated_image) unary = np.ascontiguousarray(U) d.setUnaryEnergy(unary) compat_smooth = compat_smooth * w_smooth compat_appearance = compat_appearance * w_appearance compat_struct = compat_struct * w_struct # Smooth kernel d.addPairwiseGaussian(sxy=(sigma_smooth, sigma_smooth), compat=compat_smooth.astype(np.float32), kernel=dcrf.DIAG_KERNEL, normalization=dcrf.NORMALIZE_SYMMETRIC) # Appearance kernel d.addPairwiseBilateral(sxy=(sigma_app_pos, sigma_app_pos), srgb=(sigma_app_color, sigma_app_color, sigma_app_color), rgbim=original_image, compat=compat_appearance.astype(np.float32), kernel=dcrf.DIAG_KERNEL, normalization=dcrf.NORMALIZE_SYMMETRIC) # Structural kernel pairwise_energy = create_pairwise_bilateral(sdims=(sigma_struct_pos,sigma_struct_pos), schan=(sigma_struct_feat,sigma_struct_feat), img=dir_feature, chdim=2) d.addPairwiseEnergy(pairwise_energy, compat=compat_struct.astype(np.float32)) Q = d.inference(iter_num) proba = np.array(Q) return proba[1].reshape((dir_feature.shape[0], dir_feature.shape[1])) # In[29]: def crf(original_image, prob, iter_num=4, compat_smooth = np.array([[-0.4946432, 1.27117338],[0.59452892, 0.23182234]]), compat_appearance = np.array([[-0.30571318, 0.83015124],[1.3217825, -0.13046645]]), 
w_smooth=3.7946478055761963, w_appearance=1.8458537690881878, sigma_smooth=8.575103751642672, sigma_color=2.0738539891571977, sigma_color_pos=20): """Basic CRF with a Gaussian smoothing kernel and an appearance kernel """ (h,w) = prob.shape y = np.zeros((h,w,2)) y[:,:,1] = prob y[:,:,0] = 1-y[:,:,1] annotated_image=y.transpose((2, 0, 1)) #Gives no of class labels in the annotated image n_labels = 2 #print("No of labels in the Image are ") #print(n_labels) #Setting up the CRF model d = dcrf.DenseCRF2D(original_image.shape[1], original_image.shape[0], n_labels) # get unary potentials (neg log probability) U = unary_from_softmax(annotated_image) unary = np.ascontiguousarray(U) d.setUnaryEnergy(unary) compat_smooth=compat_smooth*w_smooth compat_appearance=compat_appearance*w_appearance # This adds the color-independent term, features are the locations only. d.addPairwiseGaussian(sxy=(sigma_smooth, sigma_smooth), compat=compat_smooth.astype(np.float32), kernel=dcrf.DIAG_KERNEL, normalization=dcrf.NORMALIZE_SYMMETRIC) # This adds the color-dependent term, i.e. features are (x,y,r,g,b). d.addPairwiseBilateral(sxy=(sigma_color_pos, sigma_color_pos), srgb=(sigma_color, sigma_color, sigma_color), rgbim=original_image, compat=compat_appearance.astype(np.float32), kernel=dcrf.DIAG_KERNEL, normalization=dcrf.NORMALIZE_SYMMETRIC) Q = d.inference(iter_num) proba = np.array(Q) return proba[1].reshape((original_image.shape[0], original_image.shape[1])) # In[30]: def crf_smooth(original_image, prob, use_2d = True, iter_num=1, w=4.921522279119057, sigma_sm=4.325251720130304): """CRF with only a smoothing kernel """ (h,w) = prob.shape y = np.zeros((h,w,2)) y[:,:,1] = prob y[:,:,0] = 1-y[:,:,1] annotated_image=y.transpose((2, 0, 1)) #Gives no of class labels in the annotated image n_labels = 2 #Setting up the CRF model if use_2d : d = dcrf.DenseCRF2D(original_image.shape[1], original_image.shape[0], n_labels) # get unary potentials (neg log probability) U = unary_from_softmax(annotated_image) unary = np.ascontiguousarray(U) d.setUnaryEnergy(unary) # This adds the color-independent term, features are the locations only. 
d.addPairwiseGaussian(sxy=(sigma_sm, sigma_sm), compat=w, kernel=dcrf.DIAG_KERNEL, normalization=dcrf.NORMALIZE_SYMMETRIC) Q = d.inference(iter_num) proba = np.array(Q) return proba[1].reshape((original_image.shape[0], original_image.shape[1])) # In[31]: def propagate_max_mat(img, prob): """Probability propagation (max) in 4 directions via matrix multiplication """ prob_out = prob.copy() prop_size = 51 half_size = int((prop_size-1)/2) prop_num = 3 sigma_1 = 5 sigma_2 = 42 (h, w) = prob.shape pos_mat = np.zeros((h,w,2)) for i in range(h): for j in range(w): pos_mat[i,j,0]=i pos_mat[i,j,1]=j padded_pos = np.pad(pos_mat, ((half_size, half_size), (half_size, half_size), (0,0))) padded_img = np.pad(img, ((half_size, half_size), (half_size, half_size), (0,0))) index_mask = np.zeros((prop_size, prop_size)).astype("bool") for i in range(prop_size): index_mask[i,half_size]=1 index_mask[half_size,i]=1 index_mask[i,i]=1 index_mask[prop_size-1-i,i]=1 for iteration in range(prop_num): padded_prob = np.pad(prob_out, ((half_size, half_size), (half_size, half_size))) # propagate prob (maximum) for i in range(h): for j in range(w): if prob_out[i,j]<0.01: continue img_nbr = padded_img[i:i+prop_size,j:j+prop_size] pos_nbr = padded_pos[i:i+prop_size,j:j+prop_size] img_nbr = img_nbr - img[i,j,:] pos_nbr = pos_nbr - np.array([i,j]) img_nbr[~index_mask]=0 pos_nbr[~index_mask]=0 img_nbr = np.sum(img_nbr**2, axis=2)/(2*sigma_1**2) pos_nbr = np.sum(pos_nbr**2, axis=2)/(2*sigma_2**2) k = np.exp(-img_nbr-pos_nbr)*prob_out[i,j] k = k*index_mask padded_prob[i:i+prop_size,j:j+prop_size] = np.maximum(padded_prob[i:i+prop_size,j:j+prop_size], k) prob_out = padded_prob[half_size:h+half_size,half_size:w+half_size] return prob_out # In[32]: def propagate_max_vec(img, prob, prop_size=11, prop_num=16, sigma_1=1.039316347691348, sigma_2=40): """ vec means only do propagation along x and y axis max means propagate using max function Args: prop_size: neighborhood size prop_num: number of iteration/propagation sigma_1: variance of color sigma_2: variance of distance """ prob_out = prob.copy() half_size = int((prop_size-1)/2) (h, w, c) = img.shape pos_mat = np.zeros((h,w,2)) # position matrix for i in range(h): for j in range(w): pos_mat[i,j,0]=i pos_mat[i,j,1]=j padded_pos = np.pad(pos_mat, ((half_size, half_size), (half_size, half_size), (0,0))) padded_img = np.pad(img, ((half_size, half_size), (half_size, half_size), (0,0))) for iteration in range(prop_num): padded_prob = np.pad(prob_out, ((half_size, half_size), (half_size, half_size))) padded_prob_fix = padded_prob.copy() # propagate prob (maximum) assert h==w, "h and w are not equal" for i in range(h): # prop along y for row i img_nbr = padded_img[i:i+prop_size,:] pos_nbr = padded_pos[i:i+prop_size,:] img_nbr = img_nbr - padded_img[i+half_size,:,:] pos_nbr = pos_nbr - padded_pos[i+half_size,:,:] img_nbr = np.sum(img_nbr**2, axis=2)/(2*sigma_1**2) pos_nbr = np.sum(pos_nbr**2, axis=2)/(2*sigma_2**2) k = np.exp(-img_nbr-pos_nbr)*padded_prob_fix[i+half_size,:] padded_prob[i:i+prop_size,:] = np.maximum(padded_prob[i:i+prop_size,:], k) # prop along x for col i img_nbr = padded_img[:,i:i+prop_size] pos_nbr = padded_pos[:,i:i+prop_size] img_nbr = img_nbr - padded_img[:,i+half_size,:].reshape((padded_img.shape[0],1,c)) pos_nbr = pos_nbr - padded_pos[:,i+half_size,:].reshape((padded_img.shape[0],1,2)) img_nbr = np.sum(img_nbr**2, axis=2)/(2*sigma_1**2) pos_nbr = np.sum(pos_nbr**2, axis=2)/(2*sigma_2**2) k = np.exp(-img_nbr-pos_nbr)*padded_prob_fix[:,i+half_size].reshape((-1,1)) 
padded_prob[:,i:i+prop_size] = np.maximum(padded_prob[:,i:i+prop_size], k) prob_out = padded_prob[half_size:h+half_size,half_size:w+half_size] return prob_out # In[33]: def propagate_sum_vec(img, prob, prop_size=11, prop_num=1, sigma_1=1.5319569104856783, sigma_2=80): """ vec means only do propagation along x and y axis sum means propagate in a additive schema (with total probability fixed) Args: prop_size: neighborhood size prop_num: number of iteration/propagation sigma_1: variance of color sigma_2: variance of distance """ # print(np.sum(prob)) prob_out = prob.copy() half_size = int((prop_size-1)/2) (h, w, c) = img.shape pos_mat = np.zeros((h,w,2)) # position matrix for i in range(h): for j in range(w): pos_mat[i,j,0]=i pos_mat[i,j,1]=j padded_pos = np.pad(pos_mat, ((half_size, half_size), (half_size, half_size), (0,0))) padded_img = np.pad(img, ((half_size, half_size), (half_size, half_size), (0,0))) padded_prob = np.pad(prob, ((half_size, half_size), (half_size, half_size))) for iteration in range(prop_num): padded_prob_fix = padded_prob.copy() padded_prob = np.pad(np.zeros((h,w)), ((half_size, half_size), (half_size, half_size))) # propagate prob (sum) assert h==w, "h and w are not equal" # compute the degree mat deg_mat = np.zeros((h+2*half_size,w+2*half_size)) for i in range(h): # prop along y for row i img_nbr = padded_img[i:i+prop_size,:] pos_nbr = padded_pos[i:i+prop_size,:] img_nbr = img_nbr - padded_img[i+half_size,:,:] pos_nbr = pos_nbr - padded_pos[i+half_size,:,:] img_nbr = np.sum(img_nbr**2, axis=2)/(2*sigma_1**2) pos_nbr = np.sum(pos_nbr**2, axis=2)/(2*sigma_2**2) k = np.exp(-img_nbr-pos_nbr) deg_mat[i+half_size,:] = deg_mat[i+half_size,:]+np.sum(k,axis=0) # prop along x for col i img_nbr = padded_img[:,i:i+prop_size] pos_nbr = padded_pos[:,i:i+prop_size] img_nbr = img_nbr - padded_img[:,i+half_size,:].reshape((padded_img.shape[0],1,c)) pos_nbr = pos_nbr - padded_pos[:,i+half_size,:].reshape((padded_img.shape[0],1,2)) img_nbr = np.sum(img_nbr**2, axis=2)/(2*sigma_1**2) pos_nbr = np.sum(pos_nbr**2, axis=2)/(2*sigma_2**2) k = np.exp(-img_nbr-pos_nbr) deg_mat[:,i+half_size] = deg_mat[:,i+half_size]+np.sum(k,axis=1) for i in range(h): # prop along y for row i img_nbr = padded_img[i:i+prop_size,:] pos_nbr = padded_pos[i:i+prop_size,:] img_nbr = img_nbr - padded_img[i+half_size,:,:] pos_nbr = pos_nbr - padded_pos[i+half_size,:,:] img_nbr = np.sum(img_nbr**2, axis=2)/(2*sigma_1**2) pos_nbr = np.sum(pos_nbr**2, axis=2)/(2*sigma_2**2) k = np.exp(-img_nbr-pos_nbr) # similarity matrix k = k/deg_mat[i+half_size,:] #devided by degree prop_prob = k * padded_prob_fix[i+half_size,:] padded_prob[i:i+prop_size,:] = padded_prob[i:i+prop_size,:] + prop_prob # prop along x for col i img_nbr = padded_img[:,i:i+prop_size] pos_nbr = padded_pos[:,i:i+prop_size] img_nbr = img_nbr - padded_img[:,i+half_size,:].reshape((padded_img.shape[0],1,c)) pos_nbr = pos_nbr - padded_pos[:,i+half_size,:].reshape((padded_img.shape[0],1,2)) img_nbr = np.sum(img_nbr**2, axis=2)/(2*sigma_1**2) pos_nbr = np.sum(pos_nbr**2, axis=2)/(2*sigma_2**2) k = np.exp(-img_nbr-pos_nbr) # similarity matrix k = k/deg_mat[:,i+half_size].reshape((-1,1)) #devided by degree prop_prob = k * padded_prob_fix[:,i+half_size].reshape((-1,1)) padded_prob[:,i:i+prop_size] = padded_prob[:,i:i+prop_size]+ prop_prob # padded_prob = padded_prob + 0.5 * padded_prob_fix # lazy propagation prob_out = padded_prob[half_size:h+half_size,half_size:w+half_size] # print(np.sum(prob_out)) prob_out[prob_out>1]=1 return prob_out # In[34]: def 
prob_to_patch(im): """Convert pixel level probability prediction to patch version """ patch_list = [] patch_size = 16 for j in range(0, im.shape[1], patch_size): for i in range(0, im.shape[0], patch_size): patch = im[i:i + patch_size, j:j + patch_size] df = np.mean(patch) patch_list.append(df) return
np.array(patch_list)
numpy.array
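Aside (not part of the dataset record above): that record ends inside prob_to_patch, which averages a pixel-level probability map over 16x16 patches, scanning columns first. A vectorised equivalent is sketched below purely for illustration; it assumes the image sides are multiples of the patch size and reproduces the column-first patch order of the loop.

import numpy as np

def prob_to_patch_vectorised(im, patch_size=16):
    # Illustrative helper, not defined in the record above.
    h, w = im.shape
    h, w = h - h % patch_size, w - w % patch_size   # drop any ragged border
    blocks = im[:h, :w].reshape(h // patch_size, patch_size,
                                w // patch_size, patch_size)
    means = blocks.mean(axis=(1, 3))                # one mean per patch
    return means.T.reshape(-1)                      # column-first order, as in the loop

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    im = rng.random((64, 64))
    print(prob_to_patch_vectorised(im).shape)       # (16,)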
from youtube_discussion_tree_api._conflicts import _tf_idf_automatic_algorithm, _calculate_document_frequency, _calculate_tf_idf, _cosine_sim, _cosine_similarity, _gen_vector, _in_doc_freq, _preprocessing from unittest import TestCase from youtube_discussion_tree_api.utils import Node import numpy as np class TestConflistSolvingAlgorithm(TestCase): def setUp(self): self.candidates = [ Node( id = "comment1", author_name = "Ororo", author_id = "author1", text = "Hello, I love turtles and dogs", like_count = 10000000, parent_id = None, published_at = "12-12-2012" ), Node( id = "comment2", author_name = "<NAME>", author_id = "author2", text = "Cats are the best animals in the whole world", like_count = 10000000, parent_id = None, published_at = "12-12-2012" ), Node( id = "comment3", author_name = "Kekino", author_id = "author3", text = "I'm more of a dogs person, they are so cute", like_count = 10000000, parent_id = None, published_at = "12-12-2012" ) ] self.candidates_preprocessed = [["hello", "love", "turtl", "dog"] , ["cat", "best", "anim", "whole", "world"] , ["dog", "person", "cute"]] def test_preprocessing(self): self.assertEqual(self.candidates_preprocessed, _preprocessing(self.candidates)) def test_calculate_document_frequency(self): expected = { "hello" : 1, "love" : 1, "turtl" : 1, "dog" : 2, "cat" : 1, "best" : 1, "anim" : 1, "whole" : 1, "world" : 1, "person" : 1, "cute" : 1 } self.assertEqual(expected, _calculate_document_frequency(self.candidates_preprocessed)) def test_in_doc_frequency(self): df = { "hello" : 1, "love" : 1, "turtl" : 1, "dog" : 2, "cat" : 1, "best" : 1, "anim" : 1, "whole" : 1, "world" : 1, "person" : 1, "cute" : 1 } self.assertEqual(1, _in_doc_freq("love", df)) self.assertEqual(2, _in_doc_freq("dog", df)) self.assertEqual(0, _in_doc_freq("patata", df)) def test_calculate_tf_idf(self): df = { "hello" : 1, "love" : 1, "turtl" : 1, "dog" : 2, "cat" : 1, "best" : 1, "anim" : 1, "whole" : 1, "world" : 1, "person" : 1, "cute" : 1 } tf_idf = _calculate_tf_idf( self.candidates_preprocessed ,df ,len(self.candidates_preprocessed) ,len(df.keys())) self.assertDictEqual({ (0, "hello") : (1/11 * np.log(3/2)), (0, "love") : (1/11 * np.log(3/2)), (0, "turtl") : (1/11 * np.log(3/2)), (0, "dog") : (1/11 * np.log(3/3)), (1, "cat") : (1/11 * np.log(3/2)), (1, "best") : (1/11 * np.log(3/2)), (1, "anim") : (1/11 * np.log(3/2)), (1, "whole") : (1/11 * np.log(3/2)), (1, "world") : (1/11 * np.log(3/2)), (2, "dog") : (1/11 * np.log(3/3)), (2, "person") : (1/11 * np.log(3/2)), (2, "cute") : (1/11 * np.log(3/2)), }, tf_idf) def test_cosine_sim(self): v1 = [1/11 * np.log(3/2),1/11 * np.log(3/2),1/11 * np.log(3/2),1/11 * np.log(3/3),1/11 * np.log(3/2),1/11 * np.log(3/2),1/11 * np.log(3/2),1/11 * np.log(3/2),1/11 * np.log(3/2),1/11 * np.log(3/2),1/11 * np.log(3/2)] v2 = [0,0,0,0,1,1,1,1,1,0,0] self.assertEqual(np.dot(v1, v2)/(np.linalg.norm(v1)*np.linalg.norm(v2)), _cosine_sim(v1,v2)) def test_gen_vector(self): df = { "hello" : 1, "love" : 1, "turtl" : 1, "dog" : 2, "cat" : 1, "best" : 1, "anim" : 1, "whole" : 1, "world" : 1, "person" : 1, "cute" : 1 } expected = np.zeros(len(df.keys())) expected[1]= 1/11 * np.log(4/2) expected[2]= 1/11 * np.log(4/2) expected[10] = 1/11 * np.log(4/2) self.assertEqual( expected.all(),_gen_vector(['hey', 'dude', 'also', 'love', 'turtl', 'cute', 'slow'], list(df.keys()), df , 3).all() ) def test_cosine_similarity(self): reply = Node( id = "comment1", author_name = "Quim10^-12", author_id = "author1", text = "Hey dude! 
I also love turtle, they are so cute and slow.", like_count = 10000000, parent_id = None, published_at = "12-12-2012" ) df = { "hello" : 1, "love" : 1, "turtl" : 1, "dog" : 2, "cat" : 1, "best" : 1, "anim" : 1, "whole" : 1, "world" : 1, "person" : 1, "cute" : 1 } D = [[1,1,1,1,0,0,0,0,0,0,0], [0,0,0,0,1,1,1,1,1,0,0], [0,0,0,1,0,0,0,0,0,1,1]] cosines = _cosine_similarity( reply , D , list(df.keys()) , df, 3) v = [0, 1/7 * np.log(4/2), 1/7 * np.log(4/2), 0, 0, 0, 0, 0, 0, 0, 1/7 * np.log(4/2)] self.assertEqual([ (1,np.dot(v, D[0])/(
np.linalg.norm(v)
numpy.linalg.norm
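Aside (not part of the dataset record above): the quantity the test asserts is the usual cosine-similarity identity; a minimal standalone version for reference.

import numpy as np

def cosine_sim(v1, v2):
    # dot(v1, v2) / (||v1|| * ||v2||); returns 0.0 for a zero vector
    v1, v2 = np.asarray(v1, dtype=float), np.asarray(v2, dtype=float)
    denom = np.linalg.norm(v1) * np.linalg.norm(v2)
    return 0.0 if denom == 0.0 else float(np.dot(v1, v2) / denom)

if __name__ == "__main__":
    print(cosine_sim([1, 0, 1], [1, 1, 0]))         # 0.5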
import sys, os sys.path.append(os.path.join(os.path.dirname(__file__), '..')) import argparse, glob, json import matplotlib.pyplot as plt import numpy as np from matplotlib.offsetbox import * from matplotlib.patches import * from PIL import Image def load_results(path): res = [] skip_count = 0 for cur_path in glob.glob(os.path.join(path, '*.json')): with open(cur_path, 'r', encoding='utf8') as fop: cur_res = json.load(fop) # skip incomplete sessions if None in cur_res: skip_count += 1 continue res.append(cur_res) if skip_count > 0: print("[Warning] Skipped %d incomplete sessions." % skip_count) return np.array(res) def load_task(config): truths, options = [], [] options = [task['options'] for task in config['tasks']] truths = [task['options'][task['truth']] for task in config['tasks']] truth_idcs = [task['truth'] for task in config['tasks']] return np.array(truth_idcs), np.array(truths), np.array(options) def get_choice_matrix(results, options): # convert results into (task, choices) choice count matrix mat_choices = np.zeros((results.shape[1], 3)) for res_idx in range(results.shape[0]): for task_idx in range(results.shape[1]): choice_idx = np.where(options[task_idx] == results[res_idx, task_idx]) mat_choices[task_idx, choice_idx] += 1 return mat_choices def calc_accuracies(results, truths): mat_truths = np.repeat(truths.reshape((1, truths.shape[0])), results.shape[0], axis=0) matches = np.sum(results == mat_truths, axis=1) accuracies = matches / truths.shape[0] return accuracies def calc_kappa(choices, num_choices=3): '''Calculate Fleiss' Kappa (based on https://en.wikibooks.org/wiki/Algorithm_Implementation/Statistics/Fleiss'_kappa)''' num_evals = np.sum(choices[0]) num_tasks = choices.shape[0] p = [0.0] * num_choices for j in range(num_choices): p[j] = 0.0 for i in range(num_tasks): p[j] += choices[i][j] p[j] /= num_tasks * num_evals P = [0.0] * num_tasks for i in range(num_tasks): P[i] = 0.0 for j in range(num_choices): P[i] += choices[i][j] * choices[i][j] P[i] = (P[i] - num_evals) / (num_evals * (num_evals - 1)) Pbar = sum(P) / num_tasks PbarE = 0.0 for pj in p: PbarE += pj * pj kappa = (Pbar - PbarE) / (1 - PbarE) return kappa if __name__ == '__main__': arg_parser = argparse.ArgumentParser(description='SynEval Result Aggregation') arg_parser.add_argument('config_path', help='path evaluation configuration JSON') arg_parser.add_argument('result_path', help='path result files') arg_parser.add_argument('--plot', action='store_true', help='plot results') arg_parser.add_argument('--data_path', help='path to data (required for plotting)') args = arg_parser.parse_args() # load config with open(args.config_path, 'r', encoding='utf8') as fop: config = json.load(fop) truth_idcs, truths, options = load_task(config) # load results results = load_results(args.result_path) print("Loaded %d evaluation sessions with %d tasks each." % (results.shape[0], truths.shape[0])) # calculate accuracy accuracies = calc_accuracies(results, truths) print("Accuracy: %.2f avg, %.2f stddev, %.2f max, %.2f min" % (np.mean(accuracies), np.std(accuracies), np.max(accuracies), np.min(accuracies))) print(" ", accuracies) # calculate accuracy per class choices = get_choice_matrix(results, options) print("Accuracy per class:") for class_idx, class_dsc in enumerate(config['classes']): class_correct = choices[[ti for ti, t in enumerate(truth_idcs) if t == class_idx], class_idx] class_accuracy =
np.sum(class_correct)
numpy.sum
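Aside (not part of the dataset record above): calc_kappa in that record implements Fleiss' kappa with explicit loops; the same formulas vectorise directly. The sketch below assumes, as the original does, that every task received the same number of ratings.

import numpy as np

def fleiss_kappa(choices):
    # choices: (tasks x categories) matrix of rating counts
    choices = np.asarray(choices, dtype=float)
    n_tasks = choices.shape[0]
    n_raters = choices[0].sum()                             # assumed constant per task
    p_j = choices.sum(axis=0) / (n_tasks * n_raters)        # category proportions
    P_i = (np.sum(choices ** 2, axis=1) - n_raters) / (n_raters * (n_raters - 1))
    P_bar, P_e = P_i.mean(), np.sum(p_j ** 2)
    return (P_bar - P_e) / (1 - P_e)

if __name__ == "__main__":
    counts = np.array([[10, 0, 0], [0, 10, 0], [5, 5, 0]])  # 3 tasks, 10 raters each
    print(round(fleiss_kappa(counts), 3))                    # ~0.63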
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: # # Copyright 2021 The NiPreps Developers <<EMAIL>> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # We support and encourage derived works from this project, please read # about our expectations at # # https://www.nipreps.org/community/licensing/ # """The :math:`B_0` unwarping transform formalism.""" from pathlib import Path import attr import numpy as np from scipy import ndimage as ndi from scipy.sparse import vstack as sparse_vstack, csr_matrix, kron import nibabel as nb from bids.utils import listify @attr.s(slots=True) class B0FieldTransform: """Represents and applies the transform to correct for susceptibility distortions.""" coeffs = attr.ib(default=None) shifts = attr.ib(default=None, init=False) def fit(self, spatialimage): r""" Generate the interpolation matrix (and the VSM with it). Implements Eq. :math:`\eqref{eq:1}`, interpolating :math:`f(\mathbf{s})` for all voxels in the target-image's extent. """ # Calculate the physical coordinates of target grid if isinstance(spatialimage, (str, bytes, Path)): spatialimage = nb.load(spatialimage) if self.shifts is not None: newaff = spatialimage.affine newshape = spatialimage.shape if np.all(newshape == self.shifts.shape) and np.allclose( newaff, self.shifts.affine ): return weights = [] coeffs = [] # Generate tensor-product B-Spline weights for level in listify(self.coeffs): wmat = grid_bspline_weights(spatialimage, level) weights.append(wmat) coeffs.append(level.get_fdata(dtype="float32").reshape(-1)) # Interpolate the VSM (voxel-shift map) vsm = np.zeros(spatialimage.shape[:3], dtype="float32") vsm = (np.squeeze(np.vstack(coeffs).T) @ sparse_vstack(weights)).reshape( vsm.shape ) # Cache self.shifts = nb.Nifti1Image(vsm, spatialimage.affine, None) def apply( self, spatialimage, pe_dir, ro_time, order=3, mode="constant", cval=0.0, prefilter=True, output_dtype=None, ): """ Apply a transformation to an image, resampling on the reference spatial object. Parameters ---------- spatialimage : `spatialimage` The image object containing the data to be resampled in reference space reference : spatial object, optional The image, surface, or combination thereof containing the coordinates of samples that will be sampled. order : int, optional The order of the spline interpolation, default is 3. The order has to be in the range 0-5. mode : {'constant', 'reflect', 'nearest', 'mirror', 'wrap'}, optional Determines how the input image is extended when the resamplings overflows a border. Default is 'constant'. cval : float, optional Constant value for ``mode='constant'``. Default is 0.0. prefilter: bool, optional Determines if the image's data array is prefiltered with a spline filter before interpolation. The default is ``True``, which will create a temporary *float64* array of filtered values if *order > 1*. If setting this to ``False``, the output will be slightly blurred if *order > 1*, unless the input is prefiltered, i.e. 
it is the result of calling the spline filter on the original input. Returns ------- resampled : `spatialimage` or ndarray The data imaged after resampling to reference space. """ # Ensure the vsm has been computed self.fit(spatialimage) vsm = self.shifts.get_fdata().copy() # Reverse shifts if reversed blips if pe_dir.endswith("-"): vsm *= -1.0 # Generate warp field pe_axis = "ijk".index(pe_dir[0]) # Map voxel coordinates applying the VSM ijk_axis = tuple([np.arange(s) for s in vsm.shape]) voxcoords = np.array(np.meshgrid(*ijk_axis, indexing="ij"), dtype="float32") voxcoords[pe_axis, ...] += vsm * ro_time # Prepare data data = np.squeeze(np.asanyarray(spatialimage.dataobj)) output_dtype = output_dtype or data.dtype # Resample resampled = ndi.map_coordinates( data, voxcoords.reshape(3, -1), output=output_dtype, order=order, mode=mode, cval=cval, prefilter=prefilter, ).reshape(spatialimage.shape) moved = spatialimage.__class__( resampled, spatialimage.affine, spatialimage.header ) moved.header.set_data_dtype(output_dtype) return moved def to_displacements(self, ro_time, pe_dir): """ Generate a NIfTI file containing a displacements field transform compatible with ITK/ANTs. The displacements field can be calculated following `Eq. (2) in the fieldmap fitting section <sdcflows.workflows.fit.fieldmap.html#mjx-eqn-eq%3Afieldmap-2>`__. Parameters ---------- ro_time : :obj:`float` The total readout time in seconds (only if ``vsm=False``). pe_dir : :obj:`str` The ``PhaseEncodingDirection`` metadata value (only if ``vsm=False``). Returns ------- spatialimage : :obj:`nibabel.nifti.Nifti1Image` A NIfTI 1.0 object containing the distortion. """ from math import pi from nibabel.affines import voxel_sizes, obliquity from nibabel.orientations import io_orientation # Generate warp field data = self.shifts.get_fdata(dtype="float32").copy() pe_axis = "ijk".index(pe_dir[0]) pe_sign = -1.0 if pe_dir.endswith("-") else 1.0 pe_size = self.shifts.header.get_zooms()[pe_axis] data *= pe_sign * ro_time * pe_size fieldshape = tuple(list(data.shape[:3]) + [3]) # Compose a vector field field = np.zeros((data.size, 3), dtype="float32") field[..., pe_axis] = data.reshape(-1) # If coordinate system is oblique, project displacements through directions matrix aff = self.shifts.affine if obliquity(aff).max() * 180 / pi > 0.01: dirmat =
np.eye(4)
numpy.eye
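Aside (not part of the dataset record above): the apply() method in that record resamples an image by adding the voxel-shift map (VSM), scaled by the readout time, to the identity voxel grid along the phase-encoding axis. A reduced sketch on synthetic data; pe_axis and ro_time here are illustrative placeholders, not values from the record.

import numpy as np
from scipy import ndimage as ndi

def unwarp(data, vsm, pe_axis=1, ro_time=0.05, order=1):
    # Build the identity voxel grid, then shift it by vsm * readout time along PE
    ijk = np.meshgrid(*[np.arange(s) for s in data.shape], indexing="ij")
    coords = np.array(ijk, dtype="float32")
    coords[pe_axis] += vsm * ro_time
    out = ndi.map_coordinates(data, coords.reshape(3, -1), order=order)
    return out.reshape(data.shape)

if __name__ == "__main__":
    vol = np.random.rand(8, 8, 8).astype("float32")
    vsm = np.zeros_like(vol)                        # zero shift -> identity resampling
    print(np.allclose(unwarp(vol, vsm), vol))       # True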
""" Linear Algebra solvers and other helpers """ from __future__ import print_function from statsmodels.compat.python import range import numpy as np from scipy.linalg import pinv, pinv2, lstsq # noqa:F421 def logdet_symm(m, check_symm=False): """ Return log(det(m)) asserting positive definiteness of m. Parameters ---------- m : array-like 2d array that is positive-definite (and symmetric) Returns ------- logdet : float The log-determinant of m. """ from scipy import linalg if check_symm: if not np.all(m == m.T): # would be nice to short-circuit check raise ValueError("m is not symmetric.") c, _ = linalg.cho_factor(m, lower=True) return 2*np.sum(np.log(c.diagonal())) def stationary_solve(r, b): """ Solve a linear system for a Toeplitz correlation matrix. A Toeplitz correlation matrix represents the covariance of a stationary series with unit variance. Parameters ---------- r : array-like A vector describing the coefficient matrix. r[0] is the first band next to the diagonal, r[1] is the second band, etc. b : array-like The right-hand side for which we are solving, i.e. we solve Tx = b and return b, where T is the Toeplitz coefficient matrix. Returns ------- The solution to the linear system. """ db = r[0:1] dim = b.ndim if b.ndim == 1: b = b[:, None] x = b[0:1, :] for j in range(1, len(b)): rf = r[0:j][::-1] a = (b[j, :] - np.dot(rf, x)) / (1 -
np.dot(rf, db[::-1])
numpy.dot
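Aside (not part of the dataset record above): stationary_solve solves T x = b for a unit-diagonal Toeplitz correlation matrix whose off-diagonal bands are given by r. For small cases the dense equivalent below is a convenient cross-check; it is only a sketch using scipy.linalg.toeplitz, not the Levinson-style recursion in the record.

import numpy as np
from scipy.linalg import toeplitz

def dense_stationary_solve(r, b):
    # First column of T is [1, r[0], r[1], ...] truncated to len(b)
    r = np.asarray(r, dtype=float)
    b = np.asarray(b, dtype=float)
    col = np.concatenate(([1.0], r[:len(b) - 1]))
    return np.linalg.solve(toeplitz(col), b)

if __name__ == "__main__":
    r = np.array([0.5, 0.25, 0.125])
    b = np.array([1.0, 2.0, 3.0, 4.0])
    print(dense_stationary_solve(r, b))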
import tensorflow as tf import numpy as np from config import cfg def compute_area(xmin, xmax, ymin, ymax): return ((xmax>xmin)*(xmax-xmin)*(ymax>ymin)*(ymax-ymin)).astype(np.float32) def bbox_overlaps(boxes, query): ''' boxes: (N, 4) array query: (M, 4) array RETURN: (N, M) array where ai,j is the distance matrix ''' bxmin, bxmax = np.reshape(boxes[:,0], [-1,1]), np.reshape(boxes[:,2], [-1,1]) bymin, bymax = np.reshape(boxes[:,1], [-1,1]), np.reshape(boxes[:,3], [-1,1]) qxmin, qxmax = np.reshape(query[:,0], [1,-1]), np.reshape(query[:,2], [1,-1]) qymin, qymax = np.reshape(query[:,1], [1,-1]), np.reshape(query[:,3], [1,-1]) ixmin, ixmax = np.maximum(bxmin, qxmin), np.minimum(bxmax, qxmax) iymin, iymax = np.maximum(bymin, qymin), np.minimum(bymax, qymax) intersection = compute_area(ixmin, ixmax, iymin, iymax) area_boxes = compute_area(bxmin, bxmax, bymin, bymax) area_query = compute_area(qxmin, qxmax, qymin, qymax) union = area_boxes + area_query - intersection overlap = intersection / (union + cfg.eps) return overlap def minmax2ctrwh(boxes): widths = np.maximum(0.0, boxes[:,2] - boxes[:,0]) heights = np.maximum(0.0, boxes[:,3] - boxes[:,1]) ctrx = boxes[:,0] + widths * 0.5 ctry = boxes[:,1] + heights * 0.5 return widths, heights, ctrx, ctry def encode_roi(anchors, boxes): ''' - anchors: (N, 4) tensors - boxes: (N, 4) tensors RETURN - terms: (N, 4) encoded terms ''' anc_w, anc_h, anc_ctrx, anc_ctry = minmax2ctrwh(anchors) box_w, box_h, box_ctrx, box_ctry = minmax2ctrwh(boxes) tx = (box_ctrx - anc_ctrx) / (anc_w + cfg.eps) ty = (box_ctry - anc_ctry) / (anc_h + cfg.eps) tw = np.log(box_w / (anc_w + cfg.eps) + cfg.log_eps) th = np.log(box_h / (anc_h + cfg.eps) + cfg.log_eps) return np.stack((tx, ty, tw, th), axis=1) def rpn_target_one_batch(anchors, gt_boxes): ''' Propose rpn_targt for one batch - anchors: (N, 4) array - gt_boxes: (M, 4) groundtruths boxes RETURN - labels: (N,), 1 for positive, 0 for negative, -1 for don't care - terms: (N, 4), regression terms for each positive anchors ''' N, M = anchors.shape[0], gt_boxes.shape[0] iou = bbox_overlaps(gt_boxes, anchors) max_iou_ind = iou.argmax(axis=1) max_iou = iou[range(M), max_iou_ind] max_gt_ind = iou.argmax(axis=0) max_gt_iou = iou[max_gt_ind, range(N)] # decide labels labels = np.zeros(N, np.int32)-1 labels[max_gt_iou < cfg.rpn_negative_iou] = 0 # iou < negative_thresh labels[max_gt_iou > cfg.rpn_positive_iou] = 1 # iou > postive_thresh labels[max_iou_ind] = 1 # maximum iou with each groundtruth # filter out too many positive or negative pos_inds = np.where(labels == 1)[0] neg_inds =
np.where(labels == 0)
numpy.where
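Aside (not part of the dataset record above): before anchors are labelled positive or negative, bbox_overlaps computes the pairwise IoU between two sets of [x1, y1, x2, y2] boxes. A compact broadcasting version of that quantity, as an illustration only:

import numpy as np

def pairwise_iou(boxes, query, eps=1e-8):
    b = boxes[:, None, :]                           # (N, 1, 4)
    q = query[None, :, :]                           # (1, M, 4)
    iw = np.clip(np.minimum(b[..., 2], q[..., 2]) - np.maximum(b[..., 0], q[..., 0]), 0, None)
    ih = np.clip(np.minimum(b[..., 3], q[..., 3]) - np.maximum(b[..., 1], q[..., 1]), 0, None)
    inter = iw * ih
    area_b = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    area_q = (query[:, 2] - query[:, 0]) * (query[:, 3] - query[:, 1])
    return inter / (area_b[:, None] + area_q[None, :] - inter + eps)

if __name__ == "__main__":
    boxes = np.array([[0., 0., 10., 10.]])
    query = np.array([[5., 5., 15., 15.], [20., 20., 30., 30.]])
    print(pairwise_iou(boxes, query))               # ~[[0.1429, 0.0]]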
import numpy as np import torch from torchvision.transforms import ToTensor def batchComputeFlowImage(uv): flow_im = torch.zeros(uv.size(0), 3, uv.size(2), uv.size(3) ) uv_np = uv.numpy() for i in range(uv.size(0)): flow_im[i] = ToTensor()(computeFlowImage(uv_np[i][0], uv_np[i][1])) return flow_im def computeFlowImage(u,v,logscale=True,scaledown=6,output=False): """ topleft is zero, u is horiz, v is vertical red is 3 o'clock, yellow is 6, light blue is 9, blue/purple is 12 """ colorwheel = makecolorwheel() ncols = colorwheel.shape[0] radius =
np.sqrt(u**2 + v**2)
numpy.sqrt
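Aside (not part of the dataset record above): computeFlowImage starts from per-pixel polar coordinates of the (u, v) flow field. A small sketch of that step; the sign convention in the angle follows the common Middlebury-style colour wheel and should be treated as an assumption here.

import numpy as np

def flow_polar(u, v):
    radius = np.sqrt(u ** 2 + v ** 2)     # flow magnitude per pixel
    angle = np.arctan2(-v, -u) / np.pi    # in (-1, 1]; convention assumed, see note above
    return radius, angle

if __name__ == "__main__":
    u = np.array([[1.0, 0.0], [0.0, -1.0]])
    v = np.array([[0.0, 1.0], [0.0, 0.0]])
    r, a = flow_polar(u, v)
    print(r)
    print(a)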
import numpy as np import configparser import os import motmetrics as mm # from numba import jit import torch from torch.nn.functional import cosine_similarity from scipy.optimize import linear_sum_assignment as linear_assignment from .tn_utils import * from .kalman_box import KalmanBoxTracker, TrackState # read config file config = configparser.ConfigParser() config.read(os.path.join("src", "configs", "tracking.config")) # detector params det_mode = config.get("Detector", "det_mode") nms_thres = config.getfloat("Detector", "nms_thres") nms_flag = config.getboolean("Detector", "nms_flag") high_thres = config.getfloat("Detector", "high_thres") low_thres = config.getfloat("Detector", "low_thres") track_init_thres = config.getfloat("Detector", "track_init_thres") exp_file = config.get("Detector", "exp_file") detector_model_file = config.get("Detector", "chkpt_file") half = config.getboolean("Detector", "half") # TrackerInfo pretext_model_path = config.get("TrackerInfo", "pretext_model_path") embedding_size = config.getint("TrackerInfo", "embedding_size") tracking_filename = config.get("TrackerInfo", "tracking_filename") experiment_name = config.get("TrackerInfo", "experiment_name") # TrackerParams max_age = config.getint("TrackerParams", "max_age") min_hits = config.getint("TrackerParams", "min_hits") beta = config.getfloat("TrackerParams", "beta") iou_thres = config.getfloat("TrackerParams", "iou_thres") min_box_area = config.getfloat("TrackerParams", "min_box_area") iou_thres_2 = config.getfloat("TrackerParams", "iou_thres_2") visual = config.getboolean("TrackerParams", "visual") # @jit def iou(bb_test, bb_gt): """ Computes IUO between two bboxes in the form [x1,y1,x2,y2] """ xx1 =
np.maximum(bb_test[0], bb_gt[0])
numpy.maximum
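Aside (not part of the dataset record above): the tracker module in that record imports scipy's linear_sum_assignment, the standard way to turn a detection-vs-track IoU matrix into one-to-one matches. A minimal sketch with an illustrative threshold:

import numpy as np
from scipy.optimize import linear_sum_assignment

def match_by_iou(iou_matrix, iou_thres=0.3):
    # Maximise total IoU by minimising its negation, then drop weak matches
    rows, cols = linear_sum_assignment(-iou_matrix)
    return [(int(r), int(c)) for r, c in zip(rows, cols) if iou_matrix[r, c] >= iou_thres]

if __name__ == "__main__":
    iou = np.array([[0.80, 0.10],
                    [0.20, 0.05]])
    print(match_by_iou(iou))              # [(0, 0)]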
import unittest import qteasy as qt import pandas as pd from pandas import Timestamp import numpy as np import math from numpy import int64 import itertools import datetime from qteasy.utilfuncs import list_to_str_format, regulate_date_format, time_str_format, str_to_list from qteasy.utilfuncs import maybe_trade_day, is_market_trade_day, prev_trade_day, next_trade_day from qteasy.utilfuncs import next_market_trade_day, unify, mask_to_signal, list_or_slice, labels_to_dict from qteasy.utilfuncs import weekday_name, prev_market_trade_day, is_number_like, list_truncate, input_to_list from qteasy.space import Space, Axis, space_around_centre, ResultPool from qteasy.core import apply_loop from qteasy.built_in import SelectingFinanceIndicator, TimingDMA, TimingMACD, TimingCDL, TimingTRIX from qteasy.tsfuncs import income, indicators, name_change, get_bar from qteasy.tsfuncs import stock_basic, trade_calendar, new_share, get_index from qteasy.tsfuncs import balance, cashflow, top_list, index_indicators, composite from qteasy.tsfuncs import future_basic, future_daily, options_basic, options_daily from qteasy.tsfuncs import fund_basic, fund_net_value, index_basic, stock_company from qteasy.evaluate import eval_alpha, eval_benchmark, eval_beta, eval_fv from qteasy.evaluate import eval_info_ratio, eval_max_drawdown, eval_sharp from qteasy.evaluate import eval_volatility from qteasy.tafuncs import bbands, dema, ema, ht, kama, ma, mama, mavp, mid_point from qteasy.tafuncs import mid_price, sar, sarext, sma, t3, tema, trima, wma, adx, adxr from qteasy.tafuncs import apo, bop, cci, cmo, dx, macd, macdext, aroon, aroonosc from qteasy.tafuncs import macdfix, mfi, minus_di, minus_dm, mom, plus_di, plus_dm from qteasy.tafuncs import ppo, roc, rocp, rocr, rocr100, rsi, stoch, stochf, stochrsi from qteasy.tafuncs import trix, ultosc, willr, ad, adosc, obv, atr, natr, trange from qteasy.tafuncs import avgprice, medprice, typprice, wclprice, ht_dcperiod from qteasy.tafuncs import ht_dcphase, ht_phasor, ht_sine, ht_trendmode, cdl2crows from qteasy.tafuncs import cdl3blackcrows, cdl3inside, cdl3linestrike, cdl3outside from qteasy.tafuncs import cdl3starsinsouth, cdl3whitesoldiers, cdlabandonedbaby from qteasy.tafuncs import cdladvanceblock, cdlbelthold, cdlbreakaway, cdlclosingmarubozu from qteasy.tafuncs import cdlconcealbabyswall, cdlcounterattack, cdldarkcloudcover from qteasy.tafuncs import cdldoji, cdldojistar, cdldragonflydoji, cdlengulfing from qteasy.tafuncs import cdleveningdojistar, cdleveningstar, cdlgapsidesidewhite from qteasy.tafuncs import cdlgravestonedoji, cdlhammer, cdlhangingman, cdlharami from qteasy.tafuncs import cdlharamicross, cdlhighwave, cdlhikkake, cdlhikkakemod from qteasy.tafuncs import cdlhomingpigeon, cdlidentical3crows, cdlinneck from qteasy.tafuncs import cdlinvertedhammer, cdlkicking, cdlkickingbylength from qteasy.tafuncs import cdlladderbottom, cdllongleggeddoji, cdllongline, cdlmarubozu from qteasy.tafuncs import cdlmatchinglow, cdlmathold, cdlmorningdojistar, cdlmorningstar from qteasy.tafuncs import cdlonneck, cdlpiercing, cdlrickshawman, cdlrisefall3methods from qteasy.tafuncs import cdlseparatinglines, cdlshootingstar, cdlshortline, cdlspinningtop from qteasy.tafuncs import cdlstalledpattern, cdlsticksandwich, cdltakuri, cdltasukigap from qteasy.tafuncs import cdlthrusting, cdltristar, cdlunique3river, cdlupsidegap2crows from qteasy.tafuncs import cdlxsidegap3methods, beta, correl, linearreg, linearreg_angle from qteasy.tafuncs import linearreg_intercept, linearreg_slope, 
stddev, tsf, var, acos from qteasy.tafuncs import asin, atan, ceil, cos, cosh, exp, floor, ln, log10, sin, sinh from qteasy.tafuncs import sqrt, tan, tanh, add, div, max, maxindex, min, minindex, minmax from qteasy.tafuncs import minmaxindex, mult, sub, sum from qteasy.history import get_financial_report_type_raw_data, get_price_type_raw_data from qteasy.history import stack_dataframes, dataframe_to_hp, HistoryPanel from qteasy.database import DataSource from qteasy.strategy import Strategy, SimpleTiming, RollingTiming, SimpleSelecting, FactoralSelecting from qteasy._arg_validators import _parse_string_kwargs, _valid_qt_kwargs from qteasy.blender import _exp_to_token, blender_parser, signal_blend class TestCost(unittest.TestCase): def setUp(self): self.amounts = np.array([10000., 20000., 10000.]) self.op = np.array([0., 1., -0.33333333]) self.amounts_to_sell = np.array([0., 0., -3333.3333]) self.cash_to_spend = np.array([0., 20000., 0.]) self.prices = np.array([10., 20., 10.]) self.r = qt.Cost(0.0) def test_rate_creation(self): """测试对象生成""" print('testing rates objects\n') self.assertIsInstance(self.r, qt.Cost, 'Type should be Rate') self.assertEqual(self.r.buy_fix, 0) self.assertEqual(self.r.sell_fix, 0) def test_rate_operations(self): """测试交易费率对象""" self.assertEqual(self.r['buy_fix'], 0.0, 'Item got is incorrect') self.assertEqual(self.r['sell_fix'], 0.0, 'Item got is wrong') self.assertEqual(self.r['buy_rate'], 0.003, 'Item got is incorrect') self.assertEqual(self.r['sell_rate'], 0.001, 'Item got is incorrect') self.assertEqual(self.r['buy_min'], 5., 'Item got is incorrect') self.assertEqual(self.r['sell_min'], 0.0, 'Item got is incorrect') self.assertEqual(self.r['slipage'], 0.0, 'Item got is incorrect') self.assertEqual(np.allclose(self.r.calculate(self.amounts), [0.003, 0.003, 0.003]), True, 'fee calculation wrong') def test_rate_fee(self): """测试买卖交易费率""" self.r.buy_rate = 0.003 self.r.sell_rate = 0.001 self.r.buy_fix = 0. self.r.sell_fix = 0. self.r.buy_min = 0. self.r.sell_min = 0. self.r.slipage = 0. 
print('\nSell result with fixed rate = 0.001 and moq = 0:') print(self.r.get_selling_result(self.prices, self.amounts_to_sell)) test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell) self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333.3333]), True, 'result incorrect') self.assertAlmostEqual(test_rate_fee_result[1], 33299.999667, msg='result incorrect') self.assertAlmostEqual(test_rate_fee_result[2], 33.333332999999996, msg='result incorrect') print('\nSell result with fixed rate = 0.001 and moq = 1:') print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1.)) test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1) self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333]), True, 'result incorrect') self.assertAlmostEqual(test_rate_fee_result[1], 33296.67, msg='result incorrect') self.assertAlmostEqual(test_rate_fee_result[2], 33.33, msg='result incorrect') print('\nSell result with fixed rate = 0.001 and moq = 100:') print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)) test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100) self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3300]), True, 'result incorrect') self.assertAlmostEqual(test_rate_fee_result[1], 32967.0, msg='result incorrect') self.assertAlmostEqual(test_rate_fee_result[2], 33, msg='result incorrect') print('\nPurchase result with fixed rate = 0.003 and moq = 0:') print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)) test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0) self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997.00897308, 0.]), True, 'result incorrect') self.assertAlmostEqual(test_rate_fee_result[1], -20000.0, msg='result incorrect') self.assertAlmostEqual(test_rate_fee_result[2], 59.82053838484547, msg='result incorrect') print('\nPurchase result with fixed rate = 0.003 and moq = 1:') print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 1)) test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 1) self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997., 0.]), True, 'result incorrect') self.assertAlmostEqual(test_rate_fee_result[1], -19999.82, msg='result incorrect') self.assertAlmostEqual(test_rate_fee_result[2], 59.82, msg='result incorrect') print('\nPurchase result with fixed rate = 0.003 and moq = 100:') print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)) test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100) self.assertIs(np.allclose(test_rate_fee_result[0], [0., 900., 0.]), True, 'result incorrect') self.assertAlmostEqual(test_rate_fee_result[1], -18054., msg='result incorrect') self.assertAlmostEqual(test_rate_fee_result[2], 54.0, msg='result incorrect') def test_min_fee(self): """测试最低交易费用""" self.r.buy_rate = 0. self.r.sell_rate = 0. self.r.buy_fix = 0. self.r.sell_fix = 0. self.r.buy_min = 300 self.r.sell_min = 300 self.r.slipage = 0. 
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 0:') print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)) test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0) self.assertIs(np.allclose(test_min_fee_result[0], [0., 985, 0.]), True, 'result incorrect') self.assertAlmostEqual(test_min_fee_result[1], -20000.0, msg='result incorrect') self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect') print('\npurchase result with fixed cost rate with min fee = 300 and moq = 10:') print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)) test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10) self.assertIs(np.allclose(test_min_fee_result[0], [0., 980, 0.]), True, 'result incorrect') self.assertAlmostEqual(test_min_fee_result[1], -19900.0, msg='result incorrect') self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect') print('\npurchase result with fixed cost rate with min fee = 300 and moq = 100:') print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)) test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100) self.assertIs(np.allclose(test_min_fee_result[0], [0., 900, 0.]), True, 'result incorrect') self.assertAlmostEqual(test_min_fee_result[1], -18300.0, msg='result incorrect') self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect') print('\nselling result with fixed cost rate with min fee = 300 and moq = 0:') print(self.r.get_selling_result(self.prices, self.amounts_to_sell)) test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell) self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect') self.assertAlmostEqual(test_min_fee_result[1], 33033.333) self.assertAlmostEqual(test_min_fee_result[2], 300.0) print('\nselling result with fixed cost rate with min fee = 300 and moq = 1:') print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)) test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1) self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333]), True, 'result incorrect') self.assertAlmostEqual(test_min_fee_result[1], 33030) self.assertAlmostEqual(test_min_fee_result[2], 300.0) print('\nselling result with fixed cost rate with min fee = 300 and moq = 100:') print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)) test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100) self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3300]), True, 'result incorrect') self.assertAlmostEqual(test_min_fee_result[1], 32700) self.assertAlmostEqual(test_min_fee_result[2], 300.0) def test_rate_with_min(self): """测试最低交易费用对其他交易费率参数的影响""" self.r.buy_rate = 0.0153 self.r.sell_rate = 0.01 self.r.buy_fix = 0. self.r.sell_fix = 0. self.r.buy_min = 300 self.r.sell_min = 333 self.r.slipage = 0. 
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 0:') print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)) test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0) self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 984.9305624, 0.]), True, 'result incorrect') self.assertAlmostEqual(test_rate_with_min_result[1], -20000.0, msg='result incorrect') self.assertAlmostEqual(test_rate_with_min_result[2], 301.3887520929774, msg='result incorrect') print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 10:') print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)) test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10) self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 980, 0.]), True, 'result incorrect') self.assertAlmostEqual(test_rate_with_min_result[1], -19900.0, msg='result incorrect') self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect') print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 100:') print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)) test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100) self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 900, 0.]), True, 'result incorrect') self.assertAlmostEqual(test_rate_with_min_result[1], -18300.0, msg='result incorrect') self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect') print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 0:') print(self.r.get_selling_result(self.prices, self.amounts_to_sell)) test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell) self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333.3333]), True, 'result incorrect') self.assertAlmostEqual(test_rate_with_min_result[1], 32999.99967) self.assertAlmostEqual(test_rate_with_min_result[2], 333.33333) print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 1:') print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)) test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1) self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333]), True, 'result incorrect') self.assertAlmostEqual(test_rate_with_min_result[1], 32996.7) self.assertAlmostEqual(test_rate_with_min_result[2], 333.3) print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 100:') print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)) test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100) self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3300]), True, 'result incorrect') self.assertAlmostEqual(test_rate_with_min_result[1], 32667.0) self.assertAlmostEqual(test_rate_with_min_result[2], 333.0) def test_fixed_fee(self): """测试固定交易费用""" self.r.buy_rate = 0. self.r.sell_rate = 0. 
self.r.buy_fix = 200 self.r.sell_fix = 150 self.r.buy_min = 0 self.r.sell_min = 0 self.r.slipage = 0 print('\nselling result of fixed cost with fixed fee = 150 and moq=0:') print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 0)) test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell) self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect') self.assertAlmostEqual(test_fixed_fee_result[1], 33183.333, msg='result incorrect') self.assertAlmostEqual(test_fixed_fee_result[2], 150.0, msg='result incorrect') print('\nselling result of fixed cost with fixed fee = 150 and moq=100:') print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)) test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100) self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3300.]), True, f'result incorrect, {test_fixed_fee_result[0]} does not equal to [0,0,-3400]') self.assertAlmostEqual(test_fixed_fee_result[1], 32850., msg='result incorrect') self.assertAlmostEqual(test_fixed_fee_result[2], 150., msg='result incorrect') print('\npurchase result of fixed cost with fixed fee = 200:') print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)) test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0) self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 990., 0.]), True, 'result incorrect') self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect') self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect') print('\npurchase result of fixed cost with fixed fee = 200:') print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)) test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100) self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect') self.assertAlmostEqual(test_fixed_fee_result[1], -18200.0, msg='result incorrect') self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect') def test_slipage(self): """测试交易滑点""" self.r.buy_fix = 0 self.r.sell_fix = 0 self.r.buy_min = 0 self.r.sell_min = 0 self.r.buy_rate = 0.003 self.r.sell_rate = 0.001 self.r.slipage = 1E-9 print('\npurchase result of fixed rate = 0.003 and slipage = 1E-10 and moq = 0:') print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)) print('\npurchase result of fixed rate = 0.003 and slipage = 1E-10 and moq = 100:') print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)) print('\nselling result with fixed rate = 0.001 and slipage = 1E-10:') print(self.r.get_selling_result(self.prices, self.amounts_to_sell)) test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell) self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True, f'{test_fixed_fee_result[0]} does not equal to [0, 0, -10000]') self.assertAlmostEqual(test_fixed_fee_result[1], 33298.88855591, msg=f'{test_fixed_fee_result[1]} does not equal to 99890.') self.assertAlmostEqual(test_fixed_fee_result[2], 34.44444409, msg=f'{test_fixed_fee_result[2]} does not equal to -36.666663.') test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0) self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 996.98909294, 0.]), True, 'result incorrect') self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect') self.assertAlmostEqual(test_fixed_fee_result[2], 
60.21814121353513, msg='result incorrect') test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100) self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect') self.assertAlmostEqual(test_fixed_fee_result[1], -18054.36, msg='result incorrect') self.assertAlmostEqual(test_fixed_fee_result[2], 54.36, msg='result incorrect') class TestSpace(unittest.TestCase): def test_creation(self): """ test if creation of space object is fine """ # first group of inputs, output Space with two discr axis from [0,10] print('testing space objects\n') # pars_list = [[(0, 10), (0, 10)], # [[0, 10], [0, 10]]] # # types_list = ['discr', # ['discr', 'discr']] # # input_pars = itertools.product(pars_list, types_list) # for p in input_pars: # # print(p) # s = qt.Space(*p) # b = s.boes # t = s.types # # print(s, t) # self.assertIsInstance(s, qt.Space) # self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!') # self.assertEqual(t, ['discr', 'discr'], 'types incorrect') # pars_list = [[(0, 10), (0, 10)], [[0, 10], [0, 10]]] types_list = ['foo, bar', ['foo', 'bar']] input_pars = itertools.product(pars_list, types_list) for p in input_pars: # print(p) s = Space(*p) b = s.boes t = s.types # print(s, t) self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!') self.assertEqual(t, ['enum', 'enum'], 'types incorrect') pars_list = [[(0, 10), (0, 10)], [[0, 10], [0, 10]]] types_list = [['discr', 'foobar']] input_pars = itertools.product(pars_list, types_list) for p in input_pars: # print(p) s = Space(*p) b = s.boes t = s.types # print(s, t) self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!') self.assertEqual(t, ['discr', 'enum'], 'types incorrect') pars_list = [(0., 10), (0, 10)] s = Space(pars=pars_list, par_types=None) self.assertEqual(s.types, ['conti', 'discr']) self.assertEqual(s.dim, 2) self.assertEqual(s.size, (10.0, 11)) self.assertEqual(s.shape, (np.inf, 11)) self.assertEqual(s.count, np.inf) self.assertEqual(s.boes, [(0., 10), (0, 10)]) pars_list = [(0., 10), (0, 10)] s = Space(pars=pars_list, par_types='conti, enum') self.assertEqual(s.types, ['conti', 'enum']) self.assertEqual(s.dim, 2) self.assertEqual(s.size, (10.0, 2)) self.assertEqual(s.shape, (np.inf, 2)) self.assertEqual(s.count, np.inf) self.assertEqual(s.boes, [(0., 10), (0, 10)]) pars_list = [(1, 2), (2, 3), (3, 4)] s = Space(pars=pars_list) self.assertEqual(s.types, ['discr', 'discr', 'discr']) self.assertEqual(s.dim, 3) self.assertEqual(s.size, (2, 2, 2)) self.assertEqual(s.shape, (2, 2, 2)) self.assertEqual(s.count, 8) self.assertEqual(s.boes, [(1, 2), (2, 3), (3, 4)]) pars_list = [(1, 2, 3), (2, 3, 4), (3, 4, 5)] s = Space(pars=pars_list) self.assertEqual(s.types, ['enum', 'enum', 'enum']) self.assertEqual(s.dim, 3) self.assertEqual(s.size, (3, 3, 3)) self.assertEqual(s.shape, (3, 3, 3)) self.assertEqual(s.count, 27) self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)]) pars_list = [((1, 2, 3), (2, 3, 4), (3, 4, 5))] s = Space(pars=pars_list) self.assertEqual(s.types, ['enum']) self.assertEqual(s.dim, 1) self.assertEqual(s.size, (3,)) self.assertEqual(s.shape, (3,)) self.assertEqual(s.count, 3) pars_list = ((1, 2, 3), (2, 3, 4), (3, 4, 5)) s = Space(pars=pars_list) self.assertEqual(s.types, ['enum', 'enum', 'enum']) self.assertEqual(s.dim, 3) self.assertEqual(s.size, (3, 3, 3)) self.assertEqual(s.shape, (3, 3, 3)) self.assertEqual(s.count, 27) self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)]) def test_extract(self): """ :return: """ pars_list = [(0, 10), (0, 
10)] types_list = ['discr', 'discr'] s = Space(pars=pars_list, par_types=types_list) extracted_int, count = s.extract(3, 'interval') extracted_int_list = list(extracted_int) print('extracted int\n', extracted_int_list) self.assertEqual(count, 16, 'extraction count wrong!') self.assertEqual(extracted_int_list, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3), (3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9), (9, 0), (9, 3), (9, 6), (9, 9)], 'space extraction wrong!') extracted_rand, count = s.extract(10, 'rand') extracted_rand_list = list(extracted_rand) self.assertEqual(count, 10, 'extraction count wrong!') print('extracted rand\n', extracted_rand_list) for point in list(extracted_rand_list): self.assertEqual(len(point), 2) self.assertLessEqual(point[0], 10) self.assertGreaterEqual(point[0], 0) self.assertLessEqual(point[1], 10) self.assertGreaterEqual(point[1], 0) pars_list = [(0., 10), (0, 10)] s = Space(pars=pars_list, par_types=None) extracted_int2, count = s.extract(3, 'interval') self.assertEqual(count, 16, 'extraction count wrong!') extracted_int_list2 = list(extracted_int2) self.assertEqual(extracted_int_list2, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3), (3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9), (9, 0), (9, 3), (9, 6), (9, 9)], 'space extraction wrong!') print('extracted int list 2\n', extracted_int_list2) self.assertIsInstance(extracted_int_list2[0][0], float) self.assertIsInstance(extracted_int_list2[0][1], (int, int64)) extracted_rand2, count = s.extract(10, 'rand') self.assertEqual(count, 10, 'extraction count wrong!') extracted_rand_list2 = list(extracted_rand2) print('extracted rand list 2:\n', extracted_rand_list2) for point in extracted_rand_list2: self.assertEqual(len(point), 2) self.assertIsInstance(point[0], float) self.assertLessEqual(point[0], 10) self.assertGreaterEqual(point[0], 0) self.assertIsInstance(point[1], (int, int64)) self.assertLessEqual(point[1], 10) self.assertGreaterEqual(point[1], 0) pars_list = [(0., 10), ('a', 'b')] s = Space(pars=pars_list, par_types='enum, enum') extracted_int3, count = s.extract(1, 'interval') self.assertEqual(count, 4, 'extraction count wrong!') extracted_int_list3 = list(extracted_int3) self.assertEqual(extracted_int_list3, [(0., 'a'), (0., 'b'), (10, 'a'), (10, 'b')], 'space extraction wrong!') print('extracted int list 3\n', extracted_int_list3) self.assertIsInstance(extracted_int_list3[0][0], float) self.assertIsInstance(extracted_int_list3[0][1], str) extracted_rand3, count = s.extract(3, 'rand') self.assertEqual(count, 3, 'extraction count wrong!') extracted_rand_list3 = list(extracted_rand3) print('extracted rand list 3:\n', extracted_rand_list3) for point in extracted_rand_list3: self.assertEqual(len(point), 2) self.assertIsInstance(point[0], (float, int)) self.assertLessEqual(point[0], 10) self.assertGreaterEqual(point[0], 0) self.assertIsInstance(point[1], str) self.assertIn(point[1], ['a', 'b']) pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14))] s = Space(pars=pars_list, par_types='enum') extracted_int4, count = s.extract(1, 'interval') self.assertEqual(count, 4, 'extraction count wrong!') extracted_int_list4 = list(extracted_int4) it = zip(extracted_int_list4, [(0, 10), (1, 'c'), (0, 'b'), (1, 14)]) for item, item2 in it: print(item, item2) self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it])) print('extracted int list 4\n', extracted_int_list4) self.assertIsInstance(extracted_int_list4[0], tuple) extracted_rand4, count = s.extract(3, 'rand') self.assertEqual(count, 3, 'extraction count 
wrong!') extracted_rand_list4 = list(extracted_rand4) print('extracted rand list 4:\n', extracted_rand_list4) for point in extracted_rand_list4: self.assertEqual(len(point), 2) self.assertIsInstance(point[0], (int, str)) self.assertIn(point[0], [0, 1, 'a']) self.assertIsInstance(point[1], (int, str)) self.assertIn(point[1], [10, 14, 'b', 'c']) self.assertIn(point, [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)]) pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14)), (1, 4)] s = Space(pars=pars_list, par_types='enum, discr') extracted_int5, count = s.extract(1, 'interval') self.assertEqual(count, 16, 'extraction count wrong!') extracted_int_list5 = list(extracted_int5) for item, item2 in extracted_int_list5: print(item, item2) self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it])) print('extracted int list 5\n', extracted_int_list5) self.assertIsInstance(extracted_int_list5[0], tuple) extracted_rand5, count = s.extract(5, 'rand') self.assertEqual(count, 5, 'extraction count wrong!') extracted_rand_list5 = list(extracted_rand5) print('extracted rand list 5:\n', extracted_rand_list5) for point in extracted_rand_list5: self.assertEqual(len(point), 2) self.assertIsInstance(point[0], tuple) print(f'type of point[1] is {type(point[1])}') self.assertIsInstance(point[1], (int, np.int64)) self.assertIn(point[0], [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)]) print(f'test incremental extraction') pars_list = [(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)] s = Space(pars_list) ext, count = s.extract(64, 'interval') self.assertEqual(count, 4096) points = list(ext) # 已经取出所有的点,围绕其中10个点生成十个subspaces # 检查是否每个subspace都为Space,是否都在s范围内,使用32生成点集,检查生成数量是否正确 for point in points[1000:1010]: subspace = s.from_point(point, 64) self.assertIsInstance(subspace, Space) self.assertTrue(subspace in s) self.assertEqual(subspace.dim, 6) self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti']) ext, count = subspace.extract(32) points = list(ext) self.assertGreaterEqual(count, 512) self.assertLessEqual(count, 4096) print(f'\n---------------------------------' f'\nthe space created around point <{point}> is' f'\n{subspace.boes}' f'\nand extracted {count} points, the first 5 are:' f'\n{points[:5]}') def test_axis_extract(self): # test axis object with conti type axis = Axis((0., 5)) self.assertIsInstance(axis, Axis) self.assertEqual(axis.axis_type, 'conti') self.assertEqual(axis.axis_boe, (0., 5.)) self.assertEqual(axis.count, np.inf) self.assertEqual(axis.size, 5.0) self.assertTrue(np.allclose(axis.extract(1, 'int'), [0., 1., 2., 3., 4.])) self.assertTrue(np.allclose(axis.extract(0.5, 'int'), [0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5])) extracted = axis.extract(8, 'rand') self.assertEqual(len(extracted), 8) self.assertTrue(all([(0 <= item <= 5) for item in extracted])) # test axis object with discrete type axis = Axis((1, 5)) self.assertIsInstance(axis, Axis) self.assertEqual(axis.axis_type, 'discr') self.assertEqual(axis.axis_boe, (1, 5)) self.assertEqual(axis.count, 5) self.assertEqual(axis.size, 5) self.assertTrue(np.allclose(axis.extract(1, 'int'), [1, 2, 3, 4, 5])) self.assertRaises(ValueError, axis.extract, 0.5, 'int') extracted = axis.extract(8, 'rand') self.assertEqual(len(extracted), 8) self.assertTrue(all([(item in [1, 2, 3, 4, 5]) for item in extracted])) # test axis object with enumerate type axis = Axis((1, 5, 7, 10, 'A', 'F')) self.assertIsInstance(axis, Axis) self.assertEqual(axis.axis_type, 'enum') self.assertEqual(axis.axis_boe, (1, 5, 7, 10, 
'A', 'F')) self.assertEqual(axis.count, 6) self.assertEqual(axis.size, 6) self.assertEqual(axis.extract(1, 'int'), [1, 5, 7, 10, 'A', 'F']) self.assertRaises(ValueError, axis.extract, 0.5, 'int') extracted = axis.extract(8, 'rand') self.assertEqual(len(extracted), 8) self.assertTrue(all([(item in [1, 5, 7, 10, 'A', 'F']) for item in extracted])) def test_from_point(self): """测试从一个点生成一个space""" # 生成一个space,指定space中的一个点以及distance,生成一个sub-space pars_list = [(0., 10), (0, 10)] s = Space(pars=pars_list, par_types=None) self.assertEqual(s.types, ['conti', 'discr']) self.assertEqual(s.dim, 2) self.assertEqual(s.size, (10., 11)) self.assertEqual(s.shape, (np.inf, 11)) self.assertEqual(s.count, np.inf) self.assertEqual(s.boes, [(0., 10), (0, 10)]) print('create subspace from a point in space') p = (3, 3) distance = 2 subspace = s.from_point(p, distance) self.assertIsInstance(subspace, Space) self.assertEqual(subspace.types, ['conti', 'discr']) self.assertEqual(subspace.dim, 2) self.assertEqual(subspace.size, (4.0, 5)) self.assertEqual(subspace.shape, (np.inf, 5)) self.assertEqual(subspace.count, np.inf) self.assertEqual(subspace.boes, [(1, 5), (1, 5)]) print('create subspace from a 6 dimensional discrete space') s = Space(pars=[(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)]) p = (15, 200, 150, 150, 150, 150) d = 10 subspace = s.from_point(p, d) self.assertIsInstance(subspace, Space) self.assertEqual(subspace.types, ['discr', 'discr', 'discr', 'discr', 'discr', 'discr']) self.assertEqual(subspace.dim, 6) self.assertEqual(subspace.volume, 65345616) self.assertEqual(subspace.size, (16, 21, 21, 21, 21, 21)) self.assertEqual(subspace.shape, (16, 21, 21, 21, 21, 21)) self.assertEqual(subspace.count, 65345616) self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)]) print('create subspace from a 6 dimensional continuous space') s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)]) p = (15, 200, 150, 150, 150, 150) d = 10 subspace = s.from_point(p, d) self.assertIsInstance(subspace, Space) self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti']) self.assertEqual(subspace.dim, 6) self.assertEqual(subspace.volume, 48000000) self.assertEqual(subspace.size, (15.0, 20.0, 20.0, 20.0, 20.0, 20.0)) self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf)) self.assertEqual(subspace.count, np.inf) self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)]) print('create subspace with different distances on each dimension') s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)]) p = (15, 200, 150, 150, 150, 150) d = [10, 5, 5, 10, 10, 5] subspace = s.from_point(p, d) self.assertIsInstance(subspace, Space) self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti']) self.assertEqual(subspace.dim, 6) self.assertEqual(subspace.volume, 6000000) self.assertEqual(subspace.size, (15.0, 10.0, 10.0, 20.0, 20.0, 10.0)) self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf)) self.assertEqual(subspace.count, np.inf) self.assertEqual(subspace.boes, [(10, 25), (195, 205), (145, 155), (140, 160), (140, 160), (145, 155)]) class TestCashPlan(unittest.TestCase): def setUp(self): self.cp1 = qt.CashPlan(['2012-01-01', '2010-01-01'], [10000, 20000], 0.1) self.cp1.info() self.cp2 = qt.CashPlan(['20100501'], 10000) self.cp2.info() self.cp3 = 
qt.CashPlan(pd.date_range(start='2019-01-01', freq='Y', periods=12), [i * 1000 + 10000 for i in range(12)], 0.035) self.cp3.info() def test_creation(self): self.assertIsInstance(self.cp1, qt.CashPlan, 'CashPlan object creation wrong') self.assertIsInstance(self.cp2, qt.CashPlan, 'CashPlan object creation wrong') self.assertIsInstance(self.cp3, qt.CashPlan, 'CashPlan object creation wrong') # test __repr__() print(self.cp1) print(self.cp2) print(self.cp3) # test __str__() self.cp1.info() self.cp2.info() self.cp3.info() # test assersion errors self.assertRaises(AssertionError, qt.CashPlan, '2016-01-01', [10000, 10000]) self.assertRaises(KeyError, qt.CashPlan, '2020-20-20', 10000) def test_properties(self): self.assertEqual(self.cp1.amounts, [20000, 10000], 'property wrong') self.assertEqual(self.cp1.first_day, Timestamp('2010-01-01')) self.assertEqual(self.cp1.last_day, Timestamp('2012-01-01')) self.assertEqual(self.cp1.investment_count, 2) self.assertEqual(self.cp1.period, 730) self.assertEqual(self.cp1.dates, [Timestamp('2010-01-01'), Timestamp('2012-01-01')]) self.assertEqual(self.cp1.ir, 0.1) self.assertAlmostEqual(self.cp1.closing_value, 34200) self.assertAlmostEqual(self.cp2.closing_value, 10000) self.assertAlmostEqual(self.cp3.closing_value, 220385.3483685) self.assertIsInstance(self.cp1.plan, pd.DataFrame) self.assertIsInstance(self.cp2.plan, pd.DataFrame) self.assertIsInstance(self.cp3.plan, pd.DataFrame) def test_operation(self): cp_self_add = self.cp1 + self.cp1 cp_add = self.cp1 + self.cp2 cp_add_int = self.cp1 + 10000 cp_mul_int = self.cp1 * 2 cp_mul_float = self.cp2 * 1.5 cp_mul_time = 3 * self.cp2 cp_mul_time2 = 2 * self.cp1 cp_mul_time3 = 2 * self.cp3 cp_mul_float2 = 2. * self.cp3 self.assertIsInstance(cp_self_add, qt.CashPlan) self.assertEqual(cp_self_add.amounts, [40000, 20000]) self.assertEqual(cp_add.amounts, [20000, 10000, 10000]) self.assertEqual(cp_add_int.amounts, [30000, 20000]) self.assertEqual(cp_mul_int.amounts, [40000, 20000]) self.assertEqual(cp_mul_float.amounts, [15000]) self.assertEqual(cp_mul_float.dates, [Timestamp('2010-05-01')]) self.assertEqual(cp_mul_time.amounts, [10000, 10000, 10000]) self.assertEqual(cp_mul_time.dates, [Timestamp('2010-05-01'), Timestamp('2011-05-01'), Timestamp('2012-04-30')]) self.assertEqual(cp_mul_time2.amounts, [20000, 10000, 20000, 10000]) self.assertEqual(cp_mul_time2.dates, [Timestamp('2010-01-01'), Timestamp('2012-01-01'), Timestamp('2014-01-01'), Timestamp('2016-01-01')]) self.assertEqual(cp_mul_time3.dates, [Timestamp('2019-12-31'), Timestamp('2020-12-31'), Timestamp('2021-12-31'), Timestamp('2022-12-31'), Timestamp('2023-12-31'), Timestamp('2024-12-31'), Timestamp('2025-12-31'), Timestamp('2026-12-31'), Timestamp('2027-12-31'), Timestamp('2028-12-31'), Timestamp('2029-12-31'), Timestamp('2030-12-31'), Timestamp('2031-12-29'), Timestamp('2032-12-29'), Timestamp('2033-12-29'), Timestamp('2034-12-29'), Timestamp('2035-12-29'), Timestamp('2036-12-29'), Timestamp('2037-12-29'), Timestamp('2038-12-29'), Timestamp('2039-12-29'), Timestamp('2040-12-29'), Timestamp('2041-12-29'), Timestamp('2042-12-29')]) self.assertEqual(cp_mul_float2.dates, [Timestamp('2019-12-31'), Timestamp('2020-12-31'), Timestamp('2021-12-31'), Timestamp('2022-12-31'), Timestamp('2023-12-31'), Timestamp('2024-12-31'), Timestamp('2025-12-31'), Timestamp('2026-12-31'), Timestamp('2027-12-31'), Timestamp('2028-12-31'), Timestamp('2029-12-31'), Timestamp('2030-12-31')]) self.assertEqual(cp_mul_float2.amounts, [20000.0, 22000.0, 24000.0, 26000.0, 28000.0, 
                                                 30000.0, 32000.0, 34000.0, 36000.0, 38000.0, 40000.0, 42000.0])


class TestPool(unittest.TestCase):
    def setUp(self):
        self.p = ResultPool(5)
        self.items = ['first', 'second', (1, 2, 3), 'this', 24]
        self.perfs = [1, 2, 3, 4, 5]
        self.additional_result1 = ('abc', 12)
        self.additional_result2 = ([1, 2], -1)
        self.additional_result3 = (12, 5)

    def test_create(self):
        self.assertIsInstance(self.p, ResultPool)

    def test_operation(self):
        self.p.in_pool(self.additional_result1[0], self.additional_result1[1])
        self.p.cut()
        self.assertEqual(self.p.item_count, 1)
        self.assertEqual(self.p.items, ['abc'])
        for item, perf in zip(self.items, self.perfs):
            self.p.in_pool(item, perf)
        self.assertEqual(self.p.item_count, 6)
        self.assertEqual(self.p.items, ['abc', 'first', 'second', (1, 2, 3), 'this', 24])
        self.p.cut()
        self.assertEqual(self.p.items, ['second', (1, 2, 3), 'this', 24, 'abc'])
        self.assertEqual(self.p.perfs, [2, 3, 4, 5, 12])
        self.p.in_pool(self.additional_result2[0], self.additional_result2[1])
        self.p.in_pool(self.additional_result3[0], self.additional_result3[1])
        self.assertEqual(self.p.item_count, 7)
        self.p.cut(keep_largest=False)
        self.assertEqual(self.p.items, [[1, 2], 'second', (1, 2, 3), 'this', 24])
        self.assertEqual(self.p.perfs, [-1, 2, 3, 4, 5])


class TestCoreSubFuncs(unittest.TestCase):
    """Test all functions in core.py"""
    def setUp(self):
        pass

    def test_input_to_list(self):
        print('Testing input_to_list() function')
        input_str = 'first'
        self.assertEqual(qt.utilfuncs.input_to_list(input_str, 3), ['first', 'first', 'first'])
        self.assertEqual(qt.utilfuncs.input_to_list(input_str, 4), ['first', 'first', 'first', 'first'])
        self.assertEqual(qt.utilfuncs.input_to_list(input_str, 2, None), ['first', 'first'])
        input_list = ['first', 'second']
        self.assertEqual(qt.utilfuncs.input_to_list(input_list, 3), ['first', 'second', None])
        self.assertEqual(qt.utilfuncs.input_to_list(input_list, 4, 'padder'), ['first', 'second', 'padder', 'padder'])
        self.assertEqual(qt.utilfuncs.input_to_list(input_list, 1), ['first', 'second'])
        self.assertEqual(qt.utilfuncs.input_to_list(input_list, -5), ['first', 'second'])

    def test_point_in_space(self):
        sp = Space([(0., 10.), (0., 10.), (0., 10.)])
        p1 = (5.5, 3.2, 7)
        p2 = (-1, 3, 10)
        self.assertTrue(p1 in sp)
        print(f'point {p1} is in space {sp}')
        self.assertFalse(p2 in sp)
        print(f'point {p2} is not in space {sp}')
        sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)], 'conti, conti, enum')
        p1 = (5.5, 3.2, 8)
        self.assertTrue(p1 in sp)
        print(f'point {p1} is in space {sp}')

    def test_space_in_space(self):
        print('test if a space is in another space')
        sp = Space([(0., 10.), (0., 10.), (0., 10.)])
        sp2 = Space([(0., 10.), (0., 10.), (0., 10.)])
        self.assertTrue(sp2 in sp)
        self.assertTrue(sp in sp2)
        print(f'space {sp2} is in space {sp}\n'
              f'and space {sp} is in space {sp2}\n'
              f'they are equal to each other\n')
        sp2 = Space([(0, 5.), (2, 7.), (3., 9.)])
        self.assertTrue(sp2 in sp)
        self.assertFalse(sp in sp2)
        print(f'space {sp2} is in space {sp}\n'
              f'and space {sp} is not in space {sp2}\n'
              f'{sp2} is a sub space of {sp}\n')
        sp2 = Space([(0, 5), (2, 7), (3., 9)])
        self.assertFalse(sp2 in sp)
        self.assertFalse(sp in sp2)
        print(f'space {sp2} is not in space {sp}\n'
              f'and space {sp} is not in space {sp2}\n'
              f'they have different types of axes\n')
        sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)])
        self.assertFalse(sp in sp2)
        self.assertFalse(sp2 in sp)
        print(f'space {sp2} is not in space {sp}\n'
              f'and space {sp} is not in space {sp2}\n'
              f'they have different types of axes\n')

    def 
test_space_around_centre(self): sp = Space([(0., 10.), (0., 10.), (0., 10.)]) p1 = (5.5, 3.2, 7) ssp = space_around_centre(space=sp, centre=p1, radius=1.2) print(ssp.boes) print('\ntest multiple diameters:') self.assertEqual(ssp.boes, [(4.3, 6.7), (2.0, 4.4), (5.8, 8.2)]) ssp = space_around_centre(space=sp, centre=p1, radius=[1, 2, 1]) print(ssp.boes) self.assertEqual(ssp.boes, [(4.5, 6.5), (1.2000000000000002, 5.2), (6.0, 8.0)]) print('\ntest points on edge:') p2 = (5.5, 3.2, 10) ssp = space_around_centre(space=sp, centre=p1, radius=3.9) print(ssp.boes) self.assertEqual(ssp.boes, [(1.6, 9.4), (0.0, 7.1), (3.1, 10.0)]) print('\ntest enum spaces') sp = Space([(0, 100), range(40, 3, -2)], 'discr, enum') p1 = [34, 12] ssp = space_around_centre(space=sp, centre=p1, radius=5, ignore_enums=False) self.assertEqual(ssp.boes, [(29, 39), (22, 20, 18, 16, 14, 12, 10, 8, 6, 4)]) print(ssp.boes) print('\ntest enum space and ignore enum axis') ssp = space_around_centre(space=sp, centre=p1, radius=5) self.assertEqual(ssp.boes, [(29, 39), (40, 38, 36, 34, 32, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4)]) print(sp.boes) def test_get_stock_pool(self): print(f'start test building stock pool function\n') share_basics = stock_basic(fields='ts_code,symbol,name,area,industry,market,list_date,exchange') print(f'\nselect all stocks by area') stock_pool = qt.get_stock_pool(area='上海') print(f'{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n' f'check if all stock areas are "上海"\n' f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}') self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].eq('上海').all()) print(f'\nselect all stocks by multiple areas') stock_pool = qt.get_stock_pool(area='贵州,北京,天津') print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n' f'check if all stock areas are in list of ["贵州", "北京", "天津"]\n' f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}') self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['贵州', '北京', '天津']).all()) print(f'\nselect all stocks by area and industry') stock_pool = qt.get_stock_pool(area='四川', industry='银行, 金融') print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n' f'check if all stock areas are "四川", and industry in ["银行", "金融"]\n' f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}') self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all()) self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['四川']).all()) print(f'\nselect all stocks by industry') stock_pool = qt.get_stock_pool(industry='银行, 金融') print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n' f'check if all stocks industry in ["银行", "金融"]\n' f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}') self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all()) print(f'\nselect all stocks by market') stock_pool = qt.get_stock_pool(market='主板') print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n' f'check if all stock market is "主板"\n' f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}') self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all()) print(f'\nselect all stocks by market and list date') stock_pool = qt.get_stock_pool(date='2000-01-01', market='主板') print(f'\n{len(stock_pool)} shares 
selected, first 5 are: {stock_pool[0:5]}\n' f'check if all stock market is "主板", and list date after "2000-01-01"\n' f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}') self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all()) self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('2000-01-01').all()) print(f'\nselect all stocks by list date') stock_pool = qt.get_stock_pool(date='1997-01-01') print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n' f'check if all list date after "1997-01-01"\n' f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}') self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1997-01-01').all()) print(f'\nselect all stocks by exchange') stock_pool = qt.get_stock_pool(exchange='SSE') print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n' f'check if all exchanges are "SSE"\n' f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}') self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['exchange'].eq('SSE').all()) print(f'\nselect all stocks by industry, area and list date') industry_list = ['银行', '全国地产', '互联网', '环境保护', '区域地产', '酒店餐饮', '运输设备', '综合类', '建筑工程', '玻璃', '家用电器', '文教休闲', '其他商业', '元器件', 'IT设备', '其他建材', '汽车服务', '火力发电', '医药商业', '汽车配件', '广告包装', '轻工机械', '新型电力', '多元金融', '饲料'] area_list = ['深圳', '北京', '吉林', '江苏', '辽宁', '广东', '安徽', '四川', '浙江', '湖南', '河北', '新疆', '山东', '河南', '山西', '江西', '青海', '湖北', '内蒙', '海南', '重庆', '陕西', '福建', '广西', '上海'] stock_pool = qt.get_stock_pool(date='19980101', industry=industry_list, area=area_list) print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n' f'check if all exchanges are "SSE"\n' f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}') self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1998-01-01').all()) self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(industry_list).all()) self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(area_list).all()) self.assertRaises(KeyError, qt.get_stock_pool, industry=25) self.assertRaises(KeyError, qt.get_stock_pool, share_name='000300.SH') self.assertRaises(KeyError, qt.get_stock_pool, markets='SSE') class TestEvaluations(unittest.TestCase): """Test all evaluation functions in core.py""" # 以下手动计算结果在Excel文件中 def setUp(self): """用np.random生成测试用数据,使用cumsum()模拟股票走势""" self.test_data1 = pd.DataFrame([5.34892759, 5.65768696, 5.79227076, 5.56266871, 5.88189632, 6.24795001, 5.92755558, 6.38748165, 6.31331899, 5.86001665, 5.61048472, 5.30696736, 5.40406792, 5.03180571, 5.37886353, 5.78608307, 6.26540339, 6.59348026, 6.90943801, 6.70911677, 6.33015954, 6.06697417, 5.9752499, 6.45786408, 6.95273763, 6.7691991, 6.70355481, 6.28048969, 6.61344541, 6.24620003, 6.47409983, 6.4522311, 6.8773094, 6.99727832, 6.59262674, 6.59014938, 6.63758237, 6.38331869, 6.09902105, 6.35390109, 6.51993567, 6.87244592, 6.83963485, 7.08797815, 6.88003144, 6.83657323, 6.97819483, 7.01600276, 7.12554256, 7.58941523, 7.61014457, 7.21224091, 7.48174399, 7.66490854, 7.51371968, 7.11586198, 6.97147399, 6.67453301, 6.2042138, 6.33967015, 6.22187938, 5.98426993, 6.37096079, 6.55897161, 6.26422645, 6.69363762, 7.12668015, 6.83232926, 7.30524081, 7.4262041, 7.54031383, 7.17545919, 7.20659257, 7.44886016, 7.37094393, 6.88011022, 7.08142491, 6.74992833, 6.5967097, 6.21336693, 
6.35565105, 6.82347596, 6.44773408, 6.84538053, 6.47966466, 6.09699528, 5.63927014, 6.01081024, 6.20585303, 6.60528206, 7.01594726, 7.03684251, 6.76574977, 7.08740846, 6.65336462, 7.07126686, 6.80058956, 6.79241977, 6.47843472, 6.39245474], columns=['value']) self.test_data2 = pd.DataFrame([5.09276527, 4.83828592, 4.6000911, 4.63170487, 4.63566451, 4.50546921, 4.96390044, 4.64557907, 4.25787855, 3.76585551, 3.38826334, 3.76243422, 4.06365426, 3.87084726, 3.91400935, 4.13438822, 4.27064542, 4.56776104, 5.03800296, 5.31070529, 5.39902276, 5.21186286, 5.05683114, 4.68842046, 5.11895168, 5.27151571, 5.72294993, 6.09961056, 6.26569635, 6.48806151, 6.16058885, 6.2582459, 6.38934791, 6.57831057, 6.19508831, 5.70155153, 5.20435735, 5.36538825, 5.40450056, 5.2227697, 5.37828693, 5.53058991, 6.02996797, 5.76802181, 5.66166713, 6.07988994, 5.61794367, 5.63218151, 6.10728013, 6.0324168, 6.27164431, 6.27551239, 6.52329665, 7.00470007, 7.34163113, 7.33699083, 7.67661334, 8.09395749, 7.68086668, 7.58341161, 7.46219819, 7.58671899, 7.19348298, 7.40088323, 7.47562005, 7.93342043, 8.2286081, 8.3521632, 8.43590025, 8.34977395, 8.57563095, 8.81586328, 9.08738649, 9.01542031, 8.8653815, 9.21763111, 9.04233017, 8.59533999, 8.47590075, 8.70857222, 8.78890756, 8.92697606, 9.35743773, 9.68280866, 10.15622021, 10.55908549, 10.6337894, 10.55197128, 10.65435176, 10.54611045, 10.19432562, 10.48320884, 10.36176768, 10.03186854, 10.23656092, 10.0062843, 10.13669686, 10.30758958, 9.87904176, 10.05126375], columns=['value']) self.test_data3 = pd.DataFrame([5.02851874, 5.20700348, 5.02410709, 5.49836387, 5.06834371, 5.10956737, 5.15314979, 5.02256472, 5.09746382, 5.23909247, 4.93410336, 4.96316186, 5.40026682, 5.7353255, 5.53438319, 5.79092139, 5.67528173, 5.89840855, 5.75379463, 6.10855386, 5.77322365, 5.84538021, 5.6103973, 5.7518655, 5.49729695, 5.13610628, 5.30524121, 5.68093462, 5.73251319, 6.04420783, 6.26929843, 6.59610234, 6.09872345, 6.25475121, 6.72927396, 6.91395783, 7.00693283, 7.36217783, 7.71516676, 7.67580263, 7.62477511, 7.73600568, 7.53457914, 7.46170277, 7.83658014, 8.11481319, 8.03705544, 7.64948845, 7.52043731, 7.67247943, 7.46511982, 7.43541798, 7.58856517, 7.9392717, 8.25406287, 7.77031632, 8.03223447, 7.86799055, 7.57630999, 7.33230519, 7.22378732, 6.85972264, 7.17548456, 7.5387846, 7.2392632, 6.8455644, 6.59557185, 6.6496796, 6.73685623, 7.18598015, 7.13619128, 6.88060157, 7.1399681, 7.30308077, 6.94942434, 7.0247815, 7.37567798, 7.50080197, 7.59719284, 7.14520561, 7.29913484, 7.79551341, 8.15497781, 8.40456095, 8.86516528, 8.53042688, 8.94268762, 8.52048006, 8.80036284, 8.91602364, 9.19953385, 8.70828953, 8.24613093, 8.18770453, 7.79548389, 7.68627967, 7.23205036, 6.98302636, 7.06515819, 6.95068113], columns=['value']) self.test_data4 = pd.DataFrame([4.97926539, 5.44016005, 5.45122915, 5.74485615, 5.45600553, 5.44858945, 5.2435413, 5.47315161, 5.58464303, 5.36179749, 5.38236326, 5.29614981, 5.76523508, 5.75102892, 6.15316618, 6.03852528, 6.01442228, 5.70510182, 5.22748133, 5.46762379, 5.78926267, 5.8221362, 5.61236849, 5.30615725, 5.24200611, 5.41042642, 5.59940342, 5.28306781, 4.99451932, 5.08799266, 5.38865647, 5.58229139, 5.33492845, 5.48206276, 5.09721379, 5.39190493, 5.29965087, 5.0374415, 5.50798022, 5.43107577, 5.22759507, 4.991809, 5.43153084, 5.39966868, 5.59916352, 5.66412137, 6.00611838, 5.63564902, 5.66723484, 5.29863863, 4.91115153, 5.3749929, 5.75082334, 6.08308148, 6.58091182, 6.77848803, 7.19588758, 7.64862286, 7.99818347, 7.91824794, 8.30341071, 8.45984973, 7.98700002, 8.18924931, 
8.60755649, 8.66233396, 8.91018407, 9.0782739, 9.33515448, 8.95870245, 8.98426422, 8.50340317, 8.64916085, 8.93592407, 8.63145745, 8.65322862, 8.39543204, 8.37969997, 8.23394504, 8.04062872, 7.91259763, 7.57252171, 7.72670114, 7.74486117, 8.06908188, 7.99166889, 7.92155906, 8.39956136, 8.80181323, 8.47464091, 8.06557064, 7.87145573, 8.0237959, 8.39481998, 8.68525692, 8.81185461, 8.98632237, 9.0989835, 8.89787405, 8.86508591], columns=['value']) self.test_data5 = pd.DataFrame([4.50258923, 4.35142568, 4.07459514, 3.87791297, 3.73715985, 3.98455684, 4.07587908, 4.00042472, 4.28276612, 4.01362051, 4.13713565, 4.49312372, 4.48633159, 4.4641207, 4.13444605, 3.79107217, 4.22941629, 4.56548511, 4.92472163, 5.27723158, 5.67409193, 6.00176917, 5.88889928, 5.55256103, 5.39308314, 5.2610492, 5.30738908, 5.22222408, 4.90332238, 4.57499908, 4.96097146, 4.81531011, 4.39115442, 4.63200662, 5.04588813, 4.67866025, 5.01705123, 4.83562258, 4.60381702, 4.66187576, 4.41292828, 4.86604507, 4.42280124, 4.07517294, 4.16317319, 4.10316596, 4.42913598, 4.06609666, 3.96725913, 4.15965746, 4.12379564, 4.04054068, 3.84342851, 3.45902867, 3.17649855, 3.09773586, 3.5502119, 3.66396995, 3.66306483, 3.29131401, 2.79558533, 2.88319542, 3.03671098, 3.44645857, 3.88167161, 3.57961874, 3.60180276, 3.96702102, 4.05429995, 4.40056979, 4.05653231, 3.59600456, 3.60792477, 4.09989922, 3.73503663, 4.01892626, 3.94597242, 3.81466605, 3.71417992, 3.93767156, 4.42806557, 4.06988106, 4.03713636, 4.34408673, 4.79810156, 5.18115011, 4.89798406, 5.3960077, 5.72504875, 5.61894017, 5.1958197, 4.85275896, 5.17550207, 4.71548987, 4.62408567, 4.55488535, 4.36532649, 4.26031979, 4.25225607, 4.58627048], columns=['value']) self.test_data6 = pd.DataFrame([5.08639513, 5.05761083, 4.76160923, 4.62166504, 4.62923183, 4.25070173, 4.13447513, 3.90890013, 3.76687608, 3.43342482, 3.67648224, 3.6274775, 3.9385404, 4.39771627, 4.03199346, 3.93265288, 3.50059789, 3.3851961, 3.29743973, 3.2544872, 2.93692949, 2.70893003, 2.55461976, 2.20922332, 2.29054475, 2.2144714, 2.03726827, 2.39007617, 2.29866155, 2.40607111, 2.40440444, 2.79374649, 2.66541922, 2.27018079, 2.08505127, 2.55478864, 2.22415625, 2.58517923, 2.58802256, 2.94870959, 2.69301739, 2.19991535, 2.69473146, 2.64704637, 2.62753542, 2.14240825, 2.38565154, 1.94592117, 2.32243877, 2.69337246, 2.51283854, 2.62484451, 2.15559054, 2.35410875, 2.31219177, 1.96018265, 2.34711266, 2.58083322, 2.40290041, 2.20439791, 2.31472425, 2.16228248, 2.16439749, 2.20080737, 1.73293206, 1.9264407, 2.25089861, 2.69269101, 2.59296687, 2.1420998, 1.67819153, 1.98419023, 2.14479494, 1.89055376, 1.96720648, 1.9916694, 2.37227761, 2.14446036, 2.34573903, 1.86162546, 2.1410721, 2.39204939, 2.52529064, 2.47079939, 2.9299031, 3.09452923, 2.93276708, 3.21731309, 3.06248964, 2.90413406, 2.67844632, 2.45621213, 2.41463398, 2.7373913, 3.14917045, 3.4033949, 3.82283446, 4.02285451, 3.7619638, 4.10346795], columns=['value']) self.test_data7 = pd.DataFrame([4.75233583, 4.47668283, 4.55894263, 4.61765848, 4.622892, 4.58941116, 4.32535872, 3.88112797, 3.47237806, 3.50898953, 3.82530406, 3.6718017, 3.78918195, 4.1800752, 4.01818557, 4.40822582, 4.65474654, 4.89287256, 4.40879274, 4.65505126, 4.36876403, 4.58418934, 4.75687172, 4.3689799, 4.16126498, 4.0203982, 3.77148242, 3.38198096, 3.07261764, 2.9014741, 2.5049543, 2.756105, 2.28779058, 2.16986991, 1.8415962, 1.83319008, 2.20898291, 2.00128981, 1.75747025, 1.26676663, 1.40316876, 1.11126484, 1.60376367, 1.22523829, 1.58816681, 1.49705679, 1.80244138, 1.55128293, 1.35339409, 
1.50985759, 1.0808451, 1.05892796, 1.43414812, 1.43039101, 1.73631655, 1.43940867, 1.82864425, 1.71088265, 2.12015154, 2.45417128, 2.84777618, 2.7925612, 2.90975121, 3.25920745, 3.13801182, 3.52733677, 3.65468491, 3.69395211, 3.49862035, 3.24786017, 3.64463138, 4.00331929, 3.62509565, 3.78013949, 3.4174012, 3.76312271, 3.62054004, 3.67206716, 3.60596058, 3.38636199, 3.42580676, 3.32921095, 3.02976759, 3.28258676, 3.45760838, 3.24917528, 2.94618304, 2.86980011, 2.63191259, 2.39566759, 2.53159917, 2.96273967, 3.25626185, 2.97425402, 3.16412191, 3.58280763, 3.23257727, 3.62353556, 3.12806399, 2.92532313], columns=['value']) # 建立一个长度为 500 个数据点的测试数据, 用于测试数据点多于250个的情况下的评价过程 self.long_data = pd.DataFrame([9.879, 9.916, 10.109, 10.214, 10.361, 10.768, 10.594, 10.288, 10.082, 9.994, 10.125, 10.126, 10.384, 10.734, 10.4, 10.87, 11.338, 11.061, 11.415, 11.724, 12.077, 12.196, 12.064, 12.423, 12.19, 11.729, 11.677, 11.448, 11.485, 10.989, 11.242, 11.239, 11.113, 11.075, 11.471, 11.745, 11.754, 11.782, 12.079, 11.97, 12.178, 11.95, 12.438, 12.612, 12.804, 12.952, 12.612, 12.867, 12.832, 12.832, 13.015, 13.315, 13.249, 12.904, 12.776, 12.64, 12.543, 12.287, 12.225, 11.844, 11.985, 11.945, 11.542, 11.871, 12.245, 12.228, 12.362, 11.899, 11.962, 12.374, 12.816, 12.649, 12.252, 12.579, 12.3, 11.988, 12.177, 12.312, 12.744, 12.599, 12.524, 12.82, 12.67, 12.876, 12.986, 13.271, 13.606, 13.82, 14.161, 13.833, 13.831, 14.137, 13.705, 13.414, 13.037, 12.759, 12.642, 12.948, 13.297, 13.483, 13.836, 14.179, 13.709, 13.655, 13.198, 13.508, 13.953, 14.387, 14.043, 13.987, 13.561, 13.391, 12.923, 12.555, 12.503, 12.292, 11.877, 12.34, 12.141, 11.687, 11.992, 12.458, 12.131, 11.75, 11.739, 11.263, 11.762, 11.976, 11.578, 11.854, 12.136, 12.422, 12.311, 12.56, 12.879, 12.861, 12.973, 13.235, 13.53, 13.531, 13.137, 13.166, 13.31, 13.103, 13.007, 12.643, 12.69, 12.216, 12.385, 12.046, 12.321, 11.9, 11.772, 11.816, 11.871, 11.59, 11.518, 11.94, 11.803, 11.924, 12.183, 12.136, 12.361, 12.406, 11.932, 11.684, 11.292, 11.388, 11.874, 12.184, 12.002, 12.16, 11.741, 11.26, 11.123, 11.534, 11.777, 11.407, 11.275, 11.679, 11.62, 11.218, 11.235, 11.352, 11.366, 11.061, 10.661, 10.582, 10.899, 11.352, 11.792, 11.475, 11.263, 11.538, 11.183, 10.936, 11.399, 11.171, 11.214, 10.89, 10.728, 11.191, 11.646, 11.62, 11.195, 11.178, 11.18, 10.956, 11.205, 10.87, 11.098, 10.639, 10.487, 10.507, 10.92, 10.558, 10.119, 9.882, 9.573, 9.515, 9.845, 9.852, 9.495, 9.726, 10.116, 10.452, 10.77, 11.225, 10.92, 10.824, 11.096, 11.542, 11.06, 10.568, 10.585, 10.884, 10.401, 10.068, 9.964, 10.285, 10.239, 10.036, 10.417, 10.132, 9.839, 9.556, 9.084, 9.239, 9.304, 9.067, 8.587, 8.471, 8.007, 8.321, 8.55, 9.008, 9.138, 9.088, 9.434, 9.156, 9.65, 9.431, 9.654, 10.079, 10.411, 10.865, 10.51, 10.205, 10.519, 10.367, 10.855, 10.642, 10.298, 10.622, 10.173, 9.792, 9.995, 9.904, 9.771, 9.597, 9.506, 9.212, 9.688, 10.032, 9.723, 9.839, 9.918, 10.332, 10.236, 9.989, 10.192, 10.685, 10.908, 11.275, 11.72, 12.158, 12.045, 12.244, 12.333, 12.246, 12.552, 12.958, 13.11, 13.53, 13.123, 13.138, 13.57, 13.389, 13.511, 13.759, 13.698, 13.744, 13.467, 13.795, 13.665, 13.377, 13.423, 13.772, 13.295, 13.073, 12.718, 12.388, 12.399, 12.185, 11.941, 11.818, 11.465, 11.811, 12.163, 11.86, 11.935, 11.809, 12.145, 12.624, 12.768, 12.321, 12.277, 11.889, 12.11, 12.606, 12.943, 12.945, 13.112, 13.199, 13.664, 14.051, 14.189, 14.339, 14.611, 14.656, 15.112, 15.086, 15.263, 15.021, 15.346, 15.572, 15.607, 15.983, 16.151, 16.215, 16.096, 16.089, 16.32, 16.59, 16.657, 16.752, 
16.583, 16.743, 16.373, 16.662, 16.243, 16.163, 16.491, 16.958, 16.977, 17.225, 17.637, 17.344, 17.684, 17.892, 18.036, 18.182, 17.803, 17.588, 17.101, 17.538, 17.124, 16.787, 17.167, 17.138, 16.955, 17.148, 17.135, 17.635, 17.718, 17.675, 17.622, 17.358, 17.754, 17.729, 17.576, 17.772, 18.239, 18.441, 18.729, 18.319, 18.608, 18.493, 18.069, 18.122, 18.314, 18.423, 18.709, 18.548, 18.384, 18.391, 17.988, 17.986, 17.653, 17.249, 17.298, 17.06, 17.36, 17.108, 17.348, 17.596, 17.46, 17.635, 17.275, 17.291, 16.933, 17.337, 17.231, 17.146, 17.148, 16.751, 16.891, 17.038, 16.735, 16.64, 16.231, 15.957, 15.977, 16.077, 16.054, 15.797, 15.67, 15.911, 16.077, 16.17, 15.722, 15.258, 14.877, 15.138, 15., 14.811, 14.698, 14.407, 14.583, 14.704, 15.153, 15.436, 15.634, 15.453, 15.877, 15.696, 15.563, 15.927, 16.255, 16.696, 16.266, 16.698, 16.365, 16.493, 16.973, 16.71, 16.327, 16.605, 16.486, 16.846, 16.935, 17.21, 17.389, 17.546, 17.773, 17.641, 17.485, 17.794, 17.354, 16.904, 16.675, 16.43, 16.898, 16.819, 16.921, 17.201, 17.617, 17.368, 17.864, 17.484], columns=['value']) self.long_bench = pd.DataFrame([9.7, 10.179, 10.321, 9.855, 9.936, 10.096, 10.331, 10.662, 10.59, 11.031, 11.154, 10.945, 10.625, 10.233, 10.284, 10.252, 10.221, 10.352, 10.444, 10.773, 10.904, 11.104, 10.797, 10.55, 10.943, 11.352, 11.641, 11.983, 11.696, 12.138, 12.365, 12.379, 11.969, 12.454, 12.947, 13.119, 13.013, 12.763, 12.632, 13.034, 12.681, 12.561, 12.938, 12.867, 13.202, 13.132, 13.539, 13.91, 13.456, 13.692, 13.771, 13.904, 14.069, 13.728, 13.97, 14.228, 13.84, 14.041, 13.963, 13.689, 13.543, 13.858, 14.118, 13.987, 13.611, 14.028, 14.229, 14.41, 14.74, 15.03, 14.915, 15.207, 15.354, 15.665, 15.877, 15.682, 15.625, 15.175, 15.105, 14.893, 14.86, 15.097, 15.178, 15.293, 15.238, 15., 15.283, 14.994, 14.907, 14.664, 14.888, 15.297, 15.313, 15.368, 14.956, 14.802, 14.506, 14.257, 14.619, 15.019, 15.049, 14.625, 14.894, 14.978, 15.434, 15.578, 16.038, 16.107, 16.277, 16.365, 16.204, 16.465, 16.401, 16.895, 17.057, 16.621, 16.225, 16.075, 15.863, 16.292, 16.551, 16.724, 16.817, 16.81, 17.192, 16.86, 16.745, 16.707, 16.552, 16.133, 16.301, 16.08, 15.81, 15.75, 15.909, 16.127, 16.457, 16.204, 16.329, 16.748, 16.624, 17.011, 16.548, 16.831, 16.653, 16.791, 16.57, 16.778, 16.928, 16.932, 17.22, 16.876, 17.301, 17.422, 17.689, 17.316, 17.547, 17.534, 17.409, 17.669, 17.416, 17.859, 17.477, 17.307, 17.245, 17.352, 17.851, 17.412, 17.144, 17.138, 17.085, 16.926, 16.674, 16.854, 17.064, 16.95, 16.609, 16.957, 16.498, 16.552, 16.175, 15.858, 15.697, 15.781, 15.583, 15.36, 15.558, 16.046, 15.968, 15.905, 16.358, 16.783, 17.048, 16.762, 17.224, 17.363, 17.246, 16.79, 16.608, 16.423, 15.991, 15.527, 15.147, 14.759, 14.792, 15.206, 15.148, 15.046, 15.429, 14.999, 15.407, 15.124, 14.72, 14.713, 15.022, 15.092, 14.982, 15.001, 14.734, 14.713, 14.841, 14.562, 15.005, 15.483, 15.472, 15.277, 15.503, 15.116, 15.12, 15.442, 15.476, 15.789, 15.36, 15.764, 16.218, 16.493, 16.642, 17.088, 16.816, 16.645, 16.336, 16.511, 16.2, 15.994, 15.86, 15.929, 16.316, 16.416, 16.746, 17.173, 17.531, 17.627, 17.407, 17.49, 17.768, 17.509, 17.795, 18.147, 18.63, 18.945, 19.021, 19.518, 19.6, 19.744, 19.63, 19.32, 18.933, 19.297, 19.598, 19.446, 19.236, 19.198, 19.144, 19.159, 19.065, 19.032, 18.586, 18.272, 18.119, 18.3, 17.894, 17.744, 17.5, 17.083, 17.092, 16.864, 16.453, 16.31, 16.681, 16.342, 16.447, 16.715, 17.068, 17.067, 16.822, 16.673, 16.675, 16.592, 16.686, 16.397, 15.902, 15.597, 15.357, 15.162, 15.348, 15.603, 15.283, 15.257, 15.082, 14.621, 
14.366, 14.039, 13.957, 14.141, 13.854, 14.243, 14.414, 14.033, 13.93, 14.104, 14.461, 14.249, 14.053, 14.165, 14.035, 14.408, 14.501, 14.019, 14.265, 14.67, 14.797, 14.42, 14.681, 15.16, 14.715, 14.292, 14.411, 14.656, 15.094, 15.366, 15.055, 15.198, 14.762, 14.294, 13.854, 13.811, 13.549, 13.927, 13.897, 13.421, 13.037, 13.32, 13.721, 13.511, 13.999, 13.529, 13.418, 13.881, 14.326, 14.362, 13.987, 14.015, 13.599, 13.343, 13.307, 13.689, 13.851, 13.404, 13.577, 13.395, 13.619, 13.195, 12.904, 12.553, 12.294, 12.649, 12.425, 11.967, 12.062, 11.71, 11.645, 12.058, 12.136, 11.749, 11.953, 12.401, 12.044, 11.901, 11.631, 11.396, 11.036, 11.244, 10.864, 11.207, 11.135, 11.39, 11.723, 12.084, 11.8, 11.471, 11.33, 11.504, 11.295, 11.3, 10.901, 10.494, 10.825, 11.054, 10.866, 10.713, 10.875, 10.846, 10.947, 11.422, 11.158, 10.94, 10.521, 10.36, 10.411, 10.792, 10.472, 10.305, 10.525, 10.853, 10.556, 10.72, 10.54, 10.583, 10.299, 10.061, 10.004, 9.903, 9.796, 9.472, 9.246, 9.54, 9.456, 9.177, 9.484, 9.557, 9.493, 9.968, 9.536, 9.39, 8.922, 8.423, 8.518, 8.686, 8.771, 9.098, 9.281, 8.858, 9.027, 8.553, 8.784, 8.996, 9.379, 9.846, 9.855, 9.502, 9.608, 9.761, 9.409, 9.4, 9.332, 9.34, 9.284, 8.844, 8.722, 8.376, 8.775, 8.293, 8.144, 8.63, 8.831, 8.957, 9.18, 9.601, 9.695, 10.018, 9.841, 9.743, 9.292, 8.85, 9.316, 9.288, 9.519, 9.738, 9.289, 9.785, 9.804, 10.06, 10.188, 10.095, 9.739, 9.881, 9.7, 9.991, 10.391, 10.002], columns=['value']) def test_performance_stats(self): """test the function performance_statistics() """ pass def test_fv(self): print(f'test with test data and empty DataFrame') self.assertAlmostEqual(eval_fv(self.test_data1), 6.39245474) self.assertAlmostEqual(eval_fv(self.test_data2), 10.05126375) self.assertAlmostEqual(eval_fv(self.test_data3), 6.95068113) self.assertAlmostEqual(eval_fv(self.test_data4), 8.86508591) self.assertAlmostEqual(eval_fv(self.test_data5), 4.58627048) self.assertAlmostEqual(eval_fv(self.test_data6), 4.10346795) self.assertAlmostEqual(eval_fv(self.test_data7), 2.92532313) self.assertAlmostEqual(eval_fv(pd.DataFrame()), -np.inf) print(f'Error testing') self.assertRaises(AssertionError, eval_fv, 15) self.assertRaises(KeyError, eval_fv, pd.DataFrame([1, 2, 3], columns=['non_value'])) def test_max_drawdown(self): print(f'test with test data and empty DataFrame') self.assertAlmostEqual(eval_max_drawdown(self.test_data1)[0], 0.264274308) self.assertEqual(eval_max_drawdown(self.test_data1)[1], 53) self.assertEqual(eval_max_drawdown(self.test_data1)[2], 86) self.assertTrue(np.isnan(eval_max_drawdown(self.test_data1)[3])) self.assertAlmostEqual(eval_max_drawdown(self.test_data2)[0], 0.334690849) self.assertEqual(eval_max_drawdown(self.test_data2)[1], 0) self.assertEqual(eval_max_drawdown(self.test_data2)[2], 10) self.assertEqual(eval_max_drawdown(self.test_data2)[3], 19) self.assertAlmostEqual(eval_max_drawdown(self.test_data3)[0], 0.244452899) self.assertEqual(eval_max_drawdown(self.test_data3)[1], 90) self.assertEqual(eval_max_drawdown(self.test_data3)[2], 99) self.assertTrue(np.isnan(eval_max_drawdown(self.test_data3)[3])) self.assertAlmostEqual(eval_max_drawdown(self.test_data4)[0], 0.201849684) self.assertEqual(eval_max_drawdown(self.test_data4)[1], 14) self.assertEqual(eval_max_drawdown(self.test_data4)[2], 50) self.assertEqual(eval_max_drawdown(self.test_data4)[3], 54) self.assertAlmostEqual(eval_max_drawdown(self.test_data5)[0], 0.534206456) self.assertEqual(eval_max_drawdown(self.test_data5)[1], 21) self.assertEqual(eval_max_drawdown(self.test_data5)[2], 60) 
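        # NOTE (assumption inferred from the assertions in this test, not from documented
        # behaviour of eval_max_drawdown()): the function appears to return a tuple of
        # (max_drawdown, peak_position, valley_position, recovery_position), where the
        # recovery position is NaN when the value never climbs back to its previous peak.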
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data5)[3])) self.assertAlmostEqual(eval_max_drawdown(self.test_data6)[0], 0.670062689) self.assertEqual(eval_max_drawdown(self.test_data6)[1], 0) self.assertEqual(eval_max_drawdown(self.test_data6)[2], 70) self.assertTrue(np.isnan(eval_max_drawdown(self.test_data6)[3])) self.assertAlmostEqual(eval_max_drawdown(self.test_data7)[0], 0.783577449) self.assertEqual(eval_max_drawdown(self.test_data7)[1], 17) self.assertEqual(eval_max_drawdown(self.test_data7)[2], 51) self.assertTrue(np.isnan(eval_max_drawdown(self.test_data7)[3])) self.assertEqual(eval_max_drawdown(pd.DataFrame()), -np.inf) print(f'Error testing') self.assertRaises(AssertionError, eval_fv, 15) self.assertRaises(KeyError, eval_fv, pd.DataFrame([1, 2, 3], columns=['non_value'])) # test max drawdown == 0: # TODO: investigate: how does divide by zero change? self.assertAlmostEqual(eval_max_drawdown(self.test_data4 - 5)[0], 1.0770474121951792) self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[1], 14) self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[2], 50) def test_info_ratio(self): reference = self.test_data1 self.assertAlmostEqual(eval_info_ratio(self.test_data2, reference, 'value'), 0.075553316) self.assertAlmostEqual(eval_info_ratio(self.test_data3, reference, 'value'), 0.018949457) self.assertAlmostEqual(eval_info_ratio(self.test_data4, reference, 'value'), 0.056328143) self.assertAlmostEqual(eval_info_ratio(self.test_data5, reference, 'value'), -0.004270068) self.assertAlmostEqual(eval_info_ratio(self.test_data6, reference, 'value'), 0.009198027) self.assertAlmostEqual(eval_info_ratio(self.test_data7, reference, 'value'), -0.000890283) def test_volatility(self): self.assertAlmostEqual(eval_volatility(self.test_data1), 0.748646166) self.assertAlmostEqual(eval_volatility(self.test_data2), 0.75527442) self.assertAlmostEqual(eval_volatility(self.test_data3), 0.654188853) self.assertAlmostEqual(eval_volatility(self.test_data4), 0.688375814) self.assertAlmostEqual(eval_volatility(self.test_data5), 1.089989522) self.assertAlmostEqual(eval_volatility(self.test_data6), 1.775419308) self.assertAlmostEqual(eval_volatility(self.test_data7), 1.962758406) self.assertAlmostEqual(eval_volatility(self.test_data1, logarithm=False), 0.750993311) self.assertAlmostEqual(eval_volatility(self.test_data2, logarithm=False), 0.75571473) self.assertAlmostEqual(eval_volatility(self.test_data3, logarithm=False), 0.655331424) self.assertAlmostEqual(eval_volatility(self.test_data4, logarithm=False), 0.692683021) self.assertAlmostEqual(eval_volatility(self.test_data5, logarithm=False), 1.09602969) self.assertAlmostEqual(eval_volatility(self.test_data6, logarithm=False), 1.774789504) self.assertAlmostEqual(eval_volatility(self.test_data7, logarithm=False), 2.003329156) self.assertEqual(eval_volatility(pd.DataFrame()), -np.inf) self.assertRaises(AssertionError, eval_volatility, [1, 2, 3]) # 测试长数据的Volatility计算 expected_volatility = np.array([np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 
np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 0.39955371, 0.39974258, 0.40309866, 0.40486593, 0.4055514, 0.40710639, 0.40708157, 0.40609006, 0.4073625, 0.40835305, 0.41155304, 0.41218193, 0.41207489, 0.41300276, 0.41308415, 0.41292392, 0.41207645, 0.41238397, 0.41229291, 0.41164056, 0.41316317, 0.41348842, 0.41462249, 0.41474574, 0.41652625, 0.41649176, 0.41701556, 0.4166593, 0.41684221, 0.41491689, 0.41435209, 0.41549087, 0.41849338, 0.41998049, 0.41959106, 0.41907311, 0.41916103, 0.42120773, 0.42052391, 0.42111225, 0.42124589, 0.42356445, 0.42214672, 0.42324022, 0.42476639, 0.42621689, 0.42549439, 0.42533678, 0.42539414, 0.42545038, 0.42593637, 0.42652095, 0.42665489, 0.42699563, 0.42798159, 0.42784512, 0.42898006, 0.42868781, 0.42874188, 0.42789631, 0.4277768, 0.42776827, 0.42685216, 0.42660989, 0.42563155, 0.42618281, 0.42606281, 0.42505222, 0.42653242, 0.42555378, 0.42500842, 0.42561939, 0.42442059, 0.42395414, 0.42384356, 0.42319135, 0.42397497, 0.42488579, 0.42449729, 0.42508766, 0.42509878, 0.42456616, 0.42535577, 0.42681884, 0.42688552, 0.42779918, 0.42706058, 0.42792887, 0.42762114, 0.42894045, 0.42977398, 0.42919859, 0.42829041, 0.42780946, 0.42825318, 0.42858952, 0.42858315, 0.42805601, 0.42764751, 0.42744107, 0.42775518, 0.42707283, 0.4258592, 0.42615335, 0.42526286, 0.4248906, 0.42368986, 0.4232565, 0.42265079, 0.42263954, 0.42153046, 0.42132051, 0.41995353, 0.41916605, 0.41914271, 0.41876945, 0.41740175, 0.41583884, 0.41614026, 0.41457908, 0.41472411, 0.41310876, 0.41261041, 0.41212369, 0.41211677, 0.4100645, 0.40852504, 0.40860297, 0.40745338, 0.40698661, 0.40644546, 0.40591375, 0.40640744, 0.40620663, 0.40656649, 0.40727154, 0.40797605, 0.40807137, 0.40808913, 0.40809676, 0.40711767, 0.40724628, 0.40713077, 0.40772698, 0.40765157, 0.40658297, 0.4065991, 0.405011, 0.40537645, 0.40432626, 0.40390177, 0.40237701, 0.40291623, 0.40301797, 0.40324145, 0.40312864, 0.40328316, 0.40190955, 0.40246506, 0.40237663, 0.40198407, 0.401969, 0.40185623, 0.40198313, 0.40005643, 0.39940743, 0.39850438, 0.39845398, 0.39695093, 0.39697295, 0.39663201, 0.39675444, 0.39538699, 0.39331959, 0.39326074, 0.39193287, 0.39157266, 
0.39021327, 0.39062591, 0.38917591, 0.38976991, 0.38864187, 0.38872158, 0.38868096, 0.38868377, 0.38842057, 0.38654784, 0.38649517, 0.38600464, 0.38408115, 0.38323049, 0.38260215, 0.38207663, 0.38142669, 0.38003262, 0.37969367, 0.37768092, 0.37732108, 0.37741991, 0.37617779, 0.37698504, 0.37606784, 0.37499276, 0.37533731, 0.37350437, 0.37375172, 0.37385382, 0.37384003, 0.37338938, 0.37212288, 0.37273075, 0.370559, 0.37038506, 0.37062153, 0.36964661, 0.36818564, 0.3656634, 0.36539259, 0.36428672, 0.36502487, 0.3647148, 0.36551435, 0.36409919, 0.36348181, 0.36254383, 0.36166601, 0.36142665, 0.35954942, 0.35846915, 0.35886759, 0.35813867, 0.35642888, 0.35375231, 0.35061783, 0.35078463, 0.34995508, 0.34688918, 0.34548257, 0.34633158, 0.34622833, 0.34652111, 0.34622774, 0.34540951, 0.34418809, 0.34276593, 0.34160916, 0.33811193, 0.33822709, 0.3391685, 0.33883381]) test_volatility = eval_volatility(self.long_data) test_volatility_roll = self.long_data['volatility'].values self.assertAlmostEqual(test_volatility, np.nanmean(expected_volatility)) self.assertTrue(np.allclose(expected_volatility, test_volatility_roll, equal_nan=True)) def test_sharp(self): self.assertAlmostEqual(eval_sharp(self.test_data1, 5, 0), 0.06135557) self.assertAlmostEqual(eval_sharp(self.test_data2, 5, 0), 0.167858667) self.assertAlmostEqual(eval_sharp(self.test_data3, 5, 0), 0.09950547) self.assertAlmostEqual(eval_sharp(self.test_data4, 5, 0), 0.154928241) self.assertAlmostEqual(eval_sharp(self.test_data5, 5, 0.002), 0.007868673) self.assertAlmostEqual(eval_sharp(self.test_data6, 5, 0.002), 0.018306537) self.assertAlmostEqual(eval_sharp(self.test_data7, 5, 0.002), 0.006259971) # 测试长数据的sharp率计算 expected_sharp = np.array([np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 
np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, -0.02346815, -0.02618783, -0.03763912, -0.03296276, -0.03085698, -0.02851101, -0.02375842, -0.02016746, -0.01107885, -0.01426613, -0.00787204, -0.01135784, -0.01164232, -0.01003481, -0.00022512, -0.00046792, -0.01209378, -0.01278892, -0.01298135, -0.01938214, -0.01671044, -0.02120509, -0.0244281, -0.02416067, -0.02763238, -0.027579, -0.02372774, -0.02215294, -0.02467094, -0.02091266, -0.02590194, -0.03049876, -0.02077131, -0.01483653, -0.02488144, -0.02671638, -0.02561547, -0.01957986, -0.02479803, -0.02703162, -0.02658087, -0.01641755, -0.01946472, -0.01647757, -0.01280889, -0.00893643, -0.00643275, -0.00698457, -0.00549962, -0.00654677, -0.00494757, -0.0035633, -0.00109037, 0.00750654, 0.00451208, 0.00625502, 0.01221367, 0.01326454, 0.01535037, 0.02269538, 0.02028715, 0.02127712, 0.02333264, 0.02273159, 0.01670643, 0.01376513, 0.01265342, 0.02211647, 0.01612449, 0.00856706, -0.00077147, -0.00268848, 0.00210993, -0.00443934, -0.00411912, -0.0018756, -0.00867461, -0.00581601, -0.00660835, -0.00861137, -0.00678614, -0.01188408, -0.00589617, -0.00244323, -0.00201891, -0.01042846, -0.01471016, -0.02167034, -0.02258554, -0.01306809, -0.00909086, -0.01233746, -0.00595166, -0.00184208, 0.00750497, 0.01481886, 0.01761972, 0.01562886, 0.01446414, 0.01285826, 0.01357719, 0.00967613, 0.01636272, 0.01458437, 0.02280183, 0.02151903, 0.01700276, 0.01597368, 0.02114336, 0.02233297, 0.02585631, 0.02768459, 0.03519235, 0.04204535, 0.04328161, 0.04672855, 0.05046191, 0.04619848, 0.04525853, 0.05381529, 0.04598861, 0.03947394, 0.04665006, 0.05586077, 0.05617728, 0.06495018, 0.06205172, 0.05665466, 0.06500615, 0.0632062, 0.06084328, 0.05851466, 0.05659229, 0.05159347, 0.0432977, 0.0474047, 0.04231723, 0.03613176, 0.03618391, 0.03591012, 0.03885674, 0.0402686, 0.03846423, 0.04534014, 0.04721458, 0.05130912, 0.05026281, 0.05394312, 0.05529349, 0.05949243, 0.05463304, 0.06195165, 0.06767606, 0.06880985, 0.07048996, 0.07078815, 0.07420767, 0.06773439, 0.0658441, 0.06470875, 0.06302349, 0.06456876, 0.06411282, 0.06216669, 0.067094, 0.07055075, 0.07254976, 0.07119253, 0.06173308, 0.05393352, 0.05681246, 0.05250643, 0.06099845, 0.0655544, 0.06977334, 0.06636514, 0.06177949, 0.06869908, 0.06719767, 0.06178738, 0.05915714, 0.06882277, 0.06756821, 0.06507994, 0.06489791, 0.06553941, 0.073123, 0.07576757, 0.06805446, 0.06063571, 0.05033801, 0.05206971, 0.05540306, 0.05249118, 0.05755587, 0.0586174, 0.05051288, 0.0564852, 0.05757284, 0.06358355, 0.06130082, 0.04925482, 0.03834472, 0.04163981, 0.04648316, 0.04457858, 0.04324626, 0.04328791, 0.04156207, 0.04818652, 0.04972634, 0.06024123, 0.06489556, 0.06255485, 0.06069815, 0.06466389, 0.07081163, 0.07895358, 0.0881782, 0.09374151, 0.08336506, 0.08764795, 0.09080174, 0.08808926, 0.08641158, 0.07811943, 0.06885318, 0.06479503, 0.06851185, 0.07382819, 0.07047903, 0.06658251, 0.07638379, 0.08667974, 0.08867918, 0.08245323, 0.08961866, 0.09905298, 0.0961908, 0.08562706, 0.0839014, 0.0849072, 0.08338395, 0.08783487, 0.09463609, 0.10332336, 0.11806497, 0.11220297, 0.11589097, 0.11678405]) test_sharp = eval_sharp(self.long_data, 5, 0.00035) self.assertAlmostEqual(np.nanmean(expected_sharp), test_sharp) self.assertTrue(np.allclose(self.long_data['sharp'].values, expected_sharp, equal_nan=True)) def test_beta(self): reference = self.test_data1 self.assertAlmostEqual(eval_beta(self.test_data2, reference, 'value'), 
-0.017148939) self.assertAlmostEqual(eval_beta(self.test_data3, reference, 'value'), -0.042204233) self.assertAlmostEqual(eval_beta(self.test_data4, reference, 'value'), -0.15652986) self.assertAlmostEqual(eval_beta(self.test_data5, reference, 'value'), -0.049195532) self.assertAlmostEqual(eval_beta(self.test_data6, reference, 'value'), -0.026995082) self.assertAlmostEqual(eval_beta(self.test_data7, reference, 'value'), -0.01147809) self.assertRaises(TypeError, eval_beta, [1, 2, 3], reference, 'value') self.assertRaises(TypeError, eval_beta, self.test_data3, [1, 2, 3], 'value') self.assertRaises(KeyError, eval_beta, self.test_data3, reference, 'not_found_value') # 测试长数据的beta计算 expected_beta = np.array([np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, -0.04988841, -0.05127618, -0.04692104, -0.04272652, -0.04080598, -0.0493347, -0.0460858, -0.0416761, -0.03691527, -0.03724924, -0.03678865, -0.03987324, -0.03488321, -0.02567672, -0.02690303, -0.03010128, -0.02437967, -0.02571932, -0.02455681, -0.02839811, -0.03358653, -0.03396697, -0.03466321, -0.03050966, -0.0247583, -0.01629325, -0.01880895, -0.01480403, -0.01348783, -0.00544294, -0.00648176, -0.00467036, -0.01135331, -0.0156841, -0.02340763, -0.02615705, -0.02730771, -0.02906174, -0.02860664, -0.02412914, -0.02066416, -0.01744816, -0.02185133, -0.02145285, -0.02681765, -0.02827694, -0.02394581, -0.02744096, -0.02778825, -0.02703065, -0.03160023, -0.03615371, -0.03681072, -0.04265126, -0.04344738, -0.04232421, -0.04705272, -0.04533344, -0.04605934, -0.05272737, -0.05156463, -0.05134196, -0.04730733, -0.04425352, -0.03869831, 
-0.04159571, -0.04223998, -0.04346747, -0.04229844, -0.04740093, -0.04992507, -0.04621232, -0.04477644, -0.0486915, -0.04598224, -0.04943463, -0.05006391, -0.05362256, -0.04994067, -0.05464769, -0.05443275, -0.05513493, -0.05173594, -0.04500994, -0.04662891, -0.03903505, -0.0419592, -0.04307773, -0.03925718, -0.03711574, -0.03992631, -0.0433058, -0.04533641, -0.0461183, -0.05600344, -0.05758377, -0.05959874, -0.05605942, -0.06002859, -0.06253002, -0.06747014, -0.06427915, -0.05931947, -0.05769974, -0.04791515, -0.05175088, -0.05748039, -0.05385232, -0.05072975, -0.05052637, -0.05125567, -0.05005785, -0.05325104, -0.04977727, -0.04947867, -0.05148544, -0.05739156, -0.05742069, -0.06047279, -0.0558414, -0.06086126, -0.06265151, -0.06411129, -0.06828052, -0.06781762, -0.07083409, -0.07211207, -0.06799162, -0.06913295, -0.06775162, -0.0696265, -0.06678248, -0.06867502, -0.06581961, -0.07055823, -0.06448184, -0.06097973, -0.05795587, -0.0618383, -0.06130145, -0.06050652, -0.05936661, -0.05749424, -0.0499, -0.05050495, -0.04962687, -0.05033439, -0.05070116, -0.05422009, -0.05369759, -0.05548943, -0.05907353, -0.05933035, -0.05927918, -0.06227663, -0.06011455, -0.05650432, -0.05828134, -0.05620949, -0.05715323, -0.05482478, -0.05387113, -0.05095559, -0.05377999, -0.05334267, -0.05220438, -0.04001521, -0.03892434, -0.03660782, -0.04282708, -0.04324623, -0.04127048, -0.04227559, -0.04275226, -0.04347049, -0.04125853, -0.03806295, -0.0330632, -0.03155531, -0.03277152, -0.03304518, -0.03878731, -0.03830672, -0.03727434, -0.0370571, -0.04509224, -0.04207632, -0.04116198, -0.04545179, -0.04584584, -0.05287341, -0.05417433, -0.05175836, -0.05005509, -0.04268674, -0.03442321, -0.03457309, -0.03613426, -0.03524391, -0.03629479, -0.04361312, -0.02626705, -0.02406115, -0.03046384, -0.03181044, -0.03375164, -0.03661673, -0.04520779, -0.04926951, -0.05726738, -0.0584486, -0.06220608, -0.06800563, -0.06797431, -0.07562211, -0.07481996, -0.07731229, -0.08413381, -0.09031826, -0.09691925, -0.11018071, -0.11952675, -0.10826026, -0.11173895, -0.10756359, -0.10775916, -0.11664559, -0.10505051, -0.10606547, -0.09855355, -0.10004159, -0.10857084, -0.12209301, -0.11605758, -0.11105113, -0.1155195, -0.11569505, -0.10513348, -0.09611072, -0.10719791, -0.10843965, -0.11025856, -0.10247839, -0.10554044, -0.10927647, -0.10645088, -0.09982498, -0.10542734, -0.09631372, -0.08229695]) test_beta_mean = eval_beta(self.long_data, self.long_bench, 'value') test_beta_roll = self.long_data['beta'].values self.assertAlmostEqual(test_beta_mean, np.nanmean(expected_beta)) self.assertTrue(np.allclose(test_beta_roll, expected_beta, equal_nan=True)) def test_alpha(self): reference = self.test_data1 self.assertAlmostEqual(eval_alpha(self.test_data2, 5, reference, 'value', 0.5), 11.63072977) self.assertAlmostEqual(eval_alpha(self.test_data3, 5, reference, 'value', 0.5), 1.886590071) self.assertAlmostEqual(eval_alpha(self.test_data4, 5, reference, 'value', 0.5), 6.827021872) self.assertAlmostEqual(eval_alpha(self.test_data5, 5, reference, 'value', 0.92), -1.192265168) self.assertAlmostEqual(eval_alpha(self.test_data6, 5, reference, 'value', 0.92), -1.437142359) self.assertAlmostEqual(eval_alpha(self.test_data7, 5, reference, 'value', 0.92), -1.781311545) # 测试长数据的alpha计算 expected_alpha = np.array([np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 
np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, -0.09418119, -0.11188463, -0.17938358, -0.15588172, -0.1462678, -0.13089586, -0.10780125, -0.09102891, -0.03987585, -0.06075686, -0.02459503, -0.04104284, -0.0444565, -0.04074585, 0.02191275, 0.02255955, -0.05583375, -0.05875539, -0.06055551, -0.09648245, -0.07913737, -0.10627829, -0.12320965, -0.12368335, -0.1506743, -0.15768033, -0.13638829, -0.13065298, -0.14537834, -0.127428, -0.15504529, -0.18184636, -0.12652146, -0.09190138, -0.14847221, -0.15840648, -0.1525789, -0.11859418, -0.14700954, -0.16295761, -0.16051645, -0.10364859, -0.11961134, -0.10258267, -0.08090148, -0.05727746, -0.0429945, -0.04672356, -0.03581408, -0.0439215, -0.03429495, -0.0260362, -0.01075022, 0.04931808, 0.02779388, 0.03984083, 0.08311951, 0.08995566, 0.10522428, 0.16159058, 0.14238174, 0.14759783, 0.16257712, 0.158908, 0.11302115, 0.0909566, 0.08272888, 0.15261884, 0.10546376, 0.04990313, -0.01284111, -0.02720704, 0.00454725, -0.03965491, -0.03818265, -0.02186992, -0.06574751, -0.04846454, -0.05204211, -0.06316498, -0.05095099, -0.08502656, -0.04681162, -0.02362027, -0.02205091, -0.07706374, -0.10371841, -0.14434688, -0.14797935, -0.09055402, -0.06739549, -0.08824959, -0.04855888, -0.02291244, 0.04027138, 0.09370505, 0.11472939, 0.10243593, 0.0921445, 0.07662648, 0.07946651, 0.05450718, 0.10497677, 0.09068334, 0.15462924, 0.14231034, 0.10544952, 0.09980256, 0.14035223, 0.14942974, 0.17624102, 0.19035477, 0.2500807, 0.30724652, 0.31768915, 0.35007521, 0.38412975, 0.34356521, 0.33614463, 0.41206165, 0.33999177, 0.28045963, 0.34076789, 0.42220356, 0.42314636, 0.50790423, 0.47713348, 0.42520169, 0.50488411, 0.48705211, 0.46252601, 0.44325578, 0.42640573, 0.37986783, 0.30652822, 0.34503393, 0.2999069, 0.24928617, 0.24730218, 0.24326897, 0.26657905, 0.27861168, 0.26392824, 0.32552649, 0.34177792, 
0.37837011, 0.37025267, 0.4030612, 0.41339361, 0.45076809, 0.40383354, 0.47093422, 0.52505036, 0.53614256, 0.5500943, 0.55319293, 0.59021451, 0.52358459, 0.50605947, 0.49359168, 0.47895956, 0.49320243, 0.4908336, 0.47310767, 0.51821564, 0.55105932, 0.57291504, 0.5599809, 0.46868842, 0.39620087, 0.42086934, 0.38317217, 0.45934108, 0.50048866, 0.53941991, 0.50676751, 0.46500915, 0.52993663, 0.51668366, 0.46405428, 0.44100603, 0.52726147, 0.51565458, 0.49186248, 0.49001081, 0.49367648, 0.56422294, 0.58882785, 0.51334664, 0.44386256, 0.35056709, 0.36490029, 0.39205071, 0.3677061, 0.41134736, 0.42315067, 0.35356394, 0.40324562, 0.41340007, 0.46503322, 0.44355762, 0.34854314, 0.26412842, 0.28633753, 0.32335224, 0.30761141, 0.29709569, 0.29570487, 0.28000063, 0.32802547, 0.33967726, 0.42511212, 0.46252357, 0.44244974, 0.42152907, 0.45436727, 0.50482359, 0.57339198, 0.6573356, 0.70912003, 0.60328917, 0.6395092, 0.67015805, 0.64241557, 0.62779142, 0.55028063, 0.46448736, 0.43709245, 0.46777983, 0.51789439, 0.48594916, 0.4456216, 0.52008189, 0.60548684, 0.62792473, 0.56645031, 0.62766439, 0.71829315, 0.69481356, 0.59550329, 0.58133754, 0.59014148, 0.58026655, 0.61719273, 0.67373203, 0.75573056, 0.89501633, 0.8347253, 0.87964685, 0.89015835]) test_alpha_mean = eval_alpha(self.long_data, 100, self.long_bench, 'value') test_alpha_roll = self.long_data['alpha'].values self.assertAlmostEqual(test_alpha_mean, np.nanmean(expected_alpha)) self.assertTrue(np.allclose(test_alpha_roll, expected_alpha, equal_nan=True)) def test_calmar(self): """test evaluate function eval_calmar()""" pass def test_benchmark(self): reference = self.test_data1 tr, yr = eval_benchmark(self.test_data2, reference, 'value') self.assertAlmostEqual(tr, 0.19509091) self.assertAlmostEqual(yr, 0.929154957) tr, yr = eval_benchmark(self.test_data3, reference, 'value') self.assertAlmostEqual(tr, 0.19509091) self.assertAlmostEqual(yr, 0.929154957) tr, yr = eval_benchmark(self.test_data4, reference, 'value') self.assertAlmostEqual(tr, 0.19509091) self.assertAlmostEqual(yr, 0.929154957) tr, yr = eval_benchmark(self.test_data5, reference, 'value') self.assertAlmostEqual(tr, 0.19509091) self.assertAlmostEqual(yr, 0.929154957) tr, yr = eval_benchmark(self.test_data6, reference, 'value') self.assertAlmostEqual(tr, 0.19509091) self.assertAlmostEqual(yr, 0.929154957) tr, yr = eval_benchmark(self.test_data7, reference, 'value') self.assertAlmostEqual(tr, 0.19509091) self.assertAlmostEqual(yr, 0.929154957) def test_evaluate(self): pass class TestLoop(unittest.TestCase): """通过一个假设但精心设计的例子来测试loop_step以及loop方法的正确性""" def setUp(self): # 精心设计的模拟股票名称、交易日期、以及股票价格 self.shares = ['share1', 'share2', 'share3', 'share4', 'share5', 'share6', 'share7'] self.dates = ['2016/07/01', '2016/07/04', '2016/07/05', '2016/07/06', '2016/07/07', '2016/07/08', '2016/07/11', '2016/07/12', '2016/07/13', '2016/07/14', '2016/07/15', '2016/07/18', '2016/07/19', '2016/07/20', '2016/07/21', '2016/07/22', '2016/07/25', '2016/07/26', '2016/07/27', '2016/07/28', '2016/07/29', '2016/08/01', '2016/08/02', '2016/08/03', '2016/08/04', '2016/08/05', '2016/08/08', '2016/08/09', '2016/08/10', '2016/08/11', '2016/08/12', '2016/08/15', '2016/08/16', '2016/08/17', '2016/08/18', '2016/08/19', '2016/08/22', '2016/08/23', '2016/08/24', '2016/08/25', '2016/08/26', '2016/08/29', '2016/08/30', '2016/08/31', '2016/09/01', '2016/09/02', '2016/09/05', '2016/09/06', '2016/09/07', '2016/09/08', '2016/09/09', '2016/09/12', '2016/09/13', '2016/09/14', '2016/09/15', '2016/09/16', '2016/09/19', '2016/09/20', 
'2016/09/21', '2016/09/22', '2016/09/23', '2016/09/26', '2016/09/27', '2016/09/28', '2016/09/29', '2016/09/30', '2016/10/10', '2016/10/11', '2016/10/12', '2016/10/13', '2016/10/14', '2016/10/17', '2016/10/18', '2016/10/19', '2016/10/20', '2016/10/21', '2016/10/23', '2016/10/24', '2016/10/25', '2016/10/26', '2016/10/27', '2016/10/29', '2016/10/30', '2016/10/31', '2016/11/01', '2016/11/02', '2016/11/05', '2016/11/06', '2016/11/07', '2016/11/08', '2016/11/09', '2016/11/12', '2016/11/13', '2016/11/14', '2016/11/15', '2016/11/16', '2016/11/19', '2016/11/20', '2016/11/21', '2016/11/22'] self.dates = [pd.Timestamp(date_text) for date_text in self.dates] self.prices = np.array([[5.35, 5.09, 5.03, 4.98, 4.50, 5.09, 4.75], [5.66, 4.84, 5.21, 5.44, 4.35, 5.06, 4.48], [5.79, 4.60, 5.02, 5.45, 4.07, 4.76, 4.56], [5.56, 4.63, 5.50, 5.74, 3.88, 4.62, 4.62], [5.88, 4.64, 5.07, 5.46, 3.74, 4.63, 4.62], [6.25, 4.51, 5.11, 5.45, 3.98, 4.25, 4.59], [5.93, 4.96, 5.15, 5.24, 4.08, 4.13, 4.33], [6.39, 4.65, 5.02, 5.47, 4.00, 3.91, 3.88], [6.31, 4.26, 5.10, 5.58, 4.28, 3.77, 3.47], [5.86, 3.77, 5.24, 5.36, 4.01, 3.43, 3.51], [5.61, 3.39, 4.93, 5.38, 4.14, 3.68, 3.83], [5.31, 3.76, 4.96, 5.30, 4.49, 3.63, 3.67], [5.40, 4.06, 5.40, 5.77, 4.49, 3.94, 3.79], [5.03, 3.87, 5.74, 5.75, 4.46, 4.40, 4.18], [5.38, 3.91, 5.53, 6.15, 4.13, 4.03, 4.02], [5.79, 4.13, 5.79, 6.04, 3.79, 3.93, 4.41], [6.27, 4.27, 5.68, 6.01, 4.23, 3.50, 4.65], [6.59, 4.57, 5.90, 5.71, 4.57, 3.39, 4.89], [6.91, 5.04, 5.75, 5.23, 4.92, 3.30, 4.41], [6.71, 5.31, 6.11, 5.47, 5.28, 3.25, 4.66], [6.33, 5.40, 5.77, 5.79, 5.67, 2.94, 4.37], [6.07, 5.21, 5.85, 5.82, 6.00, 2.71, 4.58], [5.98, 5.06, 5.61, 5.61, 5.89, 2.55, 4.76], [6.46, 4.69, 5.75, 5.31, 5.55, 2.21, 4.37], [6.95, 5.12, 5.50, 5.24, 5.39, 2.29, 4.16], [6.77, 5.27, 5.14, 5.41, 5.26, 2.21, 4.02], [6.70, 5.72, 5.31, 5.60, 5.31, 2.04, 3.77], [6.28, 6.10, 5.68, 5.28, 5.22, 2.39, 3.38], [6.61, 6.27, 5.73, 4.99, 4.90, 2.30, 3.07], [6.25, 6.49, 6.04, 5.09, 4.57, 2.41, 2.90], [6.47, 6.16, 6.27, 5.39, 4.96, 2.40, 2.50], [6.45, 6.26, 6.60, 5.58, 4.82, 2.79, 2.76], [6.88, 6.39, 6.10, 5.33, 4.39, 2.67, 2.29], [7.00, 6.58, 6.25, 5.48, 4.63, 2.27, 2.17], [6.59, 6.20, 6.73, 5.10, 5.05, 2.09, 1.84], [6.59, 5.70, 6.91, 5.39, 4.68, 2.55, 1.83], [6.64, 5.20, 7.01, 5.30, 5.02, 2.22, 2.21], [6.38, 5.37, 7.36, 5.04, 4.84, 2.59, 2.00], [6.10, 5.40, 7.72, 5.51, 4.60, 2.59, 1.76], [6.35, 5.22, 7.68, 5.43, 4.66, 2.95, 1.27], [6.52, 5.38, 7.62, 5.23, 4.41, 2.69, 1.40], [6.87, 5.53, 7.74, 4.99, 4.87, 2.20, 1.11], [6.84, 6.03, 7.53, 5.43, 4.42, 2.69, 1.60], [7.09, 5.77, 7.46, 5.40, 4.08, 2.65, 1.23], [6.88, 5.66, 7.84, 5.60, 4.16, 2.63, 1.59], [6.84, 6.08, 8.11, 5.66, 4.10, 2.14, 1.50], [6.98, 5.62, 8.04, 6.01, 4.43, 2.39, 1.80], [7.02, 5.63, 7.65, 5.64, 4.07, 1.95, 1.55], [7.13, 6.11, 7.52, 5.67, 3.97, 2.32, 1.35], [7.59, 6.03, 7.67, 5.30, 4.16, 2.69, 1.51], [7.61, 6.27, 7.47, 4.91, 4.12, 2.51, 1.08], [7.21, 6.28, 7.44, 5.37, 4.04, 2.62, 1.06], [7.48, 6.52, 7.59, 5.75, 3.84, 2.16, 1.43], [7.66, 7.00, 7.94, 6.08, 3.46, 2.35, 1.43], [7.51, 7.34, 8.25, 6.58, 3.18, 2.31, 1.74], [7.12, 7.34, 7.77, 6.78, 3.10, 1.96, 1.44], [6.97, 7.68, 8.03, 7.20, 3.55, 2.35, 1.83], [6.67, 8.09, 7.87, 7.65, 3.66, 2.58, 1.71], [6.20, 7.68, 7.58, 8.00, 3.66, 2.40, 2.12], [6.34, 7.58, 7.33, 7.92, 3.29, 2.20, 2.45], [6.22, 7.46, 7.22, 8.30, 2.80, 2.31, 2.85], [5.98, 7.59, 6.86, 8.46, 2.88, 2.16, 2.79], [6.37, 7.19, 7.18, 7.99, 3.04, 2.16, 2.91], [6.56, 7.40, 7.54, 8.19, 3.45, 2.20, 3.26], [6.26, 7.48, 7.24, 8.61, 3.88, 1.73, 3.14], [6.69, 7.93, 
6.85, 8.66, 3.58, 1.93, 3.53], [7.13, 8.23, 6.60, 8.91, 3.60, 2.25, 3.65], [6.83, 8.35, 6.65, 9.08, 3.97, 2.69, 3.69], [7.31, 8.44, 6.74, 9.34, 4.05, 2.59, 3.50], [7.43, 8.35, 7.19, 8.96, 4.40, 2.14, 3.25], [7.54, 8.58, 7.14, 8.98, 4.06, 1.68, 3.64], [7.18, 8.82, 6.88, 8.50, 3.60, 1.98, 4.00], [7.21, 9.09, 7.14, 8.65, 3.61, 2.14, 3.63], [7.45, 9.02, 7.30, 8.94, 4.10, 1.89, 3.78], [7.37, 8.87, 6.95, 8.63, 3.74, 1.97, 3.42], [6.88, 9.22, 7.02, 8.65, 4.02, 1.99, 3.76], [7.08, 9.04, 7.38, 8.40, 3.95, 2.37, 3.62], [6.75, 8.60, 7.50, 8.38, 3.81, 2.14, 3.67], [6.60, 8.48, 7.60, 8.23, 3.71, 2.35, 3.61], [6.21, 8.71, 7.15, 8.04, 3.94, 1.86, 3.39], [6.36, 8.79, 7.30, 7.91, 4.43, 2.14, 3.43], [6.82, 8.93, 7.80, 7.57, 4.07, 2.39, 3.33], [6.45, 9.36, 8.15, 7.73, 4.04, 2.53, 3.03], [6.85, 9.68, 8.40, 7.74, 4.34, 2.47, 3.28], [6.48, 10.16, 8.87, 8.07, 4.80, 2.93, 3.46], [6.10, 10.56, 8.53, 7.99, 5.18, 3.09, 3.25], [5.64, 10.63, 8.94, 7.92, 4.90, 2.93, 2.95], [6.01, 10.55, 8.52, 8.40, 5.40, 3.22, 2.87], [6.21, 10.65, 8.80, 8.80, 5.73, 3.06, 2.63], [6.61, 10.55, 8.92, 8.47, 5.62, 2.90, 2.40], [7.02, 10.19, 9.20, 8.07, 5.20, 2.68, 2.53], [7.04, 10.48, 8.71, 7.87, 4.85, 2.46, 2.96], [6.77, 10.36, 8.25, 8.02, 5.18, 2.41, 3.26], [7.09, 10.03, 8.19, 8.39, 4.72, 2.74, 2.97], [6.65, 10.24, 7.80, 8.69, 4.62, 3.15, 3.16], [7.07, 10.01, 7.69, 8.81, 4.55, 3.40, 3.58], [6.80, 10.14, 7.23, 8.99, 4.37, 3.82, 3.23], [6.79, 10.31, 6.98, 9.10, 4.26, 4.02, 3.62], [6.48, 9.88, 7.07, 8.90, 4.25, 3.76, 3.13], [6.39, 10.05, 6.95, 8.87, 4.59, 4.10, 2.93]]) # 精心设计的模拟PT持股仓位目标信号: self.pt_signals = np.array([[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.250, 0.100, 0.150], [0.200, 0.200, 0.000, 0.000, 0.250, 0.100, 0.150], [0.200, 0.200, 0.100, 0.000, 0.250, 0.100, 0.150], [0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150], [0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150], [0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150], [0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150], [0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150], [0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150], [0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150], [0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150], [0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150], [0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150], [0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150], [0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150], [0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150], [0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150], [0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150], [0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150], [0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.000], [0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000], [0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000], [0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000], [0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000], [0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000], [0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000], [0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000], [0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000], [0.066, 0.200, 0.250, 0.150, 0.000, 0.300, 0.000], [0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000], [0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000], [0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000], [0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000], [0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000], [0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000], [0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000], [0.386, 0.136, 0.170, 
0.102, 0.000, 0.204, 0.000], [0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000], [0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000], [0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000], [0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000], [0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000], [0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000], [0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116], [0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116], [0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116], [0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116], [0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116], [0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104], [0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104], [0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104], [0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104], [0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104], [0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104], [0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104], [0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104], [0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104], [0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104], [0.370, 0.193, 0.120, 0.072, 0.072, 0.072, 0.096], [0.000, 0.222, 0.138, 0.222, 0.083, 0.222, 0.111], [0.000, 0.222, 0.138, 0.222, 0.083, 0.222, 0.111], [0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097], [0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097], [0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097], [0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097], [0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097], [0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097], [0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097], [0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097], [0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160], [0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160], [0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160], [0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160], [0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160], [0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160], [0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160], [0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160], [0.047, 0.380, 0.238, 0.000, 0.142, 0.000, 0.190], [0.047, 0.380, 0.238, 0.000, 0.142, 0.000, 0.190], [0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173], [0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173], [0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173], [0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173], [0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173], [0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173], [0.045, 0.454, 0.227, 0.000, 0.000, 0.000, 0.272], [0.045, 0.454, 0.227, 0.000, 0.000, 0.000, 0.272], [0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300], [0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300], [0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300], [0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300], [0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300], [0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300], [0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300], [0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300], [0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300], [0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300], [0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300]]) # 精心设计的模拟PS比例交易信号,与模拟PT信号高度相似 self.ps_signals = np.array([[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.100, 0.150], [0.200, 0.200, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.100, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, -0.750, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 
0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [-0.333, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, -0.500, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, -1.000], [0.000, 0.000, 0.000, 0.000, 0.200, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [-0.500, 0.000, 0.000, 0.150, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.200, 0.000, -1.000, 0.200, 0.000], [0.500, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.200, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, -0.500, 0.200], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.200, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.150, 0.000, 0.000], [-1.000, 0.000, 0.000, 0.250, 0.000, 0.250, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.250, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, -1.000, 0.000, -1.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 
0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [-0.800, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.100, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, -1.000, 0.000, 0.100], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, -1.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [-1.000, 0.000, 0.150, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000]]) # 精心设计的模拟VS股票交易信号,与模拟PS信号类似 self.vs_signals = np.array([[000, 000, 000, 000, 500, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 300, 300], [400, 400, 000, 000, 000, 000, 000], [000, 000, 250, 000, 000, 000, 000], [000, 000, 000, 000, -400, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [-200, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, -200, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, -300], [000, 000, 000, 000, 500, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [-200, 000, 000, 300, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 400, 000, -300, 600, 000], [500, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [600, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, -400, 600], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 500, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 300, 000, 000], [-500, 000, 000, 
500, 000, 200, 000], [000, 000, 000, 000, 000, 000, 000], [500, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, -700, 000, -600, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [-400, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 300, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, -600, 000, 300], [000, 000, 000, 000, 000, 000, 000], [000, -300, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [-200, 000, 700, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000]]) # 精心设计的模拟多价格交易信号,模拟50个交易日对三只股票的操作 self.multi_shares = ['000010', '000030', '000039'] self.multi_dates = ['2016/07/01', '2016/07/04', '2016/07/05', '2016/07/06', '2016/07/07', '2016/07/08', '2016/07/11', '2016/07/12', '2016/07/13', '2016/07/14', '2016/07/15', '2016/07/18', '2016/07/19', '2016/07/20', '2016/07/21', '2016/07/22', '2016/07/25', '2016/07/26', '2016/07/27', '2016/07/28', '2016/07/29', '2016/08/01', '2016/08/02', '2016/08/03', '2016/08/04', '2016/08/05', '2016/08/08', '2016/08/09', '2016/08/10', '2016/08/11', '2016/08/12', '2016/08/15', '2016/08/16', '2016/08/17', '2016/08/18', '2016/08/19', '2016/08/22', '2016/08/23', '2016/08/24', '2016/08/25', '2016/08/26', '2016/08/29', '2016/08/30', '2016/08/31', '2016/09/01', '2016/09/02', '2016/09/05', '2016/09/06', '2016/09/07', '2016/09/08'] self.multi_dates = [pd.Timestamp(date_text) for date_text in self.multi_dates] # 操作的交易价格包括开盘价、最高价和收盘价 self.multi_prices_open = np.array([[10.02, 9.88, 7.26], [10.00, 9.88, 7.00], [9.98, 9.89, 6.88], [9.97, 9.75, 6.91], [9.99, 9.74, np.nan], [10.01, 9.80, 6.81], [10.04, 9.62, 6.63], [10.06, 9.65, 6.45], [10.06, 9.58, 6.16], [10.11, 9.67, 6.24], [10.11, 9.81, 5.96], [10.07, 9.80, 5.97], [10.06, 10.00, 5.96], [10.09, 9.95, 6.20], [10.03, 10.10, 6.35], [10.02, 10.06, 6.11], [10.06, 10.14, 6.37], [10.08, 9.90, 5.58], [9.99, 10.20, 5.65], [10.00, 10.29, 5.65], [10.03, 9.86, 5.19], [10.02, 9.48, 5.42], [10.06, 10.01, 6.30], [10.03, 10.24, 6.15], [9.97, 10.26, 6.05], [9.94, 10.24, 5.89], [9.83, 10.12, 5.22], [9.78, 10.65, 5.20], [9.77, 10.64, 5.07], [9.91, 10.56, 6.04], [9.92, 10.42, 6.12], [9.97, 10.43, 5.85], [9.91, 10.29, 5.67], [9.90, 10.30, 6.02], [9.88, 10.44, 6.04], [9.91, 10.60, 7.07], [9.63, 10.67, 7.64], [9.64, 10.46, 7.99], [9.57, 10.39, 7.59], [9.55, 10.90, 8.73], [9.58, 11.01, 8.72], [9.61, 11.01, 8.97], [9.62, np.nan, 8.58], [9.55, np.nan, 8.71], [9.57, 10.82, 8.77], [9.61, 11.02, 8.40], [9.63, 10.96, 7.95], [9.64, 11.55, 7.76], [9.61, 11.74, 8.25], [9.56, 11.80, 7.51]]) self.multi_prices_high = np.array([[10.07, 9.91, 7.41], [10.00, 10.04, 7.31], [10.00, 9.93, 7.14], [10.00, 10.04, 
7.00], [10.03, 9.84, np.nan], [10.03, 9.88, 6.82], [10.04, 9.99, 6.96], [10.09, 9.70, 6.85], [10.10, 9.67, 6.50], [10.14, 9.71, 6.34], [10.11, 9.85, 6.04], [10.10, 9.90, 6.02], [10.09, 10.00, 6.12], [10.09, 10.20, 6.38], [10.10, 10.11, 6.43], [10.05, 10.18, 6.46], [10.07, 10.21, 6.43], [10.09, 10.26, 6.27], [10.10, 10.38, 5.77], [10.00, 10.47, 6.01], [10.04, 10.42, 5.67], [10.04, 10.07, 5.67], [10.06, 10.24, 6.35], [10.09, 10.27, 6.32], [10.05, 10.38, 6.43], [9.97, 10.43, 6.36], [9.96, 10.39, 5.79], [9.86, 10.65, 5.47], [9.77, 10.84, 5.65], [9.92, 10.65, 6.04], [9.94, 10.73, 6.14], [9.97, 10.63, 6.23], [9.97, 10.51, 5.83], [9.92, 10.35, 6.25], [9.92, 10.46, 6.27], [9.92, 10.63, 7.12], [9.93, 10.74, 7.82], [9.64, 10.76, 8.14], [9.58, 10.54, 8.27], [9.60, 11.02, 8.92], [9.58, 11.12, 8.76], [9.62, 11.17, 9.15], [9.62, np.nan, 8.90], [9.64, np.nan, 9.01], [9.59, 10.92, 9.16], [9.62, 11.15, 9.00], [9.63, 11.11, 8.27], [9.70, 11.55, 7.99], [9.66, 11.95, 8.33], [9.64, 11.93, 8.25]]) self.multi_prices_close = np.array([[10.04, 9.68, 6.64], [10.00, 9.87, 7.26], [10.00, 9.86, 7.03], [9.99, 9.87, 6.87], [9.97, 9.79, np.nan], [9.99, 9.82, 6.64], [10.03, 9.80, 6.85], [10.03, 9.66, 6.70], [10.06, 9.62, 6.39], [10.06, 9.58, 6.22], [10.11, 9.69, 5.92], [10.09, 9.78, 5.91], [10.07, 9.75, 6.11], [10.06, 9.96, 5.91], [10.09, 9.90, 6.23], [10.03, 10.04, 6.28], [10.03, 10.06, 6.28], [10.06, 10.08, 6.27], [10.08, 10.24, 5.70], [10.00, 10.24, 5.56], [9.99, 10.24, 5.67], [10.03, 9.86, 5.16], [10.03, 10.13, 5.69], [10.06, 10.12, 6.32], [10.03, 10.10, 6.14], [9.97, 10.25, 6.25], [9.94, 10.24, 5.79], [9.83, 10.22, 5.26], [9.77, 10.75, 5.05], [9.84, 10.64, 5.45], [9.91, 10.56, 6.06], [9.93, 10.60, 6.21], [9.96, 10.42, 5.69], [9.91, 10.25, 5.46], [9.91, 10.24, 6.02], [9.88, 10.49, 6.69], [9.91, 10.57, 7.43], [9.64, 10.63, 7.72], [9.56, 10.48, 8.16], [9.57, 10.37, 7.83], [9.55, 10.96, 8.70], [9.57, 11.02, 8.71], [9.61, np.nan, 8.88], [9.61, np.nan, 8.54], [9.55, 10.88, 8.87], [9.57, 10.87, 8.87], [9.63, 11.01, 8.18], [9.64, 11.01, 7.80], [9.65, 11.58, 7.97], [9.62, 11.80, 8.25]]) # 交易信号包括三组,分别作用与开盘价、最高价和收盘价 # 此时的关键是股票交割期的处理,交割期不为0时,以交易日为单位交割 self.multi_signals = [] # multisignal的第一组信号为开盘价信号 self.multi_signals.append( pd.DataFrame(np.array([[0.000, 0.000, 0.000], [0.000, -0.500, 0.000], [0.000, -0.500, 0.000], [0.000, 0.000, 0.000], [0.150, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.300, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.300], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.350, 0.250], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.100, 0.000, 0.350], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.200, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.050, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000]]), columns=self.multi_shares, index=self.multi_dates ) ) # 第二组信号为最高价信号 self.multi_signals.append( pd.DataFrame(np.array([[0.000, 0.150, 0.000], [0.000, 
0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, -0.200, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.200], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000]]), columns=self.multi_shares, index=self.multi_dates ) ) # 第三组信号为收盘价信号 self.multi_signals.append( pd.DataFrame(np.array([[0.000, 0.200, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [-0.500, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, -0.800], [0.000, 0.000, 0.000], [0.000, -1.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [-0.750, 0.000, 0.000], [0.000, 0.000, -0.850], [0.000, 0.000, 0.000], [0.000, -0.700, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, -1.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [-1.000, 0.000, 0.000], [0.000, -1.000, 0.000], [0.000, 0.000, 0.000]]), columns=self.multi_shares, index=self.multi_dates ) ) # 交易回测所需的价格也有三组,分别是开盘价、最高价和收盘价 self.multi_histories = [] # multisignal的第一组信号为开盘价信号 self.multi_histories.append( pd.DataFrame(self.multi_prices_open, columns=self.multi_shares, index=self.multi_dates ) ) # 第二组信号为最高价信号 self.multi_histories.append( pd.DataFrame(self.multi_prices_high, columns=self.multi_shares, index=self.multi_dates ) ) # 第三组信号为收盘价信号 self.multi_histories.append( pd.DataFrame(self.multi_prices_close, columns=self.multi_shares, index=self.multi_dates ) ) # 设置回测参数 self.cash = qt.CashPlan(['2016/07/01', '2016/08/12', '2016/09/23'], [10000, 10000, 10000]) self.rate = qt.Cost(buy_fix=0, sell_fix=0, buy_rate=0, sell_rate=0, buy_min=0, sell_min=0, slipage=0) self.rate2 = qt.Cost(buy_fix=0, sell_fix=0, buy_rate=0, sell_rate=0, buy_min=10, sell_min=5, slipage=0) self.pt_signal_hp = dataframe_to_hp( pd.DataFrame(self.pt_signals, index=self.dates, columns=self.shares), htypes='close' ) self.ps_signal_hp = dataframe_to_hp( pd.DataFrame(self.ps_signals, index=self.dates, columns=self.shares), htypes='close' ) self.vs_signal_hp = dataframe_to_hp( pd.DataFrame(self.vs_signals, 
index=self.dates, columns=self.shares), htypes='close' ) self.multi_signal_hp = stack_dataframes( self.multi_signals, stack_along='htypes', htypes='open, high, close' ) self.history_list = dataframe_to_hp( pd.DataFrame(self.prices, index=self.dates, columns=self.shares), htypes='close' ) self.multi_history_list = stack_dataframes( self.multi_histories, stack_along='htypes', htypes='open, high, close' ) # 模拟PT信号回测结果 # PT信号,先卖后买,交割期为0 self.pt_res_sb00 = np.array( [[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000], [0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667], [0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 6035.8333, 0.0000, 9761.1111], [348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9674.8209], [348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9712.5872], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9910.7240], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9919.3782], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9793.0692], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9513.8217], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9123.5935], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9000.5995], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9053.4865], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9248.7142], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9161.1372], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9197.3369], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9504.6981], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9875.2461], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10241.5400], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10449.2398], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10628.3269], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10500.7893], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 0.0000, 5233.1396, 0.0000, 10449.2776], [348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10338.2857], [348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10194.3474], [348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10471.0008], [101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10411.2629], [101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10670.0618], [101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10652.4799], [101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10526.1488], [101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10458.6614], [101.4983, 417.9188, 821.7315, 288.6672, 0.0000, 2576.1284, 0.0000, 4487.0722, 0.0000, 20609.0270], [1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21979.4972], [1216.3282, 
417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21880.9628], [1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21630.0454], [1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 20968.0007], [1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21729.9339], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21107.6400], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21561.1745], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21553.0916], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22316.9366], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22084.2862], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21777.3543], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22756.8225], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22843.4697], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22762.1766], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22257.0973], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 23136.5259], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 21813.7852], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22395.3204], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 23717.6858], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22715.4263], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 22498.3254], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 23341.1733], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24162.3941], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24847.1508], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 23515.9755], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24555.8997], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24390.6372], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24073.3309], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24394.6500], [2076.3314, 903.0334, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 3487.5655, 0.0000, 34904.8150], [0.0000, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 4608.8037, 0.0000, 34198.4475], [0.0000, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 4608.8037, 0.0000, 33753.0190], [644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 34953.8178], [644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 33230.2498], [644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 35026.7819], [644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 36976.2649], [644.7274, 
903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 38673.8147], [644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 38717.3429], [644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 36659.0854], [644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 35877.9607], [644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36874.4840], [644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37010.2695], [644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 38062.3510], [644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36471.1357], [644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37534.9927], [644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37520.2569], [644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36747.7952], [644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36387.9409], [644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 35925.9715], [644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36950.7028], [644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 37383.2463], [644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 37761.2724], [644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 39548.2653], [644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41435.1291], [644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41651.6261], [644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41131.9920], [644.7274, 1657.3981, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 0.0000, 0.0000, 41286.4702], [644.7274, 1657.3981, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 0.0000, 0.0000, 40978.7259], [644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 40334.5453], [644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 41387.9172], [644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42492.6707], [644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42953.7188], [644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42005.1092], [644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42017.9106], [644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 43750.2824], [644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 41766.8679], [0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 42959.1150], [0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 41337.9320], [0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 40290.3688]]) # PT信号,先买后卖,交割期为0 self.pt_res_bs00 = np.array( [[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000], [0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667], [0.0000, 0.0000, 0.0000, 
0.0000, 555.5556, 0.0000, 321.0892, 6035.8333, 0.0000, 9761.1111], [348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9674.8209], [348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9712.5872], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9910.7240], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9919.3782], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9793.0692], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9513.8217], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9123.5935], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9000.5995], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9053.4865], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9248.7142], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9161.1372], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9197.3369], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9504.6981], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9875.2461], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10241.5400], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10449.2398], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10628.3269], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10500.7893], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 0.0000, 5233.1396, 0.0000, 10449.2776], [348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10338.2857], [348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10194.3474], [348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10471.0008], [101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10411.2629], [101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10670.0618], [101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10652.4799], [101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10526.1488], [101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10458.6614], [101.4983, 417.9188, 821.7315, 288.6672, 0.0000, 2576.1284, 0.0000, 4487.0722, 0.0000, 20609.0270], [797.1684, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 2703.5808, 0.0000, 21979.4972], [1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21700.7241], [1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21446.6630], [1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 20795.3593], [1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21557.2924], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 20933.6887], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21392.5581], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 
21390.2918], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22147.7562], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21910.9053], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21594.2980], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22575.4380], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22655.8312], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22578.4365], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22073.2661], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22955.2367], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 21628.1647], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22203.4237], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 23516.2598], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 22505.8428], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 22199.1042], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23027.9302], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23848.5806], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24540.8871], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23205.6838], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24267.6685], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24115.3796], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23814.3667], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24133.6611], [2061.6837, 896.6628, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 3285.8830, 0.0000, 34658.5742], [0.0000, 896.6628, 507.6643, 466.6033, 0.0000, 1523.7106, 1467.7407, 12328.8684, 0.0000, 33950.7917], [0.0000, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 4380.3797, 0.0000, 33711.4045], [644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 34922.0959], [644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 33237.1081], [644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 35031.8071], [644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 36976.3376], [644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 38658.5245], [644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 38712.2854], [644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 36655.3125], [644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 35904.3692], [644.1423, 902.2617, 514.8253, 0.0000, 15.5990, 0.0000, 1467.7407, 14821.9004, 0.0000, 36873.9080], [644.1423, 902.2617, 514.8253, 0.0000, 1220.8683, 0.0000, 1467.7407, 10470.8781, 0.0000, 36727.7895], [644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 
2753.1120, 0.0000, 37719.9840], [644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36138.1277], [644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37204.0760], [644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37173.1201], [644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36398.2298], [644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36034.2178], [644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 35583.6399], [644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36599.2645], [644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 37013.3408], [644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 37367.7449], [644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 39143.8273], [644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 41007.3074], [644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 41225.4657], [644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 40685.9525], [644.1423, 1646.4805, 1033.4242, 0.0000, 0.0000, 0.0000, 1467.7407, 6592.6891, 0.0000, 40851.5435], [644.1423, 1646.4805, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 0.0000, 0.0000, 41082.1210], [644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 40385.0135], [644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 41455.1513], [644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42670.6769], [644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 43213.7233], [644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42205.2480], [644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42273.9386], [644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 44100.0777], [644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42059.7208], [0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 43344.9653], [0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 41621.0324], [0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 40528.0648]]) # PT信号,先卖后买,交割期为2天(股票)0天(现金)以便利用先卖的现金继续买入 self.pt_res_sb20 = np.array( [[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000], [0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667], [0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 321.089, 6035.833, 0.000, 9761.111], [348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9674.821], [348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9712.587], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9910.724], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9919.378], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9793.069], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9513.822], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9123.593], [348.015, 
417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9000.600], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9053.487], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9248.714], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9161.137], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9197.337], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9504.698], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9875.246], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10241.540], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10449.240], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10628.327], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10500.789], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 0.000, 5233.140, 0.000, 10449.278], [348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10338.286], [348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10194.347], [348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10471.001], [101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10411.263], [101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10670.062], [101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10652.480], [101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10526.149], [101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10458.661], [101.498, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 4487.072, 0.000, 20609.027], [797.168, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 0.000, 0.000, 21979.497], [1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21584.441], [1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21309.576], [1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 20664.323], [1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21445.597], [1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 20806.458], [1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21288.441], [1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21294.365], [1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 22058.784], [1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21805.540], [1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21456.333], [1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22459.720], [1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22611.602], [1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22470.912], [1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21932.634], [1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22425.864], [1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21460.103], [1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22376.968], [1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23604.295], [1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 
1577.904, 0.000, 0.000, 22704.826], [1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 22286.293], [1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23204.755], [1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24089.017], [1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24768.185], [1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23265.196], [1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24350.540], [1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24112.706], [1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23709.076], [1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24093.545], [2060.275, 896.050, 504.579, 288.667, 0.000, 763.410, 1577.904, 2835.944, 0.000, 34634.888], [578.327, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 732.036, 0.000, 33912.261], [0.000, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 4415.981, 0.000, 33711.951], [644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 34951.433], [644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 33224.596], [644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 35065.209], [644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 37018.699], [644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 38706.035], [644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 38724.569], [644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 36647.268], [644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 35928.930], [644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36967.229], [644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37056.598], [644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 38129.862], [644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36489.333], [644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37599.602], [644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37566.823], [644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36799.280], [644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36431.196], [644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 35940.942], [644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36973.050], [644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 37393.292], [644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 37711.276], [644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 39515.991], [644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41404.440], [644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41573.523], [644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41011.613], [644.683, 1606.361, 1074.629, 0.000, 0.000, 0.000, 3896.406, 0.000, 0.000, 41160.181], [644.683, 1606.361, 1074.629, 0.000, 0.000, 0.000, 3896.406, 0.000, 0.000, 40815.512], [644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 
3896.406, 16947.110, 0.000, 40145.531], [644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41217.281], [644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 42379.061], [644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 42879.589], [644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41891.452], [644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41929.003], [644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 43718.052], [644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41685.916], [0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 42930.410], [0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 41242.589], [0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 40168.084]]) # PT信号,先买后卖,交割期为2天(股票)1天(现金) self.pt_res_bs21 = np.array([ [0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000], [0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667], [0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 321.089, 6035.833, 0.000, 9761.111], [348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9674.821], [348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9712.587], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9910.724], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9919.378], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9793.069], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9513.822], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9123.593], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9000.600], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9053.487], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9248.714], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9161.137], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9197.337], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9504.698], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9875.246], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10241.540], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10449.240], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10628.327], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10500.789], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 0.000, 5233.140, 0.000, 10449.278], [348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10338.286], [348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10194.347], [348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10471.001], [101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10411.263], [101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10670.062], [101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10652.480], [101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10526.149], [101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10458.661], [101.498, 
417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 4487.072, 0.000, 20609.027], [797.168, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 0.000, 0.000, 21979.497], [797.168, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 2475.037, 0.000, 21584.441], [1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21266.406], [1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 20623.683], [1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21404.957], [1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 20765.509], [1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21248.748], [1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21256.041], [1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 22018.958], [1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21764.725], [1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21413.241], [1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22417.021], [1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22567.685], [1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22427.699], [1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21889.359], [1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22381.938], [1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21416.358], [1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22332.786], [1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 0.000, 2386.698, 0.000, 23557.595], [1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 23336.992], [1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 22907.742], [1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24059.201], [1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24941.902], [1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25817.514], [1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24127.939], [1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25459.688], [1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25147.370], [1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25005.842], [1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 1086.639, 2752.004, 0.000, 25598.700], [2138.154, 929.921, 503.586, 288.667, 0.000, 761.900, 1086.639, 4818.835, 0.000, 35944.098], [661.356, 929.921, 503.586, 553.843, 0.000, 1954.237, 1086.639, 8831.252, 0.000, 35237.243], [0.000, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 9460.955, 0.000, 35154.442], [667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 36166.632], [667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 34293.883], [667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 35976.901], [667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 37848.552], [667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 39512.574], [667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 39538.024], [667.098, 929.921, 503.586, 553.843, 0.000, 
3613.095, 1086.639, 5084.792, 0.000, 37652.984], [667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 36687.909], [667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37749.277], [667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37865.518], [667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38481.190], [667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37425.087], [667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38051.341], [667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38065.478], [667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37429.495], [667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37154.479], [667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 36692.717], [667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 37327.055], [667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 37937.630], [667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 38298.645], [667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 39689.369], [667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 40992.397], [667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 41092.265], [667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 40733.622], [667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 3726.579, 0.000, 0.000, 40708.515], [667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 3726.579, 0.000, 0.000, 40485.321], [667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 39768.059], [667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 40519.595], [667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 41590.937], [667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 42354.983], [667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 41175.149], [667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 41037.902], [667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 42706.213], [667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 40539.205], [0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 41608.692], [0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 39992.148], [0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 39134.828]]) # 模拟PS信号回测结果 # PS信号,先卖后买,交割期为0 self.ps_res_sb00 = np.array( [[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000], [0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667], [0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 5059.7222, 0.0000, 9761.1111], [346.9824, 416.6787, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 1201.2775, 0.0000, 9646.1118], [346.9824, 416.6787, 191.0372, 0.0000, 555.5556, 205.0654, 321.0892, 232.7189, 0.0000, 9685.5858], [346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9813.2184], [346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9803.1288], [346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 
321.0892, 1891.0523, 0.0000, 9608.0198], [346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9311.5727], [346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8883.6246], [346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8751.3900], [346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8794.1811], [346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9136.5704], [231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9209.3588], [231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9093.8294], [231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9387.5537], [231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9585.9589], [231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 9928.7771], [231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10060.3806], [231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10281.0021], [231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10095.5613], [231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 0.0000, 4506.3926, 0.0000, 10029.9571], [231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9875.6133], [231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9614.9463], [231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9824.1722], [115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9732.5743], [115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9968.3391], [115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 10056.1579], [115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9921.4925], [115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9894.1621], [115.7186, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 6179.7742, 0.0000, 20067.9370], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21133.5080], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20988.8485], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20596.7429], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 19910.7730], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20776.7070], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20051.7969], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20725.3884], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20828.8795], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21647.1811], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21310.1687], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20852.0993], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21912.3952], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 
1877.3934, 0.0000, 0.0000, 0.0000, 21937.8282], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21962.4576], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21389.4018], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22027.4535], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 20939.9992], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21250.0636], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22282.7812], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21407.0658], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21160.2373], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21826.7682], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22744.9403], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23466.1185], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22017.8821], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23191.4662], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23099.0822], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22684.7671], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22842.1346], [1073.8232, 416.6787, 735.6442, 269.8496, 1785.2055, 938.6967, 1339.2073, 5001.4246, 0.0000, 33323.8359], [0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 32820.2901], [0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 32891.2308], [0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 34776.5296], [0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 33909.0325], [0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 34560.1906], [0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 36080.4552], [0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 38618.4454], [0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 38497.9230], [0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 37110.0991], [0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 35455.2467], [0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35646.1860], [0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35472.3020], [0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36636.4694], [0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35191.7035], [0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36344.2242], [0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36221.6005], [0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35943.5708], [0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 
35708.2608], [0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35589.0286], [0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36661.0285], [0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 36310.5909], [0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 36466.7637], [0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 37784.4918], [0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 39587.6766], [0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 40064.0191], [0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 39521.6439], [0.0000, 823.2923, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 17142.1018, 0.0000, 39932.2761], [0.0000, 823.2923, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 17142.1018, 0.0000, 39565.2475], [0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 38943.1632], [0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39504.1184], [0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40317.8004], [0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40798.5768], [0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39962.5711], [0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40194.4793], [0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 41260.4003], [0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39966.3024], [0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 40847.3160], [0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 39654.5445], [0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 38914.8151]]) # PS信号,先买后卖,交割期为0 self.ps_res_bs00 = np.array( [[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000], [0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667], [0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 5059.7222, 0.0000, 9761.1111], [346.9824, 416.6787, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 1201.2775, 0.0000, 9646.1118], [346.9824, 416.6787, 191.0372, 0.0000, 555.5556, 205.0654, 321.0892, 232.7189, 0.0000, 9685.5858], [346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9813.2184], [346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9803.1288], [346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9608.0198], [346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9311.5727], [346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8883.6246], [346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8751.3900], [346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8794.1811], [346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9136.5704], [231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9209.3588], [231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 
2472.2444, 0.0000, 9093.8294], [231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9387.5537], [231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9585.9589], [231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 9928.7771], [231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10060.3806], [231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10281.0021], [231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10095.5613], [231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 0.0000, 4506.3926, 0.0000, 10029.9571], [231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9875.6133], [231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9614.9463], [231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9824.1722], [115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9732.5743], [115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9968.3391], [115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 10056.1579], [115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9921.4925], [115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9894.1621], [115.7186, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 6179.7742, 0.0000, 20067.9370], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21133.5080], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20988.8485], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20596.7429], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 19910.7730], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20776.7070], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20051.7969], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20725.3884], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20828.8795], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21647.1811], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21310.1687], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20852.0993], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21912.3952], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21937.8282], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21962.4576], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21389.4018], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21625.6913], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 20873.0389], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21450.9447], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 22269.3892], [1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 
0.0000, 0.0000, 21969.5329], [1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21752.6924], [1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22000.6088], [1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23072.5655], [1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23487.5201], [1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22441.0460], [1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23201.2700], [1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23400.9485], [1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22306.2008], [1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21989.5913], [1073.8232, 737.0632, 735.6442, 269.8496, 1708.7766, 938.6967, 0.0000, 5215.4255, 0.0000, 31897.1636], [0.0000, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 6421.4626, 0.0000, 31509.5059], [0.0000, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 6421.4626, 0.0000, 31451.7888], [978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32773.4592], [978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32287.0318], [978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32698.1938], [978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 34031.5183], [978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 35537.8336], [978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 36212.6487], [978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 36007.5294], [978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 34691.3797], [978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 33904.8810], [978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34341.6098], [978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 35479.9505], [978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34418.4455], [978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34726.7182], [978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34935.0407], [978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34136.7505], [978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 33804.1575], [195.7763, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 14025.8697, 0.0000, 33653.8970], [195.7763, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 14025.8697, 0.0000, 34689.8757], [195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 34635.7841], [195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 35253.2755], [195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 36388.1051], [195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 37987.4204], [195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 38762.2103], [195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 
0.0000, 10562.2913, 0.0000, 38574.0544], [195.7763, 1124.9219, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 15879.4935, 0.0000, 39101.9156], [195.7763, 1124.9219, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 15879.4935, 0.0000, 39132.5587], [195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 38873.2941], [195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39336.6594], [195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39565.9568], [195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39583.4317], [195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39206.8350], [195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39092.6551], [195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39666.1834], [195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 38798.0749], [0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 39143.5561], [0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 38617.8779], [0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 38156.1701]]) # PS信号,先卖后买,交割期为2天(股票)1天(现金) self.ps_res_sb20 = np.array( [[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000], [0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667], [0.000, 0.000, 0.000, 0.000, 555.556, 205.065, 321.089, 5059.722, 0.000, 9761.111], [346.982, 416.679, 0.000, 0.000, 555.556, 205.065, 321.089, 1201.278, 0.000, 9646.112], [346.982, 416.679, 191.037, 0.000, 555.556, 205.065, 321.089, 232.719, 0.000, 9685.586], [346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9813.218], [346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9803.129], [346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9608.020], [346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9311.573], [346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8883.625], [346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8751.390], [346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8794.181], [346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9136.570], [231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9209.359], [231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9093.829], [231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9387.554], [231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9585.959], [231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 9928.777], [231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10060.381], [231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10281.002], [231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10095.561], [231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 0.000, 4506.393, 0.000, 10029.957], [231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9875.613], [231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9614.946], [231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 
2531.270, 0.000, 9824.172], [115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9732.574], [115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9968.339], [115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 10056.158], [115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9921.492], [115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9894.162], [115.719, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 6179.774, 0.000, 20067.937], [1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21133.508], [1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20988.848], [1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20596.743], [1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 19910.773], [1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20776.707], [1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20051.797], [1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20725.388], [1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20828.880], [1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21647.181], [1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21310.169], [1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20852.099], [1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21912.395], [1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21937.828], [1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21962.458], [1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21389.402], [1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22027.453], [1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 20939.999], [1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21250.064], [1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22282.781], [1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21407.066], [1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21160.237], [1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21826.768], [1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22744.940], [1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23466.118], [1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22017.882], [1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23191.466], [1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23099.082], [1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22684.767], [1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22842.135], [1073.823, 416.679, 735.644, 269.850, 1785.205, 938.697, 1339.207, 5001.425, 0.000, 33323.836], [0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 32820.290], [0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 32891.231], [0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 34776.530], [0.000, 416.679, 735.644, 944.961, 
1785.205, 3582.884, 1339.207, 0.000, 0.000, 33909.032], [0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 34560.191], [0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 36080.455], [0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 38618.445], [0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 38497.923], [0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 37110.099], [0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 35455.247], [0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35646.186], [0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35472.302], [0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36636.469], [0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35191.704], [0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36344.224], [0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36221.601], [0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35943.571], [0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35708.261], [0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35589.029], [0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36661.029], [0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 36310.591], [0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 36466.764], [0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 37784.492], [0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 39587.677], [0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 40064.019], [0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 39521.644], [0.000, 823.292, 735.644, 0.000, 0.000, 0.000, 2730.576, 17142.102, 0.000, 39932.276], [0.000, 823.292, 735.644, 0.000, 0.000, 0.000, 2730.576, 17142.102, 0.000, 39565.248], [0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 38943.163], [0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39504.118], [0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40317.800], [0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40798.577], [0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39962.571], [0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40194.479], [0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 41260.400], [0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39966.302], [0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 40847.316], [0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 39654.544], [0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 38914.815]]) # PS信号,先买后卖,交割期为2天(股票)1天(现金) self.ps_res_bs21 = np.array( [[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000], [0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667], [0.000, 0.000, 0.000, 0.000, 555.556, 208.333, 326.206, 5020.833, 0.000, 9761.111], [351.119, 421.646, 0.000, 0.000, 555.556, 208.333, 326.206, 1116.389, 0.000, 9645.961], 
[351.119, 421.646, 190.256, 0.000, 555.556, 208.333, 326.206, 151.793, 0.000, 9686.841], [351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9813.932], [351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9803.000], [351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9605.334], [351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9304.001], [351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8870.741], [351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8738.282], [351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8780.664], [351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9126.199], [234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9199.746], [234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9083.518], [234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9380.932], [234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9581.266], [234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 9927.154], [234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10059.283], [234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10281.669], [234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10093.263], [234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 0.000, 4453.525, 0.000, 10026.289], [234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9870.523], [234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9606.437], [234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9818.691], [117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9726.556], [117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9964.547], [117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 10053.449], [117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9917.440], [117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9889.495], [117.098, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 6189.948, 0.000, 20064.523], [708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21124.484], [708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20827.077], [708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20396.124], [708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 19856.445], [708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20714.156], [708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 19971.485], [708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20733.948], [708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20938.903], [708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21660.772], [708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21265.298], [708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20684.378], [1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21754.770], [1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 
0.000, 0.000, 21775.215], [1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21801.488], [1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21235.427], [1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21466.714], [1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 20717.431], [1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21294.450], [1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 22100.247], [1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21802.552], [1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21593.608], [1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21840.028], [1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22907.725], [1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23325.945], [1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22291.942], [1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23053.050], [1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23260.084], [1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22176.244], [1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21859.297], [1055.763, 740.051, 729.561, 272.237, 1706.748, 932.896, 0.000, 5221.105, 0.000, 31769.617], [0.000, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 6313.462, 0.000, 31389.961], [0.000, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 6313.462, 0.000, 31327.498], [962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32647.140], [962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32170.095], [962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32577.742], [962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 33905.444], [962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 35414.492], [962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 36082.120], [962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 35872.293], [962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 34558.132], [962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 33778.138], [962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34213.578], [962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 35345.791], [962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34288.014], [962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34604.406], [962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34806.850], [962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34012.232], [962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 33681.345], [192.484, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 13958.345, 0.000, 33540.463], [192.484, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 13958.345, 0.000, 34574.280], [192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 34516.781], [192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 35134.412], [192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 
10500.917, 0.000, 36266.530], [192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 37864.376], [192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 38642.633], [192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 38454.227], [192.484, 1127.221, 729.561, 0.000, 0.000, 0.000, 1339.869, 15871.934, 0.000, 38982.227], [192.484, 1127.221, 729.561, 0.000, 0.000, 0.000, 1339.869, 15871.934, 0.000, 39016.154], [192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38759.803], [192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39217.182], [192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39439.690], [192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39454.081], [192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39083.341], [192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38968.694], [192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39532.030], [192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38675.507], [0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 39013.741], [0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 38497.668], [0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 38042.410]]) # 模拟VS信号回测结果 # VS信号,先卖后买,交割期为0 self.vs_res_sb00 = np.array( [[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750.0000, 0.0000, 10000.0000], [0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750.0000, 0.0000, 9925.0000], [0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 4954.0000, 0.0000, 9785.0000], [400.0000, 400.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 878.0000, 0.0000, 9666.0000], [400.0000, 400.0000, 173.1755, 0.0000, 500.0000, 300.0000, 300.0000, 0.0000, 0.0000, 9731.0000], [400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9830.9270], [400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9785.8540], [400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9614.3412], [400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9303.1953], [400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8834.4398], [400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8712.7554], [400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8717.9507], [400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9079.1479], [200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9166.0276], [200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9023.6607], [200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9291.6864], [200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9411.6371], [200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9706.7357], [200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9822.7357], [200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9986.7357], [200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 
9805.7357], [200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 0.0000, 4993.7357, 0.0000, 9704.7357], [200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9567.7357], [200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9209.7357], [200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9407.7357], [0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9329.7357], [0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9545.7357], [0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9652.7357], [0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9414.7357], [0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9367.7357], [0.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 9319.7357, 0.0000, 19556.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20094.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19849.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19802.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19487.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19749.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19392.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19671.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19756.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20111.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19867.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19775.7357], [1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20314.7357], [1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20310.7357], [1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20253.7357], [1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20044.7357], [1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20495.7357], [1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 19798.7357], [1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20103.7357], [1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20864.7357], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20425.7357], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20137.8405], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20711.3567], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21470.3891], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21902.9538], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20962.9538], [1100.0000, 710.4842, 400.0000, 
300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21833.5184], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21941.8169], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21278.5184], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21224.4700], [1100.0000, 710.4842, 400.0000, 300.0000, 600.0000, 500.0000, 600.0000, 9160.0000, 0.0000, 31225.2119], [600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488.0000, 0.0000, 30894.5748], [600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488.0000, 0.0000, 30764.3811], [1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 31815.5828], [1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 31615.4215], [1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 32486.1394], [1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 33591.2847], [1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34056.5428], [1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34756.4863], [1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34445.5428], [1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34433.9541], [1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 33870.4703], [1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 34014.3010], [1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 34680.5671], [1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 33890.9945], [1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 34004.6640], [1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 34127.7768], [1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 33421.1638], [1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 33120.9057], [700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830.0000, 0.0000, 32613.3171], [700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830.0000, 0.0000, 33168.1558], [700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 33504.6236], [700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 33652.1318], [700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 34680.4867], [700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 35557.5191], [700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 35669.7128], [700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 35211.4466], [700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530.0000, 0.0000, 35550.6079], [700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530.0000, 0.0000, 35711.6563], [700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35682.6079], [700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 
100.0000, 900.0000, 16695.0000, 0.0000, 35880.8336], [700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36249.8740], [700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36071.6159], [700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35846.1562], [700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35773.3578], [700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36274.9465], [700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35739.3094], [500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 36135.0917], [500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 35286.5835], [500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 35081.3658]]) # VS信号,先买后卖,交割期为0 self.vs_res_bs00 = np.array( [[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750, 0.0000, 10000], [0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750, 0.0000, 9925], [0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 4954, 0.0000, 9785], [400.0000, 400.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 878, 0.0000, 9666], [400.0000, 400.0000, 173.1755, 0.0000, 500.0000, 300.0000, 300.0000, 0, 0.0000, 9731], [400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9830.927022], [400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9785.854043], [400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9614.341223], [400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9303.195266], [400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8834.439842], [400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8712.755424], [400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8717.95069], [400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9079.147929], [200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9166.027613], [200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9023.66075], [200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9291.686391], [200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9411.637081], [200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9706.7357], [200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9822.7357], [200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9986.7357], [200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9805.7357], [200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 0.0000, 4993.7357, 0.0000, 9704.7357], [200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9567.7357], [200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9209.7357], [200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9407.7357], [0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9329.7357], [0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 
0.0000, 9545.7357], [0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9652.7357], [0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9414.7357], [0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9367.7357], [0.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 9319.7357, 0.0000, 19556.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20094.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19849.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19802.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19487.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19749.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19392.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19671.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19756.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20111.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19867.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19775.7357], [1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20314.7357], [1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20310.7357], [1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20253.7357], [1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20044.7357], [1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20495.7357], [1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 19798.7357], [1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20103.7357], [1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20864.7357], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20425.7357], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20137.84054], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20711.35674], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21470.38914], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21902.95375], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20962.95375], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21833.51837], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21941.81688], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21278.51837], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21224.46995], [1100.0000, 710.4842, 400.0000, 300.0000, 600.0000, 500.0000, 600.0000, 9160, 0.0000, 31225.21185], [600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488, 0.0000, 30894.57479], [600.0000, 710.4842, 400.0000, 800.0000, 
600.0000, 700.0000, 600.0000, 7488, 0.0000, 30764.38113], [1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 31815.5828], [1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 31615.42154], [1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 32486.13941], [1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 33591.28466], [1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34056.54276], [1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34756.48633], [1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34445.54276], [1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34433.95412], [1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33870.47032], [1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34014.30104], [1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34680.56715], [1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33890.99452], [1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34004.66398], [1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34127.77683], [1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33421.1638], [1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33120.9057], [700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830, 0.0000, 32613.31706], [700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830, 0.0000, 33168.15579], [700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 33504.62357], [700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 33652.13176], [700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 34680.4867], [700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35557.51909], [700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35669.71276], [700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35211.44665], [700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530, 0.0000, 35550.60792], [700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530, 0.0000, 35711.65633], [700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35682.60792], [700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35880.83362], [700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36249.87403], [700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36071.61593], [700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35846.15615], [700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35773.35783], [700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36274.94647], [700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35739.30941], [500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 36135.09172], 
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 35286.58353], [500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 35081.36584]]) # VS signals, sell first then buy, delivery period 2 days (stock) 1 day (cash) self.vs_res_sb20 = np.array( [[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 10000.000], [0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 9925.000], [0.000, 0.000, 0.000, 0.000, 500.000, 300.000, 300.000, 4954.000, 0.000, 9785.000], [400.000, 400.000, 0.000, 0.000, 500.000, 300.000, 300.000, 878.000, 0.000, 9666.000], [400.000, 400.000, 173.176, 0.000, 500.000, 300.000, 300.000, 0.000, 0.000, 9731.000], [400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9830.927], [400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9785.854], [400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9614.341], [400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9303.195], [400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8834.440], [400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8712.755], [400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8717.951], [400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9079.148], [200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9166.028], [200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9023.661], [200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9291.686], [200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9411.637], [200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9706.736], [200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9822.736], [200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9986.736], [200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9805.736], [200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 0.000, 4993.736, 0.000, 9704.736], [200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9567.736], [200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9209.736], [200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9407.736], [0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9329.736], [0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9545.736], [0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9652.736], [0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9414.736], [0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9367.736], [0.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 9319.736, 0.000, 19556.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20094.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19849.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19802.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19487.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19749.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19392.736], [500.000, 400.000, 400.000, 300.000, 
300.000, 900.000, 0.000, 6094.736, 0.000, 19671.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19756.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20111.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19867.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19775.736], [1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20314.736], [1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20310.736], [1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20253.736], [1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20044.736], [1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20495.736], [1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 19798.736], [1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20103.736], [1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20864.736], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20425.736], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20137.841], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20711.357], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21470.389], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21902.954], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20962.954], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21833.518], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21941.817], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21278.518], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21224.470], [1100.000, 710.484, 400.000, 300.000, 600.000, 500.000, 600.000, 9160.000, 0.000, 31225.212], [600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30894.575], [600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30764.381], [1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31815.583], [1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31615.422], [1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 32486.139], [1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 33591.285], [1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34056.543], [1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34756.486], [1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34445.543], [1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34433.954], [1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33870.470], [1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34014.301], [1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34680.567], [1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33890.995], [1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 
11346.000, 0.000, 34004.664], [1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34127.777], [1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33421.164], [1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33120.906], [700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 32613.317], [700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 33168.156], [700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33504.624], [700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33652.132], [700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 34680.487], [700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35557.519], [700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35669.713], [700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35211.447], [700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35550.608], [700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35711.656], [700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35682.608], [700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35880.834], [700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36249.874], [700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36071.616], [700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35846.156], [700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35773.358], [700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36274.946], [700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35739.309], [500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 36135.092], [500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35286.584], [500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35081.366]]) # VS signals, buy first then sell, delivery period 2 days (stock) 1 day (cash) self.vs_res_bs21 = np.array( [[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 10000.000], [0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 9925.000], [0.000, 0.000, 0.000, 0.000, 500.000, 300.000, 300.000, 4954.000, 0.000, 9785.000], [400.000, 400.000, 0.000, 0.000, 500.000, 300.000, 300.000, 878.000, 0.000, 9666.000], [400.000, 400.000, 173.176, 0.000, 500.000, 300.000, 300.000, 0.000, 0.000, 9731.000], [400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9830.927], [400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9785.854], [400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9614.341], [400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9303.195], [400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8834.440], [400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8712.755], [400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8717.951], [400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9079.148], [200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 
9166.028], [200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9023.661], [200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9291.686], [200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9411.637], [200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9706.736], [200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9822.736], [200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9986.736], [200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9805.736], [200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 0.000, 4993.736, 0.000, 9704.736], [200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9567.736], [200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9209.736], [200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9407.736], [0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9329.736], [0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9545.736], [0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9652.736], [0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9414.736], [0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9367.736], [0.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 9319.736, 0.000, 19556.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20094.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19849.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19802.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19487.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19749.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19392.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19671.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19756.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20111.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19867.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19775.736], [1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20314.736], [1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20310.736], [1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20253.736], [1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20044.736], [1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20495.736], [1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 19798.736], [1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20103.736], [1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20864.736], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20425.736], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20137.841], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20711.357], [1100.000, 710.484, 400.000, 300.000, 
300.000, 500.000, 600.000, 0.000, 0.000, 21470.389], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21902.954], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20962.954], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21833.518], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21941.817], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21278.518], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21224.470], [1100.000, 710.484, 400.000, 300.000, 600.000, 500.000, 600.000, 9160.000, 0.000, 31225.212], [600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30894.575], [600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30764.381], [1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31815.583], [1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31615.422], [1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 32486.139], [1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 33591.285], [1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34056.543], [1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34756.486], [1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34445.543], [1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34433.954], [1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33870.470], [1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34014.301], [1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34680.567], [1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33890.995], [1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34004.664], [1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34127.777], [1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33421.164], [1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33120.906], [700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 32613.317], [700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 33168.156], [700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33504.624], [700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33652.132], [700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 34680.487], [700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35557.519], [700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35669.713], [700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35211.447], [700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35550.608], [700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35711.656], [700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35682.608], [700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35880.834], [700.000, 710.484, 400.000, 
100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36249.874], [700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36071.616], [700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35846.156], [700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35773.358], [700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36274.946], [700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35739.309], [500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 36135.092], [500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35286.584], [500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35081.366]]) # Multi-type signal processing results, sell first then buy, reusing cash from sales for purchases, delivery period 2 days (stock) 0 days (cash) self.multi_res = np.array( [[0.0000, 357.2545, 0.0000, 6506.9627, 0.0000, 9965.1867], [0.0000, 357.2545, 0.0000, 6506.9627, 0.0000, 10033.0650], [0.0000, 178.6273, 0.0000, 8273.5864, 0.0000, 10034.8513], [0.0000, 178.6273, 0.0000, 8273.5864, 0.0000, 10036.6376], [150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10019.3404], [150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10027.7062], [150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10030.1477], [150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10005.1399], [150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10002.5054], [150.3516, 489.4532, 0.0000, 3765.8877, 0.0000, 9967.3860], [75.1758, 391.5625, 0.0000, 5490.1377, 0.0000, 10044.4059], [75.1758, 391.5625, 0.0000, 5490.1377, 0.0000, 10078.1430], [75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10138.2709], [75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10050.4768], [75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10300.0711], [75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10392.6970], [75.1758, 391.5625, 169.2705, 4644.3773, 0.0000, 10400.5282], [75.1758, 391.5625, 169.2705, 4644.3773, 0.0000, 10408.9220], [75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10376.5914], [75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10346.8794], [75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10364.7474], [75.1758, 381.1856, 645.5014, 2459.1665, 0.0000, 10302.4570], [18.7939, 381.1856, 645.5014, 3024.6764, 0.0000, 10747.4929], [18.7939, 381.1856, 96.8252, 6492.3097, 0.0000, 11150.9107], [18.7939, 381.1856, 96.8252, 6492.3097, 0.0000, 11125.2946], [18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11191.9956], [18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11145.7486], [18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11090.0768], [132.5972, 114.3557, 864.3802, 4223.9548, 0.0000, 11113.8733], [132.5972, 114.3557, 864.3802, 4223.9548, 0.0000, 11456.3281], [132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21983.7333], [132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 22120.6165], [132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21654.5327], [132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21429.6550], [132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21912.5643], [132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 22516.3100], [132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23169.0777], [132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23390.8080], [132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23743.3742], [132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 23210.7311], [132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 24290.4375], [132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 24335.3279], [132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 18317.3553], [132.5972, 559.9112, 
864.3802, 9367.3999, 0.0000, 18023.4660], [259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24390.0527], [259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24389.6421], [259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24483.5953], [0.0000, 559.9112, 0.0000, 18321.5674, 0.0000, 24486.1895], [0.0000, 0.0000, 0.0000, 24805.3389, 0.0000, 24805.3389], [0.0000, 0.0000, 0.0000, 24805.3389, 0.0000, 24805.3389]]) def test_loop_step_pt_sb00(self): """ test loop step PT-signal, sell first""" c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0, own_cash=10000, own_amounts=np.zeros(7, dtype='float'), available_cash=10000, available_amounts=np.zeros(7, dtype='float'), op=self.pt_signals[0], prices=self.prices[0], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=True, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 1 result in complete looping: \n' f'cash_change: +{c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = 10000 + c_g + c_s amounts = np.zeros(7, dtype='float') + a_p + a_s self.assertAlmostEqual(cash, 7500) self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0]))) c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0, own_cash=self.pt_res_sb00[2][7], own_amounts=self.pt_res_sb00[2][0:7], available_cash=self.pt_res_sb00[2][7], available_amounts=self.pt_res_sb00[2][0:7], op=self.pt_signals[3], prices=self.prices[3], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=True, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 4 result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = self.pt_res_sb00[2][7] + c_g + c_s amounts = self.pt_res_sb00[2][0:7] + a_p + a_s self.assertAlmostEqual(cash, self.pt_res_sb00[3][7], 2) self.assertTrue(np.allclose(amounts, self.pt_res_sb00[3][0:7])) c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0, own_cash=self.pt_res_sb00[30][7], own_amounts=self.pt_res_sb00[30][0:7], available_cash=self.pt_res_sb00[30][7], available_amounts=self.pt_res_sb00[30][0:7], op=self.pt_signals[31], prices=self.prices[31], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=True, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 32 result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = self.pt_res_sb00[30][7] + c_g + c_s amounts = self.pt_res_sb00[30][0:7] + a_p + a_s self.assertAlmostEqual(cash, self.pt_res_sb00[31][7], 2) self.assertTrue(np.allclose(amounts, self.pt_res_sb00[31][0:7])) c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0, own_cash=self.pt_res_sb00[59][7] + 10000, own_amounts=self.pt_res_sb00[59][0:7], available_cash=self.pt_res_sb00[59][7] + 10000, available_amounts=self.pt_res_sb00[59][0:7], op=self.pt_signals[60], prices=self.prices[60], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=True, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 61 result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' 
f'----------------------------------\n') cash = self.pt_res_sb00[59][7] + c_g + c_s + 10000 amounts = self.pt_res_sb00[59][0:7] + a_p + a_s self.assertAlmostEqual(cash, self.pt_res_sb00[60][7], 2) self.assertTrue(np.allclose(amounts, self.pt_res_sb00[60][0:7])) c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0, own_cash=cash, own_amounts=amounts, available_cash=cash, available_amounts=amounts, op=self.pt_signals[61], prices=self.prices[61], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=True, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 62 result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = cash + c_g + c_s amounts = amounts + a_p + a_s self.assertAlmostEqual(cash, self.pt_res_sb00[61][7], 2) self.assertTrue(np.allclose(amounts, self.pt_res_sb00[61][0:7])) c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0, own_cash=self.pt_res_sb00[95][7], own_amounts=self.pt_res_sb00[95][0:7], available_cash=self.pt_res_sb00[95][7], available_amounts=self.pt_res_sb00[95][0:7], op=self.pt_signals[96], prices=self.prices[96], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=True, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 97 result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = self.pt_res_sb00[96][7] + c_g + c_s amounts = self.pt_res_sb00[96][0:7] + a_p + a_s self.assertAlmostEqual(cash, self.pt_res_sb00[96][7], 2) self.assertTrue(np.allclose(amounts, self.pt_res_sb00[96][0:7])) c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0, own_cash=cash, own_amounts=amounts, available_cash=cash, available_amounts=amounts, op=self.pt_signals[97], prices=self.prices[97], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=True, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 98 result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = cash + c_g + c_s amounts = amounts + a_p + a_s self.assertAlmostEqual(cash, self.pt_res_sb00[97][7], 2) self.assertTrue(np.allclose(amounts, self.pt_res_sb00[97][0:7])) def test_loop_step_pt_bs00(self): """ test loop step PT-signal, buy first""" c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0, own_cash=10000, own_amounts=np.zeros(7, dtype='float'), available_cash=10000, available_amounts=np.zeros(7, dtype='float'), op=self.pt_signals[0], prices=self.prices[0], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=False, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 1 result in complete looping: \n' f'cash_change: +{c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = 10000 + c_g + c_s amounts = np.zeros(7, dtype='float') + a_p + a_s self.assertAlmostEqual(cash, 7500) self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0]))) c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0, own_cash=self.pt_res_bs00[2][7], own_amounts=self.pt_res_bs00[2][0:7], 
available_cash=self.pt_res_bs00[2][7], available_amounts=self.pt_res_bs00[2][0:7], op=self.pt_signals[3], prices=self.prices[3], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=False, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 4 result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = self.pt_res_bs00[2][7] + c_g + c_s amounts = self.pt_res_bs00[2][0:7] + a_p + a_s self.assertAlmostEqual(cash, self.pt_res_bs00[3][7], 2) self.assertTrue(np.allclose(amounts, self.pt_res_bs00[3][0:7])) c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0, own_cash=self.pt_res_bs00[30][7], own_amounts=self.pt_res_bs00[30][0:7], available_cash=self.pt_res_bs00[30][7], available_amounts=self.pt_res_bs00[30][0:7], op=self.pt_signals[31], prices=self.prices[31], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=False, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 32 result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = self.pt_res_bs00[30][7] + c_g + c_s amounts = self.pt_res_bs00[30][0:7] + a_p + a_s self.assertAlmostEqual(cash, self.pt_res_bs00[31][7], 2) self.assertTrue(np.allclose(amounts, self.pt_res_bs00[31][0:7])) c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0, own_cash=self.pt_res_bs00[59][7] + 10000, own_amounts=self.pt_res_bs00[59][0:7], available_cash=self.pt_res_bs00[59][7] + 10000, available_amounts=self.pt_res_bs00[59][0:7], op=self.pt_signals[60], prices=self.prices[60], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=False, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 61 result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = self.pt_res_bs00[59][7] + c_g + c_s + 10000 amounts = self.pt_res_bs00[59][0:7] + a_p + a_s self.assertAlmostEqual(cash, self.pt_res_bs00[60][7], 2) self.assertTrue(np.allclose(amounts, self.pt_res_bs00[60][0:7])) c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0, own_cash=cash, own_amounts=amounts, available_cash=cash, available_amounts=amounts, op=self.pt_signals[61], prices=self.prices[61], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=False, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 62 result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = cash + c_g + c_s amounts = amounts + a_p + a_s self.assertAlmostEqual(cash, self.pt_res_bs00[61][7], 2) self.assertTrue(np.allclose(amounts, self.pt_res_bs00[61][0:7])) c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0, own_cash=self.pt_res_bs00[95][7], own_amounts=self.pt_res_bs00[95][0:7], available_cash=self.pt_res_bs00[95][7], available_amounts=self.pt_res_bs00[95][0:7], op=self.pt_signals[96], prices=self.prices[96], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=False, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 97 
result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = self.pt_res_bs00[96][7] + c_g + c_s amounts = self.pt_res_bs00[96][0:7] + a_p + a_s self.assertAlmostEqual(cash, self.pt_res_bs00[96][7], 2) self.assertTrue(np.allclose(amounts, self.pt_res_bs00[96][0:7])) c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0, own_cash=cash, own_amounts=amounts, available_cash=cash, available_amounts=amounts, op=self.pt_signals[97], prices=self.prices[97], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=False, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 98 result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = cash + c_g + c_s amounts = amounts + a_p + a_s self.assertAlmostEqual(cash, self.pt_res_bs00[97][7], 2) self.assertTrue(np.allclose(amounts, self.pt_res_bs00[97][0:7])) def test_loop_step_ps_sb00(self): """ test loop step PS-signal, sell first""" c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1, own_cash=10000, own_amounts=np.zeros(7, dtype='float'), available_cash=10000, available_amounts=np.zeros(7, dtype='float'), op=self.ps_signals[0], prices=self.prices[0], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=True, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 1 result in complete looping: \n' f'cash_change: +{c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = 10000 + c_g + c_s amounts = np.zeros(7, dtype='float') + a_p + a_s self.assertAlmostEqual(cash, 7500) self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0]))) c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1, own_cash=self.ps_res_sb00[2][7], own_amounts=self.ps_res_sb00[2][0:7], available_cash=self.ps_res_sb00[2][7], available_amounts=self.ps_res_sb00[2][0:7], op=self.ps_signals[3], prices=self.prices[3], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=True, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 4 result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = self.ps_res_sb00[2][7] + c_g + c_s amounts = self.ps_res_sb00[2][0:7] + a_p + a_s self.assertAlmostEqual(cash, self.ps_res_sb00[3][7], 2) self.assertTrue(np.allclose(amounts, self.ps_res_sb00[3][0:7])) c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1, own_cash=self.ps_res_sb00[30][7], own_amounts=self.ps_res_sb00[30][0:7], available_cash=self.ps_res_sb00[30][7], available_amounts=self.ps_res_sb00[30][0:7], op=self.ps_signals[31], prices=self.prices[31], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=True, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 32 result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = self.ps_res_sb00[30][7] + c_g + c_s amounts = self.ps_res_sb00[30][0:7] + a_p + a_s 
self.assertAlmostEqual(cash, self.ps_res_sb00[31][7], 2) self.assertTrue(np.allclose(amounts, self.ps_res_sb00[31][0:7])) c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1, own_cash=self.ps_res_sb00[59][7] + 10000, own_amounts=self.ps_res_sb00[59][0:7], available_cash=self.ps_res_sb00[59][7] + 10000, available_amounts=self.ps_res_sb00[59][0:7], op=self.ps_signals[60], prices=self.prices[60], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=True, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 61 result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = self.ps_res_sb00[59][7] + c_g + c_s + 10000 amounts = self.ps_res_sb00[59][0:7] + a_p + a_s self.assertAlmostEqual(cash, self.ps_res_sb00[60][7], 2) self.assertTrue(np.allclose(amounts, self.ps_res_sb00[60][0:7])) c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1, own_cash=cash, own_amounts=amounts, available_cash=cash, available_amounts=amounts, op=self.ps_signals[61], prices=self.prices[61], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=True, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 62 result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = cash + c_g + c_s amounts = amounts + a_p + a_s self.assertAlmostEqual(cash, self.ps_res_sb00[61][7], 2) self.assertTrue(np.allclose(amounts, self.ps_res_sb00[61][0:7])) c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1, own_cash=self.ps_res_sb00[95][7], own_amounts=self.ps_res_sb00[95][0:7], available_cash=self.ps_res_sb00[95][7], available_amounts=self.ps_res_sb00[95][0:7], op=self.ps_signals[96], prices=self.prices[96], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=True, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 97 result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = self.ps_res_sb00[96][7] + c_g + c_s amounts = self.ps_res_sb00[96][0:7] + a_p + a_s self.assertAlmostEqual(cash, self.ps_res_sb00[96][7], 2) self.assertTrue(np.allclose(amounts, self.ps_res_sb00[96][0:7])) c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1, own_cash=cash, own_amounts=amounts, available_cash=cash, available_amounts=amounts, op=self.ps_signals[97], prices=self.prices[97], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=True, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 98 result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = cash + c_g + c_s amounts = amounts + a_p + a_s self.assertAlmostEqual(cash, self.ps_res_sb00[97][7], 2) self.assertTrue(np.allclose(amounts, self.ps_res_sb00[97][0:7])) def test_loop_step_ps_bs00(self): """ test loop step PS-signal, buy first""" c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1, own_cash=10000, own_amounts=np.zeros(7, dtype='float'), available_cash=10000, available_amounts=np.zeros(7, dtype='float'), 
op=self.ps_signals[0], prices=self.prices[0], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=False, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 1 result in complete looping: \n' f'cash_change: +{c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = 10000 + c_g + c_s amounts = np.zeros(7, dtype='float') + a_p + a_s self.assertAlmostEqual(cash, 7500) self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0]))) c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1, own_cash=self.ps_res_bs00[2][7], own_amounts=self.ps_res_bs00[2][0:7], available_cash=self.ps_res_bs00[2][7], available_amounts=self.ps_res_bs00[2][0:7], op=self.ps_signals[3], prices=self.prices[3], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=False, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 4 result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = self.ps_res_bs00[2][7] + c_g + c_s amounts = self.ps_res_bs00[2][0:7] + a_p + a_s self.assertAlmostEqual(cash, self.ps_res_bs00[3][7], 2) self.assertTrue(np.allclose(amounts, self.ps_res_bs00[3][0:7])) c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1, own_cash=self.ps_res_bs00[30][7], own_amounts=self.ps_res_bs00[30][0:7], available_cash=self.ps_res_bs00[30][7], available_amounts=self.ps_res_bs00[30][0:7], op=self.ps_signals[31], prices=self.prices[31], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=False, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 32 result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = self.ps_res_bs00[30][7] + c_g + c_s amounts = self.ps_res_bs00[30][0:7] + a_p + a_s self.assertAlmostEqual(cash, self.ps_res_bs00[31][7], 2) self.assertTrue(np.allclose(amounts, self.ps_res_bs00[31][0:7])) c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1, own_cash=self.ps_res_bs00[59][7] + 10000, own_amounts=self.ps_res_bs00[59][0:7], available_cash=self.ps_res_bs00[59][7] + 10000, available_amounts=self.ps_res_bs00[59][0:7], op=self.ps_signals[60], prices=self.prices[60], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=False, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 61 result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = self.ps_res_bs00[59][7] + c_g + c_s + 10000 amounts = self.ps_res_bs00[59][0:7] + a_p + a_s self.assertAlmostEqual(cash, self.ps_res_bs00[60][7], 2) self.assertTrue(np.allclose(amounts, self.ps_res_bs00[60][0:7])) c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1, own_cash=cash, own_amounts=amounts, available_cash=cash, available_amounts=amounts, op=self.ps_signals[61], prices=self.prices[61], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=False, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 62 result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' 
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = cash + c_g + c_s amounts = amounts + a_p + a_s self.assertAlmostEqual(cash, self.ps_res_bs00[61][7], 2) self.assertTrue(np.allclose(amounts, self.ps_res_bs00[61][0:7])) c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1, own_cash=self.ps_res_bs00[95][7], own_amounts=self.ps_res_bs00[95][0:7], available_cash=self.ps_res_bs00[95][7], available_amounts=self.ps_res_bs00[95][0:7], op=self.ps_signals[96], prices=self.prices[96], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=False, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 97 result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = self.ps_res_bs00[96][7] + c_g + c_s amounts = self.ps_res_bs00[96][0:7] + a_p + a_s self.assertAlmostEqual(cash, self.ps_res_bs00[96][7], 2) self.assertTrue(np.allclose(amounts, self.ps_res_bs00[96][0:7])) c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1, own_cash=cash, own_amounts=amounts, available_cash=cash, available_amounts=amounts, op=self.ps_signals[97], prices=self.prices[97], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=False, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 98 result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = cash + c_g + c_s amounts = amounts + a_p + a_s self.assertAlmostEqual(cash, self.ps_res_bs00[97][7], 2) self.assertTrue(np.allclose(amounts, self.ps_res_bs00[97][0:7])) def test_loop_step_vs_sb00(self): """test loop step of Volume Signal type of signals""" c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2, own_cash=10000, own_amounts=np.zeros(7, dtype='float'), available_cash=10000, available_amounts=np.zeros(7, dtype='float'), op=self.vs_signals[0], prices=self.prices[0], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=True, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 1 result in complete looping: \n' f'cash_change: +{c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = 10000 + c_g + c_s amounts = np.zeros(7, dtype='float') + a_p + a_s self.assertAlmostEqual(cash, 7750) self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 500., 0, 0]))) c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2, own_cash=self.vs_res_sb00[2][7], own_amounts=self.vs_res_sb00[2][0:7], available_cash=self.vs_res_sb00[2][7], available_amounts=self.vs_res_sb00[2][0:7], op=self.vs_signals[3], prices=self.prices[3], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=True, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 4 result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = self.vs_res_sb00[2][7] + c_g + c_s amounts = self.vs_res_sb00[2][0:7] + a_p + a_s self.assertAlmostEqual(cash, self.vs_res_sb00[3][7], 2) self.assertTrue(np.allclose(amounts, self.vs_res_sb00[3][0:7])) c_g, c_s, 
a_p, a_s, fee = qt.core._loop_step(signal_type=2, own_cash=self.vs_res_sb00[30][7], own_amounts=self.vs_res_sb00[30][0:7], available_cash=self.vs_res_sb00[30][7], available_amounts=self.vs_res_sb00[30][0:7], op=self.vs_signals[31], prices=self.prices[31], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=True, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 32 result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = self.vs_res_sb00[30][7] + c_g + c_s amounts = self.vs_res_sb00[30][0:7] + a_p + a_s self.assertAlmostEqual(cash, self.vs_res_sb00[31][7], 2) self.assertTrue(np.allclose(amounts, self.vs_res_sb00[31][0:7])) c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2, own_cash=self.vs_res_sb00[59][7] + 10000, own_amounts=self.vs_res_sb00[59][0:7], available_cash=self.vs_res_sb00[59][7] + 10000, available_amounts=self.vs_res_sb00[59][0:7], op=self.vs_signals[60], prices=self.prices[60], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=True, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 61 result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = self.vs_res_sb00[59][7] + c_g + c_s + 10000 amounts = self.vs_res_sb00[59][0:7] + a_p + a_s self.assertAlmostEqual(cash, self.vs_res_sb00[60][7], 2) self.assertTrue(np.allclose(amounts, self.vs_res_sb00[60][0:7])) c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2, own_cash=cash, own_amounts=amounts, available_cash=cash, available_amounts=amounts, op=self.vs_signals[61], prices=self.prices[61], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=True, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 62 result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = cash + c_g + c_s amounts = amounts + a_p + a_s self.assertAlmostEqual(cash, self.vs_res_sb00[61][7], 2) self.assertTrue(np.allclose(amounts, self.vs_res_sb00[61][0:7])) c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2, own_cash=self.vs_res_sb00[95][7], own_amounts=self.vs_res_sb00[95][0:7], available_cash=self.vs_res_sb00[95][7], available_amounts=self.vs_res_sb00[95][0:7], op=self.vs_signals[96], prices=self.prices[96], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=True, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 97 result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = self.vs_res_sb00[96][7] + c_g + c_s amounts = self.vs_res_sb00[96][0:7] + a_p + a_s self.assertAlmostEqual(cash, self.vs_res_sb00[96][7], 2) self.assertTrue(np.allclose(amounts, self.vs_res_sb00[96][0:7])) c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2, own_cash=cash, own_amounts=amounts, available_cash=cash, available_amounts=amounts, op=self.vs_signals[97], prices=self.prices[97], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=True, allow_sell_short=False, 
moq_buy=0, moq_sell=0, print_log=True) print(f'day 98 result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = cash + c_g + c_s amounts = amounts + a_p + a_s self.assertAlmostEqual(cash, self.vs_res_sb00[97][7], 2) self.assertTrue(np.allclose(amounts, self.vs_res_sb00[97][0:7])) def test_loop_step_vs_bs00(self): """test loop step of Volume Signal type of signals""" c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2, own_cash=10000, own_amounts=np.zeros(7, dtype='float'), available_cash=10000, available_amounts=np.zeros(7, dtype='float'), op=self.vs_signals[0], prices=self.prices[0], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=False, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 1 result in complete looping: \n' f'cash_change: +{c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = 10000 + c_g + c_s amounts = np.zeros(7, dtype='float') + a_p + a_s self.assertAlmostEqual(cash, 7750) self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 500., 0, 0]))) c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2, own_cash=self.vs_res_bs00[2][7], own_amounts=self.vs_res_bs00[2][0:7], available_cash=self.vs_res_bs00[2][7], available_amounts=self.vs_res_bs00[2][0:7], op=self.vs_signals[3], prices=self.prices[3], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=False, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 4 result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = self.vs_res_bs00[2][7] + c_g + c_s amounts = self.vs_res_bs00[2][0:7] + a_p + a_s self.assertAlmostEqual(cash, self.vs_res_bs00[3][7], 2) self.assertTrue(np.allclose(amounts, self.vs_res_bs00[3][0:7])) c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2, own_cash=self.vs_res_bs00[30][7], own_amounts=self.vs_res_bs00[30][0:7], available_cash=self.vs_res_bs00[30][7], available_amounts=self.vs_res_bs00[30][0:7], op=self.vs_signals[31], prices=self.prices[31], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=False, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 32 result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = self.vs_res_bs00[30][7] + c_g + c_s amounts = self.vs_res_bs00[30][0:7] + a_p + a_s self.assertAlmostEqual(cash, self.vs_res_bs00[31][7], 2) self.assertTrue(np.allclose(amounts, self.vs_res_bs00[31][0:7])) c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2, own_cash=self.vs_res_bs00[59][7] + 10000, own_amounts=self.vs_res_bs00[59][0:7], available_cash=self.vs_res_bs00[59][7] + 10000, available_amounts=self.vs_res_bs00[59][0:7], op=self.vs_signals[60], prices=self.prices[60], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=False, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 61 result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' 
f'----------------------------------\n') cash = self.vs_res_bs00[59][7] + c_g + c_s + 10000 amounts = self.vs_res_bs00[59][0:7] + a_p + a_s self.assertAlmostEqual(cash, self.vs_res_bs00[60][7], 2) self.assertTrue(np.allclose(amounts, self.vs_res_bs00[60][0:7])) c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2, own_cash=cash, own_amounts=amounts, available_cash=cash, available_amounts=amounts, op=self.vs_signals[61], prices=self.prices[61], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=False, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 62 result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = cash + c_g + c_s amounts = amounts + a_p + a_s self.assertAlmostEqual(cash, self.vs_res_bs00[61][7], 2) self.assertTrue(np.allclose(amounts, self.vs_res_bs00[61][0:7])) c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2, own_cash=self.vs_res_bs00[95][7], own_amounts=self.vs_res_bs00[95][0:7], available_cash=self.vs_res_bs00[95][7], available_amounts=self.vs_res_bs00[95][0:7], op=self.vs_signals[96], prices=self.prices[96], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=False, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 97 result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = self.vs_res_bs00[96][7] + c_g + c_s amounts = self.vs_res_bs00[96][0:7] + a_p + a_s self.assertAlmostEqual(cash, self.vs_res_bs00[96][7], 2) self.assertTrue(np.allclose(amounts, self.vs_res_bs00[96][0:7])) c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2, own_cash=cash, own_amounts=amounts, available_cash=cash, available_amounts=amounts, op=self.vs_signals[97], prices=self.prices[97], rate=self.rate, pt_buy_threshold=0.1, pt_sell_threshold=0.1, maximize_cash_usage=False, allow_sell_short=False, moq_buy=0, moq_sell=0, print_log=True) print(f'day 98 result in complete looping: \n' f'cash_change: + {c_g:.2f} / {c_s:.2f}\n' f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n' f'----------------------------------\n') cash = cash + c_g + c_s amounts = amounts + a_p + a_s self.assertAlmostEqual(cash, self.vs_res_bs00[97][7], 2) self.assertTrue(np.allclose(amounts, self.vs_res_bs00[97][0:7])) def test_loop_pt(self): """ Test looping of PT proportion target signals, with stock delivery delay = 0 days cash delivery delay = 0 day buy-sell sequence = sell first """ print('Test looping of PT proportion target signals, with:\n' 'stock delivery delay = 0 days \n' 'cash delivery delay = 0 day \n' 'buy-sell sequence = sell first') res = apply_loop(op_type=0, op_list=self.pt_signal_hp, history_list=self.history_list, cash_plan=self.cash, cost_rate=self.rate, moq_buy=0, moq_sell=0, inflation_rate=0, print_log=False) self.assertIsInstance(res, pd.DataFrame) # print(f'in test_loop:\nresult of loop test is \n{res}') self.assertTrue(np.allclose(res, self.pt_res_bs00, 2)) print(f'test assertion errors in apply_loop: detect moqs that are not compatible') self.assertRaises(AssertionError, apply_loop, 0, self.ps_signal_hp, self.history_list, self.cash, self.rate, 0, 1, 0, False) self.assertRaises(AssertionError, apply_loop, 0, self.ps_signal_hp, self.history_list, self.cash, self.rate, 1, 5, 
0, False) print(f'test loop results with moq equal to 100') res = apply_loop(op_type=0, op_list=self.ps_signal_hp, history_list=self.history_list, cash_plan=self.cash, cost_rate=self.rate2, moq_buy=100, moq_sell=1, inflation_rate=0, print_log=False) self.assertIsInstance(res, pd.DataFrame) # print(f'in test_loop:\nresult of loop test is \n{res}') def test_loop_pt_with_delay(self): """ Test looping of PT proportion target signals, with: stock delivery delay = 2 days cash delivery delay = 1 day use_sell_cash = False """ print('Test looping of PT proportion target signals, with:\n' 'stock delivery delay = 2 days \n' 'cash delivery delay = 1 day \n' 'maximize_cash = False (buy and sell at the same time)') res = apply_loop( op_type=0, op_list=self.pt_signal_hp, history_list=self.history_list, cash_plan=self.cash, cost_rate=self.rate, moq_buy=0, moq_sell=0, inflation_rate=0, cash_delivery_period=1, stock_delivery_period=2, print_log=False) self.assertIsInstance(res, pd.DataFrame) print(f'in test_loop:\nresult of loop test is \n{res}\n' f'result comparison line by line:') for i in range(len(res)): print(np.around(res.values[i])) print(np.around(self.pt_res_bs21[i])) print() self.assertTrue(np.allclose(res, self.pt_res_bs21, 3)) print(f'test assertion errors in apply_loop: detect moqs that are not compatible') self.assertRaises(AssertionError, apply_loop, 0, self.ps_signal_hp, self.history_list, self.cash, self.rate, 0, 1, 0, False) self.assertRaises(AssertionError, apply_loop, 0, self.ps_signal_hp, self.history_list, self.cash, self.rate, 1, 5, 0, False) print(f'test loop results with moq equal to 100') res = apply_loop( op_type=1, op_list=self.ps_signal_hp, history_list=self.history_list, cash_plan=self.cash, cost_rate=self.rate2, moq_buy=100, moq_sell=1, inflation_rate=0, print_log=False) self.assertIsInstance(res, pd.DataFrame) print(f'in test_loop:\nresult of loop test is \n{res}') def test_loop_pt_with_delay_use_cash(self): """ Test looping of PT proportion target signals, with: stock delivery delay = 2 days cash delivery delay = 0 day use sell cash = True (sell stock first to use cash when possible (not possible when cash delivery period != 0)) """ print('Test looping of PT proportion target signals, with:\n' 'stock delivery delay = 2 days \n' 'cash delivery delay = 1 day \n' 'maximize cash usage = True \n' 'but not applicable because cash delivery period == 1') res = apply_loop( op_type=0, op_list=self.pt_signal_hp, history_list=self.history_list, cash_plan=self.cash, cost_rate=self.rate, moq_buy=0, moq_sell=0, cash_delivery_period=0, stock_delivery_period=2, inflation_rate=0, max_cash_usage=True, print_log=True) self.assertIsInstance(res, pd.DataFrame) print(f'in test_loop:\nresult of loop test is \n{res}\n' f'result comparison line by line:') for i in range(len(res)): print(np.around(res.values[i])) print(np.around(self.pt_res_sb20[i])) print() self.assertTrue(np.allclose(res, self.pt_res_sb20, 3)) print(f'test assertion errors in apply_loop: detect moqs that are not compatible') self.assertRaises(AssertionError, apply_loop, 0, self.ps_signal_hp, self.history_list, self.cash, self.rate, 0, 1, 0, False) self.assertRaises(AssertionError, apply_loop, 0, self.ps_signal_hp, self.history_list, self.cash, self.rate, 1, 5, 0, False) print(f'test loop results with moq equal to 100') res = apply_loop( op_type=1, op_list=self.ps_signal_hp, history_list=self.history_list, cash_plan=self.cash, cost_rate=self.rate2, moq_buy=100, moq_sell=1, cash_delivery_period=1, stock_delivery_period=2, 
inflation_rate=0, print_log=True) self.assertIsInstance(res, pd.DataFrame) print(f'in test_loop:\nresult of loop test is \n{res}') def test_loop_ps(self): """ Test looping of PS Proportion Signal type of signals """ res = apply_loop(op_type=1, op_list=self.ps_signal_hp, history_list=self.history_list, cash_plan=self.cash, cost_rate=self.rate, moq_buy=0, moq_sell=0, inflation_rate=0, print_log=False) self.assertIsInstance(res, pd.DataFrame) print(f'in test_loop:\nresult of loop test is \n{res}') self.assertTrue(np.allclose(res, self.ps_res_bs00, 5)) print(f'test assertion errors in apply_loop: detect moqs that are not compatible') self.assertRaises(AssertionError, apply_loop, 0, self.ps_signal_hp, self.history_list, self.cash, self.rate, 0, 1, 0, False) self.assertRaises(AssertionError, apply_loop, 0, self.ps_signal_hp, self.history_list, self.cash, self.rate, 1, 5, 0, False) print(f'test loop results with moq equal to 100') res = apply_loop(op_type=1, op_list=self.ps_signal_hp, history_list=self.history_list, cash_plan=self.cash, cost_rate=self.rate2, moq_buy=100, moq_sell=1, inflation_rate=0, print_log=False) self.assertIsInstance(res, pd.DataFrame) print(f'in test_loop:\nresult of loop test is \n{res}') def test_loop_ps_with_delay(self): """ Test looping of PT proportion target signals, with: stock delivery delay = 2 days cash delivery delay = 1 day use_sell_cash = False """ print('Test looping of PT proportion target signals, with:\n' 'stock delivery delay = 2 days \n' 'cash delivery delay = 1 day \n' 'maximize_cash = False (buy and sell at the same time)') res = apply_loop( op_type=1, op_list=self.ps_signal_hp, history_list=self.history_list, cash_plan=self.cash, cost_rate=self.rate, moq_buy=0, moq_sell=0, inflation_rate=0, cash_delivery_period=1, stock_delivery_period=2, print_log=False) self.assertIsInstance(res, pd.DataFrame) print(f'in test_loop:\nresult of loop test is \n{res}\n' f'result comparison line by line:') for i in range(len(res)): print(np.around(res.values[i])) print(np.around(self.ps_res_bs21[i])) print() self.assertTrue(np.allclose(res, self.ps_res_bs21, 3)) print(f'test assertion errors in apply_loop: detect moqs that are not compatible') self.assertRaises(AssertionError, apply_loop, 0, self.ps_signal_hp, self.history_list, self.cash, self.rate, 0, 1, 0, False) self.assertRaises(AssertionError, apply_loop, 0, self.ps_signal_hp, self.history_list, self.cash, self.rate, 1, 5, 0, False) print(f'test loop results with moq equal to 100') res = apply_loop( op_type=1, op_list=self.ps_signal_hp, history_list=self.history_list, cash_plan=self.cash, cost_rate=self.rate2, moq_buy=100, moq_sell=1, inflation_rate=0, print_log=False) self.assertIsInstance(res, pd.DataFrame) print(f'in test_loop:\nresult of loop test is \n{res}') def test_loop_ps_with_delay_use_cash(self): """ Test looping of PT proportion target signals, with: stock delivery delay = 2 days cash delivery delay = 0 day use sell cash = True (sell stock first to use cash when possible (not possible when cash delivery period != 0)) """ print('Test looping of PT proportion target signals, with:\n' 'stock delivery delay = 2 days \n' 'cash delivery delay = 1 day \n' 'maximize cash usage = True \n' 'but not applicable because cash delivery period == 1') res = apply_loop( op_type=1, op_list=self.ps_signal_hp, history_list=self.history_list, cash_plan=self.cash, cost_rate=self.rate, moq_buy=0, moq_sell=0, cash_delivery_period=0, stock_delivery_period=2, inflation_rate=0, max_cash_usage=True, print_log=True) 
self.assertIsInstance(res, pd.DataFrame) print(f'in test_loop:\nresult of loop test is \n{res}\n' f'result comparison line by line:') for i in range(len(res)): print(np.around(res.values[i])) print(np.around(self.ps_res_sb20[i])) print() self.assertTrue(np.allclose(res, self.ps_res_sb20, 3)) print(f'test assertion errors in apply_loop: detect moqs that are not compatible') self.assertRaises(AssertionError, apply_loop, 0, self.ps_signal_hp, self.history_list, self.cash, self.rate, 0, 1, 0, False) self.assertRaises(AssertionError, apply_loop, 0, self.ps_signal_hp, self.history_list, self.cash, self.rate, 1, 5, 0, False) print(f'test loop results with moq equal to 100') res = apply_loop( op_type=1, op_list=self.ps_signal_hp, history_list=self.history_list, cash_plan=self.cash, cost_rate=self.rate2, moq_buy=100, moq_sell=1, cash_delivery_period=1, stock_delivery_period=2, inflation_rate=0, print_log=True) self.assertIsInstance(res, pd.DataFrame) print(f'in test_loop:\nresult of loop test is \n{res}') def test_loop_vs(self): """ Test looping of VS Volume Signal type of signals """ res = apply_loop(op_type=2, op_list=self.vs_signal_hp, history_list=self.history_list, cash_plan=self.cash, cost_rate=self.rate, moq_buy=0, moq_sell=0, inflation_rate=0, print_log=False) self.assertIsInstance(res, pd.DataFrame) print(f'in test_loop:\nresult of loop test is \n{res}') self.assertTrue(np.allclose(res, self.vs_res_bs00, 5)) print(f'test assertion errors in apply_loop: detect moqs that are not compatible') self.assertRaises(AssertionError, apply_loop, 0, self.ps_signal_hp, self.history_list, self.cash, self.rate, 0, 1, 0, False) self.assertRaises(AssertionError, apply_loop, 0, self.ps_signal_hp, self.history_list, self.cash, self.rate, 1, 5, 0, False) print(f'test loop results with moq equal to 100') res = apply_loop(op_type=2, op_list=self.vs_signal_hp, history_list=self.history_list, cash_plan=self.cash, cost_rate=self.rate2, moq_buy=100, moq_sell=1, inflation_rate=0, print_log=False) self.assertIsInstance(res, pd.DataFrame) print(f'in test_loop:\nresult of loop test is \n{res}') def test_loop_vs_with_delay(self): """ Test looping of PT proportion target signals, with: stock delivery delay = 2 days cash delivery delay = 1 day use_sell_cash = False """ print('Test looping of PT proportion target signals, with:\n' 'stock delivery delay = 2 days \n' 'cash delivery delay = 1 day \n' 'maximize_cash = False (buy and sell at the same time)') res = apply_loop( op_type=2, op_list=self.vs_signal_hp, history_list=self.history_list, cash_plan=self.cash, cost_rate=self.rate, moq_buy=0, moq_sell=0, inflation_rate=0, cash_delivery_period=1, stock_delivery_period=2, print_log=True) self.assertIsInstance(res, pd.DataFrame) print(f'in test_loop:\nresult of loop test is \n{res}\n' f'result comparison line by line:') for i in range(len(res)): print(np.around(res.values[i])) print(np.around(self.vs_res_bs21[i])) print() self.assertTrue(np.allclose(res, self.vs_res_bs21, 3)) print(f'test assertion errors in apply_loop: detect moqs that are not compatible') self.assertRaises(AssertionError, apply_loop, 0, self.vs_signal_hp, self.history_list, self.cash, self.rate, 0, 1, 0, False) self.assertRaises(AssertionError, apply_loop, 0, self.vs_signal_hp, self.history_list, self.cash, self.rate, 1, 5, 0, False) print(f'test loop results with moq equal to 100') res = apply_loop( op_type=1, op_list=self.vs_signal_hp, history_list=self.history_list, cash_plan=self.cash, cost_rate=self.rate2, moq_buy=100, moq_sell=1, inflation_rate=0, 
print_log=False) self.assertIsInstance(res, pd.DataFrame) print(f'in test_loop:\nresult of loop test is \n{res}') def test_loop_vs_with_delay_use_cash(self): """ Test looping of PT proportion target signals, with: stock delivery delay = 2 days cash delivery delay = 0 day use sell cash = True (sell stock first to use cash when possible (not possible when cash delivery period != 0)) """ print('Test looping of PT proportion target signals, with:\n' 'stock delivery delay = 2 days \n' 'cash delivery delay = 1 day \n' 'maximize cash usage = True \n' 'but not applicable because cash delivery period == 1') res = apply_loop( op_type=2, op_list=self.vs_signal_hp, history_list=self.history_list, cash_plan=self.cash, cost_rate=self.rate, moq_buy=0, moq_sell=0, cash_delivery_period=0, stock_delivery_period=2, inflation_rate=0, max_cash_usage=True, print_log=False) self.assertIsInstance(res, pd.DataFrame) print(f'in test_loop:\nresult of loop test is \n{res}\n' f'result comparison line by line:') for i in range(len(res)): print(np.around(res.values[i])) print(np.around(self.vs_res_sb20[i])) print() self.assertTrue(np.allclose(res, self.vs_res_sb20, 3)) print(f'test assertion errors in apply_loop: detect moqs that are not compatible') self.assertRaises(AssertionError, apply_loop, 0, self.vs_signal_hp, self.history_list, self.cash, self.rate, 0, 1, 0, False) self.assertRaises(AssertionError, apply_loop, 0, self.vs_signal_hp, self.history_list, self.cash, self.rate, 1, 5, 0, False) print(f'test loop results with moq equal to 100') res = apply_loop( op_type=1, op_list=self.vs_signal_hp, history_list=self.history_list, cash_plan=self.cash, cost_rate=self.rate2, moq_buy=100, moq_sell=1, cash_delivery_period=1, stock_delivery_period=2, inflation_rate=0, print_log=False) self.assertIsInstance(res, pd.DataFrame) print(f'in test_loop:\nresult of loop test is \n{res}') def test_loop_multiple_signal(self): """ Test looping of PS Proportion Signal type of signals """ res = apply_loop(op_type=1, op_list=self.multi_signal_hp, history_list=self.multi_history_list, cash_plan=self.cash, cost_rate=self.rate, moq_buy=0, moq_sell=0, cash_delivery_period=0, stock_delivery_period=2, max_cash_usage=True, inflation_rate=0, print_log=False) self.assertIsInstance(res, pd.DataFrame) print(f'in test_loop:\nresult of loop test is \n{res}\n' f'result comparison line by line:') for i in range(len(res)): print(np.around(res.values[i])) print(np.around(self.multi_res[i])) print() self.assertTrue(np.allclose(res, self.multi_res, 5)) print(f'test assertion errors in apply_loop: detect moqs that are not compatible') self.assertRaises(AssertionError, apply_loop, 0, self.ps_signal_hp, self.history_list, self.cash, self.rate, 0, 1, 0, False) self.assertRaises(AssertionError, apply_loop, 0, self.ps_signal_hp, self.history_list, self.cash, self.rate, 1, 5, 0, False) print(f'test loop results with moq equal to 100') res = apply_loop(op_type=1, op_list=self.multi_signal_hp, history_list=self.multi_history_list, cash_plan=self.cash, cost_rate=self.rate2, moq_buy=100, moq_sell=1, cash_delivery_period=0, stock_delivery_period=2, max_cash_usage=False, inflation_rate=0, print_log=True) self.assertIsInstance(res, pd.DataFrame) print(f'in test_loop:\nresult of loop test is \n{res}') class TestStrategy(unittest.TestCase): """ test all properties and methods of strategy base class""" def setUp(self) -> None: pass class TestLSStrategy(RollingTiming): """用于test测试的简单多空蒙板生成策略。基于RollingTiming滚动择时方法生成 该策略有两个参数,N与Price 
N用于计算OHLC价格平均值的N日简单移动平均,判断,当移动平均值大于等于Price时,状态为看多,否则为看空 """ def __init__(self): super().__init__(stg_name='test_LS', stg_text='test long/short strategy', par_count=2, par_types='discr, conti', par_bounds_or_enums=([1, 5], [2, 10]), data_types='close, open, high, low', data_freq='d', window_length=5) pass def _realize(self, hist_data: np.ndarray, params: tuple): n, price = params h = hist_data.T avg = (h[0] + h[1] + h[2] + h[3]) / 4 ma = sma(avg, n) if ma[-1] < price: return 0 else: return 1 class TestSelStrategy(SimpleSelecting): """用于Test测试的简单选股策略,基于Selecting策略生成 策略没有参数,选股周期为5D 在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。 选股比例为平均分配 """ def __init__(self): super().__init__(stg_name='test_SEL', stg_text='test portfolio selection strategy', par_count=0, par_types='', par_bounds_or_enums=(), data_types='high, low, close', data_freq='d', sample_freq='10d', window_length=5) pass def _realize(self, hist_data: np.ndarray, params: tuple): avg = np.nanmean(hist_data, axis=(1, 2)) dif = (hist_data[:, :, 2] - np.roll(hist_data[:, :, 2], 1, 1)) dif_no_nan = np.array([arr[~np.isnan(arr)][-1] for arr in dif]) difper = dif_no_nan / avg large2 = difper.argsort()[1:] chosen = np.zeros_like(avg) chosen[large2] = 0.5 return chosen class TestSelStrategyDiffTime(SimpleSelecting): """用于Test测试的简单选股策略,基于Selecting策略生成 策略没有参数,选股周期为5D 在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。 选股比例为平均分配 """ # TODO: This strategy is not working, find out why and improve def __init__(self): super().__init__(stg_name='test_SEL', stg_text='test portfolio selection strategy', par_count=0, par_types='', par_bounds_or_enums=(), data_types='close, low, open', data_freq='d', sample_freq='w', window_length=2) pass def _realize(self, hist_data: np.ndarray, params: tuple): avg = hist_data.mean(axis=1).squeeze() difper = (hist_data[:, :, 0] - np.roll(hist_data[:, :, 0], 1))[:, -1] / avg large2 = difper.argsort()[0:2] chosen = np.zeros_like(avg) chosen[large2] = 0.5 return chosen class TestSigStrategy(SimpleTiming): """用于Test测试的简单信号生成策略,基于SimpleTiming策略生成 策略有三个参数,第一个参数为ratio,另外两个参数为price1以及price2 ratio是k线形状比例的阈值,定义为abs((C-O)/(H-L))。当这个比值小于ratio阈值时,判断该K线为十字交叉(其实还有丁字等多种情形,但这里做了 简化处理。 信号生成的规则如下: 1,当某个K线出现十字交叉,且昨收与今收之差大于price1时,买入信号 2,当某个K线出现十字交叉,且昨收与今收之差小于price2时,卖出信号 """ def __init__(self): super().__init__(stg_name='test_SIG', stg_text='test signal creation strategy', par_count=3, par_types='conti, conti, conti', par_bounds_or_enums=([2, 10], [0, 3], [0, 3]), data_types='close, open, high, low', window_length=2) pass def _realize(self, hist_data: np.ndarray, params: tuple): r, price1, price2 = params h = hist_data.T ratio = np.abs((h[0] - h[1]) / (h[3] - h[2])) diff = h[0] - np.roll(h[0], 1) sig = np.where((ratio < r) & (diff > price1), 1, np.where((ratio < r) & (diff < price2), -1, 0)) return sig class MyStg(qt.RollingTiming): """自定义双均线择时策略策略""" def __init__(self): """这个均线择时策略只有三个参数: - SMA 慢速均线,所选择的股票 - FMA 快速均线 - M 边界值 策略的其他说明 """ """ 必须初始化的关键策略参数清单: """ super().__init__( pars=(20, 100, 0.01), par_count=3, par_types=['discr', 'discr', 'conti'], par_bounds_or_enums=[(10, 250), (10, 250), (0.0, 0.5)], stg_name='CUSTOM ROLLING TIMING STRATEGY', stg_text='Customized Rolling Timing Strategy for Testing', data_types='close', window_length=100, ) print(f'=====================\n====================\n' f'custom strategy initialized, \npars: {self.pars}\npar_count:{self.par_count}\npar_types:' f'{self.par_types}\n' f'{self.info()}') # 策略的具体实现代码写在策略的_realize()函数中 # 这个函数固定接受两个参数: hist_price代表特定组合的历史数据, 
params代表具体的策略参数 def _realize(self, hist_price, params): """策略的具体实现代码: s:短均线计算日期;l:长均线计算日期;m:均线边界宽度;hesitate:均线跨越类型""" f, s, m = params # 临时处理措施,在策略实现层对传入的数据切片,后续应该在策略实现层以外事先对数据切片,保证传入的数据符合data_types参数即可 h = hist_price.T # 计算长短均线的当前值 s_ma = qt.sma(h[0], s)[-1] f_ma = qt.sma(h[0], f)[-1] # 计算慢均线的停止边界,当快均线在停止边界范围内时,平仓,不发出买卖信号 s_ma_u = s_ma * (1 + m) s_ma_l = s_ma * (1 - m) # 根据观望模式在不同的点位产生Long/short/empty标记 if f_ma > s_ma_u: # 当快均线在慢均线停止范围以上时,持有多头头寸 return 1 elif s_ma_l < f_ma < s_ma_u: # 当均线在停止边界以内时,平仓 return 0 else: # f_ma < s_ma_l 当快均线在慢均线停止范围以下时,持有空头头寸 return -1 class TestOperator(unittest.TestCase): """全面测试Operator对象的所有功能。包括: 1, Strategy 参数的设置 2, 历史数据的获取与分配提取 3, 策略优化参数的批量设置和优化空间的获取 4, 策略输出值的正确性验证 5, 策略结果的混合结果确认 """ def setUp(self): """prepare data for Operator test""" print('start testing HistoryPanel object\n') # build up test data: a 4-type, 3-share, 50-day matrix of prices that contains nan values in some days # for some share_pool # for share1: data_rows = 50 share1_close = [10.04, 10, 10, 9.99, 9.97, 9.99, 10.03, 10.03, 10.06, 10.06, 10.11, 10.09, 10.07, 10.06, 10.09, 10.03, 10.03, 10.06, 10.08, 10, 9.99, 10.03, 10.03, 10.06, 10.03, 9.97, 9.94, 9.83, 9.77, 9.84, 9.91, 9.93, 9.96, 9.91, 9.91, 9.88, 9.91, 9.64, 9.56, 9.57, 9.55, 9.57, 9.61, 9.61, 9.55, 9.57, 9.63, 9.64, 9.65, 9.62] share1_open = [10.02, 10, 9.98, 9.97, 9.99, 10.01, 10.04, 10.06, 10.06, 10.11, 10.11, 10.07, 10.06, 10.09, 10.03, 10.02, 10.06, 10.08, 9.99, 10, 10.03, 10.02, 10.06, 10.03, 9.97, 9.94, 9.83, 9.78, 9.77, 9.91, 9.92, 9.97, 9.91, 9.9, 9.88, 9.91, 9.63, 9.64, 9.57, 9.55, 9.58, 9.61, 9.62, 9.55, 9.57, 9.61, 9.63, 9.64, 9.61, 9.56] share1_high = [10.07, 10, 10, 10, 10.03, 10.03, 10.04, 10.09, 10.1, 10.14, 10.11, 10.1, 10.09, 10.09, 10.1, 10.05, 10.07, 10.09, 10.1, 10, 10.04, 10.04, 10.06, 10.09, 10.05, 9.97, 9.96, 9.86, 9.77, 9.92, 9.94, 9.97, 9.97, 9.92, 9.92, 9.92, 9.93, 9.64, 9.58, 9.6, 9.58, 9.62, 9.62, 9.64, 9.59, 9.62, 9.63, 9.7, 9.66, 9.64] share1_low = [9.99, 10, 9.97, 9.97, 9.97, 9.98, 9.99, 10.03, 10.03, 10.04, 10.11, 10.07, 10.05, 10.03, 10.03, 10.01, 9.99, 10.03, 9.95, 10, 9.95, 10, 10.01, 9.99, 9.96, 9.89, 9.83, 9.77, 9.77, 9.8, 9.9, 9.91, 9.89, 9.89, 9.87, 9.85, 9.6, 9.64, 9.53, 9.55, 9.54, 9.55, 9.58, 9.54, 9.53, 9.53, 9.63, 9.64, 9.59, 9.56] # for share2: share2_close = [9.68, 9.87, 9.86, 9.87, 9.79, 9.82, 9.8, 9.66, 9.62, 9.58, 9.69, 9.78, 9.75, 9.96, 9.9, 10.04, 10.06, 10.08, 10.24, 10.24, 10.24, 9.86, 10.13, 10.12, 10.1, 10.25, 10.24, 10.22, 10.75, 10.64, 10.56, 10.6, 10.42, 10.25, 10.24, 10.49, 10.57, 10.63, 10.48, 10.37, 10.96, 11.02, np.nan, np.nan, 10.88, 10.87, 11.01, 11.01, 11.58, 11.8] share2_open = [9.88, 9.88, 9.89, 9.75, 9.74, 9.8, 9.62, 9.65, 9.58, 9.67, 9.81, 9.8, 10, 9.95, 10.1, 10.06, 10.14, 9.9, 10.2, 10.29, 9.86, 9.48, 10.01, 10.24, 10.26, 10.24, 10.12, 10.65, 10.64, 10.56, 10.42, 10.43, 10.29, 10.3, 10.44, 10.6, 10.67, 10.46, 10.39, 10.9, 11.01, 11.01, np.nan, np.nan, 10.82, 11.02, 10.96, 11.55, 11.74, 11.8] share2_high = [9.91, 10.04, 9.93, 10.04, 9.84, 9.88, 9.99, 9.7, 9.67, 9.71, 9.85, 9.9, 10, 10.2, 10.11, 10.18, 10.21, 10.26, 10.38, 10.47, 10.42, 10.07, 10.24, 10.27, 10.38, 10.43, 10.39, 10.65, 10.84, 10.65, 10.73, 10.63, 10.51, 10.35, 10.46, 10.63, 10.74, 10.76, 10.54, 11.02, 11.12, 11.17, np.nan, np.nan, 10.92, 11.15, 11.11, 11.55, 11.95, 11.93] share2_low = [9.63, 9.84, 9.81, 9.74, 9.67, 9.72, 9.57, 9.54, 9.51, 9.47, 9.68, 9.63, 9.75, 9.65, 9.9, 9.93, 10.03, 9.8, 10.14, 10.09, 9.78, 9.21, 9.11, 9.68, 10.05, 10.12, 9.89, 9.89, 10.59, 10.43, 10.34, 10.32, 10.21, 10.2, 
10.18, 10.36, 10.51, 10.41, 10.32, 10.37, 10.87, 10.95, np.nan, np.nan, 10.65, 10.71, 10.75, 10.91, 11.31, 11.58] # for share3: share3_close = [6.64, 7.26, 7.03, 6.87, np.nan, 6.64, 6.85, 6.7, 6.39, 6.22, 5.92, 5.91, 6.11, 5.91, 6.23, 6.28, 6.28, 6.27, np.nan, 5.56, 5.67, 5.16, 5.69, 6.32, 6.14, 6.25, 5.79, 5.26, 5.05, 5.45, 6.06, 6.21, 5.69, 5.46, 6.02, 6.69, 7.43, 7.72, 8.16, 7.83, 8.7, 8.71, 8.88, 8.54, 8.87, 8.87, 8.18, 7.8, 7.97, 8.25] share3_open = [7.26, 7, 6.88, 6.91, np.nan, 6.81, 6.63, 6.45, 6.16, 6.24, 5.96, 5.97, 5.96, 6.2, 6.35, 6.11, 6.37, 5.58, np.nan, 5.65, 5.19, 5.42, 6.3, 6.15, 6.05, 5.89, 5.22, 5.2, 5.07, 6.04, 6.12, 5.85, 5.67, 6.02, 6.04, 7.07, 7.64, 7.99, 7.59, 8.73, 8.72, 8.97, 8.58, 8.71, 8.77, 8.4, 7.95, 7.76, 8.25, 7.51] share3_high = [7.41, 7.31, 7.14, 7, np.nan, 6.82, 6.96, 6.85, 6.5, 6.34, 6.04, 6.02, 6.12, 6.38, 6.43, 6.46, 6.43, 6.27, np.nan, 6.01, 5.67, 5.67, 6.35, 6.32, 6.43, 6.36, 5.79, 5.47, 5.65, 6.04, 6.14, 6.23, 5.83, 6.25, 6.27, 7.12, 7.82, 8.14, 8.27, 8.92, 8.76, 9.15, 8.9, 9.01, 9.16, 9, 8.27, 7.99, 8.33, 8.25] share3_low = [6.53, 6.87, 6.83, 6.7, np.nan, 6.63, 6.57, 6.41, 6.15, 6.07, 5.89, 5.82, 5.73, 5.81, 6.1, 6.06, 6.16, 5.57, np.nan, 5.51, 5.19, 5.12, 5.69, 6.01, 5.97, 5.86, 5.18, 5.19, 4.96, 5.45, 5.84, 5.85, 5.28, 5.42, 6.02, 6.69, 7.28, 7.64, 7.25, 7.83, 8.41, 8.66, 8.53, 8.54, 8.73, 8.27, 7.95, 7.67, 7.8, 7.51] # for sel_finance test shares_eps = np.array([[np.nan, np.nan, np.nan], [0.1, np.nan, np.nan], [np.nan, 0.2, np.nan], [np.nan, np.nan, 0.3], [np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan], [np.nan, np.nan, 0.2], [0.1, np.nan, np.nan], [np.nan, 0.3, np.nan], [np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan], [0.3, np.nan, np.nan], [np.nan, np.nan, np.nan], [np.nan, 0.3, np.nan], [np.nan, np.nan, np.nan], [np.nan, np.nan, 0.3], [np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan], [np.nan, 0, 0.2], [np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan], [0.1, np.nan, np.nan], [np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan], [np.nan, np.nan, 0.2], [np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan], [0.15, np.nan, np.nan], [np.nan, 0.1, np.nan], [np.nan, np.nan, np.nan], [0.1, np.nan, np.nan], [np.nan, np.nan, np.nan], [np.nan, np.nan, 0.3], [np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan], [0.2, np.nan, np.nan], [np.nan, 0.5, np.nan], [0.4, np.nan, 0.3], [np.nan, np.nan, np.nan], [np.nan, 0.3, np.nan], [0.9, np.nan, np.nan], [np.nan, np.nan, 0.1]]) self.date_indices = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06', '2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12', '2016-07-13', '2016-07-14', '2016-07-15', '2016-07-18', '2016-07-19', '2016-07-20', '2016-07-21', '2016-07-22', '2016-07-25', '2016-07-26', '2016-07-27', '2016-07-28', '2016-07-29', '2016-08-01', '2016-08-02', '2016-08-03', '2016-08-04', '2016-08-05', '2016-08-08', '2016-08-09', '2016-08-10', '2016-08-11', '2016-08-12', '2016-08-15', '2016-08-16', '2016-08-17', '2016-08-18', '2016-08-19', '2016-08-22', '2016-08-23', '2016-08-24', '2016-08-25', '2016-08-26', '2016-08-29', '2016-08-30', '2016-08-31', '2016-09-01', '2016-09-02', '2016-09-05', '2016-09-06', '2016-09-07', '2016-09-08'] self.shares = ['000010', '000030', '000039'] self.types = ['close', 'open', 'high', 'low'] self.sel_finance_tyeps = ['eps'] self.test_data_3D = np.zeros((3, data_rows, 4)) 
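        # test_data_3D is laid out as (shares, trading days, price types): 3 shares by
        # 50 days by the 4 htypes (close, open, high, low).  It is filled below from the
        # per-share price lists; the NaN values left in share2/share3 are intentional so
        # that handling of missing data is exercised as well.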
self.test_data_2D = np.zeros((data_rows, 3)) self.test_data_2D2 = np.zeros((data_rows, 4)) self.test_data_sel_finance = np.empty((3, data_rows, 1)) # Build up 3D data self.test_data_3D[0, :, 0] = share1_close self.test_data_3D[0, :, 1] = share1_open self.test_data_3D[0, :, 2] = share1_high self.test_data_3D[0, :, 3] = share1_low self.test_data_3D[1, :, 0] = share2_close self.test_data_3D[1, :, 1] = share2_open self.test_data_3D[1, :, 2] = share2_high self.test_data_3D[1, :, 3] = share2_low self.test_data_3D[2, :, 0] = share3_close self.test_data_3D[2, :, 1] = share3_open self.test_data_3D[2, :, 2] = share3_high self.test_data_3D[2, :, 3] = share3_low self.test_data_sel_finance[:, :, 0] = shares_eps.T self.hp1 = qt.HistoryPanel(values=self.test_data_3D, levels=self.shares, columns=self.types, rows=self.date_indices) print(f'in test Operator, history panel is created for timing test') self.hp1.info() self.hp2 = qt.HistoryPanel(values=self.test_data_sel_finance, levels=self.shares, columns=self.sel_finance_tyeps, rows=self.date_indices) print(f'in test_Operator, history panel is created for selection finance test:') self.hp2.info() self.op = qt.Operator(strategies='dma', signal_type='PS') self.op2 = qt.Operator(strategies='dma, macd, trix') def test_init(self): """ test initialization of Operator class""" op = qt.Operator() self.assertIsInstance(op, qt.Operator) self.assertEqual(op.signal_type, 'pt') self.assertIsInstance(op.strategies, list) self.assertEqual(len(op.strategies), 0) op = qt.Operator('dma') self.assertIsInstance(op, qt.Operator) self.assertIsInstance(op.strategies, list) self.assertIsInstance(op.strategies[0], TimingDMA) op = qt.Operator('dma, macd') self.assertIsInstance(op, qt.Operator) op = qt.Operator(['dma', 'macd']) self.assertIsInstance(op, qt.Operator) def test_repr(self): """ test basic representation of Opeartor class""" op = qt.Operator() self.assertEqual(op.__repr__(), 'Operator()') op = qt.Operator('macd, dma, trix, random, avg_low') self.assertEqual(op.__repr__(), 'Operator(macd, dma, trix, random, avg_low)') self.assertEqual(op['dma'].__repr__(), 'Q-TIMING(DMA)') self.assertEqual(op['macd'].__repr__(), 'R-TIMING(MACD)') self.assertEqual(op['trix'].__repr__(), 'R-TIMING(TRIX)') self.assertEqual(op['random'].__repr__(), 'SELECT(RANDOM)') self.assertEqual(op['avg_low'].__repr__(), 'FACTOR(AVG LOW)') def test_info(self): """Test information output of Operator""" print(f'test printing information of operator object') self.op.info() def test_get_strategy_by_id(self): """ test get_strategy_by_id()""" op = qt.Operator() self.assertIsInstance(op, qt.Operator) self.assertEqual(op.strategy_count, 0) self.assertEqual(op.strategy_ids, []) op = qt.Operator('macd, dma, trix') self.assertEqual(op.strategy_ids, ['macd', 'dma', 'trix']) self.assertIs(op.get_strategy_by_id('macd'), op.strategies[0]) self.assertIs(op.get_strategy_by_id(1), op.strategies[1]) self.assertIs(op.get_strategy_by_id('trix'), op.strategies[2]) def test_get_items(self): """ test method __getitem__(), it should be the same as geting strategies by id""" op = qt.Operator() self.assertIsInstance(op, qt.Operator) self.assertEqual(op.strategy_count, 0) self.assertEqual(op.strategy_ids, []) op = qt.Operator('macd, dma, trix') self.assertEqual(op.strategy_ids, ['macd', 'dma', 'trix']) self.assertIs(op['macd'], op.strategies[0]) self.assertIs(op['trix'], op.strategies[2]) self.assertIs(op[1], op.strategies[1]) self.assertIs(op[3], op.strategies[2]) def test_get_strategies_by_price_type(self): """ test 
get_strategies_by_price_type""" op = qt.Operator() self.assertIsInstance(op, qt.Operator) self.assertEqual(op.strategy_count, 0) self.assertEqual(op.strategy_ids, []) op = qt.Operator('macd, dma, trix') op.set_parameter('macd', price_type='open') op.set_parameter('dma', price_type='close') op.set_parameter('trix', price_type='open') stg_close = op.get_strategies_by_price_type('close') stg_open = op.get_strategies_by_price_type('open') stg_high = op.get_strategies_by_price_type('high') self.assertIsInstance(stg_close, list) self.assertIsInstance(stg_open, list) self.assertIsInstance(stg_high, list) self.assertEqual(stg_close, [op.strategies[1]]) self.assertEqual(stg_open, [op.strategies[0], op.strategies[2]]) self.assertEqual(stg_high, []) stg_wrong = op.get_strategies_by_price_type(123) self.assertIsInstance(stg_wrong, list) self.assertEqual(stg_wrong, []) def test_get_strategy_count_by_price_type(self): """ test get_strategy_count_by_price_type""" op = qt.Operator() self.assertIsInstance(op, qt.Operator) self.assertEqual(op.strategy_count, 0) self.assertEqual(op.strategy_ids, []) op = qt.Operator('macd, dma, trix') op.set_parameter('macd', price_type='open') op.set_parameter('dma', price_type='close') op.set_parameter('trix', price_type='open') stg_close = op.get_strategy_count_by_price_type('close') stg_open = op.get_strategy_count_by_price_type('open') stg_high = op.get_strategy_count_by_price_type('high') self.assertIsInstance(stg_close, int) self.assertIsInstance(stg_open, int) self.assertIsInstance(stg_high, int) self.assertEqual(stg_close, 1) self.assertEqual(stg_open, 2) self.assertEqual(stg_high, 0) stg_wrong = op.get_strategy_count_by_price_type(123) self.assertIsInstance(stg_wrong, int) self.assertEqual(stg_wrong, 0) def test_get_strategy_names_by_price_type(self): """ test get_strategy_names_by_price_type""" op = qt.Operator() self.assertIsInstance(op, qt.Operator) self.assertEqual(op.strategy_count, 0) self.assertEqual(op.strategy_ids, []) op = qt.Operator('macd, dma, trix') op.set_parameter('macd', price_type='open') op.set_parameter('dma', price_type='close') op.set_parameter('trix', price_type='open') stg_close = op.get_strategy_names_by_price_type('close') stg_open = op.get_strategy_names_by_price_type('open') stg_high = op.get_strategy_names_by_price_type('high') self.assertIsInstance(stg_close, list) self.assertIsInstance(stg_open, list) self.assertIsInstance(stg_high, list) self.assertEqual(stg_close, ['DMA']) self.assertEqual(stg_open, ['MACD', 'TRIX']) self.assertEqual(stg_high, []) stg_wrong = op.get_strategy_names_by_price_type(123) self.assertIsInstance(stg_wrong, list) self.assertEqual(stg_wrong, []) def test_get_strategy_id_by_price_type(self): """ test get_strategy_IDs_by_price_type""" print('-----Test get strategy IDs by price type------\n') op = qt.Operator() self.assertIsInstance(op, qt.Operator) self.assertEqual(op.strategy_count, 0) self.assertEqual(op.strategy_ids, []) op = qt.Operator('macd, dma, trix') op.set_parameter('macd', price_type='open') op.set_parameter('dma', price_type='close') op.set_parameter('trix', price_type='open') stg_close = op.get_strategy_id_by_price_type('close') stg_open = op.get_strategy_id_by_price_type('open') stg_high = op.get_strategy_id_by_price_type('high') self.assertIsInstance(stg_close, list) self.assertIsInstance(stg_open, list) self.assertIsInstance(stg_high, list) self.assertEqual(stg_close, ['dma']) self.assertEqual(stg_open, ['macd', 'trix']) self.assertEqual(stg_high, []) op.add_strategies('dma, macd') 
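        # adding strategies whose ids already exist in the operator gives them automatically
        # suffixed ids ('dma_1', 'macd_1'), which the set_parameter calls and the expected
        # id lists below rely on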
op.set_parameter('dma_1', price_type='open') op.set_parameter('macd', price_type='open') op.set_parameter('macd_1', price_type='high') op.set_parameter('trix', price_type='close') print(f'Operator strategy id:\n' f'{op.strategies} on memory pos:\n' f'{[id(stg) for stg in op.strategies]}') stg_close = op.get_strategy_id_by_price_type('close') stg_open = op.get_strategy_id_by_price_type('open') stg_high = op.get_strategy_id_by_price_type('high') stg_all = op.get_strategy_id_by_price_type() print(f'All IDs of strategies:\n' f'{stg_all}\n' f'All price types of strategies:\n' f'{[stg.price_type for stg in op.strategies]}') self.assertEqual(stg_close, ['dma', 'trix']) self.assertEqual(stg_open, ['macd', 'dma_1']) self.assertEqual(stg_high, ['macd_1']) stg_wrong = op.get_strategy_id_by_price_type(123) self.assertIsInstance(stg_wrong, list) self.assertEqual(stg_wrong, []) def test_property_strategies(self): """ test property strategies""" print(f'created a new simple Operator with only one strategy: DMA') op = qt.Operator('dma') strategies = op.strategies self.assertIsInstance(strategies, list) op.info() print(f'created the second simple Operator with three strategies') self.assertIsInstance(strategies[0], TimingDMA) op = qt.Operator('dma, macd, cdl') strategies = op.strategies op.info() self.assertIsInstance(strategies, list) self.assertIsInstance(strategies[0], TimingDMA) self.assertIsInstance(strategies[1], TimingMACD) self.assertIsInstance(strategies[2], TimingCDL) def test_property_strategy_count(self): """ test Property strategy_count, and the method get_strategy_count_by_price_type()""" self.assertEqual(self.op.strategy_count, 1) self.assertEqual(self.op2.strategy_count, 3) self.assertEqual(self.op.get_strategy_count_by_price_type(), 1) self.assertEqual(self.op2.get_strategy_count_by_price_type(), 3) self.assertEqual(self.op.get_strategy_count_by_price_type('close'), 1) self.assertEqual(self.op.get_strategy_count_by_price_type('high'), 0) self.assertEqual(self.op2.get_strategy_count_by_price_type('close'), 3) self.assertEqual(self.op2.get_strategy_count_by_price_type('open'), 0) def test_property_strategy_names(self): """ test property strategy_ids""" op = qt.Operator('dma') self.assertIsInstance(op.strategy_ids, list) names = op.strategy_ids[0] print(f'names are {names}') self.assertEqual(names, 'dma') op = qt.Operator('dma, macd, trix, cdl') self.assertIsInstance(op.strategy_ids, list) self.assertEqual(op.strategy_ids[0], 'dma') self.assertEqual(op.strategy_ids[1], 'macd') self.assertEqual(op.strategy_ids[2], 'trix') self.assertEqual(op.strategy_ids[3], 'cdl') op = qt.Operator('dma, macd, trix, dma, dma') self.assertIsInstance(op.strategy_ids, list) self.assertEqual(op.strategy_ids[0], 'dma') self.assertEqual(op.strategy_ids[1], 'macd') self.assertEqual(op.strategy_ids[2], 'trix') self.assertEqual(op.strategy_ids[3], 'dma_1') self.assertEqual(op.strategy_ids[4], 'dma_2') def test_property_strategy_blenders(self): """ test property strategy blenders including property setter, and test the method get_blender()""" print(f'------- Test property strategy blenders ---------') op = qt.Operator() self.assertIsInstance(op.strategy_blenders, dict) self.assertIsInstance(op.signal_type, str) self.assertEqual(op.strategy_blenders, {}) self.assertEqual(op.signal_type, 'pt') # test adding blender to empty operator op.strategy_blenders = '1 + 2' op.signal_type = 'proportion signal' self.assertEqual(op.strategy_blenders, {}) self.assertEqual(op.signal_type, 'ps') op.add_strategy('dma') 
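        # once at least one strategy is present, assigning a blender string stores it per
        # back-test price type as a reverse-Polish (postfix) token list, e.g. '1+2' is
        # parsed into ['+', '2', '1'] under the strategy's price type ('close' by default),
        # which is what the assertions below check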
op.strategy_blenders = '1+2' self.assertEqual(op.strategy_blenders, {'close': ['+', '2', '1']}) op.clear_strategies() self.assertEqual(op.strategy_blenders, {}) op.add_strategies('dma, trix, macd, dma') op.set_parameter('dma', price_type='open') op.set_parameter('trix', price_type='high') op.set_blender('open', '1+2') blender_open = op.get_blender('open') blender_close = op.get_blender('close') blender_high = op.get_blender('high') self.assertEqual(blender_open, ['+', '2', '1']) self.assertEqual(blender_close, None) self.assertEqual(blender_high, None) op.set_blender('open', '1+2+3') op.set_blender('abc', '1+2+3') blender_open = op.get_blender('open') blender_close = op.get_blender('close') blender_high = op.get_blender('high') blender_abc = op.get_blender('abc') self.assertEqual(op.strategy_blenders, {'open': ['+', '3', '+', '2', '1']}) self.assertEqual(blender_open, ['+', '3', '+', '2', '1']) self.assertEqual(blender_close, None) self.assertEqual(blender_high, None) self.assertEqual(blender_abc, None) op.set_blender('open', 123) blender_open = op.get_blender('open') self.assertEqual(blender_open, []) op.set_blender(None, '1+1') blender_open = op.get_blender('open') blender_close = op.get_blender('close') blender_high = op.get_blender('high') self.assertEqual(op.bt_price_types, ['close', 'high', 'open']) self.assertEqual(op.get_blender(), {'close': ['+', '1', '1'], 'open': ['+', '1', '1'], 'high': ['+', '1', '1']}) self.assertEqual(blender_open, ['+', '1', '1']) self.assertEqual(blender_close, ['+', '1', '1']) self.assertEqual(blender_high, ['+', '1', '1']) op.set_blender(None, ['1+1', '3+4']) blender_open = op.get_blender('open') blender_close = op.get_blender('close') blender_high = op.get_blender('high') self.assertEqual(blender_open, ['+', '4', '3']) self.assertEqual(blender_close, ['+', '1', '1']) self.assertEqual(blender_high, ['+', '4', '3']) self.assertEqual(op.view_blender('open'), '3+4') self.assertEqual(op.view_blender('close'), '1+1') self.assertEqual(op.view_blender('high'), '3+4') op.strategy_blenders = (['1+2', '2*3', '1+4']) blender_open = op.get_blender('open') blender_close = op.get_blender('close') blender_high = op.get_blender('high') self.assertEqual(blender_open, ['+', '4', '1']) self.assertEqual(blender_close, ['+', '2', '1']) self.assertEqual(blender_high, ['*', '3', '2']) self.assertEqual(op.view_blender('open'), '1+4') self.assertEqual(op.view_blender('close'), '1+2') self.assertEqual(op.view_blender('high'), '2*3') # test error inputs: # wrong type of price_type self.assertRaises(TypeError, op.set_blender, 1, '1+3') # price_type not found, no change is made op.set_blender('volume', '1+3') blender_open = op.get_blender('open') blender_close = op.get_blender('close') blender_high = op.get_blender('high') self.assertEqual(blender_open, ['+', '4', '1']) self.assertEqual(blender_close, ['+', '2', '1']) self.assertEqual(blender_high, ['*', '3', '2']) # price_type not valid, no change is made op.set_blender('closee', '1+2') blender_open = op.get_blender('open') blender_close = op.get_blender('close') blender_high = op.get_blender('high') self.assertEqual(blender_open, ['+', '4', '1']) self.assertEqual(blender_close, ['+', '2', '1']) self.assertEqual(blender_high, ['*', '3', '2']) # wrong type of blender, set to empty list op.set_blender('open', 55) blender_open = op.get_blender('open') blender_close = op.get_blender('close') blender_high = op.get_blender('high') self.assertEqual(blender_open, []) self.assertEqual(blender_close, ['+', '2', '1']) 
        self.assertEqual(blender_high, ['*', '3', '2'])
        # wrong type of blender, set to empty list
        op.set_blender('close', ['1+2'])
        blender_open = op.get_blender('open')
        blender_close = op.get_blender('close')
        blender_high = op.get_blender('high')
        self.assertEqual(blender_open, [])
        self.assertEqual(blender_close, [])
        self.assertEqual(blender_high, ['*', '3', '2'])
        # can't parse blender, set to empty list
        op.set_blender('high', 'a+bc')
        blender_open = op.get_blender('open')
        blender_close = op.get_blender('close')
        blender_high = op.get_blender('high')
        self.assertEqual(blender_open, [])
        self.assertEqual(blender_close, [])
        self.assertEqual(blender_high, [])

    def test_property_signal_type(self):
        """ test property signal_type"""
        op = qt.Operator()
        self.assertIsInstance(op.signal_type, str)
        self.assertEqual(op.signal_type, 'pt')
        op = qt.Operator(signal_type='ps')
        self.assertIsInstance(op.signal_type, str)
        self.assertEqual(op.signal_type, 'ps')
        op = qt.Operator(signal_type='PS')
        self.assertEqual(op.signal_type, 'ps')
        op = qt.Operator(signal_type='proportion signal')
        self.assertEqual(op.signal_type, 'ps')
        print(f'"pt" will be the default type if wrong value is given')
        op = qt.Operator(signal_type='wrong value')
        self.assertEqual(op.signal_type, 'pt')
        print(f'test signal_type.setter')
        op.signal_type = 'ps'
        self.assertEqual(op.signal_type, 'ps')
        print(f'test error raising')
        self.assertRaises(TypeError, setattr, op, 'signal_type', 123)
        self.assertRaises(ValueError, setattr, op, 'signal_type', 'wrong value')

    def test_property_op_data_types(self):
        """ test property op_data_types"""
        op = qt.Operator()
        self.assertIsInstance(op.op_data_types, list)
        self.assertEqual(op.op_data_types, [])
        op = qt.Operator('macd, dma, trix')
        dt = op.op_data_types
        self.assertEqual(dt[0], 'close')
        op = qt.Operator('macd, cdl')
        dt = op.op_data_types
        self.assertEqual(dt[0], 'close')
        self.assertEqual(dt[1], 'high')
        self.assertEqual(dt[2], 'low')
        self.assertEqual(dt[3], 'open')
        self.assertEqual(dt, ['close', 'high', 'low', 'open'])
        op.add_strategy('dma')
        dt = op.op_data_types
        self.assertEqual(dt[0], 'close')
        self.assertEqual(dt[1], 'high')
        self.assertEqual(dt[2], 'low')
        self.assertEqual(dt[3], 'open')
        self.assertEqual(dt, ['close', 'high', 'low', 'open'])

    def test_property_op_data_type_count(self):
        """ test property op_data_type_count"""
        op = qt.Operator()
        self.assertIsInstance(op.op_data_type_count, int)
        self.assertEqual(op.op_data_type_count, 0)
        op = qt.Operator('macd, dma, trix')
        dtn = op.op_data_type_count
        self.assertEqual(dtn, 1)
        op = qt.Operator('macd, cdl')
        dtn = op.op_data_type_count
        self.assertEqual(dtn, 4)
        op.add_strategy('dma')
        dtn = op.op_data_type_count
        self.assertEqual(dtn, 4)

    def test_property_op_data_freq(self):
        """ test property op_data_freq"""
        op = qt.Operator()
        self.assertIsInstance(op.op_data_freq, str)
        self.assertEqual(len(op.op_data_freq), 0)
        self.assertEqual(op.op_data_freq, '')
        op = qt.Operator('macd, dma, trix')
        dtf = op.op_data_freq
        self.assertIsInstance(dtf, str)
        self.assertEqual(dtf[0], 'd')
        op.set_parameter('macd', data_freq='m')
        dtf = op.op_data_freq
        self.assertIsInstance(dtf, list)
        self.assertEqual(len(dtf), 2)
        self.assertEqual(dtf[0], 'd')
        self.assertEqual(dtf[1], 'm')

    def test_property_bt_price_types(self):
        """ test property bt_price_types"""
        print('------test property bt_price_types-------')
        op = qt.Operator()
        self.assertIsInstance(op.bt_price_types, list)
        self.assertEqual(len(op.bt_price_types), 0)
        self.assertEqual(op.bt_price_types, [])
        op = qt.Operator('macd, dma, trix')
        btp = op.bt_price_types
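        # bt_price_types collects the distinct price types that the strategies trade on,
        # 'close' by default; the checks below show the list growing (and shrinking again)
        # as strategies with price_type 'open' and 'high' are added and removed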
self.assertIsInstance(btp, list) self.assertEqual(btp[0], 'close') op.set_parameter('macd', price_type='open') btp = op.bt_price_types btpc = op.bt_price_type_count print(f'price_types are \n{btp}') self.assertIsInstance(btp, list) self.assertEqual(len(btp), 2) self.assertEqual(btp[0], 'close') self.assertEqual(btp[1], 'open') self.assertEqual(btpc, 2) op.add_strategies(['dma', 'macd']) op.set_parameter('dma_1', price_type='high') btp = op.bt_price_types btpc = op.bt_price_type_count self.assertEqual(btp[0], 'close') self.assertEqual(btp[1], 'high') self.assertEqual(btp[2], 'open') self.assertEqual(btpc, 3) op.remove_strategy('dma_1') btp = op.bt_price_types btpc = op.bt_price_type_count self.assertEqual(btp[0], 'close') self.assertEqual(btp[1], 'open') self.assertEqual(btpc, 2) op.remove_strategy('macd_1') btp = op.bt_price_types btpc = op.bt_price_type_count self.assertEqual(btp[0], 'close') self.assertEqual(btp[1], 'open') self.assertEqual(btpc, 2) def test_property_op_data_type_list(self): """ test property op_data_type_list""" op = qt.Operator() self.assertIsInstance(op.op_data_type_list, list) self.assertEqual(len(op.op_data_type_list), 0) self.assertEqual(op.op_data_type_list, []) op = qt.Operator('macd, dma, trix, cdl') ohd = op.op_data_type_list print(f'ohd is {ohd}') self.assertIsInstance(ohd, list) self.assertEqual(ohd[0], ['close']) op.set_parameter('macd', data_types='open, close') ohd = op.op_data_type_list print(f'ohd is {ohd}') self.assertIsInstance(ohd, list) self.assertEqual(len(ohd), 4) self.assertEqual(ohd[0], ['open', 'close']) self.assertEqual(ohd[1], ['close']) self.assertEqual(ohd[2], ['close']) self.assertEqual(ohd[3], ['open', 'high', 'low', 'close']) def test_property_op_history_data(self): """ Test this important function to get operation history data that shall be used in signal generation these data are stored in list of nd-arrays, each ndarray represents the data that is needed for each and every strategy """ print(f'------- Test getting operation history data ---------') op = qt.Operator() self.assertIsInstance(op.strategy_blenders, dict) self.assertIsInstance(op.signal_type, str) self.assertEqual(op.strategy_blenders, {}) self.assertEqual(op.op_history_data, {}) self.assertEqual(op.signal_type, 'pt') def test_property_opt_space_par(self): """ test property opt_space_par""" print(f'-----test property opt_space_par--------:\n') op = qt.Operator() self.assertIsInstance(op.opt_space_par, tuple) self.assertIsInstance(op.opt_space_par[0], list) self.assertIsInstance(op.opt_space_par[1], list) self.assertEqual(len(op.opt_space_par), 2) self.assertEqual(op.opt_space_par, ([], [])) op = qt.Operator('macd, dma, trix, cdl') osp = op.opt_space_par print(f'before setting opt_tags opt_space_par is empty:\n' f'osp is {osp}\n') self.assertIsInstance(osp, tuple) self.assertEqual(osp[0], []) self.assertEqual(osp[1], []) op.set_parameter('macd', opt_tag=1) op.set_parameter('dma', opt_tag=1) osp = op.opt_space_par print(f'after setting opt_tags opt_space_par is not empty:\n' f'osp is {osp}\n') self.assertIsInstance(osp, tuple) self.assertEqual(len(osp), 2) self.assertIsInstance(osp[0], list) self.assertIsInstance(osp[1], list) self.assertEqual(len(osp[0]), 6) self.assertEqual(len(osp[1]), 6) self.assertEqual(osp[0], [(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)]) self.assertEqual(osp[1], ['discr', 'discr', 'discr', 'discr', 'discr', 'discr']) def test_property_opt_types(self): """ test property opt_tags""" print(f'-----test property opt_tags--------:\n') op 
= qt.Operator() self.assertIsInstance(op.opt_tags, list) self.assertEqual(len(op.opt_tags), 0) self.assertEqual(op.opt_tags, []) op = qt.Operator('macd, dma, trix, cdl') otp = op.opt_tags print(f'before setting opt_tags opt_space_par is empty:\n' f'otp is {otp}\n') self.assertIsInstance(otp, list) self.assertEqual(otp, [0, 0, 0, 0]) op.set_parameter('macd', opt_tag=1) op.set_parameter('dma', opt_tag=1) otp = op.opt_tags print(f'after setting opt_tags opt_space_par is not empty:\n' f'otp is {otp}\n') self.assertIsInstance(otp, list) self.assertEqual(len(otp), 4) self.assertEqual(otp, [1, 1, 0, 0]) def test_property_max_window_length(self): """ test property max_window_length""" print(f'-----test property max window length--------:\n') op = qt.Operator() self.assertIsInstance(op.max_window_length, int) self.assertEqual(op.max_window_length, 0) op = qt.Operator('macd, dma, trix, cdl') mwl = op.max_window_length print(f'before setting window_length the value is 270:\n' f'mwl is {mwl}\n') self.assertIsInstance(mwl, int) self.assertEqual(mwl, 270) op.set_parameter('macd', window_length=300) op.set_parameter('dma', window_length=350) mwl = op.max_window_length print(f'after setting window_length the value is new set value:\n' f'mwl is {mwl}\n') self.assertIsInstance(mwl, int) self.assertEqual(mwl, 350) def test_property_bt_price_type_count(self): """ test property bt_price_type_count""" print(f'-----test property bt_price_type_count--------:\n') op = qt.Operator() self.assertIsInstance(op.bt_price_type_count, int) self.assertEqual(op.bt_price_type_count, 0) op = qt.Operator('macd, dma, trix, cdl') otp = op.bt_price_type_count print(f'before setting price_type the price count is 1:\n' f'otp is {otp}\n') self.assertIsInstance(otp, int) self.assertEqual(otp, 1) op.set_parameter('macd', price_type='open') op.set_parameter('dma', price_type='open') otp = op.bt_price_type_count print(f'after setting price_type the price type count is 2:\n' f'otp is {otp}\n') self.assertIsInstance(otp, int) self.assertEqual(otp, 2) def test_property_set(self): """ test all property setters: setting following properties: - strategy_blenders - signal_type other properties can not be set""" print(f'------- Test setting properties ---------') op = qt.Operator() self.assertIsInstance(op.strategy_blenders, dict) self.assertIsInstance(op.signal_type, str) self.assertEqual(op.strategy_blenders, {}) self.assertEqual(op.signal_type, 'pt') op.strategy_blenders = '1 + 2' op.signal_type = 'proportion signal' self.assertEqual(op.strategy_blenders, {}) self.assertEqual(op.signal_type, 'ps') op = qt.Operator('macd, dma, trix, cdl') # TODO: 修改set_parameter(),使下面的用法成立 # a_to_sell.set_parameter('dma, cdl', price_type='open') op.set_parameter('dma', price_type='open') op.set_parameter('cdl', price_type='open') sb = op.strategy_blenders st = op.signal_type self.assertIsInstance(sb, dict) print(f'before setting: strategy_blenders={sb}') self.assertEqual(sb, {}) op.strategy_blenders = '1+2 * 3' sb = op.strategy_blenders print(f'after setting strategy_blender={sb}') self.assertEqual(sb, {'close': ['+', '*', '3', '2', '1'], 'open': ['+', '*', '3', '2', '1']}) op.strategy_blenders = ['1+2', '3-4'] sb = op.strategy_blenders print(f'after setting strategy_blender={sb}') self.assertEqual(sb, {'close': ['+', '2', '1'], 'open': ['-', '4', '3']}) def test_operator_ready(self): """test the method ready of Operator""" op = qt.Operator() print(f'operator is ready? 
"{op.ready}"') def test_operator_add_strategy(self): """test adding strategies to Operator""" op = qt.Operator('dma, all, urgent') self.assertIsInstance(op, qt.Operator) self.assertIsInstance(op.strategies[0], qt.TimingDMA) self.assertIsInstance(op.strategies[1], qt.SelectingAll) self.assertIsInstance(op.strategies[2], qt.RiconUrgent) self.assertIsInstance(op[0], qt.TimingDMA) self.assertIsInstance(op[1], qt.SelectingAll) self.assertIsInstance(op[2], qt.RiconUrgent) self.assertIsInstance(op['dma'], qt.TimingDMA) self.assertIsInstance(op['all'], qt.SelectingAll) self.assertIsInstance(op['urgent'], qt.RiconUrgent) self.assertEqual(op.strategy_count, 3) print(f'test adding strategies into existing op') print('test adding strategy by string') op.add_strategy('macd') self.assertIsInstance(op.strategies[0], qt.TimingDMA) self.assertIsInstance(op.strategies[3], qt.TimingMACD) self.assertEqual(op.strategy_count, 4) op.add_strategy('random') self.assertIsInstance(op.strategies[0], qt.TimingDMA) self.assertIsInstance(op.strategies[4], qt.SelectingRandom) self.assertEqual(op.strategy_count, 5) test_ls = TestLSStrategy() op.add_strategy(test_ls) self.assertIsInstance(op.strategies[0], qt.TimingDMA) self.assertIsInstance(op.strategies[5], TestLSStrategy) self.assertEqual(op.strategy_count, 6) print(f'Test different instance of objects are added to operator') op.add_strategy('dma') self.assertIsInstance(op.strategies[0], qt.TimingDMA) self.assertIsInstance(op.strategies[6], qt.TimingDMA) self.assertIsNot(op.strategies[0], op.strategies[6]) def test_operator_add_strategies(self): """ etst adding multiple strategies to Operator""" op = qt.Operator('dma, all, urgent') self.assertEqual(op.strategy_count, 3) print('test adding multiple strategies -- adding strategy by list of strings') op.add_strategies(['dma', 'macd']) self.assertEqual(op.strategy_count, 5) self.assertIsInstance(op.strategies[0], qt.TimingDMA) self.assertIsInstance(op.strategies[3], qt.TimingDMA) self.assertIsInstance(op.strategies[4], qt.TimingMACD) print('test adding multiple strategies -- adding strategy by comma separated strings') op.add_strategies('dma, macd') self.assertEqual(op.strategy_count, 7) self.assertIsInstance(op.strategies[0], qt.TimingDMA) self.assertIsInstance(op.strategies[5], qt.TimingDMA) self.assertIsInstance(op.strategies[6], qt.TimingMACD) print('test adding multiple strategies -- adding strategy by list of strategies') op.add_strategies([qt.TimingDMA(), qt.TimingMACD()]) self.assertEqual(op.strategy_count, 9) self.assertIsInstance(op.strategies[0], qt.TimingDMA) self.assertIsInstance(op.strategies[7], qt.TimingDMA) self.assertIsInstance(op.strategies[8], qt.TimingMACD) print('test adding multiple strategies -- adding strategy by list of strategy and str') op.add_strategies(['DMA', qt.TimingMACD()]) self.assertEqual(op.strategy_count, 11) self.assertIsInstance(op.strategies[0], qt.TimingDMA) self.assertIsInstance(op.strategies[9], qt.TimingDMA) self.assertIsInstance(op.strategies[10], qt.TimingMACD) self.assertIsNot(op.strategies[0], op.strategies[9]) self.assertIs(type(op.strategies[0]), type(op.strategies[9])) print('test adding fault data') self.assertRaises(AssertionError, op.add_strategies, 123) self.assertRaises(AssertionError, op.add_strategies, None) def test_opeartor_remove_strategy(self): """ test method remove strategy""" op = qt.Operator('dma, all, urgent') op.add_strategies(['dma', 'macd']) op.add_strategies(['DMA', TestLSStrategy()]) self.assertEqual(op.strategy_count, 7) print('test removing 
strategies from Operator') op.remove_strategy('dma') self.assertEqual(op.strategy_count, 6) self.assertEqual(op.strategy_ids, ['all', 'urgent', 'dma_1', 'macd', 'dma_2', 'custom']) self.assertEqual(op.strategies[0], op['all']) self.assertEqual(op.strategies[1], op['urgent']) self.assertEqual(op.strategies[2], op['dma_1']) self.assertEqual(op.strategies[3], op['macd']) self.assertEqual(op.strategies[4], op['dma_2']) self.assertEqual(op.strategies[5], op['custom']) op.remove_strategy('dma_1') self.assertEqual(op.strategy_count, 5) self.assertEqual(op.strategy_ids, ['all', 'urgent', 'macd', 'dma_2', 'custom']) self.assertEqual(op.strategies[0], op['all']) self.assertEqual(op.strategies[1], op['urgent']) self.assertEqual(op.strategies[2], op['macd']) self.assertEqual(op.strategies[3], op['dma_2']) self.assertEqual(op.strategies[4], op['custom']) def test_opeartor_clear_strategies(self): """ test operator clear strategies""" op = qt.Operator('dma, all, urgent') op.add_strategies(['dma', 'macd']) op.add_strategies(['DMA', TestLSStrategy()]) self.assertEqual(op.strategy_count, 7) print('test removing strategies from Operator') op.clear_strategies() self.assertEqual(op.strategy_count, 0) self.assertEqual(op.strategy_ids, []) op.add_strategy('dma', pars=(12, 123, 25)) self.assertEqual(op.strategy_count, 1) self.assertEqual(op.strategy_ids, ['dma']) self.assertEqual(type(op.strategies[0]), TimingDMA) self.assertEqual(op.strategies[0].pars, (12, 123, 25)) op.clear_strategies() self.assertEqual(op.strategy_count, 0) self.assertEqual(op.strategy_ids, []) def test_operator_prepare_data(self): """test processes that related to prepare data""" test_ls = TestLSStrategy() test_sel = TestSelStrategy() test_sig = TestSigStrategy() self.op = qt.Operator(strategies=[test_ls, test_sel, test_sig]) too_early_cash = qt.CashPlan(dates='2016-01-01', amounts=10000) early_cash = qt.CashPlan(dates='2016-07-01', amounts=10000) on_spot_cash = qt.CashPlan(dates='2016-07-08', amounts=10000) no_trade_cash = qt.CashPlan(dates='2016-07-08, 2016-07-30, 2016-08-11, 2016-09-03', amounts=[10000, 10000, 10000, 10000]) # 在所有策略的参数都设置好之前调用prepare_data会发生assertion Error self.assertRaises(AssertionError, self.op.prepare_data, hist_data=self.hp1, cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000)) late_cash = qt.CashPlan(dates='2016-12-31', amounts=10000) multi_cash = qt.CashPlan(dates='2016-07-08, 2016-08-08', amounts=[10000, 10000]) self.op.set_parameter(stg_id='custom', pars={'000300': (5, 10.), '000400': (5, 10.), '000500': (5, 6.)}) self.assertEqual(self.op.strategies[0].pars, {'000300': (5, 10.), '000400': (5, 10.), '000500': (5, 6.)}) self.op.set_parameter(stg_id='custom_1', pars=()) self.assertEqual(self.op.strategies[1].pars, ()), self.op.set_parameter(stg_id='custom_2', pars=(0.2, 0.02, -0.02)) self.assertEqual(self.op.strategies[2].pars, (0.2, 0.02, -0.02)), self.op.prepare_data(hist_data=self.hp1, cash_plan=on_spot_cash) self.assertIsInstance(self.op._op_history_data, dict) self.assertEqual(len(self.op._op_history_data), 3) # test if automatic strategy blenders are set self.assertEqual(self.op.strategy_blenders, {'close': ['+', '2', '+', '1', '0']}) tim_hist_data = self.op._op_history_data['custom'] sel_hist_data = self.op._op_history_data['custom_1'] ric_hist_data = self.op._op_history_data['custom_2'] print(f'in test_prepare_data in TestOperator:') print('selecting history data:\n', sel_hist_data) print('originally passed data in correct sequence:\n', self.test_data_3D[:, 3:, [2, 3, 0]]) print('difference is \n', 
sel_hist_data - self.test_data_3D[:, :, [2, 3, 0]]) self.assertTrue(np.allclose(sel_hist_data, self.test_data_3D[:, :, [2, 3, 0]], equal_nan=True)) self.assertTrue(np.allclose(tim_hist_data, self.test_data_3D, equal_nan=True)) self.assertTrue(np.allclose(ric_hist_data, self.test_data_3D[:, 3:, :], equal_nan=True)) # raises Value Error if empty history panel is given empty_hp = qt.HistoryPanel() correct_hp = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 4)), columns=self.types, levels=self.shares, rows=self.date_indices) too_many_shares = qt.HistoryPanel(values=np.random.randint(10, size=(5, 50, 4))) too_many_types = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 5))) # raises Error when history panel is empty self.assertRaises(ValueError, self.op.prepare_data, empty_hp, on_spot_cash) # raises Error when first investment date is too early self.assertRaises(AssertionError, self.op.prepare_data, correct_hp, early_cash) # raises Error when last investment date is too late self.assertRaises(AssertionError, self.op.prepare_data, correct_hp, late_cash) # raises Error when some of the investment dates are on no-trade-days self.assertRaises(ValueError, self.op.prepare_data, correct_hp, no_trade_cash) # raises Error when number of shares in history data does not fit self.assertRaises(AssertionError, self.op.prepare_data, too_many_shares, on_spot_cash) # raises Error when too early cash investment date self.assertRaises(AssertionError, self.op.prepare_data, correct_hp, too_early_cash) # raises Error when number of d_types in history data does not fit self.assertRaises(AssertionError, self.op.prepare_data, too_many_types, on_spot_cash) # test the effect of data type sequence in strategy definition def test_operator_generate(self): """ Test signal generation process of operator objects :return: """ # 使用test模块的自定义策略生成三种交易策略 test_ls = TestLSStrategy() test_sel = TestSelStrategy() test_sel2 = TestSelStrategyDiffTime() test_sig = TestSigStrategy() print('--Test PT type signal generation--') # 测试PT类型的信号生成: # 创建一个Operator对象,信号类型为PT(比例目标信号) # 这个Operator对象包含两个策略,分别为LS-Strategy以及Sel-Strategy,代表择时和选股策略 # 两个策略分别生成PT信号后混合成一个信号输出 self.op = qt.Operator(strategies=[test_ls, test_sel]) self.op.set_parameter(stg_id='custom', pars={'000010': (5, 10.), '000030': (5, 10.), '000039': (5, 6.)}) self.op.set_parameter(stg_id=1, pars=()) # self.a_to_sell.set_blender(blender='0+1+2') self.op.prepare_data(hist_data=self.hp1, cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000)) print('--test operator information in normal mode--') self.op.info() self.assertEqual(self.op.strategy_blenders, {'close': ['+', '1', '0']}) self.op.set_blender(None, '0*1') self.assertEqual(self.op.strategy_blenders, {'close': ['*', '1', '0']}) print('--test operation signal created in Proportional Target (PT) Mode--') op_list = self.op.create_signal(hist_data=self.hp1) self.assertTrue(isinstance(op_list, HistoryPanel)) backtest_price_types = op_list.htypes self.assertEqual(backtest_price_types[0], 'close') self.assertEqual(op_list.shape, (3, 45, 1)) reduced_op_list = op_list.values.squeeze().T print(f'op_list created, it is a 3 share/45 days/1 htype array, to make comparison happen, \n' f'it will be squeezed to a 2-d array to compare on share-wise:\n' f'{reduced_op_list}') target_op_values = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.5, 0.0, 0.0], [0.5, 0.0, 0.0], [0.5, 0.0, 0.0], [0.5, 0.0, 0.0], [0.5, 0.0, 0.0], [0.5, 0.0, 0.0], [0.5, 0.0, 0.0], [0.5, 0.0, 0.0], [0.5, 0.0, 0.0], [0.5, 0.0, 0.0], [0.5, 0.5, 0.0], [0.5, 
0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.0, 0.0], [0.5, 0.0, 0.0], [0.5, 0.5, 0.0], [0.0, 0.5, 0.0], [0.0, 0.5, 0.0], [0.0, 0.5, 0.0], [0.0, 0.5, 0.0], [0.0, 0.5, 0.0], [0.0, 0.5, 0.0], [0.0, 0.5, 0.0], [0.0, 0.5, 0.0], [0.0, 0.5, 0.0], [0.0, 0.5, 0.0], [0.0, 0.5, 0.0], [0.0, 0.5, 0.0], [0.0, 0.5, 0.0], [0.0, 0.5, 0.0], [0.0, 0.5, 0.0], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.0], [0.0, 0.5, 0.0], [0.0, 0.5, 0.0]]) self.assertTrue(np.allclose(target_op_values, reduced_op_list, equal_nan=True)) print('--Test two separate signal generation for different price types--') # 测试两组PT类型的信号生成: # 在Operator对象中增加两个SigStrategy策略,策略类型相同但是策略的参数不同,回测价格类型为"OPEN" # Opeartor应该生成两组交易信号,分别用于"close"和"open"两中不同的价格类型 # 这里需要重新生成两个新的交易策略对象,否则在op的strategies列表中产生重复的对象引用,从而引起错误 test_ls = TestLSStrategy() test_sel = TestSelStrategy() self.op.add_strategies([test_ls, test_sel]) self.op.set_parameter(stg_id='custom_2', price_type='open') self.op.set_parameter(stg_id='custom_3', price_type='open') self.assertEqual(self.op['custom'].price_type, 'close') self.assertEqual(self.op['custom_2'].price_type, 'open') self.op.set_parameter(stg_id='custom_2', pars={'000010': (5, 10.), '000030': (5, 10.), '000039': (5, 6.)}) self.op.set_parameter(stg_id='custom_3', pars=()) self.op.set_blender(blender='0 or 1', price_type='open') self.op.prepare_data(hist_data=self.hp1, cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000)) print('--test how operator information is printed out--') self.op.info() self.assertEqual(self.op.strategy_blenders, {'close': ['*', '1', '0'], 'open': ['or', '1', '0']}) print('--test opeartion signal created in Proportional Target (PT) Mode--') op_list = self.op.create_signal(hist_data=self.hp1) self.assertTrue(isinstance(op_list, HistoryPanel)) signal_close = op_list['close'].squeeze().T signal_open = op_list['open'].squeeze().T self.assertEqual(signal_close.shape, (45, 3)) self.assertEqual(signal_open.shape, (45, 3)) target_op_close = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.5, 0.0, 0.0], [0.5, 0.0, 0.0], [0.5, 0.0, 0.0], [0.5, 0.0, 0.0], [0.5, 0.0, 0.0], [0.5, 0.0, 0.0], [0.5, 0.0, 0.0], [0.5, 0.0, 0.0], [0.5, 0.0, 0.0], [0.5, 0.0, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.0, 0.0], [0.5, 0.0, 0.0], [0.5, 0.5, 0.0], [0.0, 0.5, 0.0], [0.0, 0.5, 0.0], [0.0, 0.5, 0.0], [0.0, 0.5, 0.0], [0.0, 0.5, 0.0], [0.0, 0.5, 0.0], [0.0, 0.5, 0.0], [0.0, 0.5, 0.0], [0.0, 0.5, 0.0], [0.0, 0.5, 0.0], [0.0, 0.5, 0.0], [0.0, 0.5, 0.0], [0.0, 0.5, 0.0], [0.0, 0.5, 0.0], [0.0, 0.5, 0.0], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.0], [0.0, 0.5, 0.0], [0.0, 0.5, 0.0]]) target_op_open = np.array([[0.5, 0.5, 1.0], [0.5, 0.5, 1.0], [1.0, 0.5, 1.0], [1.0, 0.5, 1.0], [1.0, 0.5, 1.0], [1.0, 0.5, 1.0], [1.0, 0.5, 1.0], [1.0, 0.5, 1.0], [1.0, 0.5, 1.0], [1.0, 0.5, 1.0], [1.0, 0.5, 1.0], [1.0, 0.5, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 0.0], [1.0, 1.0, 0.0], [1.0, 1.0, 0.0], [1.0, 0.5, 0.0], [1.0, 0.5, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.5], [0.0, 1.0, 0.5], [0.0, 1.0, 0.5], [0.0, 1.0, 0.5], [0.0, 1.0, 0.5], [0.0, 1.0, 0.5], [0.0, 1.0, 0.5], [0.5, 1.0, 0.0], [0.5, 1.0, 0.0], [0.5, 1.0, 1.0], [0.5, 1.0, 1.0], [0.5, 1.0, 1.0], [0.5, 1.0, 1.0], [0.5, 1.0, 1.0], [0.5, 1.0, 1.0], [0.0, 1.0, 1.0], [0.0, 1.0, 1.0], [0.0, 1.0, 1.0], [0.0, 1.0, 1.0], 
[0.0, 1.0, 1.0], [0.0, 1.0, 1.0], [0.5, 1.0, 1.0], [0.5, 1.0, 1.0], [0.5, 1.0, 1.0]]) signal_pairs = [[list(sig1), list(sig2), sig1 == sig2] for sig1, sig2 in zip(list(target_op_close), list(signal_close))] print(f'signals side by side:\n' f'{signal_pairs}') self.assertTrue(np.allclose(target_op_close, signal_close, equal_nan=True)) signal_pairs = [[list(sig1), list(sig2), sig1 == sig2] for sig1, sig2 in zip(list(target_op_open), list(signal_open))] print(f'signals side by side:\n' f'{signal_pairs}') self.assertTrue(np.allclose(target_op_open, signal_open, equal_nan=True)) print('--Test two separate signal generation for different price types--') # 更多测试集合 def test_stg_parameter_setting(self): """ test setting parameters of strategies test the method set_parameters :return: """ op = qt.Operator(strategies='dma, all, urgent') print(op.strategies, '\n', [qt.TimingDMA, qt.SelectingAll, qt.RiconUrgent]) print(f'info of Timing strategy in new op: \n{op.strategies[0].info()}') # TODO: allow set_parameters to a list of strategies or str-listed strategies # TODO: allow set_parameters to all strategies of specific bt price type print(f'Set up strategy parameters by strategy id') op.set_parameter('dma', pars=(5, 10, 5), opt_tag=1, par_boes=((5, 10), (5, 15), (10, 15)), window_length=10, data_types=['close', 'open', 'high']) op.set_parameter('all', window_length=20) op.set_parameter('all', price_type='high') print(f'Can also set up strategy parameters by strategy index') op.set_parameter(2, price_type='open') op.set_parameter(2, opt_tag=1, pars=(9, -0.09), window_length=10) self.assertEqual(op.strategies[0].pars, (5, 10, 5)) self.assertEqual(op.strategies[0].par_boes, ((5, 10), (5, 15), (10, 15))) self.assertEqual(op.strategies[2].pars, (9, -0.09)) self.assertEqual(op.op_data_freq, 'd') self.assertEqual(op.op_data_types, ['close', 'high', 'open']) self.assertEqual(op.opt_space_par, ([(5, 10), (5, 15), (10, 15), (1, 40), (-0.5, 0.5)], ['discr', 'discr', 'discr', 'discr', 'conti'])) self.assertEqual(op.max_window_length, 20) print(f'KeyError will be raised if wrong strategy id is given') self.assertRaises(KeyError, op.set_parameter, stg_id='t-1', pars=(1, 2)) self.assertRaises(KeyError, op.set_parameter, stg_id='wrong_input', pars=(1, 2)) print(f'ValueError will be raised if parameter can be set') self.assertRaises(ValueError, op.set_parameter, stg_id=0, pars=('wrong input', 'wrong input')) # test blenders of different price types # test setting blenders to different price types # TODO: to allow operands like "and", "or", "not", "xor" # a_to_sell.set_blender('close', '0 and 1 or 2') # self.assertEqual(a_to_sell.get_blender('close'), 'str-1.2') self.assertEqual(op.bt_price_types, ['close', 'high', 'open']) op.set_blender('open', '0 & 1 | 2') self.assertEqual(op.get_blender('open'), ['|', '2', '&', '1', '0']) op.set_blender('high', '(0|1) & 2') self.assertEqual(op.get_blender('high'), ['&', '2', '|', '1', '0']) op.set_blender('close', '0 & 1 | 2') self.assertEqual(op.get_blender(), {'close': ['|', '2', '&', '1', '0'], 'high': ['&', '2', '|', '1', '0'], 'open': ['|', '2', '&', '1', '0']}) self.assertEqual(op.opt_space_par, ([(5, 10), (5, 15), (10, 15), (1, 40), (-0.5, 0.5)], ['discr', 'discr', 'discr', 'discr', 'conti'])) self.assertEqual(op.opt_tags, [1, 0, 1]) def test_signal_blend(self): self.assertEqual(blender_parser('0 & 1'), ['&', '1', '0']) self.assertEqual(blender_parser('0 or 1'), ['or', '1', '0']) self.assertEqual(blender_parser('0 & 1 | 2'), ['|', '2', '&', '1', '0']) blender = 
blender_parser('0 & 1 | 2') self.assertEqual(signal_blend([1, 1, 1], blender), 1) self.assertEqual(signal_blend([1, 0, 1], blender), 1) self.assertEqual(signal_blend([1, 1, 0], blender), 1) self.assertEqual(signal_blend([0, 1, 1], blender), 1) self.assertEqual(signal_blend([0, 0, 1], blender), 1) self.assertEqual(signal_blend([1, 0, 0], blender), 0) self.assertEqual(signal_blend([0, 1, 0], blender), 0) self.assertEqual(signal_blend([0, 0, 0], blender), 0) # parse: '0 & ( 1 | 2 )' self.assertEqual(blender_parser('0 & ( 1 | 2 )'), ['&', '|', '2', '1', '0']) blender = blender_parser('0 & ( 1 | 2 )') self.assertEqual(signal_blend([1, 1, 1], blender), 1) self.assertEqual(signal_blend([1, 0, 1], blender), 1) self.assertEqual(signal_blend([1, 1, 0], blender), 1) self.assertEqual(signal_blend([0, 1, 1], blender), 0) self.assertEqual(signal_blend([0, 0, 1], blender), 0) self.assertEqual(signal_blend([1, 0, 0], blender), 0) self.assertEqual(signal_blend([0, 1, 0], blender), 0) self.assertEqual(signal_blend([0, 0, 0], blender), 0) # parse: '(1-2)/3 + 0' self.assertEqual(blender_parser('(1-2)/3 + 0'), ['+', '0', '/', '3', '-', '2', '1']) blender = blender_parser('(1-2)/3 + 0') self.assertEqual(signal_blend([5, 9, 1, 4], blender), 7) # pars: '(0*1/2*(3+4))+5*(6+7)-8' self.assertEqual(blender_parser('(0*1/2*(3+4))+5*(6+7)-8'), ['-', '8', '+', '*', '+', '7', '6', '5', '*', '+', '4', '3', '/', '2', '*', '1', '0']) blender = blender_parser('(0*1/2*(3+4))+5*(6+7)-8') self.assertEqual(signal_blend([1, 1, 1, 1, 1, 1, 1, 1, 1], blender), 3) self.assertEqual(signal_blend([2, 1, 4, 3, 5, 5, 2, 2, 10], blender), 14) # parse: '0/max(2,1,3 + 5)+4' self.assertEqual(blender_parser('0/max(2,1,3 + 5)+4'), ['+', '4', '/', 'max(3)', '+', '5', '3', '1', '2', '0']) blender = blender_parser('0/max(2,1,3 + 5)+4') self.assertEqual(signal_blend([8.0, 4, 3, 5.0, 0.125, 5], blender), 0.925) self.assertEqual(signal_blend([2, 1, 4, 3, 5, 5, 2, 2, 10], blender), 5.25) print('speed test') import time st = time.time() blender = blender_parser('0+max(1,2,(3+4)*5, max(6, (7+8)*9), 10-11) * (12+13)') res = [] for i in range(10000): res = signal_blend([1, 1, 2, 3, 4, 5, 3, 4, 5, 6, 7, 8, 2, 3], blender) et = time.time() print(f'total time for RPN processing: {et - st}, got result: {res}') blender = blender_parser("0 + 1 * 2") self.assertEqual(signal_blend([1, 2, 3], blender), 7) blender = blender_parser("(0 + 1) * 2") self.assertEqual(signal_blend([1, 2, 3], blender), 9) blender = blender_parser("(0+1) * 2") self.assertEqual(signal_blend([1, 2, 3], blender), 9) blender = blender_parser("(0 + 1) * 2") self.assertEqual(signal_blend([1, 2, 3], blender), 9) # TODO: 目前对于-(1+2)这样的表达式还无法处理 # self.a_to_sell.set_blender('selecting', "-(0 + 1) * 2") # self.assertEqual(self.a_to_sell.signal_blend([1, 2, 3]), -9) blender = blender_parser("(0-1)/2 + 3") print(f'RPN of notation: "(0-1)/2 + 3" is:\n' f'{" ".join(blender[::-1])}') self.assertAlmostEqual(signal_blend([1, 2, 3, 0.0], blender), -0.33333333) blender = blender_parser("0 + 1 / 2") print(f'RPN of notation: "0 + 1 / 2" is:\n' f'{" ".join(blender[::-1])}') self.assertAlmostEqual(signal_blend([1, math.pi, 4], blender), 1.78539816) blender = blender_parser("(0 + 1) / 2") print(f'RPN of notation: "(0 + 1) / 2" is:\n' f'{" ".join(blender[::-1])}') self.assertEqual(signal_blend([1, 2, 3], blender), 1) blender = blender_parser("(0 + 1 * 2) / 3") print(f'RPN of notation: "(0 + 1 * 2) / 3" is:\n' f'{" ".join(blender[::-1])}') self.assertAlmostEqual(signal_blend([3, math.e, 10, 10], blender), 
3.0182818284590454) blender = blender_parser("0 / 1 * 2") print(f'RPN of notation: "0 / 1 * 2" is:\n' f'{" ".join(blender[::-1])}') self.assertEqual(signal_blend([1, 3, 6], blender), 2) blender = blender_parser("(0 - 1 + 2) * 4") print(f'RPN of notation: "(0 - 1 + 2) * 4" is:\n' f'{" ".join(blender[::-1])}') self.assertAlmostEqual(signal_blend([1, 1, -1, np.nan, math.pi], blender), -3.141592653589793) blender = blender_parser("0 * 1") print(f'RPN of notation: "0 * 1" is:\n' f'{" ".join(blender[::-1])}') self.assertAlmostEqual(signal_blend([math.pi, math.e], blender), 8.539734222673566) blender = blender_parser('abs(3-sqrt(2) / cos(1))') print(f'RPN of notation: "abs(3-sqrt(2) / cos(1))" is:\n' f'{" ".join(blender[::-1])}') self.assertEqual(blender, ['abs(1)', '-', '/', 'cos(1)', '1', 'sqrt(1)', '2', '3']) blender = blender_parser('0/max(2,1,3 + 5)+4') print(f'RPN of notation: "0/max(2,1,3 + 5)+4" is:\n' f'{" ".join(blender[::-1])}') self.assertEqual(blender, ['+', '4', '/', 'max(3)', '+', '5', '3', '1', '2', '0']) blender = blender_parser('1 + sum(1,2,3+3, sum(1, 2) + 3) *5') print(f'RPN of notation: "1 + sum(1,2,3+3, sum(1, 2) + 3) *5" is:\n' f'{" ".join(blender[::-1])}') self.assertEqual(blender, ['+', '*', '5', 'sum(4)', '+', '3', 'sum(2)', '2', '1', '+', '3', '3', '2', '1', '1']) blender = blender_parser('1+sum(1,2,(3+5)*4, sum(3, (4+5)*6), 7-8) * (2+3)') print(f'RPN of notation: "1+sum(1,2,(3+5)*4, sum(3, (4+5)*6), 7-8) * (2+3)" is:\n' f'{" ".join(blender[::-1])}') self.assertEqual(blender, ['+', '*', '+', '3', '2', 'sum(5)', '-', '8', '7', 'sum(2)', '*', '6', '+', '5', '4', '3', '*', '4', '+', '5', '3', '2', '1', '1']) # TODO: ndarray type of signals to be tested: def test_set_opt_par(self): """ test setting opt pars in batch""" print(f'--------- Testing setting Opt Pars: set_opt_par -------') op = qt.Operator('dma, random, crossline') op.set_parameter('dma', pars=(5, 10, 5), opt_tag=1, par_boes=((5, 10), (5, 15), (10, 15)), window_length=10, data_types=['close', 'open', 'high']) self.assertEqual(op.strategies[0].pars, (5, 10, 5)) self.assertEqual(op.strategies[1].pars, (0.5,)) self.assertEqual(op.strategies[2].pars, (35, 120, 10, 'buy')) self.assertEqual(op.opt_tags, [1, 0, 0]) op.set_opt_par((5, 12, 9)) self.assertEqual(op.strategies[0].pars, (5, 12, 9)) self.assertEqual(op.strategies[1].pars, (0.5,)) self.assertEqual(op.strategies[2].pars, (35, 120, 10, 'buy')) op.set_parameter('crossline', pars=(5, 10, 5, 'sell'), opt_tag=1, par_boes=((5, 10), (5, 15), (10, 15), ('buy', 'sell', 'none')), window_length=10, data_types=['close', 'open', 'high']) self.assertEqual(op.opt_tags, [1, 0, 1]) op.set_opt_par((5, 12, 9, 8, 26, 9, 'buy')) self.assertEqual(op.strategies[0].pars, (5, 12, 9)) self.assertEqual(op.strategies[1].pars, (0.5,)) self.assertEqual(op.strategies[2].pars, (8, 26, 9, 'buy')) op.set_opt_par((9, 200, 155, 8, 26, 9, 'buy', 5, 12, 9)) self.assertEqual(op.strategies[0].pars, (9, 200, 155)) self.assertEqual(op.strategies[1].pars, (0.5,)) self.assertEqual(op.strategies[2].pars, (8, 26, 9, 'buy')) # test set_opt_par when opt_tag is set to be 2 (enumerate type of parameters) op.set_parameter('crossline', pars=(5, 10, 5, 'sell'), opt_tag=2, par_boes=((5, 10), (5, 15), (10, 15), ('buy', 'sell', 'none')), window_length=10, data_types=['close', 'open', 'high']) self.assertEqual(op.opt_tags, [1, 0, 2]) self.assertEqual(op.strategies[0].pars, (9, 200, 155)) self.assertEqual(op.strategies[1].pars, (0.5,)) self.assertEqual(op.strategies[2].pars, (5, 10, 5, 'sell')) op.set_opt_par((5, 12, 
9, (8, 26, 9, 'buy'))) self.assertEqual(op.strategies[0].pars, (5, 12, 9)) self.assertEqual(op.strategies[1].pars, (0.5,)) self.assertEqual(op.strategies[2].pars, (8, 26, 9, 'buy')) # Test Errors # Not enough values for parameter op.set_parameter('crossline', opt_tag=1) self.assertRaises(ValueError, op.set_opt_par, (5, 12, 9, 8)) # wrong type of input self.assertRaises(AssertionError, op.set_opt_par, [5, 12, 9, 7, 15, 12, 'sell']) def test_stg_attribute_get_and_set(self): self.stg = qt.TimingCrossline() self.stg_type = 'R-TIMING' self.stg_name = "CROSSLINE" self.stg_text = 'Moving average crossline strategy, determine long/short position according to the cross ' \ 'point' \ ' of long and short term moving average prices ' self.pars = (35, 120, 10, 'buy') self.par_boes = [(10, 250), (10, 250), (1, 100), ('buy', 'sell', 'none')] self.par_count = 4 self.par_types = ['discr', 'discr', 'conti', 'enum'] self.opt_tag = 0 self.data_types = ['close'] self.data_freq = 'd' self.sample_freq = 'd' self.window_length = 270 self.assertEqual(self.stg.stg_type, self.stg_type) self.assertEqual(self.stg.stg_name, self.stg_name) self.assertEqual(self.stg.stg_text, self.stg_text) self.assertEqual(self.stg.pars, self.pars) self.assertEqual(self.stg.par_types, self.par_types) self.assertEqual(self.stg.par_boes, self.par_boes) self.assertEqual(self.stg.par_count, self.par_count) self.assertEqual(self.stg.opt_tag, self.opt_tag) self.assertEqual(self.stg.data_freq, self.data_freq) self.assertEqual(self.stg.sample_freq, self.sample_freq) self.assertEqual(self.stg.data_types, self.data_types) self.assertEqual(self.stg.window_length, self.window_length) self.stg.stg_name = 'NEW NAME' self.stg.stg_text = 'NEW TEXT' self.assertEqual(self.stg.stg_name, 'NEW NAME') self.assertEqual(self.stg.stg_text, 'NEW TEXT') self.stg.pars = (1, 2, 3, 4) self.assertEqual(self.stg.pars, (1, 2, 3, 4)) self.stg.par_count = 3 self.assertEqual(self.stg.par_count, 3) self.stg.par_boes = [(1, 10), (1, 10), (1, 10), (1, 10)] self.assertEqual(self.stg.par_boes, [(1, 10), (1, 10), (1, 10), (1, 10)]) self.stg.par_types = ['conti', 'conti', 'discr', 'enum'] self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'enum']) self.stg.par_types = 'conti, conti, discr, conti' self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'conti']) self.stg.data_types = 'close, open' self.assertEqual(self.stg.data_types, ['close', 'open']) self.stg.data_types = ['close', 'high', 'low'] self.assertEqual(self.stg.data_types, ['close', 'high', 'low']) self.stg.data_freq = 'w' self.assertEqual(self.stg.data_freq, 'w') self.stg.window_length = 300 self.assertEqual(self.stg.window_length, 300) def test_rolling_timing(self): stg = TestLSStrategy() stg_pars = {'000100': (5, 10), '000200': (5, 10), '000300': (5, 6)} stg.set_pars(stg_pars) history_data = self.hp1.values output = stg.generate(hist_data=history_data) self.assertIsInstance(output, np.ndarray) self.assertEqual(output.shape, (45, 3)) lsmask = np.array([[0., 0., 1.], [0., 0., 1.], [1., 0., 1.], [1., 0., 1.], [1., 0., 1.], [1., 0., 1.], [1., 0., 1.], [1., 0., 1.], [1., 0., 1.], [1., 0., 1.], [1., 0., 1.], [1., 0., 1.], [1., 1., 1.], [1., 1., 1.], [1., 1., 1.], [1., 1., 0.], [1., 1., 0.], [1., 1., 0.], [1., 0., 0.], [1., 0., 0.], [1., 1., 0.], [0., 1., 0.], [0., 1., 0.], [0., 1., 0.], [0., 1., 0.], [0., 1., 0.], [0., 1., 0.], [0., 1., 0.], [0., 1., 0.], [0., 1., 0.], [0., 1., 1.], [0., 1., 1.], [0., 1., 1.], [0., 1., 1.], [0., 1., 1.], [0., 1., 1.], [0., 1., 1.], [0., 1., 1.], [0., 1., 1.], 
[0., 1., 1.], [0., 1., 1.], [0., 1., 1.], [0., 1., 1.], [0., 1., 1.], [0., 1., 1.]]) # TODO: Issue to be solved: the np.nan value are converted to 0 in the lsmask,这样做可能会有意想不到的后果 # TODO: 需要解决nan值的问题 self.assertEqual(output.shape, lsmask.shape) self.assertTrue(np.allclose(output, lsmask, equal_nan=True)) def test_sel_timing(self): stg = TestSelStrategy() stg_pars = () stg.set_pars(stg_pars) history_data = self.hp1['high, low, close', :, :] seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq) self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49]) self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2]) self.assertEqual(seg_count, 8) output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates) self.assertIsInstance(output, np.ndarray) self.assertEqual(output.shape, (45, 3)) selmask = np.array([[0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0]]) self.assertEqual(output.shape, selmask.shape) self.assertTrue(np.allclose(output, selmask)) def test_simple_timing(self): stg = TestSigStrategy() stg_pars = (0.2, 0.02, -0.02) stg.set_pars(stg_pars) history_data = self.hp1['close, open, high, low', :, 3:50] output = stg.generate(hist_data=history_data, shares=self.shares, dates=self.date_indices) self.assertIsInstance(output, np.ndarray) self.assertEqual(output.shape, (45, 3)) sigmatrix = np.array([[0.0, 1.0, 0.0], [0.0, 0.0, 0.0], [0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 1.0, 1.0], [0.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0], [0.0, 0.0, 0.0], [0.0, 1.0, 0.0]]) side_by_side_array = np.array([[i, out_line, sig_line] for i, out_line, sig_line in zip(range(len(output)), output, sigmatrix)]) print(f'output and signal matrix lined up side by side is \n' f'{side_by_side_array}') self.assertEqual(sigmatrix.shape, output.shape) self.assertTrue(np.allclose(output, sigmatrix)) def test_sel_finance(self): """Test selecting_finance strategy, test all built-in strategy parameters""" stg = SelectingFinanceIndicator() stg_pars = (False, 'even', 'greater', 0, 0, 0.67) stg.set_pars(stg_pars) stg.window_length = 5 stg.data_freq = 'd' stg.sample_freq = '10d' stg.sort_ascending = False stg.condition = 'greater' stg.lbound = 0 stg.ubound 
= 0 stg._poq = 0.67 history_data = self.hp2.values print(f'Start to test financial selection parameter {stg_pars}') seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq) self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49]) self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2]) self.assertEqual(seg_count, 8) output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates) self.assertIsInstance(output, np.ndarray) self.assertEqual(output.shape, (45, 3)) selmask = np.array([[0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.5, 0.0, 0.5], [0.5, 0.0, 0.5], [0.5, 0.0, 0.5], [0.5, 0.0, 0.5], [0.5, 0.0, 0.5], [0.5, 0.0, 0.5], [0.5, 0.0, 0.5], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.5, 0.0, 0.5], [0.5, 0.0, 0.5], [0.5, 0.0, 0.5], [0.5, 0.0, 0.5], [0.5, 0.0, 0.5], [0.5, 0.0, 0.5], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0]]) self.assertEqual(output.shape, selmask.shape) self.assertTrue(np.allclose(output, selmask)) # test single factor, get mininum factor stg_pars = (True, 'even', 'less', 1, 1, 0.67) stg.sort_ascending = True stg.condition = 'less' stg.lbound = 1 stg.ubound = 1 stg.set_pars(stg_pars) print(f'Start to test financial selection parameter {stg_pars}') output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates) selmask = np.array([[0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.0, 0.5], [0.5, 0.0, 0.5], [0.5, 0.0, 0.5], [0.5, 0.0, 0.5], [0.5, 0.0, 0.5], [0.5, 0.0, 0.5], [0.5, 0.0, 0.5], [0.5, 0.0, 0.5], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.0, 0.5], [0.5, 0.0, 0.5], [0.5, 0.0, 0.5]]) self.assertEqual(output.shape, selmask.shape) self.assertTrue(np.allclose(output, selmask)) # test single factor, get max factor in linear weight stg_pars = (False, 'linear', 'greater', 0, 0, 0.67) stg.sort_ascending = False stg.weighting = 'linear' stg.condition = 'greater' stg.lbound = 0 stg.ubound = 0 stg.set_pars(stg_pars) print(f'Start to test financial selection parameter {stg_pars}') output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates) selmask = np.array([[0.00000, 0.33333, 0.66667], [0.00000, 0.33333, 0.66667], [0.00000, 0.33333, 0.66667], [0.00000, 0.33333, 0.66667], [0.00000, 0.33333, 0.66667], [0.00000, 0.33333, 0.66667], [0.00000, 0.66667, 0.33333], [0.00000, 0.66667, 0.33333], [0.00000, 0.66667, 0.33333], [0.00000, 0.66667, 0.33333], [0.00000, 0.66667, 0.33333], [0.00000, 0.66667, 0.33333], [0.00000, 0.66667, 0.33333], [0.00000, 0.66667, 0.33333], [0.00000, 0.33333, 0.66667], [0.00000, 0.33333, 0.66667], [0.00000, 0.33333, 
0.66667], [0.00000, 0.33333, 0.66667], [0.00000, 0.33333, 0.66667], [0.00000, 0.33333, 0.66667], [0.00000, 0.33333, 0.66667], [0.33333, 0.00000, 0.66667], [0.33333, 0.00000, 0.66667], [0.33333, 0.00000, 0.66667], [0.33333, 0.00000, 0.66667], [0.33333, 0.00000, 0.66667], [0.33333, 0.00000, 0.66667], [0.33333, 0.00000, 0.66667], [0.00000, 0.00000, 1.00000], [0.00000, 0.00000, 1.00000], [0.00000, 0.00000, 1.00000], [0.00000, 0.00000, 1.00000], [0.00000, 0.00000, 1.00000], [0.00000, 0.00000, 1.00000], [0.00000, 0.00000, 1.00000], [0.00000, 0.00000, 1.00000], [0.33333, 0.00000, 0.66667], [0.33333, 0.00000, 0.66667], [0.33333, 0.00000, 0.66667], [0.33333, 0.00000, 0.66667], [0.33333, 0.00000, 0.66667], [0.33333, 0.00000, 0.66667], [0.33333, 0.66667, 0.00000], [0.33333, 0.66667, 0.00000], [0.33333, 0.66667, 0.00000]]) self.assertEqual(output.shape, selmask.shape) self.assertTrue(np.allclose(output, selmask)) # test single factor, get max factor in linear weight stg_pars = (False, 'proportion', 'greater', 0, 0, 0.67) stg.sort_ascending = False stg.weighting = 'proportion' stg.condition = 'greater' stg.lbound = 0 stg.ubound = 0 stg.set_pars(stg_pars) print(f'Start to test financial selection parameter {stg_pars}') output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates) selmask = np.array([[0.00000, 0.08333, 0.91667], [0.00000, 0.08333, 0.91667], [0.00000, 0.08333, 0.91667], [0.00000, 0.08333, 0.91667], [0.00000, 0.08333, 0.91667], [0.00000, 0.08333, 0.91667], [0.00000, 0.91667, 0.08333], [0.00000, 0.91667, 0.08333], [0.00000, 0.91667, 0.08333], [0.00000, 0.91667, 0.08333], [0.00000, 0.91667, 0.08333], [0.00000, 0.91667, 0.08333], [0.00000, 0.91667, 0.08333], [0.00000, 0.91667, 0.08333], [0.00000, 0.50000, 0.50000], [0.00000, 0.50000, 0.50000], [0.00000, 0.50000, 0.50000], [0.00000, 0.50000, 0.50000], [0.00000, 0.50000, 0.50000], [0.00000, 0.50000, 0.50000], [0.00000, 0.50000, 0.50000], [0.08333, 0.00000, 0.91667], [0.08333, 0.00000, 0.91667], [0.08333, 0.00000, 0.91667], [0.08333, 0.00000, 0.91667], [0.08333, 0.00000, 0.91667], [0.08333, 0.00000, 0.91667], [0.08333, 0.00000, 0.91667], [0.00000, 0.00000, 1.00000], [0.00000, 0.00000, 1.00000], [0.00000, 0.00000, 1.00000], [0.00000, 0.00000, 1.00000], [0.00000, 0.00000, 1.00000], [0.00000, 0.00000, 1.00000], [0.00000, 0.00000, 1.00000], [0.00000, 0.00000, 1.00000], [0.08333, 0.00000, 0.91667], [0.08333, 0.00000, 0.91667], [0.08333, 0.00000, 0.91667], [0.08333, 0.00000, 0.91667], [0.08333, 0.00000, 0.91667], [0.08333, 0.00000, 0.91667], [0.08333, 0.91667, 0.00000], [0.08333, 0.91667, 0.00000], [0.08333, 0.91667, 0.00000]]) self.assertEqual(output.shape, selmask.shape) self.assertTrue(np.allclose(output, selmask, 0.001)) # test single factor, get max factor in linear weight, threshold 0.2 stg_pars = (False, 'even', 'greater', 0.2, 0.2, 0.67) stg.sort_ascending = False stg.weighting = 'even' stg.condition = 'greater' stg.lbound = 0.2 stg.ubound = 0.2 stg.set_pars(stg_pars) print(f'Start to test financial selection parameter {stg_pars}') output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates) selmask = np.array([[0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.0, 1.0], 
[0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], [0.5, 0.5, 0.0]]) self.assertEqual(output.shape, selmask.shape) self.assertTrue(np.allclose(output, selmask, 0.001)) def test_tokenizer(self): self.assertListEqual(_exp_to_token('1+1'), ['1', '+', '1']) print(_exp_to_token('1+1')) self.assertListEqual(_exp_to_token('1 & 1'), ['1', '&', '1']) print(_exp_to_token('1&1')) self.assertListEqual(_exp_to_token('1 and 1'), ['1', 'and', '1']) print(_exp_to_token('1 and 1')) self.assertListEqual(_exp_to_token('1 or 1'), ['1', 'or', '1']) print(_exp_to_token('1 or 1')) self.assertListEqual(_exp_to_token('(1 - 1 + -1) * pi'), ['(', '1', '-', '1', '+', '-1', ')', '*', 'pi']) print(_exp_to_token('(1 - 1 + -1) * pi')) self.assertListEqual(_exp_to_token('abs(5-sqrt(2) / cos(pi))'), ['abs(', '5', '-', 'sqrt(', '2', ')', '/', 'cos(', 'pi', ')', ')']) print(_exp_to_token('abs(5-sqrt(2) / cos(pi))')) self.assertListEqual(_exp_to_token('sin(pi) + 2.14'), ['sin(', 'pi', ')', '+', '2.14']) print(_exp_to_token('sin(pi) + 2.14')) self.assertListEqual(_exp_to_token('(1-2)/3.0 + 0.0000'), ['(', '1', '-', '2', ')', '/', '3.0', '+', '0.0000']) print(_exp_to_token('(1-2)/3.0 + 0.0000')) self.assertListEqual(_exp_to_token('-(1. + .2) * max(1, 3, 5)'), ['-', '(', '1.', '+', '.2', ')', '*', 'max(', '1', ',', '3', ',', '5', ')']) print(_exp_to_token('-(1. + .2) * max(1, 3, 5)')) self.assertListEqual(_exp_to_token('(x + e * 10) / 10'), ['(', 'x', '+', 'e', '*', '10', ')', '/', '10']) print(_exp_to_token('(x + e * 10) / 10')) self.assertListEqual(_exp_to_token('8.2/((-.1+abs3(3,4,5))*0.12)'), ['8.2', '/', '(', '(', '-.1', '+', 'abs3(', '3', ',', '4', ',', '5', ')', ')', '*', '0.12', ')']) print(_exp_to_token('8.2/((-.1+abs3(3,4,5))*0.12)')) self.assertListEqual(_exp_to_token('8.2/abs3(3,4,25.34 + 5)*0.12'), ['8.2', '/', 'abs3(', '3', ',', '4', ',', '25.34', '+', '5', ')', '*', '0.12']) print(_exp_to_token('8.2/abs3(3,4,25.34 + 5)*0.12')) class TestLog(unittest.TestCase): def test_init(self): pass class TestConfig(unittest.TestCase): """测试Config对象以及QT_CONFIG变量的设置和获取值""" def test_init(self): pass def test_invest(self): pass def test_pars_string_to_type(self): _parse_string_kwargs('000300', 'asset_pool', _valid_qt_kwargs()) class TestHistoryPanel(unittest.TestCase): def setUp(self): print('start testing HistoryPanel object\n') self.data = np.random.randint(10, size=(5, 10, 4)) self.index = pd.date_range(start='20200101', freq='d', periods=10) self.index2 = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06', '2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12', '2016-07-13', '2016-07-14'] self.index3 = '2016-07-01, 2016-07-04, 2016-07-05, 2016-07-06, 2016-07-07, ' \ '2016-07-08, 2016-07-11, 2016-07-12, 2016-07-13, 2016-07-14' self.shares = '000100,000101,000102,000103,000104' self.htypes = 'close,open,high,low' self.data2 = np.random.randint(10, size=(10, 5)) self.data3 = np.random.randint(10, size=(10, 4)) self.data4 = np.random.randint(10, size=(10)) self.hp = qt.HistoryPanel(values=self.data, levels=self.shares, columns=self.htypes, rows=self.index) self.hp2 = qt.HistoryPanel(values=self.data2, levels=self.shares, columns='close', rows=self.index) self.hp3 = qt.HistoryPanel(values=self.data3, 
levels='000100', columns=self.htypes, rows=self.index2) self.hp4 = qt.HistoryPanel(values=self.data4, levels='000100', columns='close', rows=self.index3) self.hp5 = qt.HistoryPanel(values=self.data) self.hp6 = qt.HistoryPanel(values=self.data, levels=self.shares, rows=self.index3) def test_properties(self): """ test all properties of HistoryPanel """ self.assertFalse(self.hp.is_empty) self.assertEqual(self.hp.row_count, 10) self.assertEqual(self.hp.column_count, 4) self.assertEqual(self.hp.level_count, 5) self.assertEqual(self.hp.shape, (5, 10, 4)) self.assertSequenceEqual(self.hp.htypes, ['close', 'open', 'high', 'low']) self.assertSequenceEqual(self.hp.shares, ['000100', '000101', '000102', '000103', '000104']) self.assertSequenceEqual(list(self.hp.hdates), list(self.index)) self.assertDictEqual(self.hp.columns, {'close': 0, 'open': 1, 'high': 2, 'low': 3}) self.assertDictEqual(self.hp.levels, {'000100': 0, '000101': 1, '000102': 2, '000103': 3, '000104': 4}) row_dict = {Timestamp('2020-01-01 00:00:00', freq='D'): 0, Timestamp('2020-01-02 00:00:00', freq='D'): 1, Timestamp('2020-01-03 00:00:00', freq='D'): 2, Timestamp('2020-01-04 00:00:00', freq='D'): 3, Timestamp('2020-01-05 00:00:00', freq='D'): 4, Timestamp('2020-01-06 00:00:00', freq='D'): 5, Timestamp('2020-01-07 00:00:00', freq='D'): 6, Timestamp('2020-01-08 00:00:00', freq='D'): 7, Timestamp('2020-01-09 00:00:00', freq='D'): 8, Timestamp('2020-01-10 00:00:00', freq='D'): 9} self.assertDictEqual(self.hp.rows, row_dict) def test_len(self): """ test the function len(HistoryPanel) :return: """ self.assertEqual(len(self.hp), 10) def test_empty_history_panel(self): """测试空HP或者特殊HP如维度标签为纯数字的HP""" test_hp = qt.HistoryPanel(self.data) self.assertFalse(test_hp.is_empty) self.assertIsInstance(test_hp, qt.HistoryPanel) self.assertEqual(test_hp.shape[0], 5) self.assertEqual(test_hp.shape[1], 10) self.assertEqual(test_hp.shape[2], 4) self.assertEqual(test_hp.level_count, 5) self.assertEqual(test_hp.row_count, 10) self.assertEqual(test_hp.column_count, 4) self.assertEqual(test_hp.shares, list(range(5))) self.assertEqual(test_hp.hdates, list(pd.date_range(start='20200730', periods=10, freq='d'))) self.assertEqual(test_hp.htypes, list(range(4))) self.assertTrue(np.allclose(test_hp.values, self.data)) print(f'shares: {test_hp.shares}\nhtypes: {test_hp.htypes}') print(test_hp) # HistoryPanel should be empty if no value is given empty_hp = qt.HistoryPanel() self.assertTrue(empty_hp.is_empty) self.assertIsInstance(empty_hp, qt.HistoryPanel) self.assertEqual(empty_hp.shape[0], 0) self.assertEqual(empty_hp.shape[1], 0) self.assertEqual(empty_hp.shape[2], 0) self.assertEqual(empty_hp.level_count, 0) self.assertEqual(empty_hp.row_count, 0) self.assertEqual(empty_hp.column_count, 0) # HistoryPanel should also be empty if empty value (np.array([])) is given empty_hp = qt.HistoryPanel(np.empty((5, 0, 4)), levels=self.shares, columns=self.htypes) self.assertTrue(empty_hp.is_empty) self.assertIsInstance(empty_hp, qt.HistoryPanel) self.assertEqual(empty_hp.shape[0], 0) self.assertEqual(empty_hp.shape[1], 0) self.assertEqual(empty_hp.shape[2], 0) self.assertEqual(empty_hp.level_count, 0) self.assertEqual(empty_hp.row_count, 0) self.assertEqual(empty_hp.column_count, 0) def test_create_history_panel(self): """ test the creation of a HistoryPanel object by passing all data explicitly """ self.assertIsInstance(self.hp, qt.HistoryPanel) self.assertEqual(self.hp.shape[0], 5) self.assertEqual(self.hp.shape[1], 10) self.assertEqual(self.hp.shape[2], 4) 
self.assertEqual(self.hp.level_count, 5) self.assertEqual(self.hp.row_count, 10) self.assertEqual(self.hp.column_count, 4) self.assertEqual(list(self.hp.levels.keys()), self.shares.split(',')) self.assertEqual(list(self.hp.columns.keys()), self.htypes.split(',')) self.assertEqual(list(self.hp.rows.keys())[0], pd.Timestamp('20200101')) self.assertIsInstance(self.hp2, qt.HistoryPanel) self.assertEqual(self.hp2.shape[0], 5) self.assertEqual(self.hp2.shape[1], 10) self.assertEqual(self.hp2.shape[2], 1) self.assertEqual(self.hp2.level_count, 5) self.assertEqual(self.hp2.row_count, 10) self.assertEqual(self.hp2.column_count, 1) self.assertEqual(list(self.hp2.levels.keys()), self.shares.split(',')) self.assertEqual(list(self.hp2.columns.keys()), ['close']) self.assertEqual(list(self.hp2.rows.keys())[0], pd.Timestamp('20200101')) self.assertIsInstance(self.hp3, qt.HistoryPanel) self.assertEqual(self.hp3.shape[0], 1) self.assertEqual(self.hp3.shape[1], 10) self.assertEqual(self.hp3.shape[2], 4) self.assertEqual(self.hp3.level_count, 1) self.assertEqual(self.hp3.row_count, 10) self.assertEqual(self.hp3.column_count, 4) self.assertEqual(list(self.hp3.levels.keys()), ['000100']) self.assertEqual(list(self.hp3.columns.keys()), self.htypes.split(',')) self.assertEqual(list(self.hp3.rows.keys())[0], pd.Timestamp('2016-07-01')) self.assertIsInstance(self.hp4, qt.HistoryPanel) self.assertEqual(self.hp4.shape[0], 1) self.assertEqual(self.hp4.shape[1], 10) self.assertEqual(self.hp4.shape[2], 1) self.assertEqual(self.hp4.level_count, 1) self.assertEqual(self.hp4.row_count, 10) self.assertEqual(self.hp4.column_count, 1) self.assertEqual(list(self.hp4.levels.keys()), ['000100']) self.assertEqual(list(self.hp4.columns.keys()), ['close']) self.assertEqual(list(self.hp4.rows.keys())[0], pd.Timestamp('2016-07-01')) self.hp5.info() self.assertIsInstance(self.hp5, qt.HistoryPanel) self.assertTrue(np.allclose(self.hp5.values, self.data)) self.assertEqual(self.hp5.shape[0], 5) self.assertEqual(self.hp5.shape[1], 10) self.assertEqual(self.hp5.shape[2], 4) self.assertEqual(self.hp5.level_count, 5) self.assertEqual(self.hp5.row_count, 10) self.assertEqual(self.hp5.column_count, 4) self.assertEqual(list(self.hp5.levels.keys()), [0, 1, 2, 3, 4]) self.assertEqual(list(self.hp5.columns.keys()), [0, 1, 2, 3]) self.assertEqual(list(self.hp5.rows.keys())[0], pd.Timestamp('2020-07-30')) self.hp6.info() self.assertIsInstance(self.hp6, qt.HistoryPanel) self.assertTrue(np.allclose(self.hp6.values, self.data)) self.assertEqual(self.hp6.shape[0], 5) self.assertEqual(self.hp6.shape[1], 10) self.assertEqual(self.hp6.shape[2], 4) self.assertEqual(self.hp6.level_count, 5) self.assertEqual(self.hp6.row_count, 10) self.assertEqual(self.hp6.column_count, 4) self.assertEqual(list(self.hp6.levels.keys()), ['000100', '000101', '000102', '000103', '000104']) self.assertEqual(list(self.hp6.columns.keys()), [0, 1, 2, 3]) self.assertEqual(list(self.hp6.rows.keys())[0], pd.Timestamp('2016-07-01')) print('test creating HistoryPanel with very limited data') print('test creating HistoryPanel with 2D data') temp_data =
np.random.randint(10, size=(7, 3))
numpy.random.randint
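A minimal sketch (not part of the dataset row above) of what the completed call produces; the shape is illustrative only:

import numpy as np

# numpy.random.randint(low, size=...) draws uniform integers in [0, low)
temp_data = np.random.randint(10, size=(7, 3))   # e.g. 7 dates x 3 columns of fake price data
print(temp_data.shape, temp_data.min() >= 0, temp_data.max() < 10)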
import cv2 as cv
import argparse
import os
import numpy as np


def nearest_resize(pic, scale):
    """
    nearest-neighbour resize
    :param pic: img
    :param scale: scale > 0
    :return: resized img
    """
    h, w, c = pic.shape  # height, width, channel
    th, tw = int(h * scale), int(w * scale)
    # avoid going out of bounds of the original img
    pic = np.pad(pic, ((0, 1), (0, 1), (0, 0)), 'reflect')
    emptyImage = np.zeros((th, tw, c), np.uint8)
    h_scale = h / th
    w_scale = w / tw
    for i in range(th):
        for j in range(tw):
            # first, find the corresponding (x, y) location in the original img
            corr_x = (i + 0.5) * h_scale - 0.5
            corr_y = (j + 0.5) * w_scale - 0.5
            emptyImage[i, j, :] = pic[int(corr_x), int(corr_y), :]
    return emptyImage


def conv2D(img, kernel, padding, stride):
    """
    convolution 2D
    :param img: img with size [h,w,c]
    :param kernel: with size [kh,kw,c] or [kh,kw]
    :param padding: padding number
    :param stride: stride
    :return: filtered img
    """
    h, w, c = img.shape  # height, width, channel
    if kernel.ndim == 2:
        kernel = np.expand_dims(kernel, axis=-1)
    kh, kw, kc = kernel.shape  # kernel h, kernel w, kernel channel
    assert kc == 1 or kc == c
    oh = int((h - kh + 2 * padding) / stride) + 1  # out height
    ow = int((w - kw + 2 * padding) / stride) + 1  # out width
    # out
    emptyImage = np.zeros((oh, ow, c), float)
    img = np.pad(img, ((padding, padding), (padding, padding), (0, 0)), 'constant')
    for i in range(oh):
        for j in range(ow):
            i_idx = i * stride
            j_idx = j * stride
            tmp_out = img[i_idx:i_idx + kh, j_idx:j_idx + kw, :] * kernel
            for cc in range(c):
                emptyImage[i, j, cc] =
np.sum(tmp_out[:,:,cc])
numpy.sum
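The conv2D prompt above leans on the usual output-size formula oh = (h - kh + 2*padding)//stride + 1; a quick numeric check of that arithmetic and of the per-channel reduction, with illustrative sizes only:

import numpy as np

h, kh, padding, stride = 5, 3, 1, 1
oh = int((h - kh + 2 * padding) / stride) + 1   # (5 - 3 + 2)/1 + 1 = 5, i.e. output keeps the input size
assert oh == 5

# the accumulation at the truncation point is a plain sum over one channel
tmp = np.ones((kh, kh, 1))
print(np.sum(tmp[:, :, 0]))   # 9.0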
import numpy as np

from fidimag.common import CuboidMesh
from fidimag.micro import TimeZeeman


def time_fun(t, frequency):
    return 10*np.cos(frequency*t)


def fixture_setup(nx, ny, nz):
    """
    Fixtures for the tests
    """
    dx = 10.0/nx
    spin = np.zeros(3*nx*ny*nz)
    Ms = np.zeros(nx*ny*nz)
    Ms_inv =
np.zeros(nx*ny*nz)
numpy.zeros
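A minimal sketch of the allocation pattern in the fixture above (the mesh dimensions here are illustrative, not the test's): three spin components per cell versus one Ms value per cell:

import numpy as np

nx, ny, nz = 4, 3, 2
n_cells = nx * ny * nz
spin = np.zeros(3 * n_cells)   # 3 spin components per cell
Ms = np.zeros(n_cells)         # one saturation-magnetisation value per cell
print(spin.shape, Ms.shape)    # (72,) (24,)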
from stuff import *

# Get weekday pattern from case data in order to identify exact date on SGTF graph
# 0 mod 7 is Thursday in daytodate notation (being 1970-01-01)
nc={}
with open('SAcases','r') as fp:
  for x in fp:
    y=x.split()
    nc[datetoday(y[0])]=int(y[1])

minday=min(nc)
maxday=max(nc)
c0=[0]*7
c1=[0]*7
for d in range(minday+3,maxday-3):
  ex=[nc[r] for r in range(d-3,d+4)]
  if min(ex)>=50:
    i=d%7
    c0[i]+=1
    c1[i]+=nc[d]*7/sum(ex)
#for i in range(7):
#  print(i,c1[i]/c0[i])
# Thur 1.184
# Fri  1.170
# Sat  1.122
# Sun  0.913
# Mon  0.655
# Tue  0.766
# Wed  1.158

if 0:
  infile='OmicronSGTF.png'
  dateorigin=datetoday('2021-10-01')-564
  row0,row1=23,359
  col0,col1=81,614
  y0=(0,358);y1=(50,43)
  z0=(0,357);z1=(1600,126)

if 1:
  infile='OmicronSGTF_frompdf.png'
  dateorigin=datetoday('2021-10-01')-564
  row0,row1=11,345
  col0,col1=81,614
  y0=(0,344.5);y1=(50,32)
  z0=(0,344.5);z1=(2000,57.5)

# SGTF image from slide 12 of https://sacoronavirus.co.za/2021/11/25/sars-cov-2-sequencing-new-variant-update-25-november-2021/
# resized down by a factor of 2/3 in order to get 1 horizontal pixel = 1 day.

from PIL import Image
import numpy as np

im_frame = Image.open(infile)
cc = np.array(im_frame,dtype=int)
im_frame.close()
# Top-leftian, row before column
r=cc.shape[0]
c=cc.shape[1]

# Get blueness
bb=cc[:,:,2]*2-(cc[:,:,0]+cc[:,:,1])

def process(bb,name):
  bb1=bb[row0:row1,:]
  mm=row0+np.argmax(bb1,axis=0)
  im=Image.fromarray(((bb-bb.min())/(bb.max()-bb.min())*255.999+0.0005).astype(np.dtype('uint8')))
  im.save(name+'_filtered.png')
  oo=cc.astype(np.dtype('uint8'))
  for x in range(col0,col1):
    oo[mm[x],x]=[255,0,0]
  im=Image.fromarray(oo)
  im.save(name+'_sgtf.png')
  sgtf={}
  for x in range(col0,col1):
    sgtf[daytodate(dateorigin+x)]=(mm[x]-y1[1])/(y0[1]-y1[1])*(y0[0]-y1[0])+y1[0]
  with open(name+'_sgtf','w') as fp:
    for date in sorted(list(sgtf)):
      print(date,"%6.2f"%sgtf[date],file=fp)
  return mm,sgtf

process(bb,'simple')

lrantialias=bb-np.maximum(np.roll(bb,1,1),np.roll(bb,-1,1))
process(lrantialias,'LRantialias')

# Hybrid because deantialiasing method is likely to work well for the vertical spike, but not when derivative is low.
spike=605
hybrid=np.concatenate([bb[:,:spike],lrantialias[:,spike:]],axis=1)
mm,sgtf=process(hybrid,'hybrid')

dd=cc[:,:,0]-np.maximum(cc[:,:,1],cc[:,:,2])
oo=(dd>3).astype(np.dtype('uint8'))*255
im=Image.fromarray(oo)
im.save('temp.png')

ee=(dd>3)*1000+np.tile(
np.arange(r-1,-1,-1)
numpy.arange
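A small sketch of the pixel-to-value calibration that process() performs above, reusing the hard-coded (value, pixel) pairs y0 and y1 from the script:

y0 = (0, 344.5)   # value 0 sits at pixel row 344.5
y1 = (50, 32)     # value 50 sits at pixel row 32

def pixel_to_value(row):
    # linear interpolation between the two calibration points
    return (row - y1[1]) / (y0[1] - y1[1]) * (y0[0] - y1[0]) + y1[0]

print(pixel_to_value(344.5))  # 0.0
print(pixel_to_value(32))     # 50.0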
# -*- coding: utf-8 -*-
# Created on Tue Dec 18 16:49:20 2019
# @author: arthurd

"""
HexGrid Module.
Generate Hexagonal grid.
Cartesian-Hexagonal coordinates interaction.
"""

import numpy as np
import math
import matplotlib.pyplot as plt
from pyproj import Proj, Transformer


def hexbin_grid(bbox, side_length=1, proj_init=None, proj_out=None):
    """
    Create a grid of hexagons.
    See http://www.calculatorsoup.com/calculators/geometry-plane/polygon.php

    Parameters
    ----------
    bbox : Tuple
        Box of the area to generate the hexagons.
        Format : Lower X, Lower Y, Upper X, Upper Y.
    side_length : float, optional
        Side length of the hexagons. The default is 1.
    proj_init : String, optional
        If working with coordinates and the hexagons need to be calculated
        in another coordinate system, proj_init refers to the starting
        coordinate system. The default is None.
    proj_out : String, optional
        If working with coordinates and the hexagons need to be calculated
        in another coordinate system, proj_out refers to the ending
        coordinate system. The default is None.

    Example
    -------
    If the bbox is in geographic coordinates, but the hexgrid should be
    computed on the web mercator system. Then,
        >>> proj_init="epsg:4326"
        >>> proj_out="epsg:3857"

    Returns
    -------
    polygons : List
        List of hexagons. A hexagon is a list of coordinates (tuple, Lat, Lon).
    """
    startx = bbox[0]
    starty = bbox[1]
    endx = bbox[2]
    endy = bbox[3]

    proj = proj_init != proj_out
    if proj:
        transformer = Transformer.from_proj(Proj(init=proj_init), Proj(init=proj_out))
        startx, starty = transformer.transform(startx, starty)
        endx, endy = transformer.transform(endx, endy)

    # calculate coordinates of the hexagon points
    p = side_length * 0.5
    b = side_length * math.cos(math.radians(30))
    w = b * 2
    h = 2 * side_length

    # offset start and end coordinates by hex widths and heights to guarantee coverage
    startx = startx - w
    starty = starty - h/2
    endx = endx
    endy = endy

    origx = startx

    # offsets for moving along and up rows
    xoffset = b
    yoffset = 3 * p

    P1 = np.empty((0, 2))
    P2 = np.empty((0, 2))
    P3 = np.empty((0, 2))
    P4 = np.empty((0, 2))
    P5 = np.empty((0, 2))
    P6 = np.empty((0, 2))

    row = 0
    while starty < endy:
        if row % 2 == 0:
            startx = origx + xoffset
        else:
            startx = origx + w
        while startx <= endx:
            p1 = [startx, starty + p]
            p2 = [startx, starty + (3 * p)]
            p3 = [startx + b, starty + h]
            p4 = [startx + w, starty + (3 * p)]
            p5 = [startx + w, starty + p]
            p6 = [startx + b, starty]
            P1 = np.vstack((P1, p1))
            P2 = np.vstack((P2, p2))
            P3 = np.vstack((P3, p3))
            P4 = np.vstack((P4, p4))
            P5 =
np.vstack((P5, p5))
numpy.vstack
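The hexagon dimensions in the prompt above follow from the side length alone; a quick numeric check for side_length = 1 (a regular hexagon):

import math

side_length = 1.0
p = side_length * 0.5                          # half side
b = side_length * math.cos(math.radians(30))   # apothem, about 0.866
w = 2 * b                                      # hexagon width, about 1.732
h = 2 * side_length                            # hexagon height
print(round(b, 3), round(w, 3), h)             # 0.866 1.732 2.0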
""" Visualize Genetic Algorithm to find the shortest path for travel sales problem. Visit my tutorial website for more: https://mofanpy.com/tutorials/ """ import matplotlib.pyplot as plt import numpy as np N_CITIES = 20 # DNA size CROSS_RATE = 0.1 MUTATE_RATE = 0.02 POP_SIZE = 500 N_GENERATIONS = 500 class GA(object): def __init__(self, DNA_size, cross_rate, mutation_rate, pop_size, ): self.DNA_size = DNA_size self.cross_rate = cross_rate self.mutate_rate = mutation_rate self.pop_size = pop_size self.pop = np.vstack([np.random.permutation(DNA_size) for _ in range(pop_size)]) def translateDNA(self, DNA, city_position): # get cities' coord in order line_x = np.empty_like(DNA, dtype=np.float64) line_y = np.empty_like(DNA, dtype=np.float64) for i, d in enumerate(DNA): city_coord = city_position[d] line_x[i, :] = city_coord[:, 0] line_y[i, :] = city_coord[:, 1] return line_x, line_y def get_fitness(self, line_x, line_y): total_distance = np.empty((line_x.shape[0],), dtype=np.float64) for i, (xs, ys) in enumerate(zip(line_x, line_y)): total_distance[i] = np.sum(np.sqrt(np.square(np.diff(xs)) + np.square(np.diff(ys)))) fitness = np.exp(self.DNA_size * 2 / total_distance) return fitness, total_distance def select(self, fitness): idx = np.random.choice(np.arange(self.pop_size), size=self.pop_size, replace=True, p=fitness / fitness.sum()) return self.pop[idx] def crossover(self, parent, pop): if np.random.rand() < self.cross_rate: i_ = np.random.randint(0, self.pop_size, size=1) # select another individual from pop cross_points =
np.random.randint(0, 2, self.DNA_size)
numpy.random.randint
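A minimal sketch (with illustrative sizes, not the dataset's code) of the crossover mask the completed line builds: a 0/1 vector cast to booleans that selects which genes to keep from the parent:

import numpy as np

DNA_size = 10
cross_points = np.random.randint(0, 2, DNA_size).astype(np.bool_)
parent = np.random.permutation(DNA_size)
keep = parent[cross_points]     # genes kept from this parent
print(cross_points, keep)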
# -*- coding: utf-8 -*- from __future__ import print_function from __future__ import absolute_import import os import sys import tempfile import warnings import numpy from numpy import testing as npt import tables from tables import Atom, ClosedNodeError, NoSuchNodeError from tables.utils import byteorders from tables.tests import common from tables.tests.common import allequal from tables.tests.common import unittest, test_filename from tables.tests.common import PyTablesTestCase as TestCase from six.moves import range #warnings.resetwarnings() class BasicTestCase(TestCase): """Basic test for all the supported typecodes present in numpy. All of them are included on pytables. """ endiancheck = False def write_read(self, testarray): a = testarray if common.verbose: print('\n', '-=' * 30) print("Running test for array with type '%s'" % a.dtype.type, end=' ') print("for class check:", self.title) # Create an instance of HDF5 file filename = tempfile.mktemp(".h5") try: with tables.open_file(filename, mode="w") as fileh: root = fileh.root # Create the array under root and name 'somearray' if self.endiancheck and a.dtype.kind != "S": b = a.byteswap() b.dtype = a.dtype.newbyteorder() a = b fileh.create_array(root, 'somearray', a, "Some array") # Re-open the file in read-only mode with tables.open_file(filename, mode="r") as fileh: root = fileh.root # Read the saved array b = root.somearray.read() # Compare them. They should be equal. if common.verbose and not allequal(a, b): print("Write and read arrays differ!") # print("Array written:", a) print("Array written shape:", a.shape) print("Array written itemsize:", a.itemsize) print("Array written type:", a.dtype.type) # print("Array read:", b) print("Array read shape:", b.shape) print("Array read itemsize:", b.itemsize) print("Array read type:", b.dtype.type) if a.dtype.kind != "S": print("Array written byteorder:", a.dtype.byteorder) print("Array read byteorder:", b.dtype.byteorder) # Check strictly the array equality self.assertEqual(a.shape, b.shape) self.assertEqual(a.shape, root.somearray.shape) if a.dtype.kind == "S": self.assertEqual(root.somearray.atom.type, "string") else: self.assertEqual(a.dtype.type, b.dtype.type) self.assertEqual(a.dtype.type, root.somearray.atom.dtype.type) abo = byteorders[a.dtype.byteorder] bbo = byteorders[b.dtype.byteorder] if abo != "irrelevant": self.assertEqual(abo, root.somearray.byteorder) self.assertEqual(bbo, sys.byteorder) if self.endiancheck: self.assertNotEqual(bbo, abo) obj = root.somearray self.assertEqual(obj.flavor, 'numpy') self.assertEqual(obj.shape, a.shape) self.assertEqual(obj.ndim, a.ndim) self.assertEqual(obj.chunkshape, None) if a.shape: nrows = a.shape[0] else: # scalar nrows = 1 self.assertEqual(obj.nrows, nrows) self.assertTrue(allequal(a, b)) finally: # Then, delete the file os.remove(filename) def write_read_out_arg(self, testarray): a = testarray if common.verbose: print('\n', '-=' * 30) print("Running test for array with type '%s'" % a.dtype.type, end=' ') print("for class check:", self.title) # Create an instance of HDF5 file filename = tempfile.mktemp(".h5") try: with tables.open_file(filename, mode="w") as fileh: root = fileh.root # Create the array under root and name 'somearray' if self.endiancheck and a.dtype.kind != "S": b = a.byteswap() b.dtype = a.dtype.newbyteorder() a = b fileh.create_array(root, 'somearray', a, "Some array") # Re-open the file in read-only mode with tables.open_file(filename, mode="r") as fileh: root = fileh.root # Read the saved array b = 
numpy.empty_like(a, dtype=a.dtype) root.somearray.read(out=b) # Check strictly the array equality self.assertEqual(a.shape, b.shape) self.assertEqual(a.shape, root.somearray.shape) if a.dtype.kind == "S": self.assertEqual(root.somearray.atom.type, "string") else: self.assertEqual(a.dtype.type, b.dtype.type) self.assertEqual(a.dtype.type, root.somearray.atom.dtype.type) abo = byteorders[a.dtype.byteorder] bbo = byteorders[b.dtype.byteorder] if abo != "irrelevant": self.assertEqual(abo, root.somearray.byteorder) self.assertEqual(abo, bbo) if self.endiancheck: self.assertNotEqual(bbo, sys.byteorder) self.assertTrue(allequal(a, b)) finally: # Then, delete the file os.remove(filename) def write_read_atom_shape_args(self, testarray): a = testarray atom = Atom.from_dtype(a.dtype) shape = a.shape byteorder = None if common.verbose: print('\n', '-=' * 30) print("Running test for array with type '%s'" % a.dtype.type, end=' ') print("for class check:", self.title) # Create an instance of HDF5 file filename = tempfile.mktemp(".h5") try: with tables.open_file(filename, mode="w") as fileh: root = fileh.root # Create the array under root and name 'somearray' if self.endiancheck and a.dtype.kind != "S": b = a.byteswap() b.dtype = a.dtype.newbyteorder() if b.dtype.byteorder in ('>', '<'): byteorder = byteorders[b.dtype.byteorder] a = b ptarr = fileh.create_array(root, 'somearray', atom=atom, shape=shape, title="Some array", # specify the byteorder explicitly # since there is no way to deduce # it in this case byteorder=byteorder) self.assertEqual(shape, ptarr.shape) self.assertEqual(atom, ptarr.atom) ptarr[...] = a # Re-open the file in read-only mode with tables.open_file(filename, mode="r") as fileh: root = fileh.root # Read the saved array b = root.somearray.read() # Compare them. They should be equal. 
if common.verbose and not allequal(a, b): print("Write and read arrays differ!") # print("Array written:", a) print("Array written shape:", a.shape) print("Array written itemsize:", a.itemsize) print("Array written type:", a.dtype.type) # print("Array read:", b) print("Array read shape:", b.shape) print("Array read itemsize:", b.itemsize) print("Array read type:", b.dtype.type) if a.dtype.kind != "S": print("Array written byteorder:", a.dtype.byteorder) print("Array read byteorder:", b.dtype.byteorder) # Check strictly the array equality self.assertEqual(a.shape, b.shape) self.assertEqual(a.shape, root.somearray.shape) if a.dtype.kind == "S": self.assertEqual(root.somearray.atom.type, "string") else: self.assertEqual(a.dtype.type, b.dtype.type) self.assertEqual(a.dtype.type, root.somearray.atom.dtype.type) abo = byteorders[a.dtype.byteorder] bbo = byteorders[b.dtype.byteorder] if abo != "irrelevant": self.assertEqual(abo, root.somearray.byteorder) self.assertEqual(bbo, sys.byteorder) if self.endiancheck: self.assertNotEqual(bbo, abo) obj = root.somearray self.assertEqual(obj.flavor, 'numpy') self.assertEqual(obj.shape, a.shape) self.assertEqual(obj.ndim, a.ndim) self.assertEqual(obj.chunkshape, None) if a.shape: nrows = a.shape[0] else: # scalar nrows = 1 self.assertEqual(obj.nrows, nrows) self.assertTrue(allequal(a, b)) finally: # Then, delete the file os.remove(filename) def setup00_char(self): """Data integrity during recovery (character objects)""" if not isinstance(self.tupleChar, numpy.ndarray): a = numpy.array(self.tupleChar, dtype="S") else: a = self.tupleChar return a def test00_char(self): a = self.setup00_char() self.write_read(a) def test00_char_out_arg(self): a = self.setup00_char() self.write_read_out_arg(a) def test00_char_atom_shape_args(self): a = self.setup00_char() self.write_read_atom_shape_args(a) def test00b_char(self): """Data integrity during recovery (string objects)""" a = self.tupleChar filename = tempfile.mktemp(".h5") try: # Create an instance of HDF5 file with tables.open_file(filename, mode="w") as fileh: fileh.create_array(fileh.root, 'somearray', a, "Some array") # Re-open the file in read-only mode with tables.open_file(filename, mode="r") as fileh: # Read the saved array b = fileh.root.somearray.read() if isinstance(a, bytes): self.assertEqual(type(b), bytes) self.assertEqual(a, b) else: # If a is not a python string, then it should be a list # or ndarray self.assertTrue(type(b) in [list, numpy.ndarray]) finally: # Then, delete the file os.remove(filename) def test00b_char_out_arg(self): """Data integrity during recovery (string objects)""" a = self.tupleChar filename = tempfile.mktemp(".h5") try: # Create an instance of HDF5 file with tables.open_file(filename, mode="w") as fileh: fileh.create_array(fileh.root, 'somearray', a, "Some array") # Re-open the file in read-only mode with tables.open_file(filename, mode="r") as fileh: # Read the saved array b = numpy.empty_like(a) if fileh.root.somearray.flavor != 'numpy': self.assertRaises(TypeError, lambda: fileh.root.somearray.read(out=b)) else: fileh.root.somearray.read(out=b) self.assertTrue(type(b), numpy.ndarray) finally: # Then, delete the file os.remove(filename) def test00b_char_atom_shape_args(self): """Data integrity during recovery (string objects)""" a = self.tupleChar filename = tempfile.mktemp(".h5") try: # Create an instance of HDF5 file with tables.open_file(filename, mode="w") as fileh: nparr = numpy.asarray(a) atom = Atom.from_dtype(nparr.dtype) shape = nparr.shape if nparr.dtype.byteorder 
in ('>', '<'): byteorder = byteorders[nparr.dtype.byteorder] else: byteorder = None ptarr = fileh.create_array(fileh.root, 'somearray', atom=atom, shape=shape, byteorder=byteorder, title="Some array") self.assertEqual(shape, ptarr.shape) self.assertEqual(atom, ptarr.atom) ptarr[...] = a # Re-open the file in read-only mode with tables.open_file(filename, mode="r") as fileh: # Read the saved array b = numpy.empty_like(a) if fileh.root.somearray.flavor != 'numpy': self.assertRaises(TypeError, lambda: fileh.root.somearray.read(out=b)) else: fileh.root.somearray.read(out=b) self.assertTrue(type(b), numpy.ndarray) finally: # Then, delete the file os.remove(filename) def setup01_char_nc(self): """Data integrity during recovery (non-contiguous character objects)""" if not isinstance(self.tupleChar, numpy.ndarray): a = numpy.array(self.tupleChar, dtype="S") else: a = self.tupleChar if a.ndim == 0: b = a.copy() else: b = a[::2] # Ensure that this numpy string is non-contiguous if len(b) > 1: self.assertEqual(b.flags.contiguous, False) return b def test01_char_nc(self): b = self.setup01_char_nc() self.write_read(b) def test01_char_nc_out_arg(self): b = self.setup01_char_nc() self.write_read_out_arg(b) def test01_char_nc_atom_shape_args(self): b = self.setup01_char_nc() self.write_read_atom_shape_args(b) def test02_types(self): """Data integrity during recovery (numerical types)""" typecodes = ['int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64', 'float32', 'float64', 'complex64', 'complex128'] for name in ('float16', 'float96', 'float128', 'complex192', 'complex256'): atomname = name.capitalize() + 'Atom' if hasattr(tables, atomname): typecodes.append(name) for typecode in typecodes: a = numpy.array(self.tupleInt, typecode) self.write_read(a) b = numpy.array(self.tupleInt, typecode) self.write_read_out_arg(b) c = numpy.array(self.tupleInt, typecode) self.write_read_atom_shape_args(c) def test03_types_nc(self): """Data integrity during recovery (non-contiguous numerical types)""" typecodes = ['int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64', 'float32', 'float64', 'complex64', 'complex128', ] for name in ('float16', 'float96', 'float128', 'complex192', 'complex256'): atomname = name.capitalize() + 'Atom' if hasattr(tables, atomname): typecodes.append(name) for typecode in typecodes: a = numpy.array(self.tupleInt, typecode) if a.ndim == 0: b1 = a.copy() b2 = a.copy() b3 = a.copy() else: b1 = a[::2] b2 = a[::2] b3 = a[::2] # Ensure that this array is non-contiguous if len(b1) > 1: self.assertEqual(b1.flags.contiguous, False) if len(b2) > 1: self.assertEqual(b2.flags.contiguous, False) if len(b3) > 1: self.assertEqual(b3.flags.contiguous, False) self.write_read(b1) self.write_read_out_arg(b2) self.write_read_atom_shape_args(b3) class Basic0DOneTestCase(BasicTestCase): # Scalar case title = "Rank-0 case 1" tupleInt = 3 tupleChar = b"3" endiancheck = True class Basic0DTwoTestCase(BasicTestCase): # Scalar case title = "Rank-0 case 2" tupleInt = 33 tupleChar = b"33" endiancheck = True class Basic1DZeroTestCase(BasicTestCase): # This test case is not supported by PyTables (HDF5 limitations) # 1D case title = "Rank-1 case 0" tupleInt = () tupleChar = () endiancheck = False class Basic1DOneTestCase(BasicTestCase): # 1D case title = "Rank-1 case 1" tupleInt = (3,) tupleChar = (b"a",) endiancheck = True class Basic1DTwoTestCase(BasicTestCase): # 1D case title = "Rank-1 case 2" tupleInt = (3, 4) tupleChar = (b"aaa",) endiancheck = True class 
Basic1DThreeTestCase(BasicTestCase): # 1D case title = "Rank-1 case 3" tupleInt = (3, 4, 5) tupleChar = (b"aaa", b"bbb",) endiancheck = True class Basic2DOneTestCase(BasicTestCase): # 2D case title = "Rank-2 case 1" tupleInt = numpy.array(numpy.arange((4)**2)) tupleInt.shape = (4,)*2 tupleChar = numpy.array(["abc"]*3**2, dtype="S3") tupleChar.shape = (3,)*2 endiancheck = True class Basic2DTwoTestCase(BasicTestCase): # 2D case, with a multidimensional dtype title = "Rank-2 case 2" tupleInt = numpy.array(numpy.arange((4)), dtype=(numpy.int_, (4,))) tupleChar = numpy.array(["abc"]*3, dtype=("S3", (3,))) endiancheck = True class Basic10DTestCase(BasicTestCase): # 10D case title = "Rank-10 test" tupleInt = numpy.array(numpy.arange((2)**10)) tupleInt.shape = (2,)*10 tupleChar = numpy.array( ["abc"]*2**10, dtype="S3") tupleChar.shape = (2,)*10 endiancheck = True class Basic32DTestCase(BasicTestCase): # 32D case (maximum) title = "Rank-32 test" tupleInt = numpy.array((32,)) tupleInt.shape = (1,)*32 tupleChar = numpy.array(["121"], dtype="S3") tupleChar.shape = (1,)*32 class ReadOutArgumentTests(common.TempFileMixin, TestCase): def setUp(self): super(ReadOutArgumentTests, self).setUp() self.size = 1000 def create_array(self): array = numpy.arange(self.size, dtype='f8') disk_array = self.h5file.create_array('/', 'array', array) return array, disk_array def test_read_entire_array(self): array, disk_array = self.create_array() out_buffer = numpy.empty((self.size, ), 'f8') disk_array.read(out=out_buffer) numpy.testing.assert_equal(out_buffer, array) def test_read_contiguous_slice1(self): array, disk_array = self.create_array() out_buffer = numpy.arange(self.size, dtype='f8') out_buffer = numpy.random.permutation(out_buffer) out_buffer_orig = out_buffer.copy() start = self.size // 2 disk_array.read(start=start, stop=self.size, out=out_buffer[start:]) numpy.testing.assert_equal(out_buffer[start:], array[start:]) numpy.testing.assert_equal(out_buffer[:start], out_buffer_orig[:start]) def test_read_contiguous_slice2(self): array, disk_array = self.create_array() out_buffer = numpy.arange(self.size, dtype='f8') out_buffer = numpy.random.permutation(out_buffer) out_buffer_orig = out_buffer.copy() start = self.size // 4 stop = self.size - start disk_array.read(start=start, stop=stop, out=out_buffer[start:stop]) numpy.testing.assert_equal(out_buffer[start:stop], array[start:stop]) numpy.testing.assert_equal(out_buffer[:start], out_buffer_orig[:start]) numpy.testing.assert_equal(out_buffer[stop:], out_buffer_orig[stop:]) def test_read_non_contiguous_slice_contiguous_buffer(self): array, disk_array = self.create_array() out_buffer = numpy.empty((self.size // 2, ), dtype='f8') disk_array.read(start=0, stop=self.size, step=2, out=out_buffer) numpy.testing.assert_equal(out_buffer, array[0:self.size:2]) def test_read_non_contiguous_buffer(self): array, disk_array = self.create_array() out_buffer = numpy.empty((self.size, ), 'f8') out_buffer_slice = out_buffer[0:self.size:2] # once Python 2.6 support is dropped, this could change # to assertRaisesRegexp to check exception type and message at once self.assertRaises(ValueError, disk_array.read, 0, self.size, 2, out_buffer_slice) try: disk_array.read(0, self.size, 2, out_buffer_slice) except ValueError as exc: self.assertEqual('output array not C contiguous', str(exc)) def test_buffer_too_small(self): array, disk_array = self.create_array() out_buffer = numpy.empty((self.size // 2, ), 'f8') self.assertRaises(ValueError, disk_array.read, 0, self.size, 1, out_buffer) try: 
disk_array.read(0, self.size, 1, out_buffer) except ValueError as exc: self.assertTrue('output array size invalid, got' in str(exc)) def test_buffer_too_large(self): array, disk_array = self.create_array() out_buffer = numpy.empty((self.size + 1, ), 'f8') self.assertRaises(ValueError, disk_array.read, 0, self.size, 1, out_buffer) try: disk_array.read(0, self.size, 1, out_buffer) except ValueError as exc: self.assertTrue('output array size invalid, got' in str(exc)) class SizeOnDiskInMemoryPropertyTestCase(common.TempFileMixin, TestCase): def setUp(self): super(SizeOnDiskInMemoryPropertyTestCase, self).setUp() self.array_size = (10, 10) self.array = self.h5file.create_array( '/', 'somearray', numpy.zeros(self.array_size, 'i4')) def test_all_zeros(self): self.assertEqual(self.array.size_on_disk, 10 * 10 * 4) self.assertEqual(self.array.size_in_memory, 10 * 10 * 4) class UnalignedAndComplexTestCase(common.TempFileMixin, TestCase): """Basic test for all the supported typecodes present in numpy. Most of them are included on PyTables. """ def setUp(self): super(UnalignedAndComplexTestCase, self).setUp() self.root = self.h5file.root def write_read(self, testArray): if common.verbose: print('\n', '-=' * 30) print("\nRunning test for array with type '%s'" % testArray.dtype.type) # Create the array under root and name 'somearray' a = testArray if self.endiancheck: byteorder = {"little": "big", "big": "little"}[sys.byteorder] else: byteorder = sys.byteorder self.h5file.create_array(self.root, 'somearray', a, "Some array", byteorder=byteorder) if self.reopen: self._reopen() self.root = self.h5file.root # Read the saved array b = self.root.somearray.read() # Get an array to be compared in the correct byteorder c = a.newbyteorder(byteorder) # Compare them. They should be equal. if not allequal(c, b) and common.verbose: print("Write and read arrays differ!") print("Array written:", a) print("Array written shape:", a.shape) print("Array written itemsize:", a.itemsize) print("Array written type:", a.dtype.type) print("Array read:", b) print("Array read shape:", b.shape) print("Array read itemsize:", b.itemsize) print("Array read type:", b.dtype.type) # Check strictly the array equality self.assertEqual(a.shape, b.shape) self.assertEqual(a.shape, self.root.somearray.shape) if a.dtype.byteorder != "|": self.assertEqual(a.dtype, b.dtype) self.assertEqual(a.dtype, self.root.somearray.atom.dtype) self.assertEqual(byteorders[b.dtype.byteorder], sys.byteorder) self.assertEqual(self.root.somearray.byteorder, byteorder) self.assertTrue(allequal(c, b)) def test01_signedShort_unaligned(self): """Checking an unaligned signed short integer array""" r = numpy.rec.array(b'a'*200, formats='i1,f4,i2', shape=10) a = r["f2"] # Ensure that this array is non-aligned self.assertEqual(a.flags.aligned, False) self.assertEqual(a.dtype.type, numpy.int16) self.write_read(a) def test02_float_unaligned(self): """Checking an unaligned single precision array""" r = numpy.rec.array(b'a'*200, formats='i1,f4,i2', shape=10) a = r["f1"] # Ensure that this array is non-aligned self.assertEqual(a.flags.aligned, 0) self.assertEqual(a.dtype.type, numpy.float32) self.write_read(a) def test03_byte_offset(self): """Checking an offsetted byte array""" r =
numpy.arange(100, dtype=numpy.int8)
numpy.arange
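As a brief illustrative aside (an editorial addition, not part of the dataset row above), a minimal self-contained sketch of numpy.arange, the API this row's completion targets; the variable names are arbitrary assumptions:

import numpy

# Single-argument form: integers 0..99, cast to int8 as in the completion above.
values = numpy.arange(100, dtype=numpy.int8)
assert values.shape == (100,) and values.dtype == numpy.int8

# Start/stop/step form: evenly spaced values over the half-open interval [0, 10).
evens = numpy.arange(0, 10, 2)   # array([0, 2, 4, 6, 8])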
"""Tests for the policies in the hbaselines/goal_conditioned subdirectory.""" import unittest import numpy as np import tensorflow as tf from gym.spaces import Box from hbaselines.utils.tf_util import get_trainable_vars from hbaselines.goal_conditioned.td3 import GoalConditionedPolicy as \ TD3GoalConditionedPolicy from hbaselines.goal_conditioned.sac import GoalConditionedPolicy as \ SACGoalConditionedPolicy from hbaselines.algorithms.off_policy import SAC_PARAMS, TD3_PARAMS from hbaselines.algorithms.off_policy import GOAL_CONDITIONED_PARAMS class TestBaseGoalConditionedPolicy(unittest.TestCase): """Test GoalConditionedPolicy in hbaselines/goal_conditioned/base.py.""" def setUp(self): self.policy_params = { 'sess': tf.compat.v1.Session(), 'ac_space': Box(low=-1, high=1, shape=(1,)), 'ob_space': Box(low=-2, high=2, shape=(2,)), 'co_space': Box(low=-3, high=3, shape=(2,)), 'verbose': 0, } self.policy_params.update(TD3_PARAMS.copy()) self.policy_params.update(GOAL_CONDITIONED_PARAMS.copy()) def tearDown(self): self.policy_params['sess'].close() del self.policy_params # Clear the graph. tf.compat.v1.reset_default_graph() def test_store_transition(self): """Check the functionality of the store_transition() method. This method is tested for the following cases: 1. hindsight = False, relative_goals = False 2. hindsight = False, relative_goals = True 3. hindsight = True, relative_goals = False 4. hindsight = True, relative_goals = True """ # =================================================================== # # test case 1 # # =================================================================== # policy_params = self.policy_params.copy() policy_params['relative_goals'] = False policy_params['hindsight'] = False policy_params['subgoal_testing_rate'] = 1 policy_params['meta_period'] = 4 policy_params['batch_size'] = 2 policy = TD3GoalConditionedPolicy(**policy_params) # Initialize the variables of the policy. policy.sess.run(tf.compat.v1.global_variables_initializer()) # Run the initialize method. 
policy.initialize() policy._meta_action = [np.array([5, 5])] for i in range(4): obs0 = np.array([i for _ in range(2)]) context0 = np.array([i for _ in range(3)]) action = np.array([i for _ in range(1)]) reward = i obs1 = np.array([i+1 for _ in range(2)]) context1 = np.array([i for _ in range(3)]) done, is_final_step, evaluate = False, False, False policy.store_transition( obs0=obs0, context0=context0, action=action, reward=reward, obs1=obs1, context1=context1, done=done, is_final_step=is_final_step, evaluate=evaluate, env_num=0, ) obs_t = policy.replay_buffer._obs_t[0] action_t = policy.replay_buffer._action_t[0] reward = policy.replay_buffer._reward_t[0] done = policy.replay_buffer._done_t[0] # check the various attributes self.assertTrue( all(all(obs_t[i] == [np.array([0, 0]), np.array([1, 1]), np.array([2, 2]), np.array([3, 3]), np.array([4, 4])][i]) for i in range(len(obs_t))) ) for i in range(len(action_t)): self.assertTrue( all(all(action_t[i][j] == [[np.array([5, 5]), np.array([5, 5]), np.array([5, 5]), np.array([5, 5]), np.array([5, 5])], [np.array([0]), np.array([1]), np.array([2]), np.array([3])]][i][j]) for j in range(len(action_t[i]))) ) self.assertEqual(reward, [[6], [-5.656854249501219, -4.24264068713107, -2.8284271247638677, -1.4142135624084504]]) self.assertEqual(done, [False, False, False, False]) def test_store_transition_2(self): policy_params = self.policy_params.copy() policy_params['relative_goals'] = True policy_params['hindsight'] = False policy_params['subgoal_testing_rate'] = 1 policy_params['meta_period'] = 4 policy_params['batch_size'] = 2 policy = TD3GoalConditionedPolicy(**policy_params) # Initialize the variables of the policy. policy.sess.run(tf.compat.v1.global_variables_initializer()) # Run the initialize method. policy.initialize() policy._meta_action = [np.array([5, 5])] for i in range(4): obs0 = np.array([i for _ in range(2)]) context0 = np.array([i for _ in range(3)]) action = np.array([i for _ in range(1)]) reward = i obs1 = np.array([i+1 for _ in range(2)]) context1 = np.array([i for _ in range(3)]) done, is_final_step, evaluate = False, False, False policy.store_transition( obs0=obs0, context0=context0, action=action, reward=reward, obs1=obs1, context1=context1, done=done, is_final_step=is_final_step, evaluate=evaluate, env_num=0, ) obs_t = policy.replay_buffer._obs_t[0] action_t = policy.replay_buffer._action_t[0] reward = policy.replay_buffer._reward_t[0] done = policy.replay_buffer._done_t[0] # check the various attributes self.assertTrue( all(all(obs_t[i] == [np.array([0, 0]), np.array([1, 1]), np.array([2, 2]), np.array([3, 3]), np.array([4, 4])][i]) for i in range(len(obs_t))) ) for i in range(len(action_t)): self.assertTrue( all(all(action_t[i][j] == [[np.array([5, 5]), np.array([5, 5]), np.array([5, 5]), np.array([5, 5]), np.array([4, 4])], [np.array([0]), np.array([1]), np.array([2]), np.array([3])]][i][j]) for j in range(len(action_t[i]))) ) self.assertEqual(reward, [[6], [-5.656854249501219, -5.656854249501219, -5.656854249501219, -5.656854249501219]]) self.assertEqual(done, [False, False, False, False]) def test_store_transition_3(self): policy_params = self.policy_params.copy() policy_params['relative_goals'] = False policy_params['hindsight'] = True policy_params['subgoal_testing_rate'] = 1 policy_params['meta_period'] = 4 policy_params['batch_size'] = 2 policy = TD3GoalConditionedPolicy(**policy_params) # Initialize the variables of the policy. policy.sess.run(tf.compat.v1.global_variables_initializer()) # Run the initialize method. 
policy.initialize() policy._meta_action = [np.array([5, 5])] for i in range(4): obs0 = np.array([i for _ in range(2)]) context0 = np.array([i for _ in range(3)]) action = np.array([i for _ in range(1)]) reward = i obs1 = np.array([i+1 for _ in range(2)]) context1 = np.array([i for _ in range(3)]) done, is_final_step, evaluate = False, False, False policy.store_transition( obs0=obs0, context0=context0, action=action, reward=reward, obs1=obs1, context1=context1, done=done, is_final_step=is_final_step, evaluate=evaluate, env_num=0, ) # unchanged sample obs_t = policy.replay_buffer._obs_t[0] action_t = policy.replay_buffer._action_t[0] reward_t = policy.replay_buffer._reward_t[0] done_t = policy.replay_buffer._done_t[0] # check the various attributes self.assertTrue( all(all(obs_t[i] == [np.array([0, 0]), np.array([1, 1]), np.array([2, 2]), np.array([3, 3]), np.array([4, 4])][i]) for i in range(len(obs_t))) ) for i in range(len(action_t)): self.assertTrue( all(all(action_t[i][j] == [[np.array([5, 5]), np.array([5, 5]), np.array([5, 5]), np.array([5, 5]), np.array([5, 5])], [np.array([0]), np.array([1]), np.array([2]), np.array([3])]][i][j]) for j in range(len(action_t[i]))) ) self.assertEqual(reward_t, [[6], [-5.656854249501219, -4.24264068713107, -2.8284271247638677, -1.4142135624084504]]) self.assertEqual(done_t, [False, False, False, False]) # hindsight sample obs_t = policy.replay_buffer._obs_t[1] action_t = policy.replay_buffer._action_t[1] reward_t = policy.replay_buffer._reward_t[1] done_t = policy.replay_buffer._done_t[1] # check the various attributes self.assertTrue( all(all(obs_t[i] == [np.array([0, 0]), np.array([1, 1]), np.array([2, 2]), np.array([3, 3]), np.array([4, 4])][i]) for i in range(len(obs_t))) ) for i in range(len(action_t)): self.assertTrue( all(all(action_t[i][j] == [[np.array([4, 4]), np.array([4, 4]), np.array([4, 4]), np.array([4, 4]), np.array([4, 4])], [
np.array([0])
numpy.array
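A minimal sketch (editorial addition, not dataset content) of numpy.array, the API completed in this row; the variable names loosely echo the prompt above but the values are illustrative only:

import numpy as np

goal = np.array([0])               # 1-D array with one element, shape (1,)
meta_action = np.array([5, 5])     # 1-D array of length 2, like the stored meta action above
obs = np.array([[0, 0], [1, 1]])   # nested lists produce a 2-D array, shape (2, 2)
assert goal.shape == (1,) and meta_action.shape == (2,) and obs.ndim == 2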
import sys import optparse import logging import time from matplotlib.backends.backend_pdf import PdfPages import numpy as np import matplotlib.pyplot as plt import matplotlib as mpl from matplotlib.projections import PolarAxes from matplotlib.ticker import MultipleLocator,FormatStrFormatter import katpoint from katpoint import rad2deg, deg2rad # from katsdpscripts.RTS import git_info,get_git_path def angle_wrap(angle, period=2.0 * np.pi): """Wrap angle into the interval -*period* / 2 ... *period* / 2.""" return (angle + 0.5 * period) % period - 0.5 * period def save_pointingmodel(filebase,model): # Save pointing model to file outfile = file(filebase + '.csv', 'w') outfile.write(model.description) outfile.close() logger.debug("Saved %d-parameter pointing model to '%s'" % (len(model.params), filebase + '.csv')) # These fields contain strings, while the rest of the fields are assumed to contain floats string_fields = ['dataset', 'target', 'timestamp_ut', 'data_unit'] # Create a date/time string for current time now = time.strftime('%Y-%m-%d_%Hh%M') def read_offsetfile(filename): # Load data file in one shot as an array of strings string_fields = ['dataset', 'target', 'timestamp_ut', 'data_unit'] data = np.loadtxt(filename, dtype='string', comments='#', delimiter=', ') # Interpret first non-comment line as header fields = data[0].tolist() # By default, all fields are assumed to contain floats formats = np.tile(np.float, len(fields)) # The string_fields are assumed to contain strings - use data's string type, as it is of sufficient length formats[[fields.index(name) for name in string_fields if name in fields]] = data.dtype # Convert to heterogeneous record array data = np.rec.fromarrays(data[1:].transpose(), dtype=list(zip(fields, formats))) # Load antenna description string from first line of file and construct antenna object from it antenna = katpoint.Antenna(file(filename).readline().strip().partition('=')[2]) # Use the pointing model contained in antenna object as the old model (if not overridden by file) # If the antenna has no model specified, a default null model will be used return data,antenna def referencemetrics(ant,az, el,measured_delta_az, measured_delta_el,delta_azimuth_std=0,delta_elevation_std=0,num_samples_limit=1): """Determine and sky RMS from pointing model.""" text = [] measured_delta_xel = measured_delta_az* np.cos(el) # scale due to sky shape abs_sky_error = np.ma.array(data=measured_delta_xel,mask=False) model_delta_az, model_delta_el = ant.pointing_model.offset(az, el) residual_az = measured_delta_az - model_delta_az residual_el = measured_delta_el - model_delta_el residual_xel = residual_az *
np.cos(el)
numpy.cos
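A minimal sketch (editorial addition, assuming nothing beyond standard NumPy) of np.cos as used in the prompt above, where azimuth offsets are scaled by the cosine of elevation to get cross-elevation offsets; the names echo the prompt but the sample values are arbitrary:

import numpy as np

el = np.radians([0.0, 30.0, 60.0])     # elevation angles converted to radians
scale = np.cos(el)                     # elementwise cosine, approximately [1.0, 0.866, 0.5]
measured_delta_az = np.array([0.01, 0.01, 0.01])
measured_delta_xel = measured_delta_az * scale   # cross-elevation offsets shrink toward zenith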
from __future__ import division, print_function import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt from matplotlib.backends.backend_pdf import PdfPages from mpl_toolkits.axes_grid1 import make_axes_locatable from mpl_toolkits.mplot3d import Axes3D import streakline #import streakline2 import myutils import ffwd from streams import load_stream, vcirc_potential, store_progparams, wrap_angles, progenitor_prior #import streams import astropy import astropy.units as u from astropy.constants import G from astropy.table import Table import astropy.coordinates as coord import gala.coordinates as gc import scipy.linalg as la import scipy.interpolate import scipy.optimize import zscale import itertools import copy import pickle # observers # defaults taken as in astropy v2.0 icrs mw_observer = {'z_sun': 27.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': 0*u.deg, 'galcen_coord': coord.SkyCoord(ra=266.4051*u.deg, dec=-28.936175*u.deg, frame='icrs')} vsun = {'vcirc': 237.8*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s} vsun0 = {'vcirc': 237.8*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s} gc_observer = {'z_sun': 27.*u.pc, 'galcen_distance': 0.1*u.kpc, 'roll': 0*u.deg, 'galcen_coord': coord.SkyCoord(ra=266.4051*u.deg, dec=-28.936175*u.deg, frame='icrs')} vgc = {'vcirc': 0*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s} vgc0 = {'vcirc': 0*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s} MASK = -9999 pparams_fid = [np.log10(0.5e10)*u.Msun, 0.7*u.kpc, np.log10(6.8e10)*u.Msun, 3*u.kpc, 0.28*u.kpc, 430*u.km/u.s, 30*u.kpc, 1.57*u.rad, 1*u.Unit(1), 1*u.Unit(1), 1*u.Unit(1), 0.*u.pc/u.Myr**2, 0.*u.pc/u.Myr**2, 0.*u.pc/u.Myr**2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0*u.deg, 0*u.deg, 0*u.kpc, 0*u.km/u.s, 0*u.mas/u.yr, 0*u.mas/u.yr] #pparams_fid = [0.5e-5*u.Msun, 0.7*u.kpc, 6.8e-5*u.Msun, 3*u.kpc, 0.28*u.kpc, 430*u.km/u.s, 30*u.kpc, 1.57*u.rad, 1*u.Unit(1), 1*u.Unit(1), 1*u.Unit(1), 0.*u.pc/u.Myr**2, 0.*u.pc/u.Myr**2, 0.*u.pc/u.Myr**2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0*u.deg, 0*u.deg, 0*u.kpc, 0*u.km/u.s, 0*u.mas/u.yr, 0*u.mas/u.yr] class Stream(): def __init__(self, x0=[]*u.kpc, v0=[]*u.km/u.s, progenitor={'coords': 'galactocentric', 'observer': {}, 'pm_polar': False}, potential='nfw', pparams=[], minit=2e4*u.Msun, mfinal=2e4*u.Msun, rcl=20*u.pc, dr=0.5, dv=2*u.km/u.s, dt=1*u.Myr, age=6*u.Gyr, nstars=600, integrator='lf'): """Initialize """ setup = {} if progenitor['coords']=='galactocentric': setup['x0'] = x0 setup['v0'] = v0 elif (progenitor['coords']=='equatorial') & (len(progenitor['observer'])!=0): if progenitor['pm_polar']: a = v0[1].value phi = v0[2].value v0[1] = a*np.sin(phi)*u.mas/u.yr v0[2] = a*np.cos(phi)*u.mas/u.yr # convert positions xeq = coord.SkyCoord(x0[0], x0[1], x0[2], **progenitor['observer']) xgal = xeq.transform_to(coord.Galactocentric) setup['x0'] = [xgal.x.to(u.kpc), xgal.y.to(u.kpc), xgal.z.to(u.kpc)]*u.kpc # convert velocities setup['v0'] = gc.vhel_to_gal(xeq.icrs, rv=v0[0], pm=v0[1:], **vsun) #setup['v0'] = [v.to(u.km/u.s) for v in vgal]*u.km/u.s else: raise ValueError('Observer position needed!') setup['dr'] = dr setup['dv'] = dv setup['minit'] = minit setup['mfinal'] = mfinal setup['rcl'] = rcl setup['dt'] = dt setup['age'] = age setup['nstars'] = nstars setup['integrator'] = integrator setup['potential'] = potential setup['pparams'] 
= pparams self.setup = setup self.setup_aux = {} self.fill_intid() self.fill_potid() self.st_params = self.format_input() def fill_intid(self): """Assign integrator ID for a given integrator choice Assumes setup dictionary has an 'integrator' key""" if self.setup['integrator']=='lf': self.setup_aux['iaux'] = 0 elif self.setup['integrator']=='rk': self.setup_aux['iaux'] = 1 def fill_potid(self): """Assign potential ID for a given potential choice Assumes d has a 'potential' key""" if self.setup['potential']=='nfw': self.setup_aux['paux'] = 3 elif self.setup['potential']=='log': self.setup_aux['paux'] = 2 elif self.setup['potential']=='point': self.setup_aux['paux'] = 0 elif self.setup['potential']=='gal': self.setup_aux['paux'] = 4 elif self.setup['potential']=='lmc': self.setup_aux['paux'] = 6 elif self.setup['potential']=='dipole': self.setup_aux['paux'] = 8 elif self.setup['potential']=='quad': self.setup_aux['paux'] = 9 elif self.setup['potential']=='octu': self.setup_aux['paux'] = 10 def format_input(self): """Format input parameters for streakline.stream""" p = [None]*12 # progenitor position p[0] = self.setup['x0'].si.value p[1] = self.setup['v0'].si.value # potential parameters p[2] = [x.si.value for x in self.setup['pparams']] # stream smoothing offsets p[3] = [self.setup['dr'], self.setup['dv'].si.value] # potential and integrator choice p[4] = self.setup_aux['paux'] p[5] = self.setup_aux['iaux'] # number of steps and stream stars p[6] = int(self.setup['age']/self.setup['dt']) p[7] = int(p[6]/self.setup['nstars']) # cluster properties p[8] = self.setup['minit'].si.value p[9] = self.setup['mfinal'].si.value p[10] = self.setup['rcl'].si.value # time step p[11] = self.setup['dt'].si.value return p def generate(self): """Create streakline model for a stream of set parameters""" #xm1, xm2, xm3, xp1, xp2, xp3, vm1, vm2, vm3, vp1, vp2, vp3 = streakline.stream(*p) stream = streakline.stream(*self.st_params) self.leading = {} self.leading['x'] = stream[:3]*u.m self.leading['v'] = stream[6:9]*u.m/u.s self.trailing = {} self.trailing['x'] = stream[3:6]*u.m self.trailing['v'] = stream[9:12]*u.m/u.s def observe(self, mode='cartesian', wangle=0*u.deg, units=[], errors=[], nstars=-1, sequential=False, present=[], logerr=False, observer={'z_sun': 0.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': 0*u.deg, 'galcen_ra': 300*u.deg, 'galcen_dec': 20*u.deg}, vobs={'vcirc': 237.8*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s}, footprint='none', rotmatrix=None): """Observe the stream stream.obs holds all observations stream.err holds all errors""" x = np.concatenate((self.leading['x'].to(u.kpc).value, self.trailing['x'].to(u.kpc).value), axis=1) * u.kpc v = np.concatenate((self.leading['v'].to(u.km/u.s).value, self.trailing['v'].to(u.km/u.s).value), axis=1) * u.km/u.s if mode=='cartesian': # returns coordinates in following order # x(x, y, z), v(vx, vy, vz) if len(units)<2: units.append(self.trailing['x'].unit) units.append(self.trailing['v'].unit) if len(errors)<2: errors.append(0.2*u.kpc) errors.append(2*u.km/u.s) # positions x = x.to(units[0]) ex = np.ones(np.shape(x))*errors[0] ex = ex.to(units[0]) # velocities v = v.to(units[1]) ev = np.ones(np.shape(v))*errors[1] ev = ev.to(units[1]) self.obs = np.concatenate([x,v]).value self.err = np.concatenate([ex,ev]).value elif mode=='equatorial': # assumes coordinates in the following order: # ra, dec, distance, vrad, mualpha, mudelta if len(units)!=6: units = [u.deg, u.deg, u.kpc, u.km/u.s, u.mas/u.yr, u.mas/u.yr] if len(errors)!=6: errors = [0.2*u.deg, 
0.2*u.deg, 0.5*u.kpc, 1*u.km/u.s, 0.2*u.mas/u.yr, 0.2*u.mas/u.yr] # define reference frame xgal = coord.Galactocentric(x, **observer) #frame = coord.Galactocentric(**observer) # convert xeq = xgal.transform_to(coord.ICRS) veq = gc.vgal_to_hel(xeq, v, **vobs) # store coordinates ra, dec, dist = [xeq.ra.to(units[0]).wrap_at(wangle), xeq.dec.to(units[1]), xeq.distance.to(units[2])] vr, mua, mud = [veq[2].to(units[3]), veq[0].to(units[4]), veq[1].to(units[5])] obs = np.hstack([ra, dec, dist, vr, mua, mud]).value obs = np.reshape(obs,(6,-1)) if footprint=='sdss': infoot = dec > -2.5*u.deg obs = obs[:,infoot] if np.allclose(rotmatrix, np.eye(3))!=1: xi, eta = myutils.rotate_angles(obs[0], obs[1], rotmatrix) obs[0] = xi obs[1] = eta self.obs = obs # store errors err = np.ones(np.shape(self.obs)) if logerr: for i in range(6): err[i] *= np.exp(errors[i].to(units[i]).value) else: for i in range(6): err[i] *= errors[i].to(units[i]).value self.err = err self.obsunit = units self.obserror = errors # randomly select nstars from the stream if nstars>-1: if sequential: select = np.linspace(0, np.shape(self.obs)[1], nstars, endpoint=False, dtype=int) else: select = np.random.randint(low=0, high=np.shape(self.obs)[1], size=nstars) self.obs = self.obs[:,select] self.err = self.err[:,select] # include only designated dimensions if len(present)>0: self.obs = self.obs[present] self.err = self.err[present] self.obsunit = [ self.obsunit[x] for x in present ] self.obserror = [ self.obserror[x] for x in present ] def prog_orbit(self): """Generate progenitor orbital history""" orbit = streakline.orbit(self.st_params[0], self.st_params[1], self.st_params[2], self.st_params[4], self.st_params[5], self.st_params[6], self.st_params[11], -1) self.orbit = {} self.orbit['x'] = orbit[:3]*u.m self.orbit['v'] = orbit[3:]*u.m/u.s def project(self, name, N=1000, nbatch=-1): """Project the stream from observed to native coordinates""" poly = np.loadtxt("../data/{0:s}_all.txt".format(name)) self.streak = np.poly1d(poly) self.streak_x = np.linspace(np.min(self.obs[0])-2, np.max(self.obs[0])+2, N) self.streak_y = np.polyval(self.streak, self.streak_x) self.streak_b = np.zeros(N) self.streak_l = np.zeros(N) pdot = np.polyder(poly) for i in range(N): length = scipy.integrate.quad(self._delta_path, self.streak_x[0], self.streak_x[i], args=(pdot,)) self.streak_l[i] = length[0] XB = np.transpose(np.vstack([self.streak_x, self.streak_y])) n = np.shape(self.obs)[1] if nbatch<0: nstep = 0 nbatch = -1 else: nstep = np.int(n/nbatch) i1 = 0 i2 = nbatch for i in range(nstep): XA = np.transpose(np.vstack([np.array(self.obs[0][i1:i2]), np.array(self.obs[1][i1:i2])])) self.emdist(XA, XB, i1=i1, i2=i2) i1 += nbatch i2 += nbatch XA = np.transpose(np.vstack([np.array(self.catalog['ra'][i1:]), np.array(self.catalog['dec'][i1:])])) self.emdist(XA, XB, i1=i1, i2=n) #self.catalog.write("../data/{0:s}_footprint_catalog.txt".format(self.name), format='ascii.commented_header') def emdist(self, XA, XB, i1=0, i2=-1): """""" distances = scipy.spatial.distance.cdist(XA, XB) self.catalog['b'][i1:i2] = np.min(distances, axis=1) imin = np.argmin(distances, axis=1) self.catalog['b'][i1:i2][self.catalog['dec'][i1:i2]<self.streak_y[imin]] *= -1 self.catalog['l'][i1:i2] = self.streak_l[imin] def _delta_path(self, x, pdot): """Return integrand for calculating length of a path along a polynomial""" return np.sqrt(1 + np.polyval(pdot, x)**2) def plot(self, mode='native', fig=None, color='k', **kwargs): """Plot stream""" # Plotting if fig==None: plt.close() plt.figure() 
ax = plt.axes([0.12,0.1,0.8,0.8]) if mode=='native': # Color setup cindices = np.arange(self.setup['nstars']) # colors of stream particles nor = mpl.colors.Normalize(vmin=0, vmax=self.setup['nstars']) # colormap normalization plt.plot(self.setup['x0'][0].to(u.kpc).value, self.setup['x0'][2].to(u.kpc).value, 'wo', ms=10, mew=2, zorder=3) plt.scatter(self.trailing['x'][0].to(u.kpc).value, self.trailing['x'][2].to(u.kpc).value, s=30, c=cindices, cmap='winter', norm=nor, marker='o', edgecolor='none', lw=0, alpha=0.1) plt.scatter(self.leading['x'][0].to(u.kpc).value, self.leading['x'][2].to(u.kpc).value, s=30, c=cindices, cmap='autumn', norm=nor, marker='o', edgecolor='none', lw=0, alpha=0.1) plt.xlabel("X (kpc)") plt.ylabel("Z (kpc)") elif mode=='observed': plt.subplot(221) plt.plot(self.obs[0], self.obs[1], 'o', color=color, **kwargs) plt.xlabel("RA") plt.ylabel("Dec") plt.subplot(223) plt.plot(self.obs[0], self.obs[2], 'o', color=color, **kwargs) plt.xlabel("RA") plt.ylabel("Distance") plt.subplot(222) plt.plot(self.obs[3], self.obs[4], 'o', color=color, **kwargs) plt.xlabel("V$_r$") plt.ylabel("$\mu\\alpha$") plt.subplot(224) plt.plot(self.obs[3], self.obs[5], 'o', color=color, **kwargs) plt.xlabel("V$_r$") plt.ylabel("$\mu\delta$") plt.tight_layout() #plt.minorticks_on() def read(self, fname, units={'x': u.kpc, 'v': u.km/u.s}): """Read stream star positions from a file""" t = np.loadtxt(fname).T n = np.shape(t)[1] ns = int((n-1)/2) self.setup['nstars'] = ns # progenitor self.setup['x0'] = t[:3,0] * units['x'] self.setup['v0'] = t[3:,0] * units['v'] # leading tail self.leading = {} self.leading['x'] = t[:3,1:ns+1] * units['x'] self.leading['v'] = t[3:,1:ns+1] * units['v'] # trailing tail self.trailing = {} self.trailing['x'] = t[:3,ns+1:] * units['x'] self.trailing['v'] = t[3:,ns+1:] * units['v'] def save(self, fname): """Save stream star positions to a file""" # define table t = Table(names=('x', 'y', 'z', 'vx', 'vy', 'vz')) # add progenitor info t.add_row(np.ravel([self.setup['x0'].to(u.kpc).value, self.setup['v0'].to(u.km/u.s).value])) # add leading tail infoobsmode tt = Table(np.concatenate((self.leading['x'].to(u.kpc).value, self.leading['v'].to(u.km/u.s).value)).T, names=('x', 'y', 'z', 'vx', 'vy', 'vz')) t = astropy.table.vstack([t,tt]) # add trailing tail info tt = Table(np.concatenate((self.trailing['x'].to(u.kpc).value, self.trailing['v'].to(u.km/u.s).value)).T, names=('x', 'y', 'z', 'vx', 'vy', 'vz')) t = astropy.table.vstack([t,tt]) # save to file t.write(fname, format='ascii.commented_header') # make a streakline model of a stream def stream_model(name='gd1', pparams0=pparams_fid, dt=0.2*u.Myr, rotmatrix=np.eye(3), graph=False, graphsave=False, observer=mw_observer, vobs=vsun, footprint='', obsmode='equatorial'): """Create a streakline model of a stream baryonic component as in kupper+2015: 3.4e10*u.Msun, 0.7*u.kpc, 1e11*u.Msun, 6.5*u.kpc, 0.26*u.kpc""" # vary progenitor parameters mock = pickle.load(open('../data/mock_{}.params'.format(name), 'rb')) for i in range(3): mock['x0'][i] += pparams0[26+i] mock['v0'][i] += pparams0[29+i] # vary potential parameters potential = 'octu' pparams = pparams0[:26] #print(pparams[0]) pparams[0] = (10**pparams0[0].value)*pparams0[0].unit pparams[2] = (10**pparams0[2].value)*pparams0[2].unit #pparams[0] = pparams0[0]*1e15 #pparams[2] = pparams0[2]*1e15 #print(pparams[0]) # adjust circular velocity in this halo vobs['vcirc'] = vcirc_potential(observer['galcen_distance'], pparams=pparams) # create a model stream with these parameters params = 
{'generate': {'x0': mock['x0'], 'v0': mock['v0'], 'progenitor': {'coords': 'equatorial', 'observer': mock['observer'], 'pm_polar': False}, 'potential': potential, 'pparams': pparams, 'minit': mock['mi'], 'mfinal': mock['mf'], 'rcl': 20*u.pc, 'dr': 0., 'dv': 0*u.km/u.s, 'dt': dt, 'age': mock['age'], 'nstars': 400, 'integrator': 'lf'}, 'observe': {'mode': mock['obsmode'], 'wangle': mock['wangle'], 'nstars':-1, 'sequential':True, 'errors': [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s, 0.5*u.mas/u.yr, 0.5*u.mas/u.yr], 'present': [0,1,2,3,4,5], 'observer': mock['observer'], 'vobs': mock['vobs'], 'footprint': mock['footprint'], 'rotmatrix': rotmatrix}} stream = Stream(**params['generate']) stream.generate() stream.observe(**params['observe']) ################################ # Plot observed stream and model if graph: observed = load_stream(name) Ndim = np.shape(observed.obs)[0] modcol = 'k' obscol = 'orange' ylabel = ['Dec (deg)', 'Distance (kpc)', 'Radial velocity (km/s)'] plt.close() fig, ax = plt.subplots(1, 3, figsize=(12,4)) for i in range(3): plt.sca(ax[i]) plt.gca().invert_xaxis() plt.xlabel('R.A. (deg)') plt.ylabel(ylabel[i]) plt.plot(observed.obs[0], observed.obs[i+1], 's', color=obscol, mec='none', ms=8, label='Observed stream') plt.plot(stream.obs[0], stream.obs[i+1], 'o', color=modcol, mec='none', ms=4, label='Fiducial model') if i==0: plt.legend(frameon=False, handlelength=0.5, fontsize='small') plt.tight_layout() if graphsave: plt.savefig('../plots/mock_observables_{}_p{}.png'.format(name, potential), dpi=150) return stream def progenitor_params(n): """Return progenitor parameters for a given stream""" if n==-1: age = 1.6*u.Gyr mi = 1e4*u.Msun mf = 2e-1*u.Msun x0, v0 = gd1_coordinates(observer=mw_observer) elif n==-2: age = 2.7*u.Gyr mi = 1e5*u.Msun mf = 2e4*u.Msun x0, v0 = pal5_coordinates(observer=mw_observer, vobs=vsun0) elif n==-3: age = 3.5*u.Gyr mi = 5e4*u.Msun mf = 2e-1*u.Msun x0, v0 = tri_coordinates(observer=mw_observer) elif n==-4: age = 2*u.Gyr mi = 2e4*u.Msun mf = 2e-1*u.Msun x0, v0 = atlas_coordinates(observer=mw_observer) out = {'x0': x0, 'v0': v0, 'age': age, 'mi': mi, 'mf': mf} return out def gal2eq(x, v, observer=mw_observer, vobs=vsun0): """""" # define reference frame xgal = coord.Galactocentric(np.array(x)[:,np.newaxis]*u.kpc, **observer) # convert xeq = xgal.transform_to(coord.ICRS) veq = gc.vgal_to_hel(xeq, np.array(v)[:,np.newaxis]*u.km/u.s, **vobs) # store coordinates units = [u.deg, u.deg, u.kpc, u.km/u.s, u.mas/u.yr, u.mas/u.yr] xobs = [xeq.ra.to(units[0]), xeq.dec.to(units[1]), xeq.distance.to(units[2])] vobs = [veq[2].to(units[3]), veq[0].to(units[4]), veq[1].to(units[5])] return(xobs, vobs) def gd1_coordinates(observer=mw_observer): """Approximate GD-1 progenitor coordinates""" x = coord.SkyCoord(ra=154.377*u.deg, dec=41.5309*u.deg, distance=8.2*u.kpc, **observer) x_ = x.galactocentric x0 = [x_.x.value, x_.y.value, x_.z.value] v0 = [-90, -250, -120] return (x0, v0) def pal5_coordinates(observer=mw_observer, vobs=vsun0): """Pal5 coordinates""" # sdss ra = 229.0128*u.deg dec = -0.1082*u.deg # bob's rrlyrae d = 21.7*u.kpc # harris #d = 23.2*u.kpc # odenkirchen 2002 vr = -58.7*u.km/u.s # fritz & kallivayalil 2015 mua = -2.296*u.mas/u.yr mud = -2.257*u.mas/u.yr d = 24*u.kpc x = coord.SkyCoord(ra=ra, dec=dec, distance=d, **observer) x0 = x.galactocentric v0 = gc.vhel_to_gal(x.icrs, rv=vr, pm=[mua, mud], **vobs).to(u.km/u.s) return ([x0.x.value, x0.y.value, x0.z.value], v0.value.tolist()) def tri_coordinates(observer=mw_observer): """Approximate Triangulum 
progenitor coordinates""" x = coord.SkyCoord(ra=22.38*u.deg, dec=30.26*u.deg, distance=33*u.kpc, **observer) x_ = x.galactocentric x0 = [x_.x.value, x_.y.value, x_.z.value] v0 = [-40, 155, 155] return (x0, v0) def atlas_coordinates(observer=mw_observer): """Approximate ATLAS progenitor coordinates""" x = coord.SkyCoord(ra=20*u.deg, dec=-27*u.deg, distance=20*u.kpc, **observer) x_ = x.galactocentric x0 = [x_.x.value, x_.y.value, x_.z.value] v0 = [40, 150, -120] return (x0, v0) # great circle orientation def find_greatcircle(stream=None, name='gd1', pparams=pparams_fid, dt=0.2*u.Myr, save=True, graph=True): """Save rotation matrix for a stream model""" if stream==None: stream = stream_model(name, pparams0=pparams, dt=dt) # find the pole ra = np.radians(stream.obs[0]) dec = np.radians(stream.obs[1]) rx = np.cos(ra) * np.cos(dec) ry = np.sin(ra) * np.cos(dec) rz = np.sin(dec) r = np.column_stack((rx, ry, rz)) # fit the plane x0 = np.array([0, 1, 0]) lsq = scipy.optimize.minimize(wfit_plane, x0, args=(r,)) x0 = lsq.x/np.linalg.norm(lsq.x) ra0 = np.arctan2(x0[1], x0[0]) dec0 = np.arcsin(x0[2]) ra0 += np.pi dec0 = np.pi/2 - dec0 # euler rotations R0 = myutils.rotmatrix(np.degrees(-ra0), 2) R1 = myutils.rotmatrix(np.degrees(dec0), 1) R2 = myutils.rotmatrix(0, 2) R = np.dot(R2, np.matmul(R1, R0)) xi, eta = myutils.rotate_angles(stream.obs[0], stream.obs[1], R) # put xi = 50 at the beginning of the stream xi[xi>180] -= 360 xi += 360 xi0 = np.min(xi) - 50 R2 = myutils.rotmatrix(-xi0, 2) R = np.dot(R2, np.matmul(R1, R0)) xi, eta = myutils.rotate_angles(stream.obs[0], stream.obs[1], R) if save: np.save('../data/rotmatrix_{}'.format(name), R) f = open('../data/mock_{}.params'.format(name), 'rb') mock = pickle.load(f) mock['rotmatrix'] = R f.close() f = open('../data/mock_{}.params'.format(name), 'wb') pickle.dump(mock, f) f.close() if graph: plt.close() fig, ax = plt.subplots(1,2,figsize=(10,5)) plt.sca(ax[0]) plt.plot(stream.obs[0], stream.obs[1], 'ko') plt.xlabel('R.A. 
(deg)') plt.ylabel('Dec (deg)') plt.sca(ax[1]) plt.plot(xi, eta, 'ko') plt.xlabel('$\\xi$ (deg)') plt.ylabel('$\\eta$ (deg)') plt.ylim(-5, 5) plt.tight_layout() plt.savefig('../plots/gc_orientation_{}.png'.format(name)) return R def wfit_plane(x, r, p=None): """Fit a plane to a set of 3d points""" Np = np.shape(r)[0] if np.any(p)==None: p = np.ones(Np) Q = np.zeros((3,3)) for i in range(Np): Q += p[i]**2 * np.outer(r[i], r[i]) x = x/np.linalg.norm(x) lsq = np.inner(x, np.inner(Q, x)) return lsq # observed streams #def load_stream(n): #"""Load stream observations""" #if n==-1: #observed = load_gd1(present=[0,1,2,3]) #elif n==-2: #observed = load_pal5(present=[0,1,2,3]) #elif n==-3: #observed = load_tri(present=[0,1,2,3]) #elif n==-4: #observed = load_atlas(present=[0,1,2,3]) #return observed def endpoints(name): """""" stream = load_stream(name) # find endpoints amin = np.argmin(stream.obs[0]) amax = np.argmax(stream.obs[0]) ra = np.array([stream.obs[0][i] for i in [amin, amax]]) dec = np.array([stream.obs[1][i] for i in [amin, amax]]) f = open('../data/mock_{}.params'.format(name), 'rb') mock = pickle.load(f) # rotate endpoints R = mock['rotmatrix'] xi, eta = myutils.rotate_angles(ra, dec, R) #xi, eta = myutils.rotate_angles(stream.obs[0], stream.obs[1], R) mock['ra_range'] = ra mock['xi_range'] = xi #np.percentile(xi, [10,90]) f.close() f = open('../data/mock_{}.params'.format(name), 'wb') pickle.dump(mock, f) f.close() def load_pal5(present, nobs=50, potential='gal'): """""" if len(present)==2: t = Table.read('../data/pal5_members.txt', format='ascii.commented_header') dist = 21.7 deltadist = 0.7 np.random.seed(34) t = t[np.random.randint(0, high=len(t), size=nobs)] nobs = len(t) d = np.random.randn(nobs)*deltadist + dist obs = np.array([t['ra'], t['dec'], d]) obsunit = [u.deg, u.deg, u.kpc] err = np.repeat( np.array([2e-4, 2e-4, 0.7]), nobs ).reshape(3, -1) obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc] if len(present)==3: #t = Table.read('../data/pal5_kinematic.txt', format='ascii.commented_header') t = Table.read('../data/pal5_allmembers.txt', format='ascii.commented_header') obs = np.array([t['ra'], t['dec'], t['d']]) obsunit = [u.deg, u.deg, u.kpc] err = np.array([t['err_ra'], t['err_dec'], t['err_d']]) obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc] if len(present)==4: #t = Table.read('../data/pal5_kinematic.txt', format='ascii.commented_header') t = Table.read('../data/pal5_allmembers.txt', format='ascii.commented_header') obs = np.array([t['ra'], t['dec'], t['d'], t['vr']]) obsunit = [u.deg, u.deg, u.kpc, u.km/u.s] err = np.array([t['err_ra'], t['err_dec'], t['err_d'], t['err_vr']]) obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s] observed = Stream(potential=potential) observed.obs = obs observed.obsunit = obsunit observed.err = err observed.obserror = obserr return observed def load_gd1(present, nobs=50, potential='gal'): """""" if len(present)==3: t = Table.read('../data/gd1_members.txt', format='ascii.commented_header') dist = 0 deltadist = 0.5 np.random.seed(34) t = t[np.random.randint(0, high=len(t), size=nobs)] nobs = len(t) d = np.random.randn(nobs)*deltadist + dist d += t['l']*0.04836 + 9.86 obs = np.array([t['ra'], t['dec'], d]) obsunit = [u.deg, u.deg, u.kpc] err = np.repeat( np.array([2e-4, 2e-4, 0.5]), nobs ).reshape(3, -1) obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc] if len(present)==4: #t = Table.read('../data/gd1_kinematic.txt', format='ascii.commented_header') t = Table.read('../data/gd1_allmembers.txt', format='ascii.commented_header') obs = np.array([t['ra'], 
t['dec'], t['d'], t['vr']]) obsunit = [u.deg, u.deg, u.kpc, u.km/u.s] err = np.array([t['err_ra'], t['err_dec'], t['err_d'], t['err_vr']]) obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s] ind = np.all(obs!=MASK, axis=0) observed = Stream(potential=potential) observed.obs = obs#[np.array(present)] observed.obsunit = obsunit observed.err = err#[np.array(present)] observed.obserror = obserr return observed def load_tri(present, nobs=50, potential='gal'): """""" if len(present)==4: t = Table.read('../data/tri_allmembers.txt', format='ascii.commented_header') obs = np.array([t['ra'], t['dec'], t['d'], t['vr']]) obsunit = [u.deg, u.deg, u.kpc, u.km/u.s] err = np.array([t['err_ra'], t['err_dec'], t['err_d'], t['err_vr']]) obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s] if len(present)==3: t = Table.read('../data/tri_allmembers.txt', format='ascii.commented_header') obs = np.array([t['ra'], t['dec'], t['d']]) obsunit = [u.deg, u.deg, u.kpc] err = np.array([t['err_ra'], t['err_dec'], t['err_d']]) obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc] ind = np.all(obs!=MASK, axis=0) observed = Stream(potential=potential) observed.obs = obs observed.obsunit = obsunit observed.err = err observed.obserror = obserr return observed def load_atlas(present, nobs=50, potential='gal'): """""" ra, dec = atlas_track() n = np.size(ra) d = np.random.randn(n)*2 + 20 obs = np.array([ra, dec, d]) obsunit = [u.deg, u.deg, u.kpc] err = np.array([np.ones(n)*0.05, np.ones(n)*0.05, np.ones(n)*2]) obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s] observed = Stream(potential=potential) observed.obs = obs observed.obsunit = obsunit observed.err = err observed.obserror = obserr return observed def atlas_track(): """""" ra0, dec0 = np.radians(77.16), np.radians(46.92 - 90) # euler rotations D = np.array([[np.cos(ra0), np.sin(ra0), 0], [-np.sin(ra0), np.cos(ra0), 0], [0, 0, 1]]) C = np.array([[np.cos(dec0), 0, np.sin(dec0)], [0, 1, 0], [-np.sin(dec0), 0, np.cos(dec0)]]) B = np.diag(np.ones(3)) R = np.dot(B, np.dot(C, D)) Rinv = np.linalg.inv(R) l0 = np.linspace(0, 2*np.pi, 500) b0 = np.zeros(500) xeq, yeq, zeq = myutils.eq2car(l0, b0) eq = np.column_stack((xeq, yeq, zeq)) eq_rot = np.zeros(np.shape(eq)) for i in range(np.size(l0)): eq_rot[i] = np.dot(Rinv, eq[i]) l0_rot, b0_rot = myutils.car2eq(eq_rot[:, 0], eq_rot[:, 1], eq_rot[:, 2]) ra_s, dec_s = np.degrees(l0_rot), np.degrees(b0_rot) ind_s = (ra_s>17) & (ra_s<30) ra_s = ra_s[ind_s] dec_s = dec_s[ind_s] return (ra_s, dec_s) def fancy_name(n): """Return nicely formatted stream name""" names = {-1: 'GD-1', -2: 'Palomar 5', -3: 'Triangulum', -4: 'ATLAS'} return names[n] # model parameters def get_varied_pars(vary): """Return indices and steps for a preset of varied parameters, and a label for varied parameters Parameters: vary - string setting the parameter combination to be varied, options: 'potential', 'progenitor', 'halo', or a list thereof""" if type(vary) is not list: vary = [vary] Nt = len(vary) vlabel = '_'.join(vary) pid = [] dp = [] for v in vary: o1, o2 = get_varied_bytype(v) pid += o1 dp += o2 return (pid, dp, vlabel) def get_varied_bytype(vary): """Get varied parameter of a particular type""" if vary=='potential': pid = [5,6,8,10,11] dp = [20*u.km/u.s, 2*u.kpc, 0.05*u.Unit(1), 0.05*u.Unit(1), 0.4e11*u.Msun] elif vary=='bary': pid = [0,1,2,3,4] # gd1 dp = [1e-1*u.Msun, 0.005*u.kpc, 1e-1*u.Msun, 0.002*u.kpc, 0.002*u.kpc] ## atlas & triangulum #dp = [0.4e5*u.Msun, 0.0005*u.kpc, 0.5e6*u.Msun, 0.0002*u.kpc, 0.002*u.kpc] # pal5 dp = [1e-2*u.Msun, 
0.000005*u.kpc, 1e-2*u.Msun, 0.000002*u.kpc, 0.00002*u.kpc] dp = [1e-7*u.Msun, 0.5*u.kpc, 1e-7*u.Msun, 0.5*u.kpc, 0.5*u.kpc] dp = [1e-2*u.Msun, 0.5*u.kpc, 1e-2*u.Msun, 0.5*u.kpc, 0.5*u.kpc] elif vary=='halo': pid = [5,6,8,10] dp = [20*u.km/u.s, 2*u.kpc, 0.05*u.Unit(1), 0.05*u.Unit(1)] dp = [35*u.km/u.s, 2.9*u.kpc, 0.05*u.Unit(1), 0.05*u.Unit(1)] elif vary=='progenitor': pid = [26,27,28,29,30,31] dp = [1*u.deg, 1*u.deg, 0.5*u.kpc, 20*u.km/u.s, 0.3*u.mas/u.yr, 0.3*u.mas/u.yr] elif vary=='dipole': pid = [11,12,13] #dp = [1e-11*u.Unit(1), 1e-11*u.Unit(1), 1e-11*u.Unit(1)] dp = [0.05*u.pc/u.Myr**2, 0.05*u.pc/u.Myr**2, 0.05*u.pc/u.Myr**2] elif vary=='quad': pid = [14,15,16,17,18] dp = [0.5*u.Gyr**-2 for x in range(5)] elif vary=='octu': pid = [19,20,21,22,23,24,25] dp = [0.001*u.Gyr**-2*u.kpc**-1 for x in range(7)] else: pid = [] dp = [] return (pid, dp) def get_parlabel(pid): """Return label for a list of parameter ids Parameter: pid - list of parameter ids""" master = ['log $M_b$', '$a_b$', 'log $M_d$', '$a_d$', '$b_d$', '$V_h$', '$R_h$', '$\phi$', '$q_x$', '$q_y$', '$q_z$', '$a_{1,-1}$', '$a_{1,0}$', '$a_{1,1}$', '$a_{2,-2}$', '$a_{2,-1}$', '$a_{2,0}$', '$a_{2,1}$', '$a_{2,2}$', '$a_{3,-3}$', '$a_{3,-2}$', '$a_{3,-1}$', '$a_{3,0}$', '$a_{3,1}$', '$a_{3,2}$', '$a_{3,3}$', '$RA_p$', '$Dec_p$', '$d_p$', '$V_{r_p}$', '$\mu_{\\alpha_p}$', '$\mu_{\delta_p}$', ] master_units = ['dex', 'kpc', 'dex', 'kpc', 'kpc', 'km/s', 'kpc', 'rad', '', '', '', 'pc/Myr$^2$', 'pc/Myr$^2$', 'pc/Myr$^2$', 'Gyr$^{-2}$', 'Gyr$^{-2}$', 'Gyr$^{-2}$', 'Gyr$^{-2}$', 'Gyr$^{-2}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'deg', 'deg', 'kpc', 'km/s', 'mas/yr', 'mas/yr', ] if type(pid) is list: labels = [] units = [] for i in pid: labels += [master[i]] units += [master_units[i]] else: labels = master[pid] units = master_units[pid] return (labels, units) def get_steps(Nstep=50, log=False): """Return deltax steps in both directions Paramerets: Nstep - number of steps in one direction (default: 50) log - if True, steps are logarithmically spaced (default: False)""" if log: step = np.logspace(-10, 1, Nstep) else: step = np.linspace(0.1, 10, Nstep) step = np.concatenate([-step[::-1], step]) return (Nstep, step) def lmc_position(): """""" ra = 80.8939*u.deg dec = -69.7561*u.deg dm = 18.48 d = 10**(1 + dm/5)*u.pc x = coord.SkyCoord(ra=ra, dec=dec, distance=d) xgal = [x.galactocentric.x.si, x.galactocentric.y.si, x.galactocentric.z.si] print(xgal) def lmc_properties(): """""" # penarrubia 2016 mass = 2.5e11*u.Msun ra = 80.8939*u.deg dec = -69.7561*u.deg dm = 18.48 d = 10**(1 + dm/5)*u.pc c1 = coord.SkyCoord(ra=ra, dec=dec, distance=d) cgal1 = c1.transform_to(coord.Galactocentric) xgal = np.array([cgal1.x.to(u.kpc).value, cgal1.y.to(u.kpc).value, cgal1.z.to(u.kpc).value])*u.kpc return (mass, xgal) # fit bspline to a stream model def fit_bspline(n, pparams=pparams_fid, dt=0.2*u.Myr, align=False, save='', graph=False, graphsave='', fiducial=False): """Fit bspline to a stream model and save to file""" Ndim = 6 fits = [None]*(Ndim-1) if align: rotmatrix = np.load('../data/rotmatrix_{}.npy'.format(n)) else: rotmatrix = None stream = stream_model(n, pparams0=pparams, dt=dt, rotmatrix=rotmatrix) Nobs = 10 k = 3 isort = np.argsort(stream.obs[0]) ra = np.linspace(np.min(stream.obs[0])*1.05, np.max(stream.obs[0])*0.95, Nobs) t = np.r_[(stream.obs[0][isort][0],)*(k+1), ra, (stream.obs[0][isort][-1],)*(k+1)] for j 
in range(Ndim-1): fits[j] = scipy.interpolate.make_lsq_spline(stream.obs[0][isort], stream.obs[j+1][isort], t, k=k) if len(save)>0: np.savez('../data/{:s}'.format(save), fits=fits) if graph: xlims, ylims = get_stream_limits(n, align) ylabel = ['R.A. (deg)', 'Dec (deg)', 'd (kpc)', '$V_r$ (km/s)', '$\mu_\\alpha$ (mas/yr)', '$\mu_\delta$ (mas/yr)'] if align: ylabel[:2] = ['$\\xi$ (deg)', '$\\eta$ (deg)'] if fiducial: stream_fid = stream_model(n, pparams0=pparams_fid, dt=dt, rotmatrix=rotmatrix) fidsort = np.argsort(stream_fid.obs[0]) ra = np.linspace(np.min(stream_fid.obs[0])*1.05, np.max(stream_fid.obs[0])*0.95, Nobs) tfid = np.r_[(stream_fid.obs[0][fidsort][0],)*(k+1), ra, (stream_fid.obs[0][fidsort][-1],)*(k+1)] llabel = 'b-spline fit' else: llabel = '' plt.close() fig, ax = plt.subplots(2,5,figsize=(20,5), sharex=True, gridspec_kw = {'height_ratios':[3, 1]}) for i in range(Ndim-1): plt.sca(ax[0][i]) plt.plot(stream.obs[0], stream.obs[i+1], 'ko') plt.plot(stream.obs[0][isort], fits[i](stream.obs[0][isort]), 'r-', lw=2, label=llabel) if fiducial: fits_fid = scipy.interpolate.make_lsq_spline(stream_fid.obs[0][fidsort], stream_fid.obs[i+1][fidsort], tfid, k=k) plt.plot(stream_fid.obs[0], stream_fid.obs[i+1], 'wo', mec='k', alpha=0.1) plt.plot(stream_fid.obs[0][fidsort], fits_fid(stream_fid.obs[0][fidsort]), 'b-', lw=2, label='Fiducial') plt.ylabel(ylabel[i+1]) plt.xlim(xlims[0], xlims[1]) plt.ylim(ylims[i][0], ylims[i][1]) plt.sca(ax[1][i]) if fiducial: yref = fits_fid(stream.obs[0]) ycolor = 'b' else: yref = fits[i](stream.obs[0]) ycolor = 'r' plt.axhline(0, color=ycolor, lw=2) if fiducial: plt.plot(stream.obs[0][isort], stream.obs[i+1][isort] - stream_fid.obs[i+1][fidsort], 'wo', mec='k', alpha=0.1) plt.plot(stream.obs[0], stream.obs[i+1] - yref, 'ko') if fiducial: fits_diff = scipy.interpolate.make_lsq_spline(stream.obs[0][isort], stream.obs[i+1][isort] - stream_fid.obs[i+1][fidsort], t, k=k) plt.plot(stream.obs[0][isort], fits_diff(stream.obs[0][isort]), 'r--') plt.plot(stream.obs[0][isort], fits[i](stream.obs[0][isort]) - yref[isort], 'r-', lw=2, label=llabel) plt.xlabel(ylabel[0]) plt.ylabel('$\Delta$ {}'.format(ylabel[i+1].split(' ')[0])) if fiducial: plt.sca(ax[0][Ndim-2]) plt.legend(fontsize='small') plt.tight_layout() if len(graphsave)>0: plt.savefig('../plots/{:s}.png'.format(graphsave)) def fitbyt_bspline(n, pparams=pparams_fid, dt=0.2*u.Myr, align=False, save='', graph=False, graphsave='', fiducial=False): """Fit each tail individually""" Ndim = 6 fits = [None]*(Ndim-1) if align: rotmatrix = np.load('../data/rotmatrix_{}.npy'.format(n)) else: rotmatrix = None stream = stream_model(n, pparams0=pparams, dt=dt, rotmatrix=rotmatrix) Nobs = 10 k = 3 isort = np.argsort(stream.obs[0]) ra = np.linspace(np.min(stream.obs[0])*1.05, np.max(stream.obs[0])*0.95, Nobs) t = np.r_[(stream.obs[0][isort][0],)*(k+1), ra, (stream.obs[0][isort][-1],)*(k+1)] for j in range(Ndim-1): fits[j] = scipy.interpolate.make_lsq_spline(stream.obs[0][isort], stream.obs[j+1][isort], t, k=k) if len(save)>0: np.savez('../data/{:s}'.format(save), fits=fits) if graph: xlims, ylims = get_stream_limits(n, align) ylabel = ['R.A. 
(deg)', 'Dec (deg)', 'd (kpc)', '$V_r$ (km/s)', '$\mu_\\alpha$ (mas/yr)', '$\mu_\delta$ (mas/yr)'] if align: ylabel[:2] = ['$\\xi$ (deg)', '$\\eta$ (deg)'] if fiducial: stream_fid = stream_model(n, pparams0=pparams_fid, dt=dt, rotmatrix=rotmatrix) plt.close() fig, ax = plt.subplots(2,Ndim,figsize=(20,4), sharex=True, gridspec_kw = {'height_ratios':[3, 1]}) for i in range(Ndim): plt.sca(ax[0][i]) Nhalf = int(0.5*np.size(stream.obs[i])) plt.plot(stream.obs[i][:Nhalf], 'o') plt.plot(stream.obs[i][Nhalf:], 'o') if fiducial: plt.plot(stream_fid.obs[i][:Nhalf], 'wo', mec='k', mew=0.2, alpha=0.5) plt.plot(stream_fid.obs[i][Nhalf:], 'wo', mec='k', mew=0.2, alpha=0.5) plt.ylabel(ylabel[i]) plt.sca(ax[1][i]) if fiducial: plt.plot(stream.obs[i][:Nhalf] - stream_fid.obs[i][:Nhalf], 'o') plt.plot(stream.obs[i][Nhalf:] - stream_fid.obs[i][Nhalf:], 'o') if fiducial: plt.sca(ax[0][Ndim-1]) plt.legend(fontsize='small') plt.tight_layout() if len(graphsave)>0: plt.savefig('../plots/{:s}.png'.format(graphsave)) else: return fig def get_stream_limits(n, align=False): """Return lists with limiting values in different dimensions""" if n==-1: xlims = [260, 100] ylims = [[-20, 70], [5, 15], [-400, 400], [-15,5], [-15, 5]] elif n==-2: xlims = [250, 210] ylims = [[-20, 15], [17, 27], [-80, -20], [-5,0], [-5, 0]] elif n==-3: xlims = [27, 17] ylims = [[10, 50], [34, 36], [-175, -50], [0.45, 1], [0.1, 0.7]] elif n==-4: xlims = [35, 10] ylims = [[-40, -20], [15, 25], [50, 200], [-0.5,0.5], [-1.5, -0.5]] if align: ylims[0] = [-5, 5] xup = [110, 110, 80, 80] xlims = [xup[np.abs(n)-1], 40] return (xlims, ylims) # step sizes for derivatives def iterate_steps(n): """Calculate derivatives for different parameter classes, and plot""" for vary in ['bary', 'halo', 'progenitor']: print(n, vary) step_convergence(n, Nstep=10, vary=vary) choose_step(n, Nstep=10, vary=vary) def iterate_plotsteps(n): """Plot stream models for a variety of model parameters""" for vary in ['bary', 'halo', 'progenitor']: print(n, vary) pid, dp, vlabel = get_varied_pars(vary) for p in range(len(pid)): plot_steps(n, p=p, Nstep=5, vary=vary, log=False) def plot_steps(n, p=0, Nstep=20, log=True, dt=0.2*u.Myr, vary='halo', verbose=False, align=True, observer=mw_observer, vobs=vsun): """Plot stream for different values of a potential parameter""" if align: rotmatrix = np.load('../data/rotmatrix_{}.npy'.format(n)) else: rotmatrix = None pparams0 = pparams_fid pid, dp, vlabel = get_varied_pars(vary) plabel, punit = get_parlabel(pid[p]) Nstep, step = get_steps(Nstep=Nstep, log=log) plt.close() fig, ax = plt.subplots(5,5,figsize=(20,10), sharex=True, gridspec_kw = {'height_ratios':[3, 1, 1, 1, 1]}) # fiducial model stream0 = stream_model(n, pparams0=pparams0, dt=dt, rotmatrix=rotmatrix, observer=observer, vobs=vobs) Nobs = 10 k = 3 isort = np.argsort(stream0.obs[0]) ra = np.linspace(np.min(stream0.obs[0])*1.05, np.max(stream0.obs[0])*0.95, Nobs) t = np.r_[(stream0.obs[0][isort][0],)*(k+1), ra, (stream0.obs[0][isort][-1],)*(k+1)] fits = [None]*5 for j in range(5): fits[j] = scipy.interpolate.make_lsq_spline(stream0.obs[0][isort], stream0.obs[j+1][isort], t, k=k) # excursions stream_fits = [[None] * 5 for x in range(2 * Nstep)] for i, s in enumerate(step[:]): pparams = [x for x in pparams0] pparams[pid[p]] = pparams[pid[p]] + s*dp[p] stream = stream_model(n, pparams0=pparams, dt=dt, rotmatrix=rotmatrix) color = mpl.cm.RdBu(i/(2*Nstep-1)) #print(i, dp[p], pparams) # fits iexsort = np.argsort(stream.obs[0]) raex = np.linspace(np.percentile(stream.obs[0], 10), 
np.percentile(stream.obs[0], 90), Nobs) tex = np.r_[(stream.obs[0][iexsort][0],)*(k+1), raex, (stream.obs[0][iexsort][-1],)*(k+1)] fits_ex = [None]*5 for j in range(5): fits_ex[j] = scipy.interpolate.make_lsq_spline(stream.obs[0][iexsort], stream.obs[j+1][iexsort], tex, k=k) stream_fits[i][j] = fits_ex[j] plt.sca(ax[0][j]) plt.plot(stream.obs[0], stream.obs[j+1], 'o', color=color, ms=2) plt.sca(ax[1][j]) plt.plot(stream.obs[0], stream.obs[j+1] - fits[j](stream.obs[0]), 'o', color=color, ms=2) plt.sca(ax[2][j]) plt.plot(stream.obs[0], fits_ex[j](stream.obs[0]) - fits[j](stream.obs[0]), 'o', color=color, ms=2) plt.sca(ax[3][j]) plt.plot(stream.obs[0], (fits_ex[j](stream.obs[0]) - fits[j](stream.obs[0]))/(s*dp[p]), 'o', color=color, ms=2) # symmetric derivatives ra_der = np.linspace(np.min(stream0.obs[0])*1.05, np.max(stream0.obs[0])*0.95, 100) for i in range(Nstep): color = mpl.cm.Greys_r(i/Nstep) for j in range(5): dy = stream_fits[i][j](ra_der) - stream_fits[-i-1][j](ra_der) dydx = -dy / np.abs(2*step[i]*dp[p]) plt.sca(ax[4][j]) plt.plot(ra_der, dydx, '-', color=color, lw=2, zorder=Nstep-i) # labels, limits xlims, ylims = get_stream_limits(n, align) ylabel = ['R.A. (deg)', 'Dec (deg)', 'd (kpc)', '$V_r$ (km/s)', '$\mu_\\alpha$ (mas/yr)', '$\mu_\delta$ (mas/yr)'] if align: ylabel[:2] = ['$\\xi$ (deg)', '$\\eta$ (deg)'] for j in range(5): plt.sca(ax[0][j]) plt.ylabel(ylabel[j+1]) plt.xlim(xlims[0], xlims[1]) plt.ylim(ylims[j][0], ylims[j][1]) plt.sca(ax[1][j]) plt.ylabel('$\Delta$ {}'.format(ylabel[j+1].split(' ')[0])) plt.sca(ax[2][j]) plt.ylabel('$\Delta$ {}'.format(ylabel[j+1].split(' ')[0])) plt.sca(ax[3][j]) plt.ylabel('$\Delta${}/$\Delta${}'.format(ylabel[j+1].split(' ')[0], plabel)) plt.sca(ax[4][j]) plt.xlabel(ylabel[0]) plt.ylabel('$\langle$$\Delta${}/$\Delta${}$\\rangle$'.format(ylabel[j+1].split(' ')[0], plabel)) #plt.suptitle('Varying {}'.format(plabel), fontsize='small') plt.tight_layout() plt.savefig('../plots/observable_steps_{:d}_{:s}_p{:d}_Ns{:d}.png'.format(n, vlabel, p, Nstep)) def step_convergence(name='gd1', Nstep=20, log=True, layer=1, dt=0.2*u.Myr, vary='halo', align=True, graph=False, verbose=False, Nobs=10, k=3, ra_der=np.nan, Nra=50): """Check deviations in numerical derivatives for consecutive step sizes""" mock = pickle.load(open('../data/mock_{}.params'.format(name),'rb')) if align: rotmatrix = mock['rotmatrix'] xmm = mock['xi_range'] else: rotmatrix = np.eye(3) xmm = mock['ra_range'] # fiducial model pparams0 = pparams_fid stream0 = stream_model(name=name, pparams0=pparams0, dt=dt, rotmatrix=rotmatrix) if np.any(~np.isfinite(ra_der)): ra_der = np.linspace(xmm[0]*1.05, xmm[1]*0.95, Nra) Nra = np.size(ra_der) # parameters to vary pid, dp, vlabel = get_varied_pars(vary) Np = len(pid) dpvec = np.array([x.value for x in dp]) Nstep, step = get_steps(Nstep=Nstep, log=log) dydx_all = np.empty((Np, Nstep, 5, Nra)) dev_der = np.empty((Np, Nstep-2*layer)) step_der = np.empty((Np, Nstep-2*layer)) for p in range(Np): plabel = get_parlabel(pid[p]) if verbose: print(p, plabel) # excursions stream_fits = [[None] * 5 for x in range(2 * Nstep)] for i, s in enumerate(step[:]): if verbose: print(i, s) pparams = [x for x in pparams0] pparams[pid[p]] = pparams[pid[p]] + s*dp[p] stream = stream_model(name=name, pparams0=pparams, dt=dt, rotmatrix=rotmatrix) # fits iexsort = np.argsort(stream.obs[0]) raex = np.linspace(np.percentile(stream.obs[0], 10), np.percentile(stream.obs[0], 90), Nobs) tex = np.r_[(stream.obs[0][iexsort][0],)*(k+1), raex, (stream.obs[0][iexsort][-1],)*(k+1)] 
fits_ex = [None]*5 for j in range(5): fits_ex[j] = scipy.interpolate.make_lsq_spline(stream.obs[0][iexsort], stream.obs[j+1][iexsort], tex, k=k) stream_fits[i][j] = fits_ex[j] # symmetric derivatives dydx = np.empty((Nstep, 5, Nra)) for i in range(Nstep): color = mpl.cm.Greys_r(i/Nstep) for j in range(5): dy = stream_fits[i][j](ra_der) - stream_fits[-i-1][j](ra_der) dydx[i][j] = -dy / np.abs(2*step[i]*dp[p]) dydx_all[p] = dydx # deviations from adjacent steps step_der[p] = -step[layer:Nstep-layer] * dp[p] for i in range(layer, Nstep-layer): dev_der[p][i-layer] = 0 for j in range(5): for l in range(layer): dev_der[p][i-layer] += np.sum((dydx[i][j] - dydx[i-l-1][j])**2) dev_der[p][i-layer] += np.sum((dydx[i][j] - dydx[i+l+1][j])**2) np.savez('../data/step_convergence_{}_{}_Ns{}_log{}_l{}'.format(name, vlabel, Nstep, log, layer), step=step_der, dev=dev_der, ders=dydx_all, steps_all=np.outer(dpvec,step[Nstep:])) if graph: plt.close() fig, ax = plt.subplots(1,Np,figsize=(4*Np,4)) for p in range(Np): plt.sca(ax[p]) plt.plot(step_der[p], dev_der[p], 'ko') #plabel = get_parlabel(pid[p]) #plt.xlabel('$\Delta$ {}'.format(plabel)) plt.ylabel('D') plt.gca().set_yscale('log') plt.tight_layout() plt.savefig('../plots/step_convergence_{}_{}_Ns{}_log{}_l{}.png'.format(name, vlabel, Nstep, log, layer)) def choose_step(name='gd1', tolerance=2, Nstep=20, log=True, layer=1, vary='halo'): """""" pid, dp, vlabel = get_varied_pars(vary) Np = len(pid) plabels, units = get_parlabel(pid) punits = ['({})'.format(x) if len(x) else '' for x in units] t = np.load('../data/step_convergence_{}_{}_Ns{}_log{}_l{}.npz'.format(name, vlabel, Nstep, log, layer)) dev = t['dev'] step = t['step'] dydx = t['ders'] steps_all = t['steps_all'][:,::-1] Nra = np.shape(dydx)[-1] best = np.empty(Np) # plot setup da = 4 nrow = 2 ncol = Np plt.close() fig, ax = plt.subplots(nrow, ncol, figsize=(da*ncol, da*1.3), squeeze=False, sharex='col', gridspec_kw = {'height_ratios':[1.2, 3]}) for p in range(Np): # choose step dmin = np.min(dev[p]) dtol = tolerance * dmin opt_step = np.min(step[p][dev[p]<dtol]) opt_id = step[p]==opt_step best[p] = opt_step ## largest step w deviation smaller than 1e-4 #opt_step = np.max(step[p][dev[p]<1e-4]) #opt_id = step[p]==opt_step #best[p] = opt_step plt.sca(ax[0][p]) for i in range(5): for j in range(10): plt.plot(steps_all[p], np.tanh(dydx[p,:,i,np.int64(j*Nra/10)]), '-', color='{}'.format(i/5), lw=0.5, alpha=0.5) plt.axvline(opt_step, ls='-', color='r', lw=2) plt.ylim(-1,1) plt.ylabel('Derivative') plt.title('{}'.format(plabels[p])+'$_{best}$ = '+'{:2.2g}'.format(opt_step), fontsize='small') plt.sca(ax[1][p]) plt.plot(step[p], dev[p], 'ko') plt.axvline(opt_step, ls='-', color='r', lw=2) plt.plot(step[p][opt_id], dev[p][opt_id], 'ro') plt.axhline(dtol, ls='-', color='orange', lw=1) y0, y1 = plt.gca().get_ylim() plt.axhspan(y0, dtol, color='orange', alpha=0.3, zorder=0) plt.gca().set_yscale('log') plt.gca().set_xscale('log') plt.xlabel('$\Delta$ {} {}'.format(plabels[p], punits[p])) plt.ylabel('Derivative deviation') np.save('../data/optimal_step_{}_{}'.format(name, vlabel), best) plt.tight_layout(h_pad=0) plt.savefig('../plots/step_convergence_{}_{}_Ns{}_log{}_l{}.png'.format(name, vlabel, Nstep, log, layer)) def read_optimal_step(name, vary, equal=False): """Return optimal steps for a range of parameter types""" if type(vary) is not list: vary = [vary] dp = np.empty(0) for v in vary: dp_opt = np.load('../data/optimal_step_{}_{}.npy'.format(name, v)) dp = np.concatenate([dp, dp_opt]) if equal: dp = 
np.array([0.05, 0.05, 0.2, 1, 0.01, 0.01, 0.05, 0.1, 0.05, 0.1, 0.1, 10, 1, 0.01, 0.01]) return dp def visualize_optimal_steps(name='gd1', vary=['progenitor', 'bary', 'halo'], align=True, dt=0.2*u.Myr, Nobs=50, k=3): """""" mock = pickle.load(open('../data/mock_{}.params'.format(name),'rb')) if align: rotmatrix = mock['rotmatrix'] xmm = mock['xi_range'] else: rotmatrix = np.eye(3) xmm = mock['ra_range'] # varied parameters pparams0 = pparams_fid pid, dp_fid, vlabel = get_varied_pars(vary) Np = len(pid) dp_opt = read_optimal_step(name, vary) dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)] fiducial = stream_model(name=name, pparams0=pparams0, dt=dt, rotmatrix=rotmatrix) iexsort = np.argsort(fiducial.obs[0]) raex = np.linspace(np.percentile(fiducial.obs[0], 10), np.percentile(fiducial.obs[0], 90), Nobs) tex = np.r_[(fiducial.obs[0][iexsort][0],)*(k+1), raex, (fiducial.obs[0][iexsort][-1],)*(k+1)] fit = scipy.interpolate.make_lsq_spline(fiducial.obs[0][iexsort], fiducial.obs[1][iexsort], tex, k=k) nrow = 2 ncol = np.int64((Np+1)/nrow) da = 4 c = ['b', 'b', 'b', 'r', 'r', 'r'] plt.close() fig, ax = plt.subplots(nrow, ncol, figsize=(ncol*da, nrow*da), squeeze=False) for p in range(Np): plt.sca(ax[p%2][int(p/2)]) for i, s in enumerate([-1.1, -1, -0.9, 0.9, 1, 1.1]): pparams = [x for x in pparams0] pparams[pid[p]] = pparams[pid[p]] + s*dp[p] stream = stream_model(name=name, pparams0=pparams, dt=dt, rotmatrix=rotmatrix) # bspline fits to stream centerline iexsort = np.argsort(stream.obs[0]) raex = np.linspace(np.percentile(stream.obs[0], 10), np.percentile(stream.obs[0], 90), Nobs) tex = np.r_[(stream.obs[0][iexsort][0],)*(k+1), raex, (stream.obs[0][iexsort][-1],)*(k+1)] fitex = scipy.interpolate.make_lsq_spline(stream.obs[0][iexsort], stream.obs[1][iexsort], tex, k=k) plt.plot(raex, fitex(raex) - fit(raex), '-', color=c[i]) plt.xlabel('R.A. 
(deg)') plt.ylabel('Dec (deg)') #print(get_parlabel(p)) plt.title('$\Delta$ {} = {:.2g}'.format(get_parlabel(p)[0], dp[p]), fontsize='medium') plt.tight_layout() plt.savefig('../plots/{}_optimal_steps.png'.format(name), dpi=200) # observing modes def define_obsmodes(): """Output a pickled dictionary with typical uncertainties and dimensionality of data for a number of observing modes""" obsmodes = {} obsmodes['fiducial'] = {'sig_obs': np.array([0.1, 2, 5, 0.1, 0.1]), 'Ndim': [3,4,6]} obsmodes['binospec'] = {'sig_obs': np.array([0.1, 2, 10, 0.1, 0.1]), 'Ndim': [3,4,6]} obsmodes['hectochelle'] = {'sig_obs': np.array([0.1, 2, 1, 0.1, 0.1]), 'Ndim': [3,4,6]} obsmodes['desi'] = {'sig_obs': np.array([0.1, 2, 10, np.nan, np.nan]), 'Ndim': [4,]} obsmodes['gaia'] = {'sig_obs': np.array([0.1, 0.2, 10, 0.2, 0.2]), 'Ndim': [6,]} obsmodes['exgal'] = {'sig_obs': np.array([0.5, np.nan, 20, np.nan, np.nan]), 'Ndim': [3,]} pickle.dump(obsmodes, open('../data/observing_modes.info','wb')) def obsmode_name(mode): """Return full name of the observing mode""" if type(mode) is not list: mode = [mode] full_names = {'fiducial': 'Fiducial', 'binospec': 'Binospec', 'hectochelle': 'Hectochelle', 'desi': 'DESI-like', 'gaia': 'Gaia-like', 'exgal': 'Extragalactic'} keys = full_names.keys() names = [] for m in mode: if m in keys: name = full_names[m] else: name = m names += [name] return names # crbs using bspline def calculate_crb(name='gd1', dt=0.2*u.Myr, vary=['progenitor', 'bary', 'halo'], ra=np.nan, dd=0.5, Nmin=15, verbose=False, align=True, scale=False, errmode='fiducial', k=3): """""" mock = pickle.load(open('../data/mock_{}.params'.format(name),'rb')) if align: rotmatrix = mock['rotmatrix'] xmm = np.sort(mock['xi_range']) else: rotmatrix = np.eye(3) xmm = np.sort(mock['ra_range']) # typical uncertainties and data availability obsmodes = pickle.load(open('../data/observing_modes.info', 'rb')) if errmode not in obsmodes.keys(): errmode = 'fiducial' sig_obs = obsmodes[errmode]['sig_obs'] data_dim = obsmodes[errmode]['Ndim'] # mock observations if np.any(~np.isfinite(ra)): if (np.int64((xmm[1]-xmm[0])/dd + 1) < Nmin): dd = (xmm[1]-xmm[0])/Nmin ra = np.arange(xmm[0], xmm[1]+dd, dd) #ra = np.linspace(xmm[0]*1.05, xmm[1]*0.95, Nobs) #else: Nobs = np.size(ra) print(name, Nobs) err = np.tile(sig_obs, Nobs).reshape(Nobs,-1) # varied parameters pparams0 = pparams_fid pid, dp_fid, vlabel = get_varied_pars(vary) Np = len(pid) dp_opt = read_optimal_step(name, vary) dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)] fits_ex = [[[None]*5 for x in range(2)] for y in range(Np)] if scale: dp_unit = unity_scale(dp) dps = [x*y for x,y in zip(dp, dp_unit)] # calculate derivatives for all parameters for p in range(Np): for i, s in enumerate([-1, 1]): pparams = [x for x in pparams0] pparams[pid[p]] = pparams[pid[p]] + s*dp[p] stream = stream_model(name=name, pparams0=pparams, dt=dt, rotmatrix=rotmatrix) # bspline fits to stream centerline iexsort = np.argsort(stream.obs[0]) raex = np.linspace(np.percentile(stream.obs[0], 10), np.percentile(stream.obs[0], 90), Nobs) tex = np.r_[(stream.obs[0][iexsort][0],)*(k+1), raex, (stream.obs[0][iexsort][-1],)*(k+1)] for j in range(5): fits_ex[p][i][j] = scipy.interpolate.make_lsq_spline(stream.obs[0][iexsort], stream.obs[j+1][iexsort], tex, k=k) # populate matrix of derivatives and calculate CRB for Ndim in data_dim: #for Ndim in [6,]: Ndata = Nobs * (Ndim - 1) cyd = np.empty(Ndata) dydx = np.empty((Np, Ndata)) dy2 = np.empty((2, Np, Ndata)) for j in range(1, Ndim): for p in range(Np): dy = 
fits_ex[p][0][j-1](ra) - fits_ex[p][1][j-1](ra) dy2[0][p][(j-1)*Nobs:j*Nobs] = fits_ex[p][0][j-1](ra) dy2[1][p][(j-1)*Nobs:j*Nobs] = fits_ex[p][1][j-1](ra) #positive = np.abs(dy)>0 #if verbose: print('{:d},{:d} {:s} min{:.1e} max{:1e} med{:.1e}'.format(j, p, get_parlabel(pid[p])[0], np.min(np.abs(dy[positive])), np.max(np.abs(dy)), np.median(np.abs(dy)))) if scale: dydx[p][(j-1)*Nobs:j*Nobs] = -dy / np.abs(2*dps[p].value) else: dydx[p][(j-1)*Nobs:j*Nobs] = -dy / np.abs(2*dp[p].value) #if verbose: print('{:d},{:d} {:s} min{:.1e} max{:1e} med{:.1e}'.format(j, p, get_parlabel(pid[p])[0], np.min(np.abs(dydx[p][(j-1)*Nobs:j*Nobs][positive])), np.max(np.abs(dydx[p][(j-1)*Nobs:j*Nobs])), np.median(np.abs(dydx[p][(j-1)*Nobs:j*Nobs])))) #print(j, p, get_parlabel(pid[p])[0], dp[p], np.min(np.abs(dy)), np.max(np.abs(dy)), np.median(dydx[p][(j-1)*Nobs:j*Nobs])) cyd[(j-1)*Nobs:j*Nobs] = err[:,j-1]**2 np.savez('../data/crb/components_{:s}{:1d}_{:s}_a{:1d}_{:s}'.format(errmode, Ndim, name, align, vlabel), dydx=dydx, y=dy2, cyd=cyd, dp=dp_opt) # data component of the Fisher matrix cy = np.diag(cyd) cyi = np.diag(1. / cyd) caux = np.matmul(cyi, dydx.T) dxi = np.matmul(dydx, caux) # component based on prior knowledge of model parameters pxi = priors(name, vary) # full Fisher matrix cxi = dxi + pxi if verbose: cx = np.linalg.inv(cxi) cx = np.matmul(np.linalg.inv(np.matmul(cx, cxi)), cx) # iteration to improve inverse at large cond numbers sx = np.sqrt(np.diag(cx)) print('CRB', sx) print('condition {:g}'.format(np.linalg.cond(cxi))) print('standard inverse', np.allclose(cxi, cxi.T), np.allclose(cx, cx.T), np.allclose(np.matmul(cx,cxi), np.eye(np.shape(cx)[0]))) cx = stable_inverse(cxi) print('stable inverse', np.allclose(cxi, cxi.T), np.allclose(cx, cx.T), np.allclose(np.matmul(cx,cxi), np.eye(np.shape(cx)[0]))) np.savez('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}'.format(errmode, Ndim, name, align, vlabel), cxi=cxi, dxi=dxi, pxi=pxi) def priors(name, vary): """Return covariance matrix with prior knowledge about parameters""" mock = pickle.load(open('../data/mock_{}.params'.format(name), 'rb')) cprog = mock['prog_prior'] cbary = np.array([0.1*x.value for x in pparams_fid[:5]])**-2 chalo = np.zeros(4) cdipole = np.zeros(3) cquad = np.zeros(5) coctu = np.zeros(7) priors = {'progenitor': cprog, 'bary': cbary, 'halo': chalo, 'dipole': cdipole, 'quad': cquad, 'octu': coctu} cprior = np.empty(0) for v in vary: cprior = np.concatenate([cprior, priors[v]]) pxi = np.diag(cprior) return pxi def scale2invert(name='gd1', Ndim=6, vary=['progenitor', 'bary', 'halo'], verbose=False, align=True, errmode='fiducial'): """""" pid, dp_fid, vlabel = get_varied_pars(vary) #dp = read_optimal_step(name, vary) d = np.load('../data/crb/components_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel)) dydx = d['dydx'] cyd = d['cyd'] y = d['y'] dp = d['dp'] dy = (y[1,:,:] - y[0,:,:]) dydx = (y[1,:,:] - y[0,:,:]) / (2*dp[:,np.newaxis]) scaling_par = np.median(np.abs(dydx), axis=1) dydx = dydx / scaling_par[:,np.newaxis] dydx_ = np.reshape(dydx, (len(dp), Ndim-1, -1)) scaling_dim = np.median(np.abs(dydx_), axis=(2,0)) dydx_ = dydx_ / scaling_dim[np.newaxis,:,np.newaxis] cyd_ = np.reshape(cyd, (Ndim-1, -1)) cyd_ = cyd_ / scaling_dim[:,np.newaxis] cyd = np.reshape(cyd_, (-1)) dydx = np.reshape(dydx_, (len(dp), -1)) mmin = np.min(np.abs(dy), axis=0) mmax = np.max(np.abs(dy), axis=0) mmed = np.median(np.abs(dydx), axis=1) dyn_range = mmax/mmin #print(dyn_range) print(np.min(dyn_range), np.max(dyn_range), 
np.std(dyn_range)) cy = np.diag(cyd) cyi = np.diag(1. / cyd) caux = np.matmul(cyi, dydx.T) cxi = np.matmul(dydx, caux) print('condition {:e}'.format(np.linalg.cond(cxi))) cx = np.linalg.inv(cxi) cx = np.matmul(np.linalg.inv(np.matmul(cx, cxi)), cx) # iteration to improve inverse at large cond numbers print('standard inverse', np.allclose(cxi, cxi.T), np.allclose(cx, cx.T), np.allclose(np.matmul(cx,cxi), np.eye(np.shape(cx)[0]))) cx = stable_inverse(cxi, maxiter=30) print('stable inverse', np.allclose(cxi, cxi.T), np.allclose(cx, cx.T), np.allclose(np.matmul(cx,cxi), np.eye(np.shape(cx)[0]))) def unity_scale(dp): """""" dim_scale = 10**np.array([2, 3, 3, 2, 4, 3, 7, 7, 5, 7, 7, 4, 4, 4, 4, 3, 3, 3, 4, 3, 4, 4, 4]) dim_scale = 10**np.array([3, 2, 3, 4, 0, 2, 2, 3, 2, 2, 2, 4, 3, 2, 2, 3]) #dim_scale = 10**np.array([2, 3, 3, 1, 3, 2, 5, 5, 3, 5, 5, 2, 2, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3]) #dim_scale = 10**np.array([2, 3, 3, 1, 3, 2, 5, 5, 3, 5, 5, 2, 2, 4, 4, 3, 3, 3]) dp_unit = [(dp[x].value*dim_scale[x])**-1 for x in range(len(dp))] return dp_unit def test_inversion(name='gd1', Ndim=6, vary=['progenitor', 'bary', 'halo'], align=True, errmode='fiducial'): """""" pid, dp, vlabel = get_varied_pars(vary) d = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel)) cxi = d['cxi'] N = np.shape(cxi)[0] cx_ = np.linalg.inv(cxi) cx = stable_inverse(cxi, verbose=True, maxiter=100) #cx_ii = stable_inverse(cx, verbose=True, maxiter=50) print('condition {:g}'.format(np.linalg.cond(cxi))) print('linalg inverse', np.allclose(np.matmul(cx_,cxi), np.eye(N))) print('stable inverse', np.allclose(np.matmul(cx,cxi), np.eye(N))) #print(np.matmul(cx,cxi)) #print('inverse inverse', np.allclose(cx_ii, cxi)) def stable_inverse(a, maxiter=20, verbose=False): """Invert a matrix with a bad condition number""" N = np.shape(a)[0] # guess q = np.linalg.inv(a) qa = np.matmul(q,a) # iterate for i in range(maxiter): if verbose: print(i, np.sqrt(np.sum((qa - np.eye(N))**2)), np.allclose(qa, np.eye(N))) if np.allclose(qa, np.eye(N)): return q qai = np.linalg.inv(qa) q = np.matmul(qai,q) qa = np.matmul(q,a) return q def crb_triangle(n, vary, Ndim=6, align=True, plot='all', fast=False): """""" pid, dp, vlabel = get_varied_pars(vary) plabels, units = get_parlabel(pid) params = ['$\Delta$' + x + '({})'.format(y) for x,y in zip(plabels, units)] if align: alabel = '_align' else: alabel = '' fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel)) cxi = fm['cxi'] if fast: cx = np.linalg.inv(cxi) else: cx = stable_inverse(cxi) #print(cx[0][0]) if plot=='halo': cx = cx[:4, :4] params = params[:4] elif plot=='bary': cx = cx[4:9, 4:9] params = params[4:9] elif plot=='progenitor': cx = cx[9:, 9:] params = params[9:] Nvar = len(params) plt.close() dax = 2 fig, ax = plt.subplots(Nvar-1, Nvar-1, figsize=(dax*Nvar, dax*Nvar), sharex='col', sharey='row') for i in range(0,Nvar-1): for j in range(i+1,Nvar): plt.sca(ax[j-1][i]) cx_2d = np.array([[cx[i][i], cx[i][j]], [cx[j][i], cx[j][j]]]) w, v = np.linalg.eig(cx_2d) if np.all(np.isreal(v)): theta = np.degrees(np.arccos(v[0][0])) width = np.sqrt(w[0])*2 height = np.sqrt(w[1])*2 e = mpl.patches.Ellipse((0,0), width=width, height=height, angle=theta, fc='none', ec=mpl.cm.bone(0.5), lw=2) plt.gca().add_patch(e) plt.gca().autoscale_view() #plt.xlim(-ylim[i],ylim[i]) #plt.ylim(-ylim[j], ylim[j]) if j==Nvar-1: plt.xlabel(params[i]) if i==0: plt.ylabel(params[j]) # turn off unused axes for i in range(0,Nvar-1): for 
j in range(i+1,Nvar-1): plt.sca(ax[i][j]) plt.axis('off') plt.tight_layout() plt.savefig('../plots/crb_triangle_{:s}_{:d}_{:s}_{:d}_{:s}.pdf'.format(alabel, n, vlabel, Ndim, plot)) def crb_triangle_alldim(name='gd1', vary=['progenitor', 'bary', 'halo'], align=True, plot='all', fast=False, scale=False, errmode='fiducial'): """Show correlations in CRB between a chosen set of parameters in a triangle plot""" pid, dp_fid, vlabel = get_varied_pars(vary) dp_opt = read_optimal_step(name, vary) dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)] plabels, units = get_parlabel(pid) punits = [' ({})'.format(x) if len(x) else '' for x in units] params = ['$\Delta$ {}{}'.format(x, y) for x,y in zip(plabels, punits)] if plot=='halo': i0 = 11 i1 = 15 elif plot=='bary': i0 = 6 i1 = 11 elif plot=='progenitor': i0 = 0 i1 = 6 elif plot=='dipole': i0 = 15 i1 = len(params) else: i0 = 0 i1 = len(params) Nvar = i1 - i0 params = params[i0:i1] if scale: dp_unit = unity_scale(dp) #print(dp_unit) dp_unit = dp_unit[i0:i1] pid = pid[i0:i1] label = ['RA, Dec, d', 'RA, Dec, d, $V_r$', 'RA, Dec, d, $V_r$, $\mu_\\alpha$, $\mu_\delta$'] plt.close() dax = 2 fig, ax = plt.subplots(Nvar-1, Nvar-1, figsize=(dax*Nvar, dax*Nvar), sharex='col', sharey='row') for l, Ndim in enumerate([3, 4, 6]): fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel)) cxi = fm['cxi'] #cxi = np.load('../data/crb/bspline_cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npy'.format(errmode, Ndim, name, align, vlabel)) if fast: cx = np.linalg.inv(cxi) else: cx = stable_inverse(cxi) cx = cx[i0:i1,i0:i1] for i in range(0,Nvar-1): for j in range(i+1,Nvar): plt.sca(ax[j-1][i]) if scale: cx_2d = np.array([[cx[i][i]/dp_unit[i]**2, cx[i][j]/(dp_unit[i]*dp_unit[j])], [cx[j][i]/(dp_unit[j]*dp_unit[i]), cx[j][j]/dp_unit[j]**2]]) else: cx_2d = np.array([[cx[i][i], cx[i][j]], [cx[j][i], cx[j][j]]]) w, v = np.linalg.eig(cx_2d) if np.all(np.isreal(v)): theta = np.degrees(np.arctan2(v[1][0], v[0][0])) width = np.sqrt(w[0])*2 height = np.sqrt(w[1])*2 e = mpl.patches.Ellipse((0,0), width=width, height=height, angle=theta, fc='none', ec=mpl.cm.bone(0.1+l/4), lw=2, label=label[l]) plt.gca().add_patch(e) if l==1: plt.gca().autoscale_view() if j==Nvar-1: plt.xlabel(params[i]) if i==0: plt.ylabel(params[j]) # turn off unused axes for i in range(0,Nvar-1): for j in range(i+1,Nvar-1): plt.sca(ax[i][j]) plt.axis('off') plt.sca(ax[int(Nvar/2-1)][int(Nvar/2-1)]) plt.legend(loc=2, bbox_to_anchor=(1,1)) plt.tight_layout() plt.savefig('../plots/cxi_{:s}_{:s}_a{:1d}_{:s}_{:s}.pdf'.format(errmode, name, align, vlabel, plot)) def compare_optimal_steps(): """""" vary = ['progenitor', 'bary', 'halo', 'dipole', 'quad'] vary = ['progenitor', 'bary', 'halo'] for name in ['gd1', 'tri']: print(name) print(read_optimal_step(name, vary)) def get_crb(name, Nstep=10, vary=['progenitor', 'bary', 'halo'], first=True): """""" if first: store_progparams(name) wrap_angles(name, save=True) progenitor_prior(name) find_greatcircle(name=name) endpoints(name) for v in vary: step_convergence(name=name, Nstep=Nstep, vary=v) choose_step(name=name, Nstep=Nstep, vary=v) calculate_crb(name=name, vary=vary, verbose=True) crb_triangle_alldim(name=name, vary=vary) ######################## # cartesian coordinates # accelerations def acc_kepler(x, p=1*u.Msun): """Keplerian acceleration""" r = np.linalg.norm(x)*u.kpc a = -G * p * 1e11 * r**-3 * x return a.to(u.pc*u.Myr**-2) def acc_bulge(x, p=[pparams_fid[j] for j in range(2)]): """""" r = np.linalg.norm(x)*u.kpc a = -(G*p[0]*x/(r * (r + 
p[1])**2)).to(u.pc*u.Myr**-2) return a def acc_disk(x, p=[pparams_fid[j] for j in range(2,5)]): """""" R = np.linalg.norm(x[:2])*u.kpc z = x[2] a = -(G*p[0]*x * (R**2 + (p[1] + np.sqrt(z**2 + p[2]**2))**2)**-1.5).to(u.pc*u.Myr**-2) a[2] *= (1 + p[2]/np.sqrt(z**2 + p[2]**2)) return a def acc_nfw(x, p=[pparams_fid[j] for j in [5,6,8,10]]): """""" r = np.linalg.norm(x)*u.kpc q = np.array([1*u.Unit(1), p[2], p[3]]) a = (p[0]**2 * p[1] * r**-3 * (1/(1+p[1]/r) - np.log(1+r/p[1])) * x * q**-2).to(u.pc*u.Myr**-2) return a def acc_dipole(x, p=[pparams_fid[j] for j in range(11,14)]): """Acceleration due to outside dipole perturbation""" pv = [x.value for x in p] a = np.sqrt(3/(4*np.pi)) * np.array([pv[2], pv[0], pv[1]])*u.pc*u.Myr**-2 return a def acc_quad(x, p=[pparams_fid[j] for j in range(14,19)]): """Acceleration due to outside quadrupole perturbation""" a = np.zeros(3)*u.pc*u.Myr**-2 f = 0.5*np.sqrt(15/np.pi) a[0] = x[0]*(f*p[4] - f/np.sqrt(3)*p[2]) + x[1]*f*p[0] + x[2]*f*p[3] a[1] = x[0]*f*p[0] - x[1]*(f*p[4] + f/np.sqrt(3)*p[2]) + x[2]*f*p[1] a[2] = x[0]*f*p[3] + x[1]*f*p[1] + x[2]*2*f/np.sqrt(3)*p[2] return a.to(u.pc*u.Myr**-2) def acc_octu(x, p=[pparams_fid[j] for j in range(19,26)]): """Acceleration due to outside octupole perturbation""" a = np.zeros(3)*u.pc*u.Myr**-2 f = np.array([0.25*np.sqrt(35/(2*np.pi)), 0.5*np.sqrt(105/np.pi), 0.25*np.sqrt(21/(2*np.pi)), 0.25*np.sqrt(7/np.pi), 0.25*np.sqrt(21/(2*np.pi)), 0.25*np.sqrt(105/np.pi), 0.25*np.sqrt(35/(2*np.pi))]) xu = x.unit pu = p[0].unit pvec = np.array([i.value for i in p]) * pu dmat = np.ones((3,7)) * f * pvec * xu**2 x = np.array([i.value for i in x]) dmat[0] *= np.array([6*x[0]*x[1], x[1]*x[2], -2*x[0]*x[1], -6*x[0]*x[2], 4*x[2]**2-x[1]**2-3*x[0]**2, 2*x[0]*x[2], 3*x[0]**2-3*x[1]**2]) dmat[1] *= np.array([3*x[0]**2-3*x[1]**2, x[0]*x[2], 4*x[2]**2-x[0]**2-3*x[1]**2, -6*x[1]*x[2], -2*x[0]*x[1], -2*x[1]*x[2], -6*x[0]*x[1]]) dmat[2] *= np.array([0, x[0]*x[1], 8*x[1]*x[2], 6*x[2]**2-3*x[0]**2-3*x[1]**2, 8*x[0]*x[2], x[0]**2-x[1]**2, 0]) a = np.einsum('ij->i', dmat) * dmat.unit return a.to(u.pc*u.Myr**-2) # derivatives def der_kepler(x, p=1*u.Msun): """Derivative of Kepler potential parameters wrt cartesian components of the acceleration""" r = np.linalg.norm(x)*u.kpc dmat = np.zeros((3,1)) * u.pc**-1 * u.Myr**2 * u.Msun dmat[:,0] = (-r**3/(G*x)).to(u.pc**-1 * u.Myr**2 * u.Msun) * 1e-11 return dmat.value def pder_kepler(x, p=1*u.Msun): """Derivative of cartesian components of the acceleration wrt to Kepler potential parameter""" r = np.linalg.norm(x)*u.kpc dmat = np.zeros((3,1)) * u.pc * u.Myr**-2 * u.Msun**-1 dmat[:,0] = (-G*x*r**-3).to(u.pc * u.Myr**-2 * u.Msun**-1) * 1e11 return dmat.value def pder_nfw(x, pu=[pparams_fid[j] for j in [5,6,8,10]]): """Calculate derivatives of cartesian components of the acceleration wrt halo potential parameters""" p = pu q = np.array([1, p[2], p[3]]) # physical quantities r = np.linalg.norm(x)*u.kpc a = acc_nfw(x, p=pu) # derivatives dmat = np.zeros((3, 4)) # Vh dmat[:,0] = 2*a/p[0] # Rh dmat[:,1] = a/p[1] + p[0]**2 * p[1] * r**-3 * (1/(p[1]+p[1]**2/r) - 1/(r*(1+p[1]/r)**2)) * x * q**-2 # qy, qz for i in [1,2]: dmat[i,i+1] = (-2*a[i]/q[i]).value return dmat def pder_bulge(x, pu=[pparams_fid[j] for j in range(2)]): """Calculate derivarives of cartesian components of the acceleration wrt Hernquist bulge potential parameters""" # coordinates r = np.linalg.norm(x)*u.kpc # accelerations ab = acc_bulge(x, p=pu[:2]) # derivatives dmat = np.zeros((3, 2)) # Mb dmat[:,0] = ab/pu[0] # ab dmat[:,1] = 2 * ab / (r + 
pu[1]) return dmat def pder_disk(x, pu=[pparams_fid[j] for j in range(2,5)]): """Calculate derivarives of cartesian components of the acceleration wrt Miyamoto-Nagai disk potential parameters""" # coordinates R = np.linalg.norm(x[:2])*u.kpc z = x[2] aux = np.sqrt(z**2 + pu[2]**2) # accelerations ad = acc_disk(x, p=pu) # derivatives dmat = np.zeros((3, 3)) # Md dmat[:,0] = ad / pu[0] # ad dmat[:,1] = 3 * ad * (pu[1] + aux) / (R**2 + (pu[1] + aux)**2) # bd dmat[:2,2] = 3 * ad[:2] * (pu[1] + aux) / (R**2 + (pu[1] + aux)**2) * pu[2] / aux dmat[2,2] = (3 * ad[2] * (pu[1] + aux) / (R**2 + (pu[1] + aux)**2) * pu[2] / aux - G * pu[0] * z * (R**2 + (pu[1] + aux)**2)**-1.5 * z**2 * (pu[2]**2 + z**2)**-1.5).value return dmat def der_dipole(x, pu=[pparams_fid[j] for j in range(11,14)]): """Calculate derivatives of dipole potential parameters wrt (Cartesian) components of the acceleration vector a""" # shape: 3, Npar dmat = np.zeros((3,3)) f = np.sqrt((4*np.pi)/3) dmat[0,2] = f dmat[1,0] = f dmat[2,1] = f return dmat def pder_dipole(x, pu=[pparams_fid[j] for j in range(11,14)]): """Calculate derivatives of (Cartesian) components of the acceleration vector a wrt dipole potential parameters""" # shape: 3, Npar dmat = np.zeros((3,3)) f = np.sqrt(3/(4*np.pi)) dmat[0,2] = f dmat[1,0] = f dmat[2,1] = f return dmat def der_quad(x, p=[pparams_fid[j] for j in range(14,19)]): """Caculate derivatives of quadrupole potential parameters wrt (Cartesian) components of the acceleration vector a""" f = 2/np.sqrt(15/np.pi) s = np.sqrt(3) x = [1e-3/i.value for i in x] dmat = np.ones((3,5)) * f dmat[0] = np.array([x[1], 0, -s*x[0], x[2], x[0]]) dmat[1] = np.array([x[0], x[2], -s*x[1], 0, -x[1]]) dmat[2] = np.array([0, x[1], 0.5*s*x[2], x[0], 0]) return dmat def pder_quad(x, p=[pparams_fid[j] for j in range(14,19)]): """Caculate derivatives of (Cartesian) components of the acceleration vector a wrt quadrupole potential parameters""" f = 0.5*np.sqrt(15/np.pi) s = 1/np.sqrt(3) x = [1e-3*i.value for i in x] dmat = np.ones((3,5)) * f dmat[0] *= np.array([x[1], 0, -s*x[0], x[2], x[0]]) dmat[1] *= np.array([x[0], x[2], -s*x[1], 0, -x[1]]) dmat[2] *= np.array([0, x[1], 2*s*x[2], x[0], 0]) return dmat def pder_octu(x, p=[pparams_fid[j] for j in range(19,26)]): """Caculate derivatives of (Cartesian) components of the acceleration vector a wrt octupole potential parameters""" f = np.array([0.25*np.sqrt(35/(2*np.pi)), 0.5*np.sqrt(105/np.pi), 0.25*np.sqrt(21/(2*np.pi)), 0.25*np.sqrt(7/np.pi), 0.25*np.sqrt(21/(2*np.pi)), 0.25*np.sqrt(105/np.pi), 0.25*np.sqrt(35/(2*np.pi))]) x = [1e-3*i.value for i in x] dmat = np.ones((3,7)) * f dmat[0] *= np.array([6*x[0]*x[1], x[1]*x[2], -2*x[0]*x[1], -6*x[0]*x[2], 4*x[2]**2-x[1]**2-3*x[0]**2, 2*x[0]*x[2], 3*x[0]**2-3*x[1]**2]) dmat[1] *= np.array([3*x[0]**2-3*x[1]**2, x[0]*x[2], 4*x[2]**2-x[0]**2-3*x[1]**2, -6*x[1]*x[2], -2*x[0]*x[1], -2*x[1]*x[2], -6*x[0]*x[1]]) dmat[2] *= np.array([0, x[0]*x[1], 8*x[1]*x[2], 6*x[2]**2-3*x[0]**2-3*x[1]**2, 8*x[0]*x[2], x[0]**2-x[1]**2, 0]) return dmat def crb_ax(n, Ndim=6, vary=['halo', 'bary', 'progenitor'], align=True, fast=False): """Calculate CRB inverse matrix for 3D acceleration at position x in a halo potential""" pid, dp, vlabel = get_varied_pars(vary) if align: alabel = '_align' else: alabel = '' # read in full inverse CRB for stream modeling cxi = np.load('../data/crb/bspline_cxi{:s}_{:d}_{:s}_{:d}.npy'.format(alabel, n, vlabel, Ndim)) if fast: cx = np.linalg.inv(cxi) else: cx = stable_inverse(cxi) # subset halo parameters Nhalo = 4 cq = cx[:Nhalo,:Nhalo] 
if fast: cqi = np.linalg.inv(cq) else: cqi = stable_inverse(cq) xi = np.array([-8.3, 0.1, 0.1])*u.kpc x0, v0 = gd1_coordinates() #xi = np.array(x0)*u.kpc d = 50 Nb = 20 x = np.linspace(x0[0]-d, x0[0]+d, Nb) y = np.linspace(x0[1]-d, x0[1]+d, Nb) x = np.linspace(-d, d, Nb) y = np.linspace(-d, d, Nb) xv, yv = np.meshgrid(x, y) xf = np.ravel(xv) yf = np.ravel(yv) af = np.empty((Nb**2, 3)) plt.close() fig, ax = plt.subplots(3,3,figsize=(11,10)) dimension = ['x', 'y', 'z'] xlabel = ['y', 'x', 'x'] ylabel = ['z', 'z', 'y'] for j in range(3): if j==0: xin = np.array([np.repeat(x0[j], Nb**2), xf, yf]).T elif j==1: xin = np.array([xf, np.repeat(x0[j], Nb**2), yf]).T elif j==2: xin = np.array([xf, yf, np.repeat(x0[j], Nb**2)]).T for i in range(Nb**2): #xi = np.array([xf[i], yf[i], x0[2]])*u.kpc xi = xin[i]*u.kpc a = acc_nfw(xi) dqda = halo_accelerations(xi) cai = np.matmul(dqda, np.matmul(cqi, dqda.T)) if fast: ca = np.linalg.inv(cai) else: ca = stable_inverse(cai) a_crb = (np.sqrt(np.diag(ca)) * u.km**2 * u.kpc**-1 * u.s**-2).to(u.pc*u.Myr**-2) af[i] = np.abs(a_crb/a) af[i] = a_crb for i in range(3): plt.sca(ax[j][i]) im = plt.imshow(af[:,i].reshape(Nb,Nb), extent=[-d, d, -d, d], cmap=mpl.cm.gray) #, norm=mpl.colors.LogNorm(), vmin=1e-2, vmax=0.1) plt.xlabel(xlabel[j]+' (kpc)') plt.ylabel(ylabel[j]+' (kpc)') divider = make_axes_locatable(plt.gca()) cax = divider.append_axes("top", size="4%", pad=0.05) plt.colorbar(im, cax=cax, orientation='horizontal') plt.gca().xaxis.set_ticks_position('top') cax.tick_params(axis='x', labelsize='xx-small') if j==0: plt.title('a$_{}$'.format(dimension[i]), y=4) plt.tight_layout(rect=[0,0,1,0.95]) plt.savefig('../plots/acc_{}_{}_{}.png'.format(n, vlabel, Ndim)) def acc_cart(x, components=['bary', 'halo', 'dipole']): """""" acart = np.zeros(3) * u.pc*u.Myr**-2 dict_acc = {'bary': [acc_bulge, acc_disk], 'halo': [acc_nfw], 'dipole': [acc_dipole], 'quad': [acc_quad], 'octu': [acc_octu], 'point': [acc_kepler]} accelerations = [] for c in components: accelerations += dict_acc[c] for acc in accelerations: a_ = acc(x) acart += a_ return acart def acc_rad(x, components=['bary', 'halo', 'dipole']): """Return radial acceleration""" r = np.linalg.norm(x) * x.unit theta = np.arccos(x[2].value/r.value) phi = np.arctan2(x[1].value, x[0].value) trans = np.array([np.sin(theta)*np.cos(phi), np.sin(theta)*np.sin(phi), np.cos(theta)]) a_cart = acc_cart(x, components=components) a_rad = np.dot(a_cart, trans) return a_rad def ader_cart(x, components=['bary', 'halo', 'dipole']): """""" dacart = np.empty((3,0)) dict_der = {'bary': [der_bulge, der_disk], 'halo': [der_nfw], 'dipole': [der_dipole], 'quad': [der_quad], 'point': [der_kepler]} derivatives = [] for c in components: derivatives += dict_der[c] for ader in derivatives: da_ = ader(x) dacart = np.hstack((dacart, da_)) return dacart def apder_cart(x, components=['bary', 'halo', 'dipole']): """""" dacart = np.empty((3,0)) dict_der = {'bary': [pder_bulge, pder_disk], 'halo': [pder_nfw], 'dipole': [pder_dipole], 'quad': [pder_quad], 'octu': [pder_octu], 'point': [pder_kepler]} derivatives = [] for c in components: derivatives += dict_der[c] for ader in derivatives: da_ = ader(x) dacart = np.hstack((dacart, da_)) return dacart def apder_rad(x, components=['bary', 'halo', 'dipole']): """Return dar/dx_pot (radial acceleration/potential parameters) evaluated at vector x""" r = np.linalg.norm(x) * x.unit theta = np.arccos(x[2].value/r.value) phi = np.arctan2(x[1].value, x[0].value) trans = np.array([np.sin(theta)*np.cos(phi), 
np.sin(theta)*np.sin(phi), np.cos(theta)]) dadq_cart = apder_cart(x, components=components) dadq_rad = np.einsum('ij,i->j', dadq_cart, trans) return dadq_rad def crb_acart(n, Ndim=6, vary=['progenitor', 'bary', 'halo', 'dipole', 'quad'], component='all', align=True, d=20, Nb=50, fast=False, scale=False, relative=True, progenitor=False, errmode='fiducial'): """""" pid, dp_fid, vlabel = get_varied_pars(vary) if align: alabel = '_align' else: alabel = '' if relative: vmin = 1e-2 vmax = 1 rlabel = ' / a' else: vmin = 3e-1 vmax = 1e1 rlabel = ' (pc Myr$^{-2}$)' # read in full inverse CRB for stream modeling cxi = np.load('../data/crb/bspline_cxi{:s}_{:s}_{:d}_{:s}_{:d}.npy'.format(alabel, errmode, n, vlabel, Ndim)) if fast: cx = np.linalg.inv(cxi) else: cx = stable_inverse(cxi) # choose the appropriate components: Nprog, Nbary, Nhalo, Ndipole, Npoint = [6, 5, 4, 3, 1] if 'progenitor' not in vary: Nprog = 0 nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'all': Nprog, 'point': 0} nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'all': np.shape(cx)[0], 'point': 1} if 'progenitor' not in vary: nstart['dipole'] = Npoint nend['dipole'] = Npoint + Ndipole if component in ['bary', 'halo', 'dipole', 'point']: components = [component] else: components = [x for x in vary if x!='progenitor'] cq = cx[nstart[component]:nend[component], nstart[component]:nend[component]] Npot = np.shape(cq)[0] if fast: cqi = np.linalg.inv(cq) else: cqi = stable_inverse(cq) if scale: dp_opt = read_optimal_step(n, vary) dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)] scale_vec = np.array([x.value for x in dp[nstart[component]:nend[component]]]) scale_mat = np.outer(scale_vec, scale_vec) cqi *= scale_mat if progenitor: x0, v0 = gd1_coordinates() else: x0 = np.array([4, 4, 0]) Rp = np.linalg.norm(x0[:2]) zp = x0[2] R = np.linspace(-d, d, Nb) k = x0[1]/x0[0] x = R/np.sqrt(1+k**2) y = k * x z = np.linspace(-d, d, Nb) xv, zv = np.meshgrid(x, z) yv, zv = np.meshgrid(y, z) xin = np.array([np.ravel(xv), np.ravel(yv), np.ravel(zv)]).T Npix = np.size(xv) af = np.empty((Npix, 3)) derf = np.empty((Npix, 3, Npot)) for i in range(Npix): xi = xin[i]*u.kpc a = acc_cart(xi, components=components) dadq = apder_cart(xi, components=components) derf[i] = dadq ca = np.matmul(dadq, np.matmul(cq, dadq.T)) a_crb = np.sqrt(np.diag(ca)) * u.pc * u.Myr**-2 if relative: af[i] = np.abs(a_crb/a) else: af[i] = a_crb #print(xi, a_crb) # save np.savez('../data/crb_acart{:s}_{:s}_{:d}_{:s}_{:s}_{:d}_{:d}_{:d}_{:d}'.format(alabel, errmode, n, vlabel, component, Ndim, d, Nb, relative), acc=af, x=xin, der=derf) plt.close() fig, ax = plt.subplots(1, 3, figsize=(15, 5)) label = ['$\Delta$ $a_X$', '$\Delta$ $a_Y$', '$\Delta$ $a_Z$'] for i in range(3): plt.sca(ax[i]) im = plt.imshow(af[:,i].reshape(Nb, Nb), origin='lower', extent=[-d, d, -d, d], cmap=mpl.cm.gray, vmin=vmin, vmax=vmax, norm=mpl.colors.LogNorm()) if progenitor: plt.plot(Rp, zp, 'r*', ms=10) plt.xlabel('R (kpc)') plt.ylabel('Z (kpc)') divider = make_axes_locatable(plt.gca()) cax = divider.append_axes("right", size="3%", pad=0.1) plt.colorbar(im, cax=cax) plt.ylabel(label[i] + rlabel) plt.tight_layout() plt.savefig('../plots/crb_acc_cart{:s}_{:s}_{:d}_{:s}_{:s}_{:d}_{:d}_{:d}_{:d}.png'.format(alabel, errmode, n, vlabel, component, Ndim, d, Nb, relative)) def crb_acart_cov(n, Ndim=6, vary=['progenitor', 'bary', 'halo', 'dipole', 'quad'], component='all', j=0, align=True, d=20, Nb=30, fast=False, scale=False, 
relative=True, progenitor=False, batch=False, errmode='fiducial'): """""" pid, dp_fid, vlabel = get_varied_pars(vary) if align: alabel = '_align' else: alabel = '' if relative: vmin = 1e-2 vmax = 1 rlabel = ' / a' else: vmin = -0.005 vmax = 0.005 #vmin = 1e-2 #vmax = 1e0 rlabel = ' (pc Myr$^{-2}$)' # read in full inverse CRB for stream modeling cxi = np.load('../data/crb/bspline_cxi{:s}_{:s}_{:d}_{:s}_{:d}.npy'.format(alabel, errmode, n, vlabel, Ndim)) if fast: cx = np.linalg.inv(cxi) else: cx = stable_inverse(cxi) # choose the appropriate components: Nprog, Nbary, Nhalo, Ndipole, Nquad, Npoint = [6, 5, 4, 3, 5, 1] if 'progenitor' not in vary: Nprog = 0 nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'quad': Nprog + Nbary + Nhalo + Ndipole, 'all': Nprog, 'point': 0} nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'quad': Nprog + Nbary + Nhalo + Ndipole + Nquad, 'all': np.shape(cx)[0], 'point': 1} if 'progenitor' not in vary: nstart['dipole'] = Npoint nend['dipole'] = Npoint + Ndipole if component in ['bary', 'halo', 'dipole', 'quad', 'point']: components = [component] else: components = [x for x in vary if x!='progenitor'] cq = cx[nstart[component]:nend[component], nstart[component]:nend[component]] Npot = np.shape(cq)[0] if fast: cqi = np.linalg.inv(cq) else: cqi = stable_inverse(cq) if scale: dp_opt = read_optimal_step(n, vary) dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)] scale_vec = np.array([x.value for x in dp[nstart[component]:nend[component]]]) scale_mat = np.outer(scale_vec, scale_vec) cqi *= scale_mat if progenitor: prog_coords = {-1: gd1_coordinates(), -2: pal5_coordinates(), -3: tri_coordinates(), -4: atlas_coordinates()} x0, v0 = prog_coords[n] print(x0) else: x0 = np.array([4, 4, 0]) Rp = np.linalg.norm(x0[:2]) zp = x0[2] R = np.linspace(-d, d, Nb) k = x0[1]/x0[0] x = R/np.sqrt(1+k**2) y = k * x z = np.linspace(-d, d, Nb) xv, zv = np.meshgrid(x, z) yv, zv = np.meshgrid(y, z) xin = np.array([np.ravel(xv), np.ravel(yv), np.ravel(zv)]).T Npix = np.size(xv) af = np.empty((Npix, 3)) derf = np.empty((Npix*3, Npot)) for i in range(Npix): xi = xin[i]*u.kpc a = acc_cart(xi, components=components) dadq = apder_cart(xi, components=components) derf[i*3:(i+1)*3] = dadq ca = np.matmul(derf, np.matmul(cq, derf.T)) Nx = Npot Nw = Npix*3 vals, vecs = la.eigh(ca, eigvals=(Nw - Nx - 2, Nw - 1)) ## check orthogonality: #for i in range(Npot-1): #for k in range(i+1, Npot): #print(i, k) #print(np.dot(vecs[:,i], vecs[:,k])) #print(np.dot(vecs[::3,i], vecs[::3,k]), np.dot(vecs[1::3,i], vecs[1::3,k]), np.dot(vecs[1::3,i], vecs[1::3,k])) # save np.savez('../data/crb_acart_cov{:s}_{:s}_{:d}_{:s}_{:s}_{:d}_{:d}_{:d}_{:d}_{:d}'.format(alabel, errmode, n, vlabel, component, Ndim, d, Nb, relative, progenitor), x=xin, der=derf, c=ca) plt.close() fig, ax = plt.subplots(1, 3, figsize=(15, 5)) if j==0: vcomb = np.sqrt(np.sum(vecs**2*vals, axis=1)) label = ['($\Sigma$ Eigval $\\times$ Eigvec$^2$ $a_{}$'.format(x)+')$^{1/2}$' for x in ['X', 'Y', 'Z']] vmin = 1e-2 vmax = 5e0 norm = mpl.colors.LogNorm() else: vcomb = vecs[:,j] label = ['Eig {} $a_{}$'.format(np.abs(j), x) for x in ['X', 'Y', 'Z']] vmin = -0.025 vmax = 0.025 norm = None for i in range(3): plt.sca(ax[i]) #im = plt.imshow(vecs[i::3,j].reshape(Nb, Nb), origin='lower', extent=[-d, d, -d, d], cmap=mpl.cm.gray, vmin=vmin, vmax=vmax) im = plt.imshow(vcomb[i::3].reshape(Nb, Nb), origin='lower', extent=[-d, d, -d, d], cmap=mpl.cm.gray, vmin=vmin, vmax=vmax, norm=norm) 
if progenitor: plt.plot(Rp, zp, 'r*', ms=10) plt.xlabel('R (kpc)') plt.ylabel('Z (kpc)') divider = make_axes_locatable(plt.gca()) cax = divider.append_axes("right", size="3%", pad=0.1) plt.colorbar(im, cax=cax) plt.ylabel(label[i]) plt.tight_layout() if batch: return fig else: plt.savefig('../plots/crb_acc_cart_cov{:s}_{:s}_{:d}_{:s}_{:s}_{:d}_{:d}_{:d}_{:d}_{:d}_{:d}.png'.format(alabel, errmode, n, vlabel, component, np.abs(j), Ndim, d, Nb, relative, progenitor)) def a_vecfield(vary=['progenitor', 'bary', 'halo', 'dipole', 'quad'], component='all', d=20, Nb=10): """Plot acceleration field in R,z plane""" if component in ['bary', 'halo', 'dipole', 'quad', 'point']: components = [component] else: components = [x for x in vary if x!='progenitor'] x0 = np.array([4, 4, 0]) R = np.linspace(-d, d, Nb) k = x0[1]/x0[0] x = R/np.sqrt(1+k**2) y = k * x z = np.linspace(-d, d, Nb) xv, zv = np.meshgrid(x, z) yv, zv = np.meshgrid(y, z) xin = np.array([np.ravel(xv), np.ravel(yv), np.ravel(zv)]).T Rin = np.linalg.norm(xin[:,:2], axis=1) * np.sign(xin[:,0]) zin = xin[:,2] Npix = np.size(xv) acart_pix = np.empty((Npix, 3)) acyl_pix = np.empty((Npix, 2)) for i in range(Npix): xi = xin[i]*u.kpc acart = acc_cart(xi, components=components) acart_pix[i] = acart acyl_pix[:,0] = np.linalg.norm(acart_pix[:,:2], axis=1) * -np.sign(xin[:,0]) acyl_pix[:,1] = acart_pix[:,2] plt.close() plt.figure() plt.quiver(Rin, zin, acyl_pix[:,0], acyl_pix[:,1]) plt.tight_layout() def a_crbcov_vecfield(n, Ndim=6, vary=['progenitor', 'bary', 'halo', 'dipole', 'quad'], errmode='fiducial', component='all', j=0, align=True, d=20, Nb=10, fast=False, scale=True, relative=False, progenitor=False, batch=False): """""" pid, dp_fid, vlabel = get_varied_pars(vary) if align: alabel = '_align' else: alabel = '' if relative: vmin = 1e-2 vmax = 1 rlabel = ' / a' else: vmin = -0.005 vmax = 0.005 #vmin = 1e-2 #vmax = 1e0 rlabel = ' (pc Myr$^{-2}$)' # read in full inverse CRB for stream modeling cxi = np.load('../data/crb/bspline_cxi{:s}_{:s}_{:d}_{:s}_{:d}.npy'.format(alabel, errmode, n, vlabel, Ndim)) if fast: cx = np.linalg.inv(cxi) else: cx = stable_inverse(cxi) # choose the appropriate components: Nprog, Nbary, Nhalo, Ndipole, Nquad, Npoint = [6, 5, 4, 3, 5, 1] if 'progenitor' not in vary: Nprog = 0 nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'quad': Nprog + Nbary + Nhalo + Ndipole, 'all': Nprog, 'point': 0} nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'quad': Nprog + Nbary + Nhalo + Ndipole + Nquad, 'all': np.shape(cx)[0], 'point': 1} if 'progenitor' not in vary: nstart['dipole'] = Npoint nend['dipole'] = Npoint + Ndipole if component in ['bary', 'halo', 'dipole', 'quad', 'point']: components = [component] else: components = [x for x in vary if x!='progenitor'] cq = cx[nstart[component]:nend[component], nstart[component]:nend[component]] Npot = np.shape(cq)[0] if fast: cqi = np.linalg.inv(cq) else: cqi = stable_inverse(cq) if scale: dp_opt = read_optimal_step(n, vary) dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)] scale_vec = np.array([x.value for x in dp[nstart[component]:nend[component]]]) scale_mat = np.outer(scale_vec, scale_vec) cqi *= scale_mat if progenitor: x0, v0 = gd1_coordinates() else: x0 = np.array([4, 4, 0]) Rp = np.linalg.norm(x0[:2]) zp = x0[2] R = np.linspace(-d, d, Nb) k = x0[1]/x0[0] x = R/np.sqrt(1+k**2) y = k * x z = np.linspace(-d, d, Nb) xv, zv = np.meshgrid(x, z) yv, zv = np.meshgrid(y, z) xin = np.array([np.ravel(xv), 
np.ravel(yv), np.ravel(zv)]).T Rin = np.linalg.norm(xin[:,:2], axis=1) * np.sign(xin[:,0]) zin = xin[:,2] Npix = np.size(xv) acart_pix = np.empty((Npix, 3)) acyl_pix = np.empty((Npix, 2)) vcomb_pix = np.empty((Npix, 2)) af = np.empty((Npix, 3)) derf = np.empty((Npix*3, Npot)) for i in range(Npix): xi = xin[i]*u.kpc a = acc_cart(xi, components=components) acart_pix[i] = a dadq = apder_cart(xi, components=components) derf[i*3:(i+1)*3] = dadq acyl_pix[:,0] = np.linalg.norm(acart_pix[:,:2], axis=1) * -np.sign(xin[:,0]) acyl_pix[:,1] = acart_pix[:,2] ca = np.matmul(derf, np.matmul(cq, derf.T)) Nx = Npot Nw = Npix*3 vals, vecs = la.eigh(ca, eigvals=(Nw - Nx - 2, Nw - 1)) if j==0: vcomb = np.sqrt(np.sum(vecs**2*vals, axis=1)) label = ['($\Sigma$ Eigval $\\times$ Eigvec$^2$ $a_{}$'.format(x)+')$^{1/2}$' for x in ['X', 'Y', 'Z']] vmin = 1e-3 vmax = 1e-1 norm = mpl.colors.LogNorm() else: vcomb = vecs[:,j]*np.sqrt(vals[j]) label = ['Eig {} $a_{}$'.format(np.abs(j), x) for x in ['X', 'Y', 'Z']] vmin = -0.025 vmax = 0.025 norm = None vcomb_pix[:,0] = np.sqrt(vcomb[0::3]**2 + vcomb[1::3]**2) * -np.sign(xin[:,0]) #vcomb_pix[:,0] = np.sqrt(vcomb[0::3]**2 + vcomb[1::3]**2) * -np.sign(vcomb[0::3]) vcomb_pix[:,1] = vcomb[2::3] plt.close() fig, ax = plt.subplots(1,2,figsize=(10,5)) plt.sca(ax[0]) plt.quiver(Rin, zin, acyl_pix[:,0], acyl_pix[:,1], pivot='middle') plt.xlabel('R (kpc)') plt.ylabel('Z (kpc)') plt.title('Acceleration {}'.format(component), fontsize='medium') plt.sca(ax[1]) plt.quiver(Rin, zin, vcomb_pix[:,0], vcomb_pix[:,1], pivot='middle', headwidth=0, headlength=0, headaxislength=0, scale=0.02, scale_units='xy') plt.xlabel('R (kpc)') plt.ylabel('Z (kpc)') plt.title('Eigenvector {}'.format(np.abs(j)), fontsize='medium') plt.tight_layout() if batch: return fig else: plt.savefig('../plots/afield_crbcov{:s}_{:s}_{:d}_{:s}_{:s}_{:d}_{:d}_{:d}_{:d}_{:d}.png'.format(alabel, errmode, n, vlabel, component, np.abs(j), Ndim, d, Nb, relative)) def summary(n, mode='scalar', vary=['progenitor', 'bary', 'halo', 'dipole', 'quad'], errmode='fiducial', component='all'): """""" pid, dp_fid, vlabel = get_varied_pars(vary) fn = {'scalar': crb_acart_cov, 'vector': a_crbcov_vecfield} bins = {'scalar': 30, 'vector': 10} Nprog, Nbary, Nhalo, Ndipole, Nquad, Npoint = [6, 5, 4, 3, 5, 1] Npars = {'bary': Nbary, 'halo': Nhalo, 'dipole': Ndipole, 'quad': Nquad, 'point': Npoint} if component in ['bary', 'halo', 'dipole', 'quad', 'point']: components = [component] else: components = [x for x in vary if x!='progenitor'] Niter = [Npars[x] for x in components] Niter = sum(Niter) + 1 pp = PdfPages('../plots/acceleration_{}_{}_{}_{}_{}.pdf'.format(n, errmode, vlabel, component, mode)) for i in range(Niter): print(i, Niter) fig = fn[mode](-1, progenitor=True, batch=True, errmode=errmode, vary=vary, component=component, j=-i, d=20, Nb=bins[mode]) pp.savefig(fig) pp.close() ######### # Summary def full_names(): """""" full = {'gd1': 'GD-1', 'atlas': 'ATLAS', 'tri': 'Triangulum', 'ps1a': 'PS1A', 'ps1b': 'PS1B', 'ps1c': 'PS1C', 'ps1d': 'PS1D', 'ps1e': 'PS1E', 'ophiuchus': 'Ophiuchus', 'hermus': 'Hermus', 'kwando': 'Kwando', 'orinoco': 'Orinoco', 'sangarius': 'Sangarius', 'scamander': 'Scamander'} return full def full_name(name): """""" full = full_names() return full[name] def get_done(sort_length=False): """""" done = ['gd1', 'tri', 'atlas', 'ps1a', 'ps1c', 'ps1e', 'ophiuchus', 'kwando', 'orinoco', 'sangarius', 'hermus', 'ps1d'] done = ['gd1', 'tri', 'atlas', 'ps1a', 'ps1c', 'ps1e', 'kwando', 'orinoco', 'sangarius', 'hermus', 'ps1d'] # 
length if sort_length: tosort = [] for name in done: mock = pickle.load(open('../data/mock_{}.params'.format(name), 'rb')) tosort += [np.max(mock['xi_range']) - np.min(mock['xi_range'])] done = [x for _,x in sorted(zip(tosort,done))] else: tosort = [] vary = ['progenitor', 'bary', 'halo'] Ndim = 6 errmode = 'fiducial' align = True pid, dp_fid, vlabel = get_varied_pars(vary) pid_vh = myutils.wherein(np.array(pid), np.array([5])) for name in done: fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel)) cxi = fm['cxi'] cx = stable_inverse(cxi) crb = np.sqrt(np.diag(cx)) tosort += [crb[pid_vh]] done = [x for _,x in sorted(zip(tosort,done))][::-1] return done def store_mocks(): """""" done = get_done() for name in done: stream = stream_model(name) np.save('../data/streams/mock_observed_{}'.format(name), stream.obs) def period(name): """Return orbital period in units of stepsize and number of complete periods""" orbit = stream_orbit(name=name) r = np.linalg.norm(orbit['x'].to(u.kpc), axis=0) a = np.abs(np.fft.rfft(r)) f = np.argmax(a[1:]) + 1 p = np.size(a)/f return (p, f) def extract_crbs(Ndim=6, vary=['progenitor', 'bary', 'halo'], component='halo', errmode='fiducial', j=0, align=True, fast=False, scale=False): """""" pid, dp_fid, vlabel = get_varied_pars(vary) names = get_done() tout = Table(names=('name', 'crb')) pparams0 = pparams_fid pid_comp, dp_fid2, vlabel2 = get_varied_pars(component) Np = len(pid_comp) pid_crb = myutils.wherein(np.array(pid), np.array(pid_comp)) plt.close() fig, ax = plt.subplots(Np,1,figsize=(10,15), subplot_kw=dict(projection='mollweide')) for name in names[:]: fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel)) cxi = fm['cxi'] if fast: cx = np.linalg.inv(cxi) else: cx = stable_inverse(cxi) crb = np.sqrt(np.diag(cx)) #print([pparams0[pid_comp[i]] for i in range(Np)]) crb_frac = [crb[pid_crb[i]]/pparams0[pid_comp[i]].value for i in range(Np)] print(name, crb_frac) stream = stream_model(name=name) for i in range(Np): plt.sca(ax[i]) color_index = np.array(crb_frac[:]) color_index[color_index>0.2] = 0.2 color_index /= 0.2 color = mpl.cm.viridis(color_index[i]) plt.plot(np.radians(stream.obs[0]),
np.radians(stream.obs[1])
numpy.radians
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups

This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.

.. moduleauthor:: <NAME> <<EMAIL>>

"""
#-----------------------------------------------------------------------------
#       Copyright (C) 2013 The Mosaic Development Team
#
#       Distributed under the terms of the BSD License. The full license is in
#       the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------

import numpy as N


class SpaceGroup(object):

    """
    Space group

    All possible space group objects are created in this module. Other
    modules should access these objects through the dictionary
    space_groups rather than create their own space group objects.
    """

    def __init__(self, number, symbol, transformations):
        """
        :param number: the number assigned to the space group by
                       international convention
        :type number: int
        :param symbol: the Hermann-Mauguin space-group symbol as used
                       in PDB and mmCIF files
        :type symbol: str
        :param transformations: a list of space group transformations,
                                each consisting of a tuple of three
                                integer arrays (rot, tn, td), where rot
                                is the rotation matrix and tn/td are the
                                numerator and denominator of the
                                translation vector. The transformations
                                are defined in fractional coordinates.
        :type transformations: list
        """
        self.number = number
        self.symbol = symbol
        self.transformations = transformations
        self.transposed_rotations = N.array([N.transpose(t[0])
                                             for t in transformations])
        self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
                                            for t in transformations]))

    def __repr__(self):
        return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))

    def __len__(self):
        """
        :return: the number of space group transformations
        :rtype: int
        """
        return len(self.transformations)

    def symmetryEquivalentMillerIndices(self, hkl):
        """
        :param hkl: a set of Miller indices
        :type hkl: Scientific.N.array_type
        :return: a tuple (miller_indices, phase_factor) of two arrays
                 of length equal to the number of space group
                 transformations. miller_indices contains the Miller
                 indices of each reflection equivalent by symmetry to
                 the reflection hkl (including hkl itself as the first
                 element). phase_factor contains the phase factors that
                 must be applied to the structure factor of reflection
                 hkl to obtain the structure factor of the symmetry
                 equivalent reflection.
:rtype: tuple """ hkls = N.dot(self.transposed_rotations, hkl) p = N.multiply.reduce(self.phase_factors**hkl, -1) return hkls, p space_groups = {} transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(1, 'P 1', transformations) space_groups[1] = sg space_groups['P 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(2, 'P -1', transformations) space_groups[2] = sg space_groups['P -1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(3, 'P 1 2 1', transformations) space_groups[3] = sg space_groups['P 1 2 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(4, 'P 1 21 1', transformations) space_groups[4] = sg space_groups['P 1 21 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(5, 'C 1 2 1', transformations) space_groups[5] = sg space_groups['C 1 2 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(6, 'P 1 m 1', transformations) space_groups[6] = sg space_groups['P 1 m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(7, 'P 1 c 1', transformations) space_groups[7] = sg space_groups['P 1 c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = 
N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(8, 'C 1 m 1', transformations) space_groups[8] = sg space_groups['C 1 m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(9, 'C 1 c 1', transformations) space_groups[9] = sg space_groups['C 1 c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(10, 'P 1 2/m 1', transformations) space_groups[10] = sg space_groups['P 1 2/m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(11, 'P 1 21/m 1', transformations) space_groups[11] = sg space_groups['P 1 21/m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(12, 'C 1 2/m 1', transformations) space_groups[12] = sg space_groups['C 1 2/m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(13, 'P 1 2/c 1', transformations) space_groups[13] = sg space_groups['P 1 2/c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(14, 'P 1 21/c 1', transformations) space_groups[14] = sg space_groups['P 1 21/c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) 
rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(15, 'C 1 2/c 1', transformations) space_groups[15] = sg space_groups['C 1 2/c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(16, 'P 2 2 2', transformations) space_groups[16] = sg space_groups['P 2 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(17, 'P 2 2 21', transformations) space_groups[17] = sg space_groups['P 2 2 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(18, 'P 21 21 2', transformations) space_groups[18] = sg space_groups['P 21 21 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(19, 'P 21 21 21', transformations) space_groups[19] = sg space_groups['P 21 21 21'] = sg transformations 
= [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(20, 'C 2 2 21', transformations) space_groups[20] = sg space_groups['C 2 2 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(21, 'C 2 2 2', transformations) space_groups[21] = sg space_groups['C 2 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(22, 'F 2 2 2', transformations) space_groups[22] = sg space_groups['F 2 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(23, 'I 2 2 2', transformations) space_groups[23] = sg space_groups['I 2 2 2'] = sg 
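# --- Illustrative helper (hypothetical, not part of the generated table) ----
# Each transformation tuple stored in this module is (rot, trans_num, trans_den):
# a 3x3 integer rotation matrix plus a fractional translation given element-wise
# as trans_num/trans_den.  The sketch below shows one way such an operation
# could be applied to a fractional coordinate; it is only a sketch and assumes
# nothing beyond what this module already uses, namely that `N` provides
# element-wise array arithmetic and `dot`.
def _apply_symmetry_operation(transformation, fractional_xyz):
    """Return rot*xyz + trans_num/trans_den for one symmetry operation.

    `fractional_xyz` is a length-3 array of fractional (crystallographic)
    coordinates; the result is not wrapped back into the unit cell.
    """
    rot, trans_num, trans_den = transformation
    translation = trans_num / (1.0 * trans_den)   # element-wise fraction
    return N.dot(rot, fractional_xyz) + translation
# Usage sketch: every group is registered under both its International Tables
# number and its Hermann-Mauguin symbol, so space_groups[19] and
# space_groups['P 21 21 21'] refer to the same SpaceGroup object.
# ----------------------------------------------------------------------------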
transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(24, 'I 21 21 21', transformations) space_groups[24] = sg space_groups['I 21 21 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(25, 'P m m 2', transformations) space_groups[25] = sg space_groups['P m m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(26, 'P m c 21', transformations) space_groups[26] = sg space_groups['P m c 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(27, 'P c c 2', transformations) space_groups[27] = sg space_groups['P c c 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(28, 'P m a 2', transformations) space_groups[28] = sg space_groups['P m a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(29, 'P c a 21', transformations) space_groups[29] = sg space_groups['P c a 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(30, 'P n c 2', transformations) space_groups[30] = sg space_groups['P n c 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(31, 'P m n 21', transformations) space_groups[31] = sg space_groups['P m n 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(32, 'P b a 2', transformations) space_groups[32] = sg space_groups['P b a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(33, 'P n a 21', transformations) space_groups[33] = sg space_groups['P n a 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(34, 'P n n 2', transformations) space_groups[34] = sg space_groups['P n n 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(35, 'C m m 2', 
transformations) space_groups[35] = sg space_groups['C m m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(36, 'C m c 21', transformations) space_groups[36] = sg space_groups['C m c 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(37, 'C c c 2', transformations) space_groups[37] = sg space_groups['C c c 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) 
trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(38, 'A m m 2', transformations) space_groups[38] = sg space_groups['A m m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(39, 'A b m 2', transformations) space_groups[39] = sg space_groups['A b m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = 
N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(40, 'A m a 2', transformations) space_groups[40] = sg space_groups['A m a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(41, 'A b a 2', transformations) space_groups[41] = sg space_groups['A b a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = 
N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(42, 'F m m 2', transformations) space_groups[42] = sg space_groups['F m m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(43, 'F d d 2', transformations) space_groups[43] 
= sg space_groups['F d d 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(44, 'I m m 2', transformations) space_groups[44] = sg space_groups['I m m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(45, 'I b a 2', transformations) space_groups[45] = sg space_groups['I b a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(46, 'I m a 2', transformations) space_groups[46] = sg space_groups['I m a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(47, 'P m m m', transformations) space_groups[47] = sg space_groups['P m m m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) 
transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(48, 'P n n n :2', transformations) space_groups[48] = sg space_groups['P n n n :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(49, 'P c c m', transformations) space_groups[49] = sg space_groups['P c c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(50, 'P b a n :2', transformations) space_groups[50] = sg space_groups['P b a n :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, 
trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(51, 'P m m a', transformations) space_groups[51] = sg space_groups['P m m a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(52, 'P n n a', transformations) space_groups[52] = sg space_groups['P n n a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(53, 'P m n a', transformations) space_groups[53] = sg space_groups['P m n a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(54, 'P c c a', transformations) space_groups[54] = sg space_groups['P c c a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(55, 'P b a m', transformations) space_groups[55] = sg space_groups['P b a m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(56, 'P c c n', transformations) space_groups[56] = sg space_groups['P c c n'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(57, 'P b c m', transformations) space_groups[57] = sg space_groups['P b c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, 
trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(58, 'P n n m', transformations) space_groups[58] = sg space_groups['P n n m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(59, 'P m m n :2', transformations) space_groups[59] = sg space_groups['P m m n :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(60, 'P b c n', transformations) space_groups[60] = sg space_groups['P b c n'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(61, 'P b c a', transformations) space_groups[61] = sg space_groups['P b c a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(62, 'P n m a', transformations) space_groups[62] = sg space_groups['P n m a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(63, 'C m c m', transformations) space_groups[63] = sg space_groups['C m c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = 
N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(64, 'C m c a', transformations) space_groups[64] = sg space_groups['C m c a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 
3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(65, 'C m m m', transformations) space_groups[65] = sg space_groups['C m m m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(66, 'C c c m', transformations) space_groups[66] = sg space_groups['C c c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 
3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(67, 'C m m a', transformations) space_groups[67] = sg space_groups['C m m a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(68, 'C c c a :2', transformations) space_groups[68] = sg space_groups['C c c a :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(69, 'F m m m', transformations) space_groups[69] = sg space_groups['F m m m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = 
N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,3,3]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([4,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([4,4,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,1,1]) trans_den = N.array([4,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,1,1]) trans_den = N.array([4,4,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([2,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([3,0,3]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,1,1]) trans_den = N.array([4,4,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,-1,1]) trans_den = N.array([2,4,4]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,-1,1]) trans_den = N.array([4,4,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([2,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([3,1,1]) trans_den = N.array([4,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,3,0]) trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([2,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([4,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(70, 'F d d d :2', transformations) space_groups[70] = sg space_groups['F d d d :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num 
= N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(71, 'I m m m', transformations) space_groups[71] = sg space_groups['I m m m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(72, 'I b a m', transformations) space_groups[72] = sg space_groups['I b a m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(73, 'I b c a', transformations) space_groups[73] = sg space_groups['I b c a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(74, 'I m m a', transformations) space_groups[74] = sg space_groups['I m m a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(75, 'P 4', transformations) space_groups[75] = sg space_groups['P 4'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,3]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(76, 'P 41', transformations) space_groups[76] = sg space_groups['P 41'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(77, 'P 42', transformations) space_groups[77] = sg space_groups['P 42'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,3]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(78, 'P 43', transformations) space_groups[78] = sg space_groups['P 43'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(79, 'I 4', transformations) space_groups[79] = sg space_groups['I 4'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) 
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot =
N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(83, 'P 4/m', transformations) space_groups[83] = sg space_groups['P 4/m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(84, 'P 42/m', transformations) space_groups[84] = sg space_groups['P 42/m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(85, 'P 4/n :2', transformations) space_groups[85] = sg space_groups['P 4/n :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(86, 'P 42/n :2', transformations) space_groups[86] = sg space_groups['P 42/n :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(87, 'I 4/m', transformations) space_groups[87] = sg space_groups['I 4/m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-3,-3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,5,5]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,-1,-1]) trans_den = 
N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(88, 'I 41/a :2', transformations) space_groups[88] = sg space_groups['I 41/a :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(89, 'P 4 2 2', transformations) space_groups[89] = sg space_groups['P 4 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(90, 'P 4 21 2', transformations) space_groups[90] = sg space_groups['P 4 21 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, 
trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,3]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,3]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(91, 'P 41 2 2', transformations) space_groups[91] = sg space_groups['P 41 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(92, 'P 41 21 2', transformations) space_groups[92] = sg space_groups['P 41 21 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(93, 'P 42 2 2', transformations) space_groups[93] = sg space_groups['P 42 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(94, 'P 42 21 2', transformations) space_groups[94] = sg space_groups['P 42 21 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,3]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,3]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(95, 'P 43 2 2', transformations) space_groups[95] = sg space_groups['P 43 2 2'] = sg 
transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(96, 'P 43 21 2', transformations) space_groups[96] = sg space_groups['P 43 21 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = 
N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(97, 'I 4 2 2', transformations) space_groups[97] = sg space_groups['I 4 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(98, 'I 41 2 2', transformations) space_groups[98] = sg space_groups['I 41 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = 
N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(99, 'P 4 m m', transformations) space_groups[99] = sg space_groups['P 4 m m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(100, 'P 4 b m', transformations) space_groups[100] = sg space_groups['P 4 b m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = 
N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(101, 'P 42 c m', transformations) space_groups[101] = sg space_groups['P 42 c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(102, 'P 42 n m', transformations) space_groups[102] = sg space_groups['P 42 n m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(103, 'P 4 c c', transformations) space_groups[103] = sg space_groups['P 4 c c'] = sg 
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num =
N.array([0,0,0])
numpy.array
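# Hedged illustration of the data built above: each entry appended to
# `transformations` is a 3x3 rotation matrix plus a fractional translation given
# as numerator/denominator vectors. The sketch below shows the conventional way
# such an operation acts on a fractional coordinate, x' = R @ x + num/den wrapped
# back into the unit cell; how the SpaceGroup class itself consumes these tuples
# is an assumption here, not taken from this module.
import numpy as N

def apply_symmetry_op(point, rot, trans_num, trans_den):
    # Assumed convention: fractional coordinates, result wrapped into [0, 1).
    translation = trans_num / trans_den.astype(float)
    return (rot @ point + translation) % 1.0

# Example: the identity operation that heads each transformation list above.
rot = N.array([1, 0, 0, 0, 1, 0, 0, 0, 1])
rot.shape = (3, 3)
trans_num = N.array([0, 0, 0])
trans_den = N.array([1, 1, 1])
print(apply_symmetry_op(N.array([0.1, 0.2, 0.3]), rot, trans_num, trans_den))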
"""This module contains useful functions and classes for thermal system calculation Symbols: h: convection_heat_transfer_coeffient [W/m2K] k: thermal conductivity [W/mK] r: radius [m] cp: specific heat capacity at constant pressure [J/kgK] temp: temperature [degC or K] """ import logging import warnings from abc import ABC, abstractmethod from enum import Enum, auto, unique from functools import cached_property from typing import Union, Tuple, NamedTuple import numpy as np from CoolProp.CoolProp import PropsSI from CoolProp.HumidAirProp import HAPropsSI import matplotlib.pyplot as plt from scipy.optimize import root # Define logger logger = logging.getLogger('thermal_system_calculation') logger.setLevel(logging.INFO) ch = logging.StreamHandler() ch.setLevel(logging.INFO) formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') ch.setFormatter(formatter) logger.addHandler(ch) Numeric = Union[float, np.ndarray] GRAVITY = 9.81 def get_heat_flux_by_conduction( temperature_from: Numeric, temperature_to: Numeric, thickness: Numeric, thermal_conductivity: Numeric, ) -> Numeric: """Returns heat flux [W/m2] by conduction from a section of material to another""" return thermal_conductivity * (temperature_from - temperature_to) / thickness def get_heat_flux_by_convection(h: Numeric, temp_from: Numeric, temp_to: Numeric) -> float: """Returns heat flux [W/m2] by convection from a surface of a temperature (temp_from) to the fluid with another temperature (temp_to) for the given heat convection coefficient (alpha)""" return h * (temp_from - temp_to) ################################################################################################### # Exercise 2 def get_thermal_resistance_cylinder( radius: Numeric, wall_thickness: Numeric, thermal_conductivity: Numeric, length: Numeric ) -> Numeric: """Returns thermal resistance [m2K/W] for a cylinder""" outer_radius = radius + wall_thickness return np.log(outer_radius / radius) / (2 * np.pi * length * thermal_conductivity) Numeric = Union[float, np.ndarray] @unique class TipBoundaryConditionForFin(Enum): Convection = auto() Adiabatic = auto() Temperature = auto() Long = auto() class FinConfiguration(NamedTuple): perimeter: float area_cross_section: float length: float def get_heat_transfer_1D_fin_with_uniform_cross_section( x: Numeric, temp_base: Numeric, temp_surr: Numeric, fin_configuration: FinConfiguration, h: Numeric, k: Numeric, boundary_condition: TipBoundaryConditionForFin, temp_tip: Numeric = None ) -> Tuple[Numeric, Numeric]: m = np.sqrt(h * fin_configuration.perimeter / (k * fin_configuration.area_cross_section)) h_mk = h / (m * k) m_l_x = m * (fin_configuration.length - x) m_l = m * fin_configuration.length theta_b = temp_base - temp_surr mm = np.sqrt(h * fin_configuration.perimeter * k * fin_configuration.area_cross_section) * \ theta_b if boundary_condition == TipBoundaryConditionForFin.Convection: denominator = (np.cosh(m_l) + h_mk * np.sinh(m_l)) theta = (np.cosh(m_l_x) + h_mk * np.sinh(m_l_x)) / denominator * theta_b heat_transfer_rate = mm * (np.sinh(m_l) + h_mk * np.cosh(m_l)) / denominator elif boundary_condition == TipBoundaryConditionForFin.Adiabatic: theta = np.cosh(m_l_x) / np.cosh(m_l) * theta_b heat_transfer_rate = mm * np.tanh(m_l) elif boundary_condition == TipBoundaryConditionForFin.Temperature: theta_l_b = (temp_tip - temp_surr) / theta_b theta = (theta_l_b * np.sinh(m * x) + np.sinh(m_l_x)) / np.sinh(m_l) * theta_b heat_transfer_rate = mm * (np.cosh(m_l) - theta_l_b) / np.sinh(m_l) elif 
boundary_condition == TipBoundaryConditionForFin.Long: theta = np.exp(-m * x) * theta_b heat_transfer_rate = mm else: raise TypeError(f'Invalid boundary condition is given: {boundary_condition}') temp = theta + temp_surr return heat_transfer_rate, temp # CoolProp related classes, functions @unique class Fluid(Enum): WATER = auto() AIR = auto() AMMONIA = auto() CO2 = auto() H2 = auto() NITROGEN = auto() OXYGEN = auto() R134A = auto() R143A = auto() R407C = auto() @unique class ThermodynamicState(Enum): PRESSURE = 'P' TEMPERATURE = 'T' SATURATION = 'Q' @unique class Properties(Enum): SPECIFIC_HEAT_CAPACITY_CONSTANT_PRESSURE = 'C' SPECIFIC_HEAT_CAPACITY_CONSTANT_VOLUME = 'CVMASS' GAS_CONSTANT_MOL = 'GAS_CONSTANT' DENSITY = 'D' CRITICAL_TEMPERATURE = 'Tcrit' RELATIVE_HUMIDITY = 'R' SPECIFIC_ENTHALPY = 'H' SPECIFIC_ENTROPY = 'S' SPECIFIC_INTERNAL_ENERGY = 'U' THERMAL_CONDUCTIVITY = 'CONDUCTIVITY' DYNAMIC_VISCOSITY = 'VISCOSITY' EXPANSION_COEFFICIENT_ISOBARIC = 'ISOBARIC_EXPANSION_COEFFICIENT' @unique class NonCircularCylinderGeometry(Enum): SQUARE_FLAT = auto() SQUARE_OBLIQUE = auto() HEXAGON_FLAT = auto() HEXAGON_OBLIQUE = auto() VERTICAL_FLAT_FRONT = auto() VERTICAL_FLAT_BACK = auto() @unique class TubeBankArrangement(Enum): Aligned = auto() Staggered = auto() class TubeBankConfiguration(NamedTuple): arrangement: TubeBankArrangement vertical_spacing: float horizontal_spacing: float number_rows: float number_tubes_each_row: float def get_maximum_velocity(self, velocity: float, diameter: float): if self.arrangement == TubeBankArrangement.Aligned: return self.vertical_spacing / (self.vertical_spacing - diameter) * velocity diagonal_spacing =
np.sqrt(self.horizontal_spacing ** 2 + (self.vertical_spacing / 2) ** 2)
numpy.sqrt
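# Minimal standalone usage sketch of the heat-transfer helpers defined above.
# The conduction-flux and cylinder-resistance formulas are restated locally so
# the snippet runs on its own; the numbers (a 10 mm steel plate, a 1 m length of
# insulated pipe) are hypothetical illustrations, not values from this module.
import numpy as np

def get_heat_flux_by_conduction(temperature_from, temperature_to, thickness, thermal_conductivity):
    # q'' = k * (T_from - T_to) / L  [W/m2], same formula as in the module above
    return thermal_conductivity * (temperature_from - temperature_to) / thickness

def get_thermal_resistance_cylinder(radius, wall_thickness, thermal_conductivity, length):
    # R = ln(r_out / r_in) / (2 * pi * L * k), total resistance of the cylinder wall
    outer_radius = radius + wall_thickness
    return np.log(outer_radius / radius) / (2 * np.pi * length * thermal_conductivity)

q_wall = get_heat_flux_by_conduction(80.0, 20.0, 0.01, 45.0)     # -> 270 000 W/m2
r_pipe = get_thermal_resistance_cylinder(0.05, 0.02, 0.04, 1.0)  # -> ~1.34 K/W
print(q_wall, r_pipe)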
# -*- coding: utf-8 -*- """ Created on Thu May 30 20:03:50 2019 Finds Vg1 and Vg2 values above a threshold, determined by the ratio of the areas of a Gaussian fit of the intensity histogram to the total area of the intensities @author: <NAME> """ import numpy as np import scipy.signal as ss import scipy.optimize as opt from scipy.signal import medfilt2d, savgol_filter from scipy.ndimage import correlate from sklearn.neighbors import KDTree import stability as stab def hist_data(z): """ Finds x and y data from histogram :param z: input :return: x and y """ data = np.histogram(z, bins='scott') x = data[1] x = np.array([(x[i] + x[i + 1]) / 2 for i in range(0, len(x) - 1)]) return x, np.array(data[0]) def gauss(x, *params): return abs(params[2]) * np.exp(-(x - params[0]) ** 2 / (2 * params[1] ** 2)) def multi_gaussian(x, *params): """ Fits multiple Gaussian distributions, number of which determined by the number of parameters inputted """ y = np.zeros_like(x) index = np.arange(0, len(params), 3) if index.size > 1: for i in range(0, len(params) // 3): mu = params[i] sig = params[i + len(params) // 3] amp = params[i + 2 * len(params) // 3] y = y + abs(amp) * np.exp(-(x - mu) ** 2 / (2 * sig ** 2)) else: y = y + abs(params[2]) * np.exp(-(x - params[0]) ** 2 / (2 * params[1] ** 2)) return y def multi_gauss_background(x, *params): y = np.zeros_like(x) index = np.arange(0, len(params) - 2, 3) if index.size > 1: y = y + params[0] * x + params[1] for i in range(0, (len(params) - 2) // 3): mu = params[i + 2] sig = params[i + 2 + (len(params) - 2) // 3] amp = params[i + 2 + 2 * (len(params) - 2) // 3] y = y + abs(amp) * np.exp(-(x - mu) ** 2 / (2 * sig ** 2)) else: y = y + params[0] * x + params[1] + abs(params[4]) * np.exp(-(x - params[2]) ** 2 / (2 * params[3] ** 2)) return y def greedy_guess(guess, x, y): n = (len(guess) - 2) // 3 m, sig, a = guess[2:n + 2], guess[n + 2:2 * n + 2], guess[2 * n + 2:] chi = (y - multi_gauss_background(x, *guess)) / multi_gauss_background(x, *guess) chi = savgol_filter(chi, 3, 2) m, a = np.append(m, float(x[np.where(chi == np.max(chi))])), np.append(a, float(y[np.where(chi == np.max(chi))])) sig = np.append(sig, sig[n - 1] / 2) return np.append(guess[:2], np.append(m, np.append(sig, a))) def gradient(x, y, z): """ Calculates gradient along x and y of intensities to reduce noise @param x: x vales @param y: y values @param z: intensities @return: """ m_z = np.reshape(z, (len(np.unique(y)), len(np.unique(x))))# Transform array into matrix sg = savgol_filter(m_z, 5, 2) + savgol_filter(m_z, 5, 2, axis=0) # Savgol filter acts as a low pass band filter signal = sg - np.mean(sg) + np.mean(m_z) return np.reshape(signal, np.shape(x)) def gradient_exp(x, y, z): """ Calculates gradient along x and y of intensities to reduce noise @param x: x vales @param y: y values @param z: intensities @return: """ m_z = np.reshape(z, (len(np.unique(y)), len(np.unique(x))))# Transform array into matrix diff = [[0, -1, 0], [-1, 5, -1], [0, -1, 0]] z_diff = correlate(m_z, diff) sg = savgol_filter(z_diff, 5, 2) + savgol_filter(z_diff, 5, 2, axis=0) # Savgol filter acts as a low pass band filter signal = sg - np.mean(sg) + np.mean(m_z) return np.reshape(signal, np.shape(x)) def filtering(x, y, z): m_z = np.reshape(z, (len(np.unique(y)), len(np.unique(x)))) # Transform array into matrix s = medfilt2d(m_z) return np.reshape(s, (int(len(x)),)) def normalise(z): """ Unity-based normalisation function, such that all values range between 0 and 1 :param z: Raw data that needs normalising :return: Normalised 
data """ return np.nan_to_num((z - np.min(z)) / (np.max(z) - np.min(z))) def fit_gauss(z): intensity = normalise(z) x, y = hist_data(intensity) guess = np.append(0, np.append(np.median(y), np.append(np.median(x[np.where(y == np.max(y))]), np.append(np.std(x[np.where(y > np.median(y))]), np.max(y))))) fit_param, cov = opt.curve_fit(multi_gauss_background, x, y, guess) if fit_param[2] > 0.5: index = np.where(intensity<fit_param[2]-3*abs(fit_param[3])) else: index = np.where(intensity>fit_param[2]+3*abs(fit_param[3])) return index def curved_plane(x, y, param): return param[0]*x + param[1]*x**2 + param[2]*y + param[3]*y**2 + param[4]*x*y + param[5] def linear_plane(x, y, param): return param[0]*x + param[1]*y + param[2] def minimise_plane(param, x, y, z): return np.sum((z - linear_plane(x, y, param))**2) def linear(x, z): return (np.median(z[np.where(x==np.min(x))])-np.median(z[np.where(x==np.max(x))]))/(np.min(x)-np.max(x)) def remove_background(x, y, z): p = gradient_exp(x, y, z) param = np.array((linear(x, z), linear(y,z), np.median(p))) sol = opt.minimize(minimise_plane, param, args=(x, y, p)) p_n = normalise(p - linear_plane(x, y, sol.x)) return p_n*(np.max(z)-np.min(z)) + np.min(z) def grad_exp(z, val_x, val_y): val = z.reshape(val_y, val_x) scharr = np.array([[ -3-3j, 0-10j, +3 -3j], [-10+0j, 0+ 0j, +10 +0j], [ -3+3j, 0+10j, +3 +3j]]) # Gx + j*Gy grad = ss.convolve2d(val, scharr, boundary='symm', mode='same') index = np.where(np.logical_or(abs(np.angle(grad).flatten())<=0.15, abs(np.angle(grad).flatten())>=np.pi-0.15)) z[index] = 0 return z def get_klpq_div(p_probs, q_probs): # Calcualtes the Kullback-Leibler divergence between pi and qi kl_div = 0.0 for pi, qi in zip(p_probs, q_probs): kl_div += pi*np.nan_to_num(np.log(pi/qi)) return kl_div def D_KL(threshold, x, y): # Finds best fit Gaussian distribution and calculates the corresponding Kullback-Leibler divergence index = np.where(np.logical_and(x>=threshold[0], x<=threshold[1])) xs, ys = x[index], y[index] if np.trapz(ys)>0: ys = ys/np.trapz(ys) else: return np.inf guess = np.append(np.median(xs[np.where(ys == np.max(ys))]), np.append(np.std(xs[np.where(ys > np.median(ys))]), np.max(ys))) bounds = ((np.min(x)-np.std(x), np.std(x)/10**4, np.mean(ys)), (np.max(x)+np.std(x), np.max(x)-np.min(x), 10*np.max(ys))) fit_param, cov = opt.curve_fit(gauss, xs, ys, guess, bounds=bounds) return get_klpq_div(ys+10**-7, gauss(xs, *fit_param)+10**-7) # Add small epsilon to ensure that we donn't devide by zero def minimise_DKL(x, y): # Estimate first guess and boundaries to use: guess = np.append(np.median(x[np.where(y == np.max(y))]), np.append(np.std(x[np.where(y > np.median(y))]), np.max(y))) b = ((np.min(x)-np.std(x), np.std(x)/10**4, np.mean(y)), (np.max(x)+np.std(x), np.max(x)-np.min(x), np.max(y)*10)) fit_param, cov = opt.curve_fit(gauss, x, y, guess, bounds=b) x0 = [fit_param[0]-2*fit_param[1], fit_param[0]+2*fit_param[1]] bound = ((np.min(x), fit_param[0]-fit_param[1]), (fit_param[0]+fit_param[1], np.max(x))) # Find optimal bound solutions sol = opt.minimize(D_KL, x0, jac=None, method='L-BFGS-B', options={'eps':1/len(x)}, args=(x, y), bounds=bound) return sol.x def threshold_DKL(z): intensity = normalise(z) x, y = hist_data(intensity) y = y**0.5 # Broadens peak to allow to identify finer structure in the intensity threshold = minimise_DKL(x, y) if abs(np.max(z))>abs(np.min(z)): index = np.where(intensity>=threshold[1]) else: index = np.where(intensity<=threshold[0]) return index def threshold(z, val): if abs(np.max(z))>abs(np.min(z)): v = 
abs(np.min(z))*0.9 else: v = -abs(np.max(z))*0.9 val = np.append(val, v) v = np.mean(abs(val)) m = np.where(np.logical_or(z > v, z < -v)) return m, val def intense(z, index): x, y = hist_data(z) guess = np.append(np.median(x[np.where(y == np.max(y))]), np.append(np.std(x[np.where(y > np.median(y))]), np.max(y))) fit_param, cov = opt.curve_fit(gauss, x, y, guess) return z[index]-fit_param[0] def threshold_experimental(vg1, vg2, i, q): i_g, q_g = remove_background(vg1, vg2, i), remove_background(vg1, vg2, q) m_i, m_q = threshold_DKL(i_g), threshold_DKL(q_g) index = np.unique(np.append(m_i, m_q)) intensity = normalise(abs(intense(i, index)))+normalise(abs(intense(q, index))) return vg1[index], vg2[index], intensity, i_g, q_g, index def threshold_theoretical(vg1, vg2, i): i_g = gradient(vg1, vg2, i) x, y = hist_data(i_g) x = normalise(x) fit_param = [np.median(x[np.where(y == np.max(y))]), np.std(x[np.where(y > np.median(y))]), np.max(y)] try: fit_one, _ = opt.curve_fit(multi_gaussian, x, y, fit_param) ind = np.where(x > fit_one[0] + fit_one[1]) ys = y[ind] - multi_gaussian(x[ind], *fit_one) guess = [fit_one[0], np.median(x[ind][np.where(ys == np.max(ys))]), fit_one[1], np.std(x[np.where(y > np.median(ys))]), fit_one[2], np.max(ys)] try: fit_param, cov = opt.curve_fit(multi_gaussian, x, y, guess) error = np.sqrt(np.diag(cov)) if error[1] * 10 > error[0]: index = np.where(normalise(i) > fit_param[1]) else: index = np.where(normalise(i) > 0.4) except: val = np.min(x[np.where(x > fit_one[0] + fit_one[1])]) index = np.where(normalise(i) > val) except: index = np.where(normalise(i) > 0.4) return vg1[index], vg2[index], i[index], x, y, fit_param def averaging_xy(x, y, intensity, leaf, n_neighbours): """ Uses KDTree to find n_neighbours and then calculates a weighted mean, resulting in thinning the data :param x: threshold x values :param y: threshold y values :param intensity: corresponding intensities :param leaf: determines how many neighbouring points to check, leaf > n_neighbours :param n_neighbours: number of neighbours to average through :return: thinned x and y values """ data = np.transpose(np.vstack([x, y])) xs, ys, zs = [], [], [] tree = KDTree(data, leaf_size=leaf) # Finds relation between points for i in range(0, len(data)):# // n_neighbours): # Figure out which are the neighbouring points # dist, ind = tree.query(np.reshape(data[i * n_neighbours, :], (1, -1)), k=n_neighbours) dist, ind = tree.query(np.reshape(data[i, :], (1, -1)), k=n_neighbours) # takes weighted average of x and y values of given point x_m, y_m = np.average(x[ind], weights=intensity[ind]), np.average(y[ind], weights=intensity[ind]) z_m = np.average(intensity[ind]) xs, ys, zs = np.append(xs, x_m), np.append(ys, y_m), np.append(zs, z_m) return xs, ys, zs def thinning(Vg1, Vg2, i_g, q_g, ind): val_x, val_y = len(np.unique(Vg1)), len(np.unique(Vg2)) # Set data points below threshold to zero M = np.sqrt(i_g**2+q_g**2) mask = np.ones(M.shape,dtype=bool) mask[ind] = False M[mask] = 0 M = grad_exp(M, val_x, val_y) # Find peaks along x if val_x > 100: peaks, hight = ss.find_peaks(M, width=1, distance=val_x//100) else: peaks, hight = ss.find_peaks(M, width=1) xs, ys, zs = Vg1[peaks], Vg2[peaks], M[peaks] # Find peaks along y xt = np.reshape(np.transpose(np.reshape(Vg1, (val_y, val_x))), np.shape(Vg1)) yt = np.reshape(np.transpose(np.reshape(Vg2, (val_y, val_x))), np.shape(Vg2)) Mt = np.reshape(np.transpose(np.reshape(M, (val_y, val_x))), np.shape(M)) if val_y > 100: peaks, hight = ss.find_peaks(Mt, width=1, distance=val_y//100) 
else: peaks, hight = ss.find_peaks(Mt, width=1) # add peaks from both directions xs, ys, zs = np.append(xs, xt[peaks]), np.append(ys, yt[peaks]), np.append(zs, Mt[peaks]) # xs, ys, zs = averaging_xy(xs, ys, zs, 100, 10) return xs, ys, zs def thinning_IQ(vg1, vg2, z, val_x): x, y = hist_data(z) y = y**0.5 # Broadens peak to allow to identify finer structure in the intensity threshold = minimise_DKL(x, y) if abs(np.max(z))>abs(
np.min(z)
numpy.min
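# Simplified, self-contained sketch of the thresholding idea used in the script
# above: histogram the intensities, fit a single Gaussian to the dominant
# (background) peak and keep points lying more than 3 sigma away from it. The
# data below are synthetic; the real module refines this with multi-Gaussian
# fits, background-plane removal and a Kullback-Leibler-divergence criterion.
import numpy as np
import scipy.optimize as opt

def gauss(x, mu, sig, amp):
    return abs(amp) * np.exp(-(x - mu) ** 2 / (2 * sig ** 2))

rng = np.random.default_rng(0)
z = np.concatenate([rng.normal(0.3, 0.05, 5000),   # broad background peak
                    rng.normal(0.8, 0.03, 300)])   # small bright feature

counts, edges = np.histogram(z, bins='scott')
centres = (edges[:-1] + edges[1:]) / 2

guess = [centres[np.argmax(counts)], np.std(z), counts.max()]
(mu, sig, amp), _ = opt.curve_fit(gauss, centres, counts, p0=guess)

index = np.where(z > mu + 3 * abs(sig))            # points above the background peak
print(len(index[0]), 'points above threshold')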
######################################################################################################### # This is a script for computing the number of voronoi edges shared by different lipids in a ternary # mixture of DPPC/DSPC, DIPC, and CHOL in the MARTINI coarse-grained model. #It can be used as well for atomistic simulations # # This script uses the (x,y) coordinates of lipid head groups to perform Voronoi tessellation # The outputs from this script are: # (1) counts of voronoi edges between lipids in each leaflet # (2) tertiary (lipid1 + lipid2 + lipid3) mixing entropy and binary (lipid1 + lipid2) mixing entropy, ignoring lipid3 (CHOL) # (3) the ratio of #CHOL-DPPC / #CHOL-DIPC Voronoi edges # # The selection of lipids can be changed by changing the definitions of lipid[1-3] and sel[1-3] # The system topology and trajectory can be set by changing the definitions of top and traj # # Usage is: ternary_lipid_voronoi_edges.py <side> # where <side> is "up" or "down", analyzing either the upper or lower leaflet of a MARTINI DPPC:DIPC:CHOL# # membrane using induces of the membrane as if the system was built using the insane.py tool ######################################################################################################### import sys import numpy as np from MDAnalysis import * import MDAnalysis import MDAnalysis.lib.distances import time import numpy.linalg import scipy.stats import matplotlib.pyplot as plt import math import MDAnalysis.lib.NeighborSearch as NS from scipy.spatial import Voronoi, voronoi_plot_2d import multiprocessing as mp print ('Initiating Voroni Tesselation') top = '../trajectory_every1ns_from7.5us.gro' traj = '../trajectory_every100ns_from7.5us.xtc' side = sys.argv[1] # "up" for upper leaflet "down" for lower leaflet u = MDAnalysis.Universe(top,traj) #Lipid Residue names lipid1 ='DSPC' lipid2 ='DLIPC' lipid3 ='CHL' # Atom selections sel1 = 'resname %s and name P'%lipid1 sel2 = 'resname %s and name P'%lipid2 sel3 = 'resname %s and name O2'%lipid3 # Identify number of residues in each lipid and extract only he top residues (for now) #Frames to be calculated end_f = u.trajectory.n_frames print (end_f) start_f = 0 skip = 1 # Number of processors to use in multiprocessing nprocs = 1 frames = np.arange(start_f, end_f)[::skip] n_frames = len(frames) ###################################################################################### # Make empty arrays to hold contact counts ens_Lpd1_Lpd1 = np.zeros(n_frames) ens_Lpd2_Lpd2 = np.zeros(n_frames) ens_Lpd3_Lpd3 = np.zeros(n_frames) ens_Lpd1_Lpd2 = np.zeros(n_frames) ens_Lpd1_Lpd3 = np.zeros(n_frames) ens_Lpd2_Lpd3 = np.zeros(n_frames) ens_sum_bonds = np.zeros(n_frames) ens_avg_bonds = np.zeros(n_frames) ens_mix_entropy = np.zeros(n_frames) ###################################################################################### def voronoi_tessel(ts): # set the time step print ('Frame %i in %i'%(ts, end_f)) u = MDAnalysis.Universe(top,traj) u.trajectory[0] # Select atoms within this particular frame lpd1_atoms = u.select_atoms(sel1) lpd2_atoms = u.select_atoms(sel2) lpd3_atoms = u.select_atoms(sel3) zmean = np.mean(np.concatenate([lpd1_atoms.positions[:,2], lpd2_atoms.positions[:,2], lpd3_atoms.positions[:,2]])) num_lpd1 = lpd1_atoms.n_atoms num_lpd2 = lpd2_atoms.n_atoms # select atoms in the upper leaflet using positions in the first frame of simulation # select cholesterol headgroups within 1.5 nm of lipid headgroups in the selected leaflet in the current frame if side == 'up': lpd1i=[] zpos = 
lpd1_atoms.positions[:,2] for i in range(num_lpd1): if zpos[i] > zmean: lpd1i.append(lpd1_atoms[i]) lpd1i = np.sum(np.array(lpd1i)) lpd2i=[] zpos = lpd2_atoms.positions[:,2] for i in range(num_lpd2): if zpos[i] > zmean: lpd2i.append(lpd2_atoms[i]) lpd2i = np.sum(np.array(lpd2i)) lipids = lpd1i + lpd2i u.trajectory[ts] # now move to the frame currently being analyzed ns_lipids = NS.AtomNeighborSearch(lpd3_atoms) lpd3i = ns_lipids.search(lipids,20.0) elif side == 'down': lpd1i=[] zpos = lpd1_atoms.positions[:,2] for i in range(num_lpd1): if zpos[i] < zmean: lpd1i.append(lpd1_atoms[i]) lpd1i = np.sum(np.array(lpd1i)) lpd2i=[] zpos = lpd2_atoms.positions[:,2] for i in range(num_lpd2): if zpos[i] < zmean: lpd2i.append(lpd2_atoms[i]) lpd2i = np.sum(np.array(lpd2i)) lipids = lpd1i + lpd2i u.trajectory[ts] # now move to the frame currently being analyzed ns_lipids = NS.AtomNeighborSearch(lpd3_atoms) lpd3i = ns_lipids.search(lipids,20.0) lpd_atms = lpd1i + lpd2i + lpd3i #Extracting the coordinates Pxyz = lpd_atms.positions Pxy = [] for l in range(0,len(Pxyz)) : Pxy.append([Pxyz[l][0],Pxyz[l][1]]) #Extracting xy coordinates and residue names atm_list = [] for a in range(0, len(Pxyz)): atm_list.append([Pxyz[a][0],Pxyz[a][1],lpd_atms[a].resname]) #Introducing PBC x_box = u.dimensions[0] y_box = u.dimensions[1] xplus = [] xminus = [] xyplus = [] xyminus = [] for atm in range(0 ,len(atm_list)): xplus.append([atm_list[atm][0]+x_box,atm_list[atm][1],atm_list[atm][2]]) xminus.append([atm_list[atm][0]-x_box,atm_list[atm][1],atm_list[atm][2]]) atm_list_px = atm_list + xplus + xminus for atm in range(0 ,len(atm_list_px)): xyplus.append([atm_list_px[atm][0],atm_list_px[atm][1]+y_box,atm_list_px[atm][2]]) xyminus.append([atm_list_px[atm][0],atm_list_px[atm][1]-y_box,atm_list_px[atm][2]]) atm_list_p = atm_list_px + xyplus + xyminus atm_xy = [] for i in range(0,len(atm_list_p)) : atm_xy.append([atm_list_p[i][0],atm_list_p[i][1]]) vor = Voronoi(atm_xy) vor_s = Voronoi(Pxy) vertices = vor.vertices ridge_points = vor.ridge_points Lpd1_Lpd1_I = 0 Lpd2_Lpd2_I = 0 Lpd3_Lpd3_I = 0 Lpd1_Lpd2_I = 0 Lpd1_Lpd3_I = 0 Lpd2_Lpd3_I = 0 Lpd1_Lpd1_E = 0 Lpd2_Lpd2_E = 0 Lpd3_Lpd3_E = 0 Lpd1_Lpd2_E = 0 Lpd1_Lpd3_E = 0 Lpd2_Lpd3_E = 0 r_length = len(ridge_points) for k in range (0,r_length) : ridge_k = ridge_points[k] Li = atm_list_p[int(ridge_k[0])] Lj = atm_list_p[int(ridge_k[1])] #Lipids INSIDE the box if 0 < Li[0] < x_box and 0 < Li[1] < y_box and 0 < Lj[0] < x_box and 0 < Lj[1] < y_box : if Li[2] == lipid1 and Lj[2] == lipid1: Lpd1_Lpd1_I = Lpd1_Lpd1_I + 1 if Li[2] == lipid2 and Lj[2] == lipid2: Lpd2_Lpd2_I = Lpd2_Lpd2_I + 1 if Li[2] == lipid3 and Lj[2] == lipid3: Lpd3_Lpd3_I = Lpd3_Lpd3_I + 1 if Li[2] == lipid1 and Lj[2] == lipid2: Lpd1_Lpd2_I = Lpd1_Lpd2_I + 1 if Li[2] == lipid2 and Lj[2] == lipid1: Lpd1_Lpd2_I = Lpd1_Lpd2_I + 1 if Li[2] == lipid1 and Lj[2] == lipid3: Lpd1_Lpd3_I = Lpd1_Lpd3_I + 1 if Li[2] == lipid3 and Lj[2] == lipid1: Lpd1_Lpd3_I = Lpd1_Lpd3_I + 1 if Li[2] == lipid2 and Lj[2] == lipid3: Lpd2_Lpd3_I = Lpd2_Lpd3_I + 1 if Li[2] == lipid3 and Lj[2] == lipid2: Lpd2_Lpd3_I = Lpd2_Lpd3_I + 1 #Lipids at the EDGE of the box if 0 <= Li[0] < x_box and 0 <= Li[1] < y_box or 0 <= Lj[0] < x_box and 0 <= Lj[1] < y_box : if Li[2] == lipid1 and Lj[2] == lipid1: Lpd1_Lpd1_E = Lpd1_Lpd1_E + 1 if Li[2] == lipid2 and Lj[2] == lipid2: Lpd2_Lpd2_E = Lpd2_Lpd2_E + 1 if Li[2] == lipid3 and Lj[2] == lipid3: Lpd3_Lpd3_E = Lpd3_Lpd3_E + 1 if Li[2] == lipid1 and Lj[2] == lipid2: Lpd1_Lpd2_E = Lpd1_Lpd2_E + 1 if Li[2] == lipid2 and 
Lj[2] == lipid1: Lpd1_Lpd2_E = Lpd1_Lpd2_E + 1 if Li[2] == lipid1 and Lj[2] == lipid3: Lpd1_Lpd3_E = Lpd1_Lpd3_E + 1 if Li[2] == lipid3 and Lj[2] == lipid1: Lpd1_Lpd3_E = Lpd1_Lpd3_E + 1 if Li[2] == lipid2 and Lj[2] == lipid3: Lpd2_Lpd3_E = Lpd2_Lpd3_E + 1 if Li[2] == lipid3 and Lj[2] == lipid2: Lpd2_Lpd3_E = Lpd2_Lpd3_E + 1 #Total = LipidsInside + (Lipids including EDGES - Lipids Inside)/2 -----> Correction for over counting the lipids in periodic images Lpd1_Lpd1 = Lpd1_Lpd1_I + (Lpd1_Lpd1_E - Lpd1_Lpd1_I)/2 Lpd2_Lpd2 = Lpd2_Lpd2_I + (Lpd2_Lpd2_E - Lpd2_Lpd2_I)/2 Lpd3_Lpd3 = Lpd3_Lpd3_I + (Lpd3_Lpd3_E - Lpd3_Lpd3_I)/2 Lpd1_Lpd2 = Lpd1_Lpd2_I + (Lpd1_Lpd2_E - Lpd1_Lpd2_I)/2 Lpd1_Lpd3 = Lpd1_Lpd3_I + (Lpd1_Lpd3_E - Lpd1_Lpd3_I)/2 Lpd2_Lpd3 = Lpd2_Lpd3_I + (Lpd2_Lpd3_E - Lpd2_Lpd3_I)/2 sum_bonds = Lpd1_Lpd1 + Lpd2_Lpd2 + Lpd3_Lpd3 + Lpd1_Lpd2 + Lpd1_Lpd3 + Lpd2_Lpd3 #Considering only Similar Lipid (SL) and Dissimilar Lipid (DL) Bonds SL = Lpd1_Lpd1 + Lpd2_Lpd2 + Lpd3_Lpd3 DL = Lpd1_Lpd2 + Lpd1_Lpd3 + Lpd2_Lpd3 #Calculating Fractions X_SL = float(SL)/float(sum_bonds) #Similar Lipid X_DL = float(DL)/float(sum_bonds) #Dissimilar Lipid #Mixing Entropy mix_entropy = -(X_SL * np.log(X_SL)) +( X_DL * np.log(X_DL)) #Calculating Averages sum_bonds = Lpd1_Lpd1 + Lpd1_Lpd2 + Lpd2_Lpd2 avg_bonds = float(sum_bonds)/float(len(atm_list)) return Lpd1_Lpd1, Lpd2_Lpd2, Lpd3_Lpd3, Lpd1_Lpd2, Lpd1_Lpd3, Lpd2_Lpd3, sum_bonds, avg_bonds, mix_entropy pool = mp.Pool(processes=nprocs) print ('Initiating multiprocessing with %i processors'%nprocs) results = pool.map(voronoi_tessel, frames) ens_Lpd1_Lpd1_np = [] ens_Lpd2_Lpd2_np = [] ens_Lpd3_Lpd3_np = [] ens_Lpd1_Lpd2_np = [] ens_Lpd1_Lpd3_np = [] ens_Lpd2_Lpd3_np = [] ens_sum_bonds_np = [] ens_avg_bonds_np = [] ens_mix_entropy_np = [] for i in range(n_frames): ens_Lpd1_Lpd1_np.append(results[i][0]) ens_Lpd2_Lpd2_np.append(results[i][1]) ens_Lpd3_Lpd3_np.append(results[i][2]) ens_Lpd1_Lpd2_np.append(results[i][3]) ens_Lpd1_Lpd3_np.append(results[i][4]) ens_Lpd2_Lpd3_np.append(results[i][5]) ens_sum_bonds_np.append(results[i][6]) ens_avg_bonds_np.append(results[i][7]) ens_mix_entropy_np.append(results[i][8]) ens_Lpd1_Lpd1_np = np.asarray(ens_Lpd1_Lpd1_np) ens_Lpd2_Lpd2_np = np.asarray(ens_Lpd2_Lpd2_np) ens_Lpd3_Lpd3_np = np.asarray(ens_Lpd3_Lpd3_np) ens_Lpd1_Lpd2_np = np.asarray(ens_Lpd1_Lpd2_np) ens_Lpd1_Lpd3_np = np.asarray(ens_Lpd1_Lpd3_np) ens_Lpd2_Lpd3_np = np.asarray(ens_Lpd2_Lpd3_np) ens_sum_bonds_np = np.asarray(ens_sum_bonds_np) ens_avg_bonds_np = np.asarray(ens_avg_bonds_np) ens_mix_entropy_np = np.asarray(ens_mix_entropy_np) # Define output file names if side == "up": Lpd1_Lpd1_fn = 'upper_Lpd1_Lpd1.dat' Lpd2_Lpd2_fn = 'upper_Lpd2_Lpd2.dat' Lpd3_Lpd3_fn = 'upper_Lpd3_Lpd3.dat' Lpd1_Lpd2_fn = 'upper_Lpd1_Lpd2.dat' Lpd1_Lpd3_fn = 'upper_Lpd1_Lpd3.dat' Lpd2_Lpd3_fn = 'upper_Lpd2_Lpd3.dat' sum_bonds_fn = 'upper_sum_bonds.dat' avg_bonds_fn = 'upper_avg_bonds.dat' mix_entropy_fn = 'upper_mix_entropy.dat' elif side == "down": Lpd1_Lpd1_fn = 'lower_Lpd1_Lpd1.dat' Lpd2_Lpd2_fn = 'lower_Lpd2_Lpd2.dat' Lpd3_Lpd3_fn = 'lower_Lpd3_Lpd3.dat' Lpd1_Lpd2_fn = 'lower_Lpd1_Lpd2.dat' Lpd1_Lpd3_fn = 'lower_Lpd1_Lpd3.dat' Lpd2_Lpd3_fn = 'lower_Lpd2_Lpd3.dat' sum_bonds_fn = 'lower_sum_bonds.dat' avg_bonds_fn = 'lower_avg_bonds.dat' mix_entropy_fn = 'lower_mix_entropy.dat' #Writing Outputs
np.savetxt(Lpd1_Lpd1_fn,ens_Lpd1_Lpd1_np)
numpy.savetxt
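# Stripped-down sketch of the Voronoi edge counting performed above: label a set
# of 2D points by lipid type and tally each ridge (shared Voronoi edge) by the
# pair of labels it connects. The periodic-image padding, leaflet selection and
# edge over-counting correction of the full script are deliberately omitted here.
import numpy as np
from collections import Counter
from scipy.spatial import Voronoi

rng = np.random.default_rng(1)
points = rng.uniform(0.0, 100.0, size=(60, 2))          # stand-in for head-group (x, y) positions
labels = rng.choice(['DSPC', 'DLIPC', 'CHL'], size=60)  # stand-in for residue names

vor = Voronoi(points)
pair_counts = Counter()
for i, j in vor.ridge_points:    # each ridge separates exactly two input points
    pair_counts[tuple(sorted((labels[i], labels[j])))] += 1

print(pair_counts)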
import numpy as np import random from abc import abstractmethod, ABC from scipy import signal class Task(ABC): """ Task class. This allows to select various types of tasks while sharing the same base structure. Author: <NAME> """ def __init__(self): self.time_v = None self.state_indices = {'p': 0, 'q': 1, 'r': 2, 'V': 3, 'alpha': 4, 'beta': 5, 'phi': 6, 'theta': 7, 'psi': 8, 'h': 9, 'x': 10, 'y': 11} self.obs_indices = None self.track_signals = None self.track_indices = [] self.signals = {} self.agent_catalog = self.get_agent_catalog() return def organize_indices(self, signals, obs_indices): track_signals = np.zeros(self.time_v.shape[0]) track_indices = [] for state in signals: if signals[state].shape[0] != self.time_v.shape[0]: signals[state] = np.append(signals[state], signals[state][-1]) track_signals = np.vstack([track_signals, signals[state]]) track_indices.append(int(self.state_indices[state])) track_signals = track_signals[1:] obs_indices = track_indices + obs_indices return track_signals, track_indices, obs_indices def choose_task(self, evaluation, failure, FDD): if failure[0] is not 'normal': if evaluation: if FDD: task_fun = self.get_task_eval_FDD else: task_fun = self.get_task_eval_fail else: task_fun = self.get_task_tr_fail else: if evaluation: task_fun = self.get_task_eval else: task_fun = self.get_task_tr return task_fun, evaluation, FDD @abstractmethod def get_agent_catalog(self): catalog = {'normal': None, 'elev_range': None, 'aileron_eff': None, 'rudder_stuck': None, 'horz_tail': None, 'vert_tail': None, 'icing': None, 'cg_shift': None} return catalog @abstractmethod def get_task_tr(self): self.time_v: np.ndarray = np.arange(0, 20, 0.01) pass @abstractmethod def get_task_eval(self): self.time_v: np.ndarray = np.arange(0, 80, 0.01) pass @abstractmethod def get_task_tr_fail(self): pass @abstractmethod def get_task_eval_fail(self): self.time_v: np.ndarray = np.arange(0, 70, 0.01) pass @abstractmethod def get_task_eval_FDD(self): self.time_v: np.ndarray = np.arange(0, 120, 0.01) pass @abstractmethod def return_signals(self): pass class BodyRateTask(Task): def get_agent_catalog(self): catalog = super(BodyRateTask, self).get_agent_catalog() catalog['normal'] = 'body_rates_RG2SG4' return catalog def get_task_tr(self): super(BodyRateTask, self).get_task_tr() self.signals['p'] = np.hstack([np.zeros(int(2.5 * self.time_v.shape[0] / self.time_v[-1].round())), 5 * np.sin(self.time_v[:int(self.time_v.shape[0] * 3 / 4)] * 0.2 * np.pi * 2), # 5 * np.sin(time_v[:int(time_v.shape[0] / 4)] * 3.5 * np.pi * 0.2), # -5 * np.ones(int(2.5 * time_v.shape[0] / time_v[-1].round())), # 5 * np.ones(int(2.5 * time_v.shape[0] / time_v[-1].round())), # np.zeros(int(2.5 * time_v.shape[0] / time_v[-1].round())), ]) self.signals['q'] = np.hstack([5 * np.sin(self.time_v[:int(self.time_v.shape[0] * 3 / 4)] * 0.2 * np.pi * 2), # 5 * np.sin(time_v[:int(time_v.shape[0] / 4)] * 3.5 * np.pi * 0.2), # -5 * np.ones(int(2.5 * time_v.shape[0] / time_v[-1].round())), # 5 * np.ones(int(2.5 * time_v.shape[0] / time_v[-1].round())), np.zeros(int(2.5 * self.time_v.shape[0] / self.time_v[-1].round())), ]) return self.return_signals() def get_task_eval(self): super(BodyRateTask, self).get_task_eval() self.signals['p'] = np.hstack([np.zeros(int(2.5 * self.time_v.shape[0] / self.time_v[-1].round())), 5 * np.sin(self.time_v[:int(self.time_v.shape[0] * 3 / 4)] * 0.2 * np.pi * 2), # 5 * np.sin(time_v[:int(time_v.shape[0] / 4)] * 3.5 * np.pi * 0.2), # -5 * np.ones(int(2.5 * time_v.shape[0] / time_v[-1].round())), # 5 * 
np.ones(int(2.5 * time_v.shape[0] / time_v[-1].round())), # np.zeros(int(2.5 * time_v.shape[0] / time_v[-1].round())), ]) self.signals['q'] = np.hstack([5 * np.sin(self.time_v[:int(self.time_v.shape[0] * 3 / 4)] * 0.2 * np.pi * 2), # 5 * np.sin(time_v[:int(time_v.shape[0] / 4)] * 3.5 * np.pi * 0.2), # -5 * np.ones(int(2.5 * time_v.shape[0] / time_v[-1].round())), # 5 * np.ones(int(2.5 * time_v.shape[0] / time_v[-1].round())), np.zeros(int(2.5 * self.time_v.shape[0] / self.time_v[-1].round())), ]) return self.return_signals() def get_task_tr_fail(self): raise NotImplementedError def get_task_eval_fail(self): raise NotImplementedError def get_task_eval_FDD(self): raise NotImplementedError def return_signals(self): self.signals['beta'] = np.zeros(int(self.time_v.shape[0])) self.obs_indices = [self.state_indices['r']] self.track_signals, self.track_indices, self.obs_indices = self.organize_indices(self.signals, self.obs_indices) return self.track_signals, self.track_indices, self.obs_indices, self.time_v, 'body_rates' couple = ['PZ5QGW', 'GT0PLE'] # couple = ['XQ2G4Q', 'GT0PLE'] # couple = ['PZ5QGW', '9VZ5VE'] class AttitudeTask(Task): def get_agent_catalog(self): catalog = super(AttitudeTask, self).get_agent_catalog() catalog['normal'] = '3attitude_step_' + couple[1] catalog['elev_range'] = '3attitude_step_Q4N8GV_de' catalog['aileron_eff'] = '3attitude_step_E919SW_da' catalog['rudder_stuck'] = '3attitude_step_HNAKCC_dr' catalog['horz_tail'] = '3attitude_step_R0EV0U_ht' catalog['vert_tail'] = '3attitude_step_2KGDYQ_vt' catalog['icing'] = '3attitude_step_9MUWUB_ice' catalog['cg_shift'] = '3attitude_step_5K6QFG_cg' return catalog def get_task_tr(self, init_alt=2000): super(AttitudeTask, self).get_task_tr() angle_theta = random.choice([20, 15, -20, -15]) time_v = self.time_v self.signals['theta'] = np.hstack( [angle_theta * np.sin(time_v[:np.argwhere(time_v == 1.5)[0, 0]] * 0.16 * np.pi * 2), angle_theta * np.ones(int(3.5 * time_v.shape[0] / time_v[-1].round())), angle_theta * np.cos(time_v[:np.argwhere(time_v == 0.5)[0, 0]] * 0.33 * np.pi * 2), angle_theta / 2 * np.ones(int(4. 
* time_v.shape[0] / time_v[-1].round())), angle_theta / 2 * np.cos(time_v[:np.argwhere(time_v == 0.5)[0, 0]] * 0.47 * np.pi * 2), -angle_theta * np.sin(time_v[:np.argwhere(time_v == 1.5)[0, 0]] * 0.17 * np.pi * 2), -angle_theta * np.ones(int(3.5 * time_v.shape[0] / time_v[-1].round())), -angle_theta * np.cos(time_v[:np.argwhere(time_v == 0.5)[0, 0]] * 0.33 * np.pi * 2), -angle_theta / 2 * np.ones(int(4.5 * time_v.shape[0] / time_v[-1].round())), ]) angle_phi = random.choice([45, 35, 25, -45, -35, -25]) self.signals['phi'] = np.hstack([np.zeros(int(2 * time_v.shape[0] / time_v[-1].round())), angle_phi * np.sin(time_v[:np.argwhere(time_v == 1)[0, 0]] * 0.25 * np.pi * 2), angle_phi * np.ones(int(4 * time_v.shape[0] / time_v[-1].round())), angle_phi * np.cos(time_v[:np.argwhere(time_v == 1)[0, 0]] * 0.25 * np.pi * 2), np.zeros(int(2 * time_v.shape[0] / time_v[-1].round())), np.zeros(int(2 * time_v.shape[0] / time_v[-1].round())), -angle_phi * np.sin( time_v[:np.argwhere(time_v == 1)[0, 0]] * 0.25 * np.pi * 2), -angle_phi * np.ones(int(4 * time_v.shape[0] / time_v[-1].round())), -angle_phi * np.cos( time_v[:np.argwhere(time_v == 1)[0, 0]] * 0.25 * np.pi * 2), np.zeros(int(2 * time_v.shape[0] / time_v[-1].round())), ]) return self.return_signals() def get_task_eval(self, init_alt=2000): super(AttitudeTask, self).get_task_eval() time_v = self.time_v self.signals['theta'] = np.hstack([20 * np.sin(time_v[:np.argwhere(time_v == 5.0)[0, 0]] * 0.05 * np.pi * 2), 20 * np.ones(int(8 * time_v.shape[0] / time_v[-1].round())), 20 * np.cos(time_v[:np.argwhere(time_v == 2.0)[0, 0]] * 0.08 * np.pi * 2), 10 * np.ones(int(15 * time_v.shape[0] / time_v[-1].round())), 10 * np.cos(time_v[:np.argwhere(time_v == 2.0)[0, 0]] * 0.12 * np.pi * 2), 0 * np.ones(int(25 * time_v.shape[0] / time_v[-1].round())), -15 * np.sin(time_v[:np.argwhere(time_v == 2)[0, 0]] * 0.13 * np.pi * 2), -15 * np.ones(int(12 * time_v.shape[0] / time_v[-1].round())), -15 * np.cos(time_v[:np.argwhere(time_v == 4)[0, 0]] * 0.06 * np.pi * 2), 0 * np.ones(int(5 * time_v.shape[0] / time_v[-1].round())), ]) sign = 1 self.signals['phi'] = np.hstack([0 * np.ones(int(5 * time_v.shape[0] / time_v[-1].round())), sign * 45 * np.sin( time_v[:np.argwhere(time_v == 2.0)[0, 0]] * 0.13 * np.pi * 2), sign * 45 * np.ones(int(9 * time_v.shape[0] / time_v[-1].round())), sign * 45 * np.cos(time_v[:np.argwhere(time_v == 2)[0, 0]] * 0.12 * np.pi * 2), 0 * np.ones(int(4 * time_v.shape[0] / time_v[-1].round())), -sign * 30 * np.sin( time_v[:np.argwhere(time_v == 2)[0, 0]] * 0.13 * np.pi * 2), -sign * 30 * np.ones(int(4 * time_v.shape[0] / time_v[-1].round())), -sign * 30 * np.cos( time_v[:np.argwhere(time_v == 2.0)[0, 0]] * 0.12 * np.pi * 2), 0 * np.ones(int(5 * time_v.shape[0] / time_v[-1].round())), sign * 70 * np.sin( time_v[:np.argwhere(time_v == 5.5)[0, 0]] * 0.05 * np.pi * 2), sign * 70 * np.ones(int(8 * time_v.shape[0] / time_v[-1].round())), sign * 70 * np.cos( time_v[:np.argwhere(time_v == 5.5)[0, 0]] * 0.04 * np.pi * 2), 0 * np.ones(int(6 * time_v.shape[0] / time_v[-1].round())), -sign * 35 * np.sin( time_v[:np.argwhere(time_v == 1.5)[0, 0]] * 0.16 * np.pi * 2), -sign * 35 * np.ones(int(9 * time_v.shape[0] / time_v[-1].round())), -sign * 35 * np.cos( time_v[:np.argwhere(time_v == 1.5)[0, 0]] * 0.16 * np.pi * 2), 0 * np.ones(int(8 * time_v.shape[0] / time_v[-1].round())), ]) return self.return_signals() def get_task_tr_fail(self, theta_angle=15, init_alt=2000): super(AttitudeTask, self).get_task_tr() time_v = self.time_v angle_theta = random.choice([1, -1]) * 
theta_angle self.signals['theta'] = np.hstack( [angle_theta * np.sin(time_v[:np.argwhere(time_v == 1.5)[0, 0]] * 0.16 * np.pi * 2), angle_theta * np.ones(int(6 * time_v.shape[0] / time_v[-1].round())), angle_theta * np.cos(time_v[:np.argwhere(time_v == 1.5)[0, 0]] * 0.16 * np.pi * 2), -angle_theta * np.sin(time_v[:np.argwhere(time_v == 1.5)[0, 0]] * 0.17 * np.pi * 2), -angle_theta * np.ones(int(5.5 * time_v.shape[0] / time_v[-1].round())), -angle_theta * np.cos(time_v[:np.argwhere(time_v == 1.5)[0, 0]] * 0.16 * np.pi * 2), np.zeros(int(2.5 * time_v.shape[0] / time_v[-1].round())), ]) angle_phi = random.choice([25, -25, -30]) self.signals['phi'] = np.hstack([np.zeros(int(1.5 * time_v.shape[0] / time_v[-1].round())), angle_phi * np.sin(time_v[:np.argwhere(time_v == 1)[0, 0]] * 0.25 * np.pi * 2), angle_phi * np.ones(int(4 * time_v.shape[0] / time_v[-1].round())), angle_phi * np.cos(time_v[:np.argwhere(time_v == 1)[0, 0]] * 0.25 * np.pi * 2), np.zeros(int(3 * time_v.shape[0] / time_v[-1].round())), -angle_phi * np.sin( time_v[:np.argwhere(time_v == 1)[0, 0]] * 0.25 * np.pi * 2), -angle_phi * np.ones(int(3.5 * time_v.shape[0] / time_v[-1].round())), -angle_phi * np.cos( time_v[:np.argwhere(time_v == 1)[0, 0]] * 0.25 * np.pi * 2), np.zeros(int(4 * time_v.shape[0] / time_v[-1].round())), ]) return self.return_signals() def get_task_eval_fail(self, theta_angle=15, init_alt=2000): super(AttitudeTask, self).get_task_eval_fail() time_v = self.time_v self.signals['theta'] = np.hstack([np.zeros(int(10 * time_v.shape[0] / time_v[-1].round())), theta_angle * np.sin( time_v[:np.argwhere(time_v == 4.0)[0, 0]] * 0.06 * np.pi * 2), theta_angle * np.ones(int(14 * time_v.shape[0] / time_v[-1].round())), theta_angle * np.cos( time_v[:np.argwhere(time_v == 2.0)[0, 0]] * 0.12 * np.pi * 2), 0 * np.ones(int(6 * time_v.shape[0] / time_v[-1].round())), -theta_angle * np.sin( time_v[:np.argwhere(time_v == 2)[0, 0]] * 0.13 * np.pi * 2), -theta_angle * np.ones(int(14 * time_v.shape[0] / time_v[-1].round())), -theta_angle * np.cos( time_v[:np.argwhere(time_v == 4)[0, 0]] * 0.06 * np.pi * 2), 0 * np.ones(int(14 * time_v.shape[0] / time_v[-1].round())), ]) self.signals['phi'] = np.hstack([np.zeros(int(16 * time_v.shape[0] / time_v[-1].round())), -20 * np.sin(time_v[:np.argwhere(time_v == 1.5)[0, 0]] * 0.16 * np.pi * 2), -20 * np.ones(int(6 * time_v.shape[0] / time_v[-1].round())), -20 * np.cos(time_v[:np.argwhere(time_v == 1.5)[0, 0]] * 0.16 * np.pi * 2), 0 * np.ones(int(16 * time_v.shape[0] / time_v[-1].round())), 20 * np.sin(time_v[:np.argwhere(time_v == 1.5)[0, 0]] * 0.16 * np.pi * 2), 20 * np.ones(int(6 * time_v.shape[0] / time_v[-1].round())), 20 * np.cos(time_v[:np.argwhere(time_v == 1.5)[0, 0]] * 0.16 * np.pi * 2), 0 * np.ones(int(20 * time_v.shape[0] / time_v[-1].round())), ]) return self.return_signals() def get_task_eval_FDD(self, theta_angle=15, init_alt=2000): super(AttitudeTask, self).get_task_eval_FDD() time_v = self.time_v self.signals['theta'] = np.hstack([np.zeros(int(10 * time_v.shape[0] / time_v[-1].round())), theta_angle * np.sin( time_v[:np.argwhere(time_v == 4.0)[0, 0]] * 0.06 * np.pi * 2), theta_angle * np.ones(int(14 * time_v.shape[0] / time_v[-1].round())), theta_angle * np.cos( time_v[:np.argwhere(time_v == 2.0)[0, 0]] * 0.12 * np.pi * 2), 0 * np.ones(int(6 * time_v.shape[0] / time_v[-1].round())), -theta_angle * np.sin( time_v[:np.argwhere(time_v == 2)[0, 0]] * 0.13 * np.pi * 2), -theta_angle * np.ones(int(14 * time_v.shape[0] / time_v[-1].round())), -theta_angle * np.cos( 
time_v[:np.argwhere(time_v == 4)[0, 0]] * 0.06 * np.pi * 2), 0 * np.ones(int(4 * time_v.shape[0] / time_v[-1].round())), np.zeros(int(4 * time_v.shape[0] / time_v[-1].round())), theta_angle * np.sin( time_v[:np.argwhere(time_v == 4.0)[0, 0]] * 0.06 * np.pi * 2), theta_angle * np.ones(int(14 * time_v.shape[0] / time_v[-1].round())), theta_angle * np.cos( time_v[:np.argwhere(time_v == 2.0)[0, 0]] * 0.12 * np.pi * 2), 0 * np.ones(int(6 * time_v.shape[0] / time_v[-1].round())), -theta_angle * np.sin( time_v[:np.argwhere(time_v == 2)[0, 0]] * 0.13 * np.pi * 2), -theta_angle * np.ones(int(14 * time_v.shape[0] / time_v[-1].round())), -theta_angle * np.cos( time_v[:np.argwhere(time_v == 4)[0, 0]] * 0.06 * np.pi * 2), 0 * np.ones(int(10 * time_v.shape[0] / time_v[-1].round())), ]) roll_angle = 20 self.signals['phi'] = np.hstack([np.zeros(int(16 * time_v.shape[0] / time_v[-1].round())), -roll_angle * np.sin( time_v[:np.argwhere(time_v == 1.5)[0, 0]] * 0.16 * np.pi * 2), -roll_angle * np.ones(int(6 * time_v.shape[0] / time_v[-1].round())), -roll_angle * np.cos( time_v[:np.argwhere(time_v == 1.5)[0, 0]] * 0.16 * np.pi * 2), 0 * np.ones(int(16 * time_v.shape[0] / time_v[-1].round())), roll_angle * np.sin( time_v[:np.argwhere(time_v == 1.5)[0, 0]] * 0.16 * np.pi * 2), roll_angle * np.ones(int(6 * time_v.shape[0] / time_v[-1].round())), roll_angle * np.cos( time_v[:np.argwhere(time_v == 1.5)[0, 0]] * 0.16 * np.pi * 2), 0 * np.ones(int(10 * time_v.shape[0] / time_v[-1].round())), np.zeros(int(10 * time_v.shape[0] / time_v[-1].round())), -roll_angle * np.sin( time_v[:np.argwhere(time_v == 1.5)[0, 0]] * 0.16 * np.pi * 2), -roll_angle * np.ones(int(6 * time_v.shape[0] / time_v[-1].round())), -roll_angle * np.cos( time_v[:np.argwhere(time_v == 1.5)[0, 0]] * 0.16 * np.pi * 2), 0 * np.ones(int(16 * time_v.shape[0] / time_v[-1].round())), roll_angle * np.sin( time_v[:np.argwhere(time_v == 1.5)[0, 0]] * 0.16 * np.pi * 2), roll_angle * np.ones(int(6 * time_v.shape[0] / time_v[-1].round())), roll_angle * np.cos( time_v[:np.argwhere(time_v == 1.5)[0, 0]] * 0.16 * np.pi * 2), 0 * np.ones(int(16 * time_v.shape[0] / time_v[-1].round())), ]) return self.return_signals() def return_signals(self): self.signals['beta'] = np.zeros(int(self.time_v.shape[0])) self.obs_indices = [self.state_indices['p'], self.state_indices['q'], self.state_indices['r']] self.track_signals, self.track_indices, self.obs_indices = self.organize_indices(self.signals, self.obs_indices) return self.track_signals, self.track_indices, self.obs_indices, self.time_v, '3attitude_step' class DisturbanceRejectionAtt(AttitudeTask): def get_task_eval(self, init_alt=2000): self.time_v = np.arange(0, 20, 0.01) self.signals['theta'] = np.zeros((self.time_v.shape[0],)) self.signals['phi'] = np.zeros((self.time_v.shape[0],)) return self.return_signals() class AltitudeTask(Task): def get_agent_catalog(self): catalog = super(AltitudeTask, self).get_agent_catalog() catalog['normal'] = 'altitude_2attitude_P7V00G' catalog['elev_range'] = 'altitude_2attitude_P7V00G' catalog['aileron_eff'] = 'altitude_2attitude_P7V00G' catalog['rudder_stuck'] = 'altitude_2attitude_P7V00G' catalog['horz_tail'] = 'altitude_2attitude_P7V00G' catalog['vert_tail'] = 'altitude_2attitude_P7V00G' catalog['icing'] = 'altitude_2attitude_P7V00G' catalog['cg_shift'] = 'altitude_2attitude_P7V00G' return catalog def get_task_tr(self, init_alt=2000): super(AltitudeTask, self).get_task_tr() time_v = self.time_v self.signals['h'] = np.hstack([np.linspace(init_alt, init_alt+55, int(10 * time_v.shape[0] / 
time_v[-1].round())), init_alt+55 * np.ones(int(10 * time_v.shape[0] / time_v[-1].round())), # np.linspace(2044, 2025, int(3.75 * time_v.shape[0] / time_v[-1].round())), # 2025 * np.ones(int(6.75 * time_v.shape[0] / time_v[-1].round())), ]) angle_phi = random.choice([45, 35, -45, -35]) self.signals['phi'] = np.hstack([np.zeros(int(1 * time_v.shape[0] / time_v[-1].round())), angle_phi * np.sin(time_v[:np.argwhere(time_v == 1)[0, 0]] * 0.25 * np.pi * 2), angle_phi * np.ones(int(4 * time_v.shape[0] / time_v[-1].round())), angle_phi * np.cos(time_v[:np.argwhere(time_v == 1)[0, 0]] * 0.25 * np.pi * 2), np.zeros(int(3 * time_v.shape[0] / time_v[-1].round())), np.zeros(int(2 * time_v.shape[0] / time_v[-1].round())), -angle_phi * 0.8 * np.sin( time_v[:np.argwhere(time_v == 1)[0, 0]] * 0.25 * np.pi * 2), -angle_phi * 0.8 * np.ones(int(4 * time_v.shape[0] / time_v[-1].round())), -angle_phi * 0.8 * np.cos( time_v[:np.argwhere(time_v == 1)[0, 0]] * 0.25 * np.pi * 2), np.zeros(int(2 * time_v.shape[0] / time_v[-1].round())), ]) return self.return_signals() def get_task_eval(self, init_alt=2000): self.time_v = time_v =
np.arange(0, 120, 0.01)
numpy.arange
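# A minimal illustrative sketch (the helper name `smooth_step` and its arguments are
# assumptions, not part of the code above): the Task classes build each reference
# signal by np.hstack-ing a quarter-sine ramp-in, a constant hold and a
# quarter-cosine ramp-out sampled on time_v = np.arange(0, T, 0.01), with segment
# lengths taken as int(duration * time_v.shape[0] / time_v[-1].round()).
import numpy as np

def smooth_step(time_v, amplitude, ramp_s, hold_s):
    """Quarter-sine ramp-in -> constant hold -> quarter-cosine ramp-out."""
    samples_per_s = time_v.shape[0] / time_v[-1].round()  # 100 samples/s for dt = 0.01
    ramp = time_v[:int(ramp_s * samples_per_s)]
    freq = 1.0 / (4.0 * ramp_s)  # quarter period over the ramp (e.g. ~0.16 Hz for 1.5 s)
    return np.hstack([amplitude * np.sin(ramp * freq * np.pi * 2),   # 0 -> amplitude
                      amplitude * np.ones(int(hold_s * samples_per_s)),
                      amplitude * np.cos(ramp * freq * np.pi * 2)])  # amplitude -> 0

# e.g. a 20 deg pitch reference with 1.5 s ramps and a 3.5 s hold, as in get_task_tr
time_v = np.arange(0, 20, 0.01)
theta_ref = smooth_step(time_v, 20.0, ramp_s=1.5, hold_s=3.5)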
import numpy as np
import pandas as pd
from numpy import array, atleast_2d, linspace, zeros
from scipy.optimize import root

from dolo.algos.dtcscc.steady_state import find_deterministic_equilibrium
from dolo.numeric.optimize.ncpsolve import ncpsolve
from dolo.numeric.optimize.newton import newton
from dolo.numeric.serial_operations import serial_multiplication as smult


def _shocks_to_epsilons(model, shocks, T):
    """
    Helper function to support input argument `shocks` being one of many
    different data types. Will always return a `T, n_e` matrix.
    """
    n_e = len(model.calibration['shocks'])

    # if we have a DataFrame, convert it to a dict and rely on the method below
    if isinstance(shocks, pd.DataFrame):
        shocks = {k: shocks[k].tolist() for k in shocks.columns}

    # handle case where shocks might be a dict. Be careful to handle case where
    # value arrays are not the same length
    if isinstance(shocks, dict):
        epsilons = np.zeros((T+1, n_e))
        for (i, k) in enumerate(model.symbols["shocks"]):
            if k in shocks:
                this_shock = shocks[k]
                epsilons[:len(this_shock)-1, i] = this_shock[1:]
                epsilons[(len(this_shock)-1):, i] = this_shock[-1]
            else:
                # otherwise set to value in calibration
                epsilons[:, i] = model.calibration["shocks"][i]

        return epsilons

    # read from calibration if not given
    if shocks is None:
        shocks = model.calibration["shocks"]

    # now we just assume that shocks is array-like and try using the output of
    # np.asarray(shocks)
    shocks = np.asarray(shocks)
    shocks = shocks.reshape((-1, n_e))

    # until last period, exogenous shock takes its last value
    epsilons = np.zeros((T+1, n_e))
    epsilons[:(shocks.shape[0]-1), :] = shocks[1:, :]
    epsilons[(shocks.shape[0]-1):, :] = shocks[-1:, :]

    return epsilons


def deterministic_solve(model, shocks=None, start_states=None, T=100,
                        ignore_constraints=False, maxit=100,
                        initial_guess=None, verbose=False, tol=1e-6):
    """
    Computes a perfect foresight simulation using a stacked-time algorithm.

    The initial state is specified either by providing a series of exogenous
    shocks and assuming the model is initially in equilibrium with the first
    value of the shock, or by specifying an initial value for the states.

    Parameters
    ----------
    model : NumericModel
        "fg" or "fga" model to be solved
    shocks : array-like, dict, or pandas.DataFrame
        A specification of the shocks to the model. Can be any of the
        following (note by "declaration order" below we mean the order of
        `model.symbols["shocks"]`):

        - A 1d numpy array-like specifying a time series for a single shock,
          or all shocks stacked into a single array.
        - A 2d numpy array where each column specifies the time series for
          one of the shocks in declaration order. This must be an `N` by
          number of shocks 2d array.
        - A dict where keys are strings found in `model.symbols["shocks"]`
          and values are a time series of values for that shock. For model
          shocks that do not appear in this dict, the shock is set to the
          calibrated value. Note that this interface is the most flexible as
          it allows the user to pass values for only a subset of the model
          shocks and it allows the passed time series to be of different
          lengths.
        - A DataFrame where columns map shock names into time series. The
          same assumptions and behavior that are used in the dict case apply
          here

        If nothing is given here, `shocks` is set equal to the calibrated
        values found in `model.calibration["shocks"]` for all periods.

        If the length of any time-series in shocks is less than `T` (see
        below) it is assumed that that particular shock will remain at the
        final given value for the duration of the simulation.
    start_states : ndarray or dict
        a vector with the value of initial states, or a calibration
        dictionary with the initial values of states and controls
    T : int
        horizon for the perfect foresight simulation
    maxit : int
        maximum number of iterations for the nonlinear solver
    verbose : boolean
        if True, the solver displays iterations
    tol : float
        stopping criterion for the nonlinear solver
    ignore_constraints : bool
        if True, complementarity constraints are ignored.

    Returns
    -------
    pandas dataframe
        a dataframe with T+1 observations of the model variables along the
        simulation (states, controls, auxiliaries). The first observation is
        the steady-state corresponding to the first value of the shocks. The
        simulation should return to a steady-state corresponding to the last
        value of the exogenous shocks.
    """
    # TODO:

    # if model.model_spec == 'fga':
    #     from dolo.compiler.converter import GModel_fg_from_fga
    #     model = GModel_fg_from_fga(model)

    # definitions
    n_s = len(model.calibration['states'])
    n_x = len(model.calibration['controls'])

    epsilons = _shocks_to_epsilons(model, shocks, T)

    # initial and final steady-states consistent with exogenous shocks
    if start_states is None:
        start_states = model.calibration

    if isinstance(start_states, dict):
        # at least that part is clear
        start_equilibrium = start_states
        start_s = start_equilibrium['states']
        start_x = start_equilibrium['controls']
        final_s = start_equilibrium['states']
        final_x = start_equilibrium['controls']
    elif isinstance(start_states, np.ndarray):
        start_s = start_states
        start_x = model.calibration['controls']
        final_s = model.calibration['states']
        final_x = model.calibration['controls']

    # if start_constraints:
    #     # we ignore start_constraints
    #     start_dict.update(start_constraints)
    #     final_equilibrium = start_constraints.copy()
    # else:
    #     final_eqm = find_deterministic_equilibrium(model,
    #                                                constraints=final_dict)
    #     final_s = final_eqm['states']
    #     final_x = final_eqm['controls']
    #
    # start_s = start_states
    # start_x = final_x

    # TODO: for start_x, it should be possible to use first order guess

    final = np.concatenate([final_s, final_x])
    start = np.concatenate([start_s, start_x])

    if verbose is True:
        print("Initial states : {}".format(start_s))
        print("Final controls : {}".format(final_x))

    p = model.calibration['parameters']

    if initial_guess is None:
        initial_guess = np.row_stack([start*(1-l) + final*l
                                      for l in linspace(0.0, 1.0, T+1)])
    else:
        if isinstance(initial_guess, pd.DataFrame):
            initial_guess = np.array(initial_guess).T.copy()
        initial_guess = initial_guess[:, :n_s+n_x]
        initial_guess[0, :n_s] = start_s
        initial_guess[-1, n_s:] = final_x

    sh = initial_guess.shape

    if model.x_bounds and not ignore_constraints:
        initial_states = initial_guess[:, :n_s]
        [lb, ub] = [u(initial_states, p) for u in model.x_bounds]
        lower_bound = initial_guess*0 - np.inf
        lower_bound[:, n_s:] = lb
        upper_bound = initial_guess*0 + np.inf
        upper_bound[:, n_s:] = ub
        test1 = max(lb.max(axis=0) - lb.min(axis=0))
        test2 = max(ub.max(axis=0) - ub.min(axis=0))
        if test1 > 0.00000001 or test2 > 0.00000001:
            msg = "Not implemented: perfect foresight solution requires that "
            msg += "controls have constant bounds."
            raise Exception(msg)
    else:
        ignore_constraints = True
        lower_bound = None
        upper_bound = None

    nn = sh[0]*sh[1]

    def fobj(vec):
        o = det_residual(model, vec.reshape(sh), start_s, final_x, epsilons)[0]
        return o.ravel()

    if not ignore_constraints:
        def ff(vec):
            return det_residual(model, vec.reshape(sh), start_s, final_x,
                                epsilons, jactype='sparse')

        x0 = initial_guess.ravel()
        sol, nit = ncpsolve(ff, lower_bound.ravel(), upper_bound.ravel(),
                            initial_guess.ravel(), verbose=verbose,
                            maxit=maxit, tol=tol, jactype='sparse')
        sol = sol.reshape(sh)

    else:
        def ff(vec):
            return det_residual(model, vec.reshape(sh), start_s, final_x,
                                epsilons, diff=False).ravel()

        x0 = initial_guess.ravel()
        sol = root(ff, x0, jac=False)
        res = ff(sol.x)
        sol = sol.x.reshape(sh)

    if 'auxiliary' in model.functions:
        colnames = (model.symbols['states'] + model.symbols['controls'] +
                    model.symbols['auxiliaries'])
        # compute auxiliaries
        y = model.functions['auxiliary'](sol[:, :n_s], sol[:, n_s:], p)
        sol = np.column_stack([sol, y])
    else:
        colnames = model.symbols['states'] + model.symbols['controls']

    sol = np.column_stack([sol, epsilons])
    colnames = colnames + model.symbols['shocks']

    ts = pd.DataFrame(sol, columns=colnames)
    return ts


def det_residual(model, guess, start, final, shocks, diff=True,
                 jactype='sparse'):
    '''
    Computes the residuals, the derivatives of the stacked-time system.
    :param model: an fga model
    :param guess: the guess for the simulated values. An `(n_s.n_x) x N`
        array, where n_s is the number of states, n_x the number of controls,
        and `N` the length of the simulation.
    :param start: initial boundary condition (initial value of the states)
    :param final: final boundary condition (last value of the controls)
    :param shocks: values for the exogenous shocks
    :param diff: if True, the derivatives are computed
    :return: a list with two elements:
        - an `(n_s.n_x) x N` array with the residuals of the system
        - a `(n_s.n_x) x N x (n_s.n_x) x N` array representing the jacobian
          of the system
    '''

    # TODO: compute a sparse derivative and ensure the solvers can deal with it

    n_s = len(model.symbols['states'])
    n_x = len(model.symbols['controls'])
    n_e = len(model.symbols['shocks'])
    N = guess.shape[0]

    p = model.calibration['parameters']

    f = model.functions['arbitrage']
    g = model.functions['transition']

    vec = guess[:-1, :]
    vec_f = guess[1:, :]

    s = vec[:, :n_s]
    x = vec[:, n_s:]
    S = vec_f[:, :n_s]
    X = vec_f[:, n_s:]

    e = shocks[:-1, :]
    E = shocks[1:, :]

    if diff:
        SS, SS_s, SS_x, SS_e = g(s, x, e, p, diff=True)
        R, R_s, R_x, R_e, R_S, R_X = f(s, x, E, S, X, p, diff=True)
    else:
        SS = g(s, x, e, p)
        R = f(s, x, E, S, X, p)

    res_s = SS - S
    res_x = R

    res =
np.zeros((N, n_s+n_x))
numpy.zeros
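# A minimal sketch of the dict branch of `_shocks_to_epsilons` above (the names
# `shocks_dict_to_epsilons`, `shock_names` and `calibrated` are stand-ins for the
# `model` object, not dolo API): each named shock path is copied with its first
# value dropped and its last value held to the horizon; shocks missing from the
# dict stay at their calibrated level.
import numpy as np

def shocks_dict_to_epsilons(shocks, shock_names, calibrated, T):
    n_e = len(shock_names)
    epsilons = np.zeros((T + 1, n_e))
    for i, name in enumerate(shock_names):
        if name in shocks:
            path = np.asarray(shocks[name], dtype=float)
            epsilons[:len(path) - 1, i] = path[1:]   # drop the initial (period-0) value
            epsilons[len(path) - 1:, i] = path[-1]   # hold the terminal value afterwards
        else:
            epsilons[:, i] = calibrated[i]           # calibrated value for missing shocks
    return epsilons

# e.g. a 5-period path for a shock named 'e_z'; a second shock 'e_g' stays calibrated
eps = shocks_dict_to_epsilons({'e_z': [1.0, 1.1, 1.05, 1.02, 1.0]},
                              shock_names=['e_z', 'e_g'], calibrated=[1.0, 0.2], T=10)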
from __future__ import print_function from datetime import datetime, timedelta import numpy as np import pandas as pd from pandas import (Series, Index, Int64Index, Timestamp, Period, DatetimeIndex, PeriodIndex, TimedeltaIndex, Timedelta, timedelta_range, date_range, Float64Index, _np_version_under1p10) import pandas.tslib as tslib import pandas.tseries.period as period import pandas.util.testing as tm from pandas.tests.test_base import Ops class TestDatetimeIndexOps(Ops): tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore', 'dateutil/US/Pacific'] def setUp(self): super(TestDatetimeIndexOps, self).setUp() mask = lambda x: (isinstance(x, DatetimeIndex) or isinstance(x, PeriodIndex)) self.is_valid_objs = [o for o in self.objs if mask(o)] self.not_valid_objs = [o for o in self.objs if not mask(o)] def test_ops_properties(self): self.check_ops_properties( ['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear', 'week', 'dayofweek', 'dayofyear', 'quarter']) self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond', 'is_month_start', 'is_month_end', 'is_quarter_start', 'is_quarter_end', 'is_year_start', 'is_year_end', 'weekday_name'], lambda x: isinstance(x, DatetimeIndex)) def test_ops_properties_basic(self): # sanity check that the behavior didn't change # GH7206 for op in ['year', 'day', 'second', 'weekday']: self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op)) # attribute access should still work! s = Series(dict(year=2000, month=1, day=10)) self.assertEqual(s.year, 2000) self.assertEqual(s.month, 1) self.assertEqual(s.day, 10) self.assertRaises(AttributeError, lambda: s.weekday) def test_asobject_tolist(self): idx = pd.date_range(start='2013-01-01', periods=4, freq='M', name='idx') expected_list = [Timestamp('2013-01-31'), Timestamp('2013-02-28'), Timestamp('2013-03-31'), Timestamp('2013-04-30')] expected = pd.Index(expected_list, dtype=object, name='idx') result = idx.asobject self.assertTrue(isinstance(result, Index)) self.assertEqual(result.dtype, object) self.assert_index_equal(result, expected) self.assertEqual(result.name, expected.name) self.assertEqual(idx.tolist(), expected_list) idx = pd.date_range(start='2013-01-01', periods=4, freq='M', name='idx', tz='Asia/Tokyo') expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'), Timestamp('2013-02-28', tz='Asia/Tokyo'), Timestamp('2013-03-31', tz='Asia/Tokyo'), Timestamp('2013-04-30', tz='Asia/Tokyo')] expected = pd.Index(expected_list, dtype=object, name='idx') result = idx.asobject self.assertTrue(isinstance(result, Index)) self.assertEqual(result.dtype, object) self.assert_index_equal(result, expected) self.assertEqual(result.name, expected.name) self.assertEqual(idx.tolist(), expected_list) idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2), pd.NaT, datetime(2013, 1, 4)], name='idx') expected_list = [Timestamp('2013-01-01'), Timestamp('2013-01-02'), pd.NaT, Timestamp('2013-01-04')] expected = pd.Index(expected_list, dtype=object, name='idx') result = idx.asobject self.assertTrue(isinstance(result, Index)) self.assertEqual(result.dtype, object) self.assert_index_equal(result, expected) self.assertEqual(result.name, expected.name) self.assertEqual(idx.tolist(), expected_list) def test_minmax(self): for tz in self.tz: # monotonic idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], tz=tz) self.assertTrue(idx1.is_monotonic) # non-monotonic idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03', '2011-01-02', pd.NaT], tz=tz) 
self.assertFalse(idx2.is_monotonic) for idx in [idx1, idx2]: self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz)) self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz)) self.assertEqual(idx.argmin(), 0) self.assertEqual(idx.argmax(), 2) for op in ['min', 'max']: # Return NaT obj = DatetimeIndex([]) self.assertTrue(pd.isnull(getattr(obj, op)())) obj = DatetimeIndex([pd.NaT]) self.assertTrue(pd.isnull(getattr(obj, op)())) obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT]) self.assertTrue(pd.isnull(getattr(obj, op)())) def test_numpy_minmax(self): dr = pd.date_range(start='2016-01-15', end='2016-01-20') self.assertEqual(np.min(dr), Timestamp('2016-01-15 00:00:00', freq='D')) self.assertEqual(np.max(dr), Timestamp('2016-01-20 00:00:00', freq='D')) errmsg = "the 'out' parameter is not supported" tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0) tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0) self.assertEqual(np.argmin(dr), 0) self.assertEqual(np.argmax(dr), 5) if not _np_version_under1p10: errmsg = "the 'out' parameter is not supported" tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0) tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0) def test_round(self): for tz in self.tz: rng = pd.date_range(start='2016-01-01', periods=5, freq='30Min', tz=tz) elt = rng[1] expected_rng = DatetimeIndex([ Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'), Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'), Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'), Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'), Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'), ]) expected_elt = expected_rng[1] tm.assert_index_equal(rng.round(freq='H'), expected_rng) self.assertEqual(elt.round(freq='H'), expected_elt) msg = pd.tseries.frequencies._INVALID_FREQ_ERROR with tm.assertRaisesRegexp(ValueError, msg): rng.round(freq='foo') with tm.assertRaisesRegexp(ValueError, msg): elt.round(freq='foo') msg = "<MonthEnd> is a non-fixed frequency" tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M') tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M') def test_repeat_range(self): rng = date_range('1/1/2000', '1/1/2001') result = rng.repeat(5) self.assertIsNone(result.freq) self.assertEqual(len(result), 5 * len(rng)) for tz in self.tz: index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz) exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-02', '2001-01-02'], tz=tz) for res in [index.repeat(2), np.repeat(index, 2)]: tm.assert_index_equal(res, exp) self.assertIsNone(res.freq) index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz) exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-03', '2001-01-03'], tz=tz) for res in [index.repeat(2), np.repeat(index, 2)]: tm.assert_index_equal(res, exp) self.assertIsNone(res.freq) index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'], tz=tz) exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01', 'NaT', 'NaT', 'NaT', '2003-01-01', '2003-01-01', '2003-01-01'], tz=tz) for res in [index.repeat(3), np.repeat(index, 3)]: tm.assert_index_equal(res, exp) self.assertIsNone(res.freq) def test_repeat(self): reps = 2 msg = "the 'axis' parameter is not supported" for tz in self.tz: rng = pd.date_range(start='2016-01-01', periods=2, freq='30Min', tz=tz) expected_rng = DatetimeIndex([ Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'), Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'), Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'), Timestamp('2016-01-01 00:30:00', tz=tz, 
freq='30T'), ]) res = rng.repeat(reps) tm.assert_index_equal(res, expected_rng) self.assertIsNone(res.freq) tm.assert_index_equal(np.repeat(rng, reps), expected_rng) tm.assertRaisesRegexp(ValueError, msg, np.repeat, rng, reps, axis=1) def test_representation(self): idx = [] idx.append(DatetimeIndex([], freq='D')) idx.append(DatetimeIndex(['2011-01-01'], freq='D')) idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')) idx.append(DatetimeIndex( ['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')) idx.append(DatetimeIndex( ['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00' ], freq='H', tz='Asia/Tokyo')) idx.append(DatetimeIndex( ['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern')) idx.append(DatetimeIndex( ['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC')) exp = [] exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""") exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', " "freq='D')") exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], " "dtype='datetime64[ns]', freq='D')") exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], " "dtype='datetime64[ns]', freq='D')") exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', " "'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']" ", dtype='datetime64[ns, Asia/Tokyo]', freq='H')") exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', " "'2011-01-01 10:00:00-05:00', 'NaT'], " "dtype='datetime64[ns, US/Eastern]', freq=None)") exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', " "'2011-01-01 10:00:00+00:00', 'NaT'], " "dtype='datetime64[ns, UTC]', freq=None)""") with pd.option_context('display.width', 300): for indx, expected in zip(idx, exp): for func in ['__repr__', '__unicode__', '__str__']: result = getattr(indx, func)() self.assertEqual(result, expected) def test_representation_to_series(self): idx1 = DatetimeIndex([], freq='D') idx2 = DatetimeIndex(['2011-01-01'], freq='D') idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D') idx4 = DatetimeIndex( ['2011-01-01', '2011-01-02', '2011-01-03'], freq='D') idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], freq='H', tz='Asia/Tokyo') idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern') idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15']) exp1 = """Series([], dtype: datetime64[ns])""" exp2 = """0 2011-01-01 dtype: datetime64[ns]""" exp3 = """0 2011-01-01 1 2011-01-02 dtype: datetime64[ns]""" exp4 = """0 2011-01-01 1 2011-01-02 2 2011-01-03 dtype: datetime64[ns]""" exp5 = """0 2011-01-01 09:00:00+09:00 1 2011-01-01 10:00:00+09:00 2 2011-01-01 11:00:00+09:00 dtype: datetime64[ns, Asia/Tokyo]""" exp6 = """0 2011-01-01 09:00:00-05:00 1 2011-01-01 10:00:00-05:00 2 NaT dtype: datetime64[ns, US/Eastern]""" exp7 = """0 2011-01-01 09:00:00 1 2011-01-02 10:15:00 dtype: datetime64[ns]""" with pd.option_context('display.width', 300): for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6, idx7], [exp1, exp2, exp3, exp4, exp5, exp6, exp7]): result = repr(Series(idx)) self.assertEqual(result, expected) def test_summary(self): # GH9116 idx1 = DatetimeIndex([], freq='D') idx2 = DatetimeIndex(['2011-01-01'], freq='D') idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D') idx4 = DatetimeIndex( ['2011-01-01', '2011-01-02', '2011-01-03'], freq='D') idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], freq='H', tz='Asia/Tokyo') idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', 
pd.NaT], tz='US/Eastern') exp1 = """DatetimeIndex: 0 entries Freq: D""" exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01 Freq: D""" exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02 Freq: D""" exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03 Freq: D""" exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 " "to 2011-01-01 11:00:00+09:00\n" "Freq: H") exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT""" for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6], [exp1, exp2, exp3, exp4, exp5, exp6]): result = idx.summary() self.assertEqual(result, expected) def test_resolution(self): for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U'], ['day', 'day', 'day', 'day', 'hour', 'minute', 'second', 'millisecond', 'microsecond']): for tz in self.tz: idx = pd.date_range(start='2013-04-01', periods=30, freq=freq, tz=tz) self.assertEqual(idx.resolution, expected) def test_union(self): for tz in self.tz: # union rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz) expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz) rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz) expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz) rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) other3 = pd.DatetimeIndex([], tz=tz) expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2), (rng3, other3, expected3)]: result_union = rng.union(other) tm.assert_index_equal(result_union, expected) def test_add_iadd(self): for tz in self.tz: # offset offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'), Timedelta(hours=2)] for delta in offsets: rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz) result = rng + delta expected = pd.date_range('2000-01-01 02:00', '2000-02-01 02:00', tz=tz) tm.assert_index_equal(result, expected) rng += delta tm.assert_index_equal(rng, expected) # int rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10, tz=tz) result = rng + 1 expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10, tz=tz) tm.assert_index_equal(result, expected) rng += 1 tm.assert_index_equal(rng, expected) idx = DatetimeIndex(['2011-01-01', '2011-01-02']) msg = "cannot add a datelike to a DatetimeIndex" with tm.assertRaisesRegexp(TypeError, msg): idx + Timestamp('2011-01-01') with tm.assertRaisesRegexp(TypeError, msg): Timestamp('2011-01-01') + idx def test_add_dti_dti(self): # previously performed setop (deprecated in 0.16.0), now raises # TypeError (GH14164) dti = date_range('20130101', periods=3) dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern') with tm.assertRaises(TypeError): dti + dti with tm.assertRaises(TypeError): dti_tz + dti_tz with tm.assertRaises(TypeError): dti_tz + dti with tm.assertRaises(TypeError): dti + dti_tz def test_difference(self): for tz in self.tz: # diff rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz) expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz) expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz) rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) other3 = 
pd.DatetimeIndex([], tz=tz) expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2), (rng3, other3, expected3)]: result_diff = rng.difference(other) tm.assert_index_equal(result_diff, expected) def test_sub_isub(self): for tz in self.tz: # offset offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'), Timedelta(hours=2)] for delta in offsets: rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz) expected = pd.date_range('1999-12-31 22:00', '2000-01-31 22:00', tz=tz) result = rng - delta tm.assert_index_equal(result, expected) rng -= delta tm.assert_index_equal(rng, expected) # int rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10, tz=tz) result = rng - 1 expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10, tz=tz) tm.assert_index_equal(result, expected) rng -= 1 tm.assert_index_equal(rng, expected) def test_sub_dti_dti(self): # previously performed setop (deprecated in 0.16.0), now changed to # return subtraction -> TimeDeltaIndex (GH ...) dti = date_range('20130101', periods=3) dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern') dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC') expected = TimedeltaIndex([0, 0, 0]) result = dti - dti tm.assert_index_equal(result, expected) result = dti_tz - dti_tz tm.assert_index_equal(result, expected) with tm.assertRaises(TypeError): dti_tz - dti with tm.assertRaises(TypeError): dti - dti_tz with tm.assertRaises(TypeError): dti_tz - dti_tz2 # isub dti -= dti tm.assert_index_equal(dti, expected) # different length raises ValueError dti1 = date_range('20130101', periods=3) dti2 = date_range('20130101', periods=4) with tm.assertRaises(ValueError): dti1 - dti2 # NaN propagation dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03']) dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan]) expected = TimedeltaIndex(['1 days', np.nan, np.nan]) result = dti2 - dti1 tm.assert_index_equal(result, expected) def test_sub_period(self): # GH 13078 # not supported, check TypeError p = pd.Period('2011-01-01', freq='D') for freq in [None, 'D']: idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq) with tm.assertRaises(TypeError): idx - p with tm.assertRaises(TypeError): p - idx def test_comp_nat(self): left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT, pd.Timestamp('2011-01-03')]) right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')]) for l, r in [(left, right), (left.asobject, right.asobject)]: result = l == r expected = np.array([False, False, True]) tm.assert_numpy_array_equal(result, expected) result = l != r expected = np.array([True, True, False]) tm.assert_numpy_array_equal(result, expected) expected = np.array([False, False, False]) tm.assert_numpy_array_equal(l == pd.NaT, expected) tm.assert_numpy_array_equal(pd.NaT == r, expected) expected = np.array([True, True, True]) tm.assert_numpy_array_equal(l != pd.NaT, expected) tm.assert_numpy_array_equal(pd.NaT != l, expected) expected = np.array([False, False, False]) tm.assert_numpy_array_equal(l < pd.NaT, expected) tm.assert_numpy_array_equal(pd.NaT > l, expected) def test_value_counts_unique(self): # GH 7735 for tz in self.tz: idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10) # create repeated values, 'n'th element is repeated by n+1 times idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)), tz=tz) exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10, tz=tz) expected = 
Series(range(10, 0, -1), index=exp_idx, dtype='int64') for obj in [idx, Series(idx)]: tm.assert_series_equal(obj.value_counts(), expected) expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10, tz=tz) tm.assert_index_equal(idx.unique(), expected) idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00', '2013-01-01 09:00', '2013-01-01 08:00', '2013-01-01 08:00', pd.NaT], tz=tz) exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'], tz=tz) expected = Series([3, 2], index=exp_idx) for obj in [idx, Series(idx)]: tm.assert_series_equal(obj.value_counts(), expected) exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00', pd.NaT], tz=tz) expected = Series([3, 2, 1], index=exp_idx) for obj in [idx, Series(idx)]: tm.assert_series_equal(obj.value_counts(dropna=False), expected) tm.assert_index_equal(idx.unique(), exp_idx) def test_nonunique_contains(self): # GH 9512 for idx in map(DatetimeIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1], ['2015', '2015', '2016'], ['2015', '2015', '2014'])): tm.assertIn(idx[0], idx) def test_order(self): # with freq idx1 = DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D', name='idx') idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], freq='H', tz='Asia/Tokyo', name='tzidx') for idx in [idx1, idx2]: ordered = idx.sort_values() self.assert_index_equal(ordered, idx) self.assertEqual(ordered.freq, idx.freq) ordered = idx.sort_values(ascending=False) expected = idx[::-1] self.assert_index_equal(ordered, expected) self.assertEqual(ordered.freq, expected.freq) self.assertEqual(ordered.freq.n, -1) ordered, indexer = idx.sort_values(return_indexer=True) self.assert_index_equal(ordered, idx) self.assert_numpy_array_equal(indexer, np.array([0, 1, 2]), check_dtype=False) self.assertEqual(ordered.freq, idx.freq) ordered, indexer = idx.sort_values(return_indexer=True, ascending=False) expected = idx[::-1] self.assert_index_equal(ordered, expected) self.assert_numpy_array_equal(indexer, np.array([2, 1, 0]), check_dtype=False) self.assertEqual(ordered.freq, expected.freq) self.assertEqual(ordered.freq.n, -1) # without freq for tz in self.tz: idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05', '2011-01-02', '2011-01-01'], tz=tz, name='idx1') exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02', '2011-01-03', '2011-01-05'], tz=tz, name='idx1') idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05', '2011-01-02', '2011-01-01'], tz=tz, name='idx2') exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02', '2011-01-03', '2011-01-05'], tz=tz, name='idx2') idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05', '2011-01-02', pd.NaT], tz=tz, name='idx3') exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03', '2011-01-05'], tz=tz, name='idx3') for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]: ordered = idx.sort_values() self.assert_index_equal(ordered, expected) self.assertIsNone(ordered.freq) ordered = idx.sort_values(ascending=False) self.assert_index_equal(ordered, expected[::-1]) self.assertIsNone(ordered.freq) ordered, indexer = idx.sort_values(return_indexer=True) self.assert_index_equal(ordered, expected) exp = np.array([0, 4, 3, 1, 2]) self.assert_numpy_array_equal(indexer, exp, check_dtype=False) self.assertIsNone(ordered.freq) ordered, indexer = idx.sort_values(return_indexer=True, ascending=False) self.assert_index_equal(ordered, expected[::-1]) exp = np.array([2, 1, 3, 4, 0]) self.assert_numpy_array_equal(indexer, exp, check_dtype=False) 
self.assertIsNone(ordered.freq) def test_getitem(self): idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx') idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D', tz='Asia/Tokyo', name='idx') for idx in [idx1, idx2]: result = idx[0] self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz)) result = idx[0:5] expected = pd.date_range('2011-01-01', '2011-01-05', freq='D', tz=idx.tz, name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) result = idx[0:10:2] expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D', tz=idx.tz, name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) result = idx[-20:-5:3] expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D', tz=idx.tz, name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) result = idx[4::-1] expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03', '2011-01-02', '2011-01-01'], freq='-1D', tz=idx.tz, name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) def test_drop_duplicates_metadata(self): # GH 10115 idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx') result = idx.drop_duplicates() self.assert_index_equal(idx, result) self.assertEqual(idx.freq, result.freq) idx_dup = idx.append(idx) self.assertIsNone(idx_dup.freq) # freq is reset result = idx_dup.drop_duplicates() self.assert_index_equal(idx, result) self.assertIsNone(result.freq) def test_drop_duplicates(self): # to check Index/Series compat base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx') idx = base.append(base[:5]) res = idx.drop_duplicates() tm.assert_index_equal(res, base) res = Series(idx).drop_duplicates() tm.assert_series_equal(res, Series(base)) res = idx.drop_duplicates(keep='last') exp = base[5:].append(base[:5]) tm.assert_index_equal(res, exp) res = Series(idx).drop_duplicates(keep='last') tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36))) res = idx.drop_duplicates(keep=False) tm.assert_index_equal(res, base[5:]) res = Series(idx).drop_duplicates(keep=False) tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31))) def test_take(self): # GH 10295 idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx') idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D', tz='Asia/Tokyo', name='idx') for idx in [idx1, idx2]: result = idx.take([0]) self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz)) result = idx.take([0, 1, 2]) expected = pd.date_range('2011-01-01', '2011-01-03', freq='D', tz=idx.tz, name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) result = idx.take([0, 2, 4]) expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D', tz=idx.tz, name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) result = idx.take([7, 4, 1]) expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D', tz=idx.tz, name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) result = idx.take([3, 2, 5]) expected = DatetimeIndex(['2011-01-04', '2011-01-03', '2011-01-06'], freq=None, tz=idx.tz, name='idx') self.assert_index_equal(result, expected) self.assertIsNone(result.freq) result = idx.take([-3, 2, 5]) expected = DatetimeIndex(['2011-01-29', '2011-01-03', '2011-01-06'], freq=None, tz=idx.tz, name='idx') self.assert_index_equal(result, expected) 
self.assertIsNone(result.freq) def test_take_invalid_kwargs(self): idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx') indices = [1, 6, 5, 9, 10, 13, 15, 3] msg = r"take\(\) got an unexpected keyword argument 'foo'" tm.assertRaisesRegexp(TypeError, msg, idx.take, indices, foo=2) msg = "the 'out' parameter is not supported" tm.assertRaisesRegexp(ValueError, msg, idx.take, indices, out=indices) msg = "the 'mode' parameter is not supported" tm.assertRaisesRegexp(ValueError, msg, idx.take, indices, mode='clip') def test_infer_freq(self): # GH 11018 for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D', '-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S']: idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10) result = pd.DatetimeIndex(idx.asi8, freq='infer') tm.assert_index_equal(idx, result) self.assertEqual(result.freq, freq) def test_nat_new(self): idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x') result = idx._nat_new() exp = pd.DatetimeIndex([pd.NaT] * 5, name='x') tm.assert_index_equal(result, exp) result = idx._nat_new(box=False) exp = np.array([tslib.iNaT] * 5, dtype=np.int64) tm.assert_numpy_array_equal(result, exp) def test_shift(self): # GH 9903 for tz in self.tz: idx = pd.DatetimeIndex([], name='xxx', tz=tz) tm.assert_index_equal(idx.shift(0, freq='H'), idx) tm.assert_index_equal(idx.shift(3, freq='H'), idx) idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00' '2011-01-01 12:00'], name='xxx', tz=tz) tm.assert_index_equal(idx.shift(0, freq='H'), idx) exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00' '2011-01-01 15:00'], name='xxx', tz=tz) tm.assert_index_equal(idx.shift(3, freq='H'), exp) exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00' '2011-01-01 09:00'], name='xxx', tz=tz) tm.assert_index_equal(idx.shift(-3, freq='H'), exp) def test_nat(self): self.assertIs(pd.DatetimeIndex._na_value, pd.NaT) self.assertIs(pd.DatetimeIndex([])._na_value, pd.NaT) for tz in [None, 'US/Eastern', 'UTC']: idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz) self.assertTrue(idx._can_hold_na) tm.assert_numpy_array_equal(idx._isnan, np.array([False, False])) self.assertFalse(idx.hasnans) tm.assert_numpy_array_equal(idx._nan_idxs, np.array([], dtype=np.intp)) idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz) self.assertTrue(idx._can_hold_na) tm.assert_numpy_array_equal(idx._isnan, np.array([False, True])) self.assertTrue(idx.hasnans) tm.assert_numpy_array_equal(idx._nan_idxs, np.array([1], dtype=np.intp)) def test_equals(self): # GH 13107 for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']: idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT']) self.assertTrue(idx.equals(idx)) self.assertTrue(idx.equals(idx.copy())) self.assertTrue(idx.equals(idx.asobject)) self.assertTrue(idx.asobject.equals(idx)) self.assertTrue(idx.asobject.equals(idx.asobject)) self.assertFalse(idx.equals(list(idx))) self.assertFalse(idx.equals(pd.Series(idx))) idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'], tz='US/Pacific') self.assertFalse(idx.equals(idx2)) self.assertFalse(idx.equals(idx2.copy())) self.assertFalse(idx.equals(idx2.asobject)) self.assertFalse(idx.asobject.equals(idx2)) self.assertFalse(idx.equals(list(idx2))) self.assertFalse(idx.equals(pd.Series(idx2))) # same internal, different tz idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific') tm.assert_numpy_array_equal(idx.asi8, idx3.asi8) self.assertFalse(idx.equals(idx3)) self.assertFalse(idx.equals(idx3.copy())) 
self.assertFalse(idx.equals(idx3.asobject)) self.assertFalse(idx.asobject.equals(idx3)) self.assertFalse(idx.equals(list(idx3))) self.assertFalse(idx.equals(pd.Series(idx3))) class TestTimedeltaIndexOps(Ops): def setUp(self): super(TestTimedeltaIndexOps, self).setUp() mask = lambda x: isinstance(x, TimedeltaIndex) self.is_valid_objs = [o for o in self.objs if mask(o)] self.not_valid_objs = [] def test_ops_properties(self): self.check_ops_properties(['days', 'hours', 'minutes', 'seconds', 'milliseconds']) self.check_ops_properties(['microseconds', 'nanoseconds']) def test_asobject_tolist(self): idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx') expected_list = [Timedelta('1 days'), Timedelta('2 days'), Timedelta('3 days'), Timedelta('4 days')] expected = pd.Index(expected_list, dtype=object, name='idx') result = idx.asobject self.assertTrue(isinstance(result, Index)) self.assertEqual(result.dtype, object) self.assert_index_equal(result, expected) self.assertEqual(result.name, expected.name) self.assertEqual(idx.tolist(), expected_list) idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT, timedelta(days=4)], name='idx') expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT, Timedelta('4 days')] expected = pd.Index(expected_list, dtype=object, name='idx') result = idx.asobject self.assertTrue(isinstance(result, Index)) self.assertEqual(result.dtype, object) self.assert_index_equal(result, expected) self.assertEqual(result.name, expected.name) self.assertEqual(idx.tolist(), expected_list) def test_minmax(self): # monotonic idx1 = TimedeltaIndex(['1 days', '2 days', '3 days']) self.assertTrue(idx1.is_monotonic) # non-monotonic idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT']) self.assertFalse(idx2.is_monotonic) for idx in [idx1, idx2]: self.assertEqual(idx.min(), Timedelta('1 days')), self.assertEqual(idx.max(), Timedelta('3 days')), self.assertEqual(idx.argmin(), 0) self.assertEqual(idx.argmax(), 2) for op in ['min', 'max']: # Return NaT obj = TimedeltaIndex([]) self.assertTrue(pd.isnull(getattr(obj, op)())) obj = TimedeltaIndex([pd.NaT]) self.assertTrue(pd.isnull(getattr(obj, op)())) obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT]) self.assertTrue(pd.isnull(getattr(obj, op)())) def test_numpy_minmax(self): dr = pd.date_range(start='2016-01-15', end='2016-01-20') td = TimedeltaIndex(np.asarray(dr)) self.assertEqual(np.min(td), Timedelta('16815 days')) self.assertEqual(np.max(td), Timedelta('16820 days')) errmsg = "the 'out' parameter is not supported" tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0) tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0) self.assertEqual(np.argmin(td), 0) self.assertEqual(np.argmax(td), 5) if not _np_version_under1p10: errmsg = "the 'out' parameter is not supported" tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0) tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0) def test_round(self): td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min') elt = td[1] expected_rng = TimedeltaIndex([ Timedelta('16801 days 00:00:00'), Timedelta('16801 days 00:00:00'), Timedelta('16801 days 01:00:00'), Timedelta('16801 days 02:00:00'), Timedelta('16801 days 02:00:00'), ]) expected_elt = expected_rng[1] tm.assert_index_equal(td.round(freq='H'), expected_rng) self.assertEqual(elt.round(freq='H'), expected_elt) msg = pd.tseries.frequencies._INVALID_FREQ_ERROR with self.assertRaisesRegexp(ValueError, msg): td.round(freq='foo') with tm.assertRaisesRegexp(ValueError, msg): 
elt.round(freq='foo') msg = "<MonthEnd> is a non-fixed frequency" tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M') tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M') def test_representation(self): idx1 = TimedeltaIndex([], freq='D') idx2 = TimedeltaIndex(['1 days'], freq='D') idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D') idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D') idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days']) exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')""" exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', " "freq='D')") exp3 = ("TimedeltaIndex(['1 days', '2 days'], " "dtype='timedelta64[ns]', freq='D')") exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], " "dtype='timedelta64[ns]', freq='D')") exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', " "'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)") with pd.option_context('display.width', 300): for idx, expected in zip([idx1, idx2, idx3, idx4, idx5], [exp1, exp2, exp3, exp4, exp5]): for func in ['__repr__', '__unicode__', '__str__']: result = getattr(idx, func)() self.assertEqual(result, expected) def test_representation_to_series(self): idx1 = TimedeltaIndex([], freq='D') idx2 = TimedeltaIndex(['1 days'], freq='D') idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D') idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D') idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days']) exp1 = """Series([], dtype: timedelta64[ns])""" exp2 = """0 1 days dtype: timedelta64[ns]""" exp3 = """0 1 days 1 2 days dtype: timedelta64[ns]""" exp4 = """0 1 days 1 2 days 2 3 days dtype: timedelta64[ns]""" exp5 = """0 1 days 00:00:01 1 2 days 00:00:00 2 3 days 00:00:00 dtype: timedelta64[ns]""" with pd.option_context('display.width', 300): for idx, expected in zip([idx1, idx2, idx3, idx4, idx5], [exp1, exp2, exp3, exp4, exp5]): result = repr(pd.Series(idx)) self.assertEqual(result, expected) def test_summary(self): # GH9116 idx1 = TimedeltaIndex([], freq='D') idx2 = TimedeltaIndex(['1 days'], freq='D') idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D') idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D') idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days']) exp1 = """TimedeltaIndex: 0 entries Freq: D""" exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days Freq: D""" exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days Freq: D""" exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days Freq: D""" exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days " "00:00:00") for idx, expected in zip([idx1, idx2, idx3, idx4, idx5], [exp1, exp2, exp3, exp4, exp5]): result = idx.summary() self.assertEqual(result, expected) def test_add_iadd(self): # only test adding/sub offsets as + is now numeric # offset offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'), Timedelta(hours=2)] for delta in offsets: rng = timedelta_range('1 days', '10 days') result = rng + delta expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00', freq='D') tm.assert_index_equal(result, expected) rng += delta tm.assert_index_equal(rng, expected) # int rng = timedelta_range('1 days 09:00:00', freq='H', periods=10) result = rng + 1 expected = timedelta_range('1 days 10:00:00', freq='H', periods=10) tm.assert_index_equal(result, expected) rng += 1 tm.assert_index_equal(rng, expected) def test_sub_isub(self): # only test adding/sub offsets as - is now numeric # offset offsets = [pd.offsets.Hour(2), timedelta(hours=2), 
np.timedelta64(2, 'h'), Timedelta(hours=2)] for delta in offsets: rng = timedelta_range('1 days', '10 days') result = rng - delta expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00') tm.assert_index_equal(result, expected) rng -= delta tm.assert_index_equal(rng, expected) # int rng = timedelta_range('1 days 09:00:00', freq='H', periods=10) result = rng - 1 expected = timedelta_range('1 days 08:00:00', freq='H', periods=10) tm.assert_index_equal(result, expected) rng -= 1 tm.assert_index_equal(rng, expected) idx = TimedeltaIndex(['1 day', '2 day']) msg = "cannot subtract a datelike from a TimedeltaIndex" with tm.assertRaisesRegexp(TypeError, msg): idx - Timestamp('2011-01-01') result = Timestamp('2011-01-01') + idx expected = DatetimeIndex(['2011-01-02', '2011-01-03']) tm.assert_index_equal(result, expected) def test_ops_compat(self): offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'), Timedelta(hours=2)] rng = timedelta_range('1 days', '10 days', name='foo') # multiply for offset in offsets: self.assertRaises(TypeError, lambda: rng * offset) # divide expected = Int64Index((np.arange(10) + 1) * 12, name='foo') for offset in offsets: result = rng / offset tm.assert_index_equal(result, expected, exact=False) # divide with nats rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo') expected = Float64Index([12, np.nan, 24], name='foo') for offset in offsets: result = rng / offset tm.assert_index_equal(result, expected) # don't allow division by NaT (make could in the future) self.assertRaises(TypeError, lambda: rng / pd.NaT) def test_subtraction_ops(self): # with datetimes/timedelta and tdi/dti tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo') dti = date_range('20130101', periods=3, name='bar') td = Timedelta('1 days') dt = Timestamp('20130101') self.assertRaises(TypeError, lambda: tdi - dt) self.assertRaises(TypeError, lambda: tdi - dti) self.assertRaises(TypeError, lambda: td - dt) self.assertRaises(TypeError, lambda: td - dti) result = dt - dti expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar') tm.assert_index_equal(result, expected) result = dti - dt expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar') tm.assert_index_equal(result, expected) result = tdi - td expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo') tm.assert_index_equal(result, expected, check_names=False) result = td - tdi expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo') tm.assert_index_equal(result, expected, check_names=False) result = dti - td expected = DatetimeIndex( ['20121231', '20130101', '20130102'], name='bar') tm.assert_index_equal(result, expected, check_names=False) result = dt - tdi expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo') tm.assert_index_equal(result, expected) def test_subtraction_ops_with_tz(self): # check that dt/dti subtraction ops with tz are validated dti = date_range('20130101', periods=3) ts = Timestamp('20130101') dt = ts.to_pydatetime() dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern') ts_tz = Timestamp('20130101').tz_localize('US/Eastern') ts_tz2 = Timestamp('20130101').tz_localize('CET') dt_tz = ts_tz.to_pydatetime() td = Timedelta('1 days') def _check(result, expected): self.assertEqual(result, expected) self.assertIsInstance(result, Timedelta) # scalars result = ts - ts expected = Timedelta('0 days') _check(result, expected) result = dt_tz - ts_tz expected = Timedelta('0 days') _check(result, expected) result = ts_tz - 
dt_tz expected = Timedelta('0 days') _check(result, expected) # tz mismatches self.assertRaises(TypeError, lambda: dt_tz - ts) self.assertRaises(TypeError, lambda: dt_tz - dt) self.assertRaises(TypeError, lambda: dt_tz - ts_tz2) self.assertRaises(TypeError, lambda: dt - dt_tz) self.assertRaises(TypeError, lambda: ts - dt_tz) self.assertRaises(TypeError, lambda: ts_tz2 - ts) self.assertRaises(TypeError, lambda: ts_tz2 - dt) self.assertRaises(TypeError, lambda: ts_tz - ts_tz2) # with dti self.assertRaises(TypeError, lambda: dti - ts_tz) self.assertRaises(TypeError, lambda: dti_tz - ts) self.assertRaises(TypeError, lambda: dti_tz - ts_tz2) result = dti_tz - dt_tz expected = TimedeltaIndex(['0 days', '1 days', '2 days']) tm.assert_index_equal(result, expected) result = dt_tz - dti_tz expected = TimedeltaIndex(['0 days', '-1 days', '-2 days']) tm.assert_index_equal(result, expected) result = dti_tz - ts_tz expected = TimedeltaIndex(['0 days', '1 days', '2 days']) tm.assert_index_equal(result, expected) result = ts_tz - dti_tz expected = TimedeltaIndex(['0 days', '-1 days', '-2 days']) tm.assert_index_equal(result, expected) result = td - td expected = Timedelta('0 days') _check(result, expected) result = dti_tz - td expected = DatetimeIndex( ['20121231', '20130101', '20130102'], tz='US/Eastern') tm.assert_index_equal(result, expected) def test_dti_tdi_numeric_ops(self): # These are normally union/diff set-like ops tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo') dti = date_range('20130101', periods=3, name='bar') # TODO(wesm): unused? # td = Timedelta('1 days') # dt = Timestamp('20130101') result = tdi - tdi expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo') tm.assert_index_equal(result, expected) result = tdi + tdi expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo') tm.assert_index_equal(result, expected) result = dti - tdi # name will be reset expected = DatetimeIndex(['20121231', pd.NaT, '20130101']) tm.assert_index_equal(result, expected) def test_sub_period(self): # GH 13078 # not supported, check TypeError p = pd.Period('2011-01-01', freq='D') for freq in [None, 'H']: idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq) with tm.assertRaises(TypeError): idx - p with tm.assertRaises(TypeError): p - idx def test_addition_ops(self): # with datetimes/timedelta and tdi/dti tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo') dti = date_range('20130101', periods=3, name='bar') td = Timedelta('1 days') dt = Timestamp('20130101') result = tdi + dt expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo') tm.assert_index_equal(result, expected) result = dt + tdi expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo') tm.assert_index_equal(result, expected) result = td + tdi expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo') tm.assert_index_equal(result, expected) result = tdi + td expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo') tm.assert_index_equal(result, expected) # unequal length self.assertRaises(ValueError, lambda: tdi + dti[0:1]) self.assertRaises(ValueError, lambda: tdi[0:1] + dti) # random indexes self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3])) # this is a union! 
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi) result = tdi + dti # name will be reset expected = DatetimeIndex(['20130102', pd.NaT, '20130105']) tm.assert_index_equal(result, expected) result = dti + tdi # name will be reset expected = DatetimeIndex(['20130102', pd.NaT, '20130105']) tm.assert_index_equal(result, expected) result = dt + td expected = Timestamp('20130102') self.assertEqual(result, expected) result = td + dt expected = Timestamp('20130102') self.assertEqual(result, expected) def test_comp_nat(self): left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT, pd.Timedelta('3 days')]) right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')]) for l, r in [(left, right), (left.asobject, right.asobject)]: result = l == r expected = np.array([False, False, True]) tm.assert_numpy_array_equal(result, expected) result = l != r expected = np.array([True, True, False]) tm.assert_numpy_array_equal(result, expected) expected = np.array([False, False, False]) tm.assert_numpy_array_equal(l == pd.NaT, expected) tm.assert_numpy_array_equal(pd.NaT == r, expected) expected = np.array([True, True, True]) tm.assert_numpy_array_equal(l != pd.NaT, expected) tm.assert_numpy_array_equal(pd.NaT != l, expected) expected = np.array([False, False, False]) tm.assert_numpy_array_equal(l < pd.NaT, expected) tm.assert_numpy_array_equal(pd.NaT > l, expected) def test_value_counts_unique(self): # GH 7735 idx = timedelta_range('1 days 09:00:00', freq='H', periods=10) # create repeated values, 'n'th element is repeated by n+1 times idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1))) exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10) expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64') for obj in [idx, Series(idx)]: tm.assert_series_equal(obj.value_counts(), expected) expected = timedelta_range('1 days 09:00:00', freq='H', periods=10) tm.assert_index_equal(idx.unique(), expected) idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00', '1 days 09:00:00', '1 days 08:00:00', '1 days 08:00:00', pd.NaT]) exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00']) expected = Series([3, 2], index=exp_idx) for obj in [idx, Series(idx)]: tm.assert_series_equal(obj.value_counts(), expected) exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00', pd.NaT]) expected = Series([3, 2, 1], index=exp_idx) for obj in [idx, Series(idx)]: tm.assert_series_equal(obj.value_counts(dropna=False), expected) tm.assert_index_equal(idx.unique(), exp_idx) def test_nonunique_contains(self): # GH 9512 for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1], ['00:01:00', '00:01:00', '00:02:00'], ['00:01:00', '00:01:00', '00:00:01'])): tm.assertIn(idx[0], idx) def test_unknown_attribute(self): # GH 9680 tdi = pd.timedelta_range(start=0, periods=10, freq='1s') ts = pd.Series(np.random.normal(size=10), index=tdi) self.assertNotIn('foo', ts.__dict__.keys()) self.assertRaises(AttributeError, lambda: ts.foo) def test_order(self): # GH 10295 idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D', name='idx') idx2 = TimedeltaIndex( ['1 hour', '2 hour', '3 hour'], freq='H', name='idx') for idx in [idx1, idx2]: ordered = idx.sort_values() self.assert_index_equal(ordered, idx) self.assertEqual(ordered.freq, idx.freq) ordered = idx.sort_values(ascending=False) expected = idx[::-1] self.assert_index_equal(ordered, expected) self.assertEqual(ordered.freq, expected.freq) self.assertEqual(ordered.freq.n, -1) ordered, indexer = 
idx.sort_values(return_indexer=True) self.assert_index_equal(ordered, idx) self.assert_numpy_array_equal(indexer, np.array([0, 1, 2]), check_dtype=False) self.assertEqual(ordered.freq, idx.freq) ordered, indexer = idx.sort_values(return_indexer=True, ascending=False) self.assert_index_equal(ordered, idx[::-1]) self.assertEqual(ordered.freq, expected.freq) self.assertEqual(ordered.freq.n, -1) idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour', '2 hour ', '1 hour'], name='idx1') exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour', '3 hour', '5 hour'], name='idx1') idx2 = TimedeltaIndex(['1 day', '3 day', '5 day', '2 day', '1 day'], name='idx2') # TODO(wesm): unused? # exp2 = TimedeltaIndex(['1 day', '1 day', '2 day', # '3 day', '5 day'], name='idx2') # idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute', # '2 minute', pd.NaT], name='idx3') # exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute', # '5 minute'], name='idx3') for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]: ordered = idx.sort_values() self.assert_index_equal(ordered, expected) self.assertIsNone(ordered.freq) ordered = idx.sort_values(ascending=False) self.assert_index_equal(ordered, expected[::-1]) self.assertIsNone(ordered.freq) ordered, indexer = idx.sort_values(return_indexer=True) self.assert_index_equal(ordered, expected) exp = np.array([0, 4, 3, 1, 2]) self.assert_numpy_array_equal(indexer, exp, check_dtype=False) self.assertIsNone(ordered.freq) ordered, indexer = idx.sort_values(return_indexer=True, ascending=False) self.assert_index_equal(ordered, expected[::-1]) exp = np.array([2, 1, 3, 4, 0]) self.assert_numpy_array_equal(indexer, exp, check_dtype=False) self.assertIsNone(ordered.freq) def test_getitem(self): idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx') for idx in [idx1]: result = idx[0] self.assertEqual(result, pd.Timedelta('1 day')) result = idx[0:5] expected = pd.timedelta_range('1 day', '5 day', freq='D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) result = idx[0:10:2] expected = pd.timedelta_range('1 day', '9 day', freq='2D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) result = idx[-20:-5:3] expected = pd.timedelta_range('12 day', '24 day', freq='3D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) result = idx[4::-1] expected = TimedeltaIndex(['5 day', '4 day', '3 day', '2 day', '1 day'], freq='-1D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) def test_drop_duplicates_metadata(self): # GH 10115 idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx') result = idx.drop_duplicates() self.assert_index_equal(idx, result) self.assertEqual(idx.freq, result.freq) idx_dup = idx.append(idx) self.assertIsNone(idx_dup.freq) # freq is reset result = idx_dup.drop_duplicates() self.assert_index_equal(idx, result) self.assertIsNone(result.freq) def test_drop_duplicates(self): # to check Index/Series compat base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx') idx = base.append(base[:5]) res = idx.drop_duplicates() tm.assert_index_equal(res, base) res = Series(idx).drop_duplicates() tm.assert_series_equal(res, Series(base)) res = idx.drop_duplicates(keep='last') exp = base[5:].append(base[:5]) tm.assert_index_equal(res, exp) res = Series(idx).drop_duplicates(keep='last') tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36))) res = 
idx.drop_duplicates(keep=False) tm.assert_index_equal(res, base[5:]) res = Series(idx).drop_duplicates(keep=False) tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31))) def test_take(self): # GH 10295 idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx') for idx in [idx1]: result = idx.take([0]) self.assertEqual(result, pd.Timedelta('1 day')) result = idx.take([-1]) self.assertEqual(result, pd.Timedelta('31 day')) result = idx.take([0, 1, 2]) expected = pd.timedelta_range('1 day', '3 day', freq='D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) result = idx.take([0, 2, 4]) expected = pd.timedelta_range('1 day', '5 day', freq='2D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) result = idx.take([7, 4, 1]) expected = pd.timedelta_range('8 day', '2 day', freq='-3D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) result = idx.take([3, 2, 5]) expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx') self.assert_index_equal(result, expected) self.assertIsNone(result.freq) result = idx.take([-3, 2, 5]) expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx') self.assert_index_equal(result, expected) self.assertIsNone(result.freq) def test_take_invalid_kwargs(self): idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx') indices = [1, 6, 5, 9, 10, 13, 15, 3] msg = r"take\(\) got an unexpected keyword argument 'foo'" tm.assertRaisesRegexp(TypeError, msg, idx.take, indices, foo=2) msg = "the 'out' parameter is not supported" tm.assertRaisesRegexp(ValueError, msg, idx.take, indices, out=indices) msg = "the 'mode' parameter is not supported" tm.assertRaisesRegexp(ValueError, msg, idx.take, indices, mode='clip') def test_infer_freq(self): # GH 11018 for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S' ]: idx = pd.timedelta_range('1', freq=freq, periods=10) result = pd.TimedeltaIndex(idx.asi8, freq='infer') tm.assert_index_equal(idx, result) self.assertEqual(result.freq, freq) def test_nat_new(self): idx = pd.timedelta_range('1', freq='D', periods=5, name='x') result = idx._nat_new() exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x') tm.assert_index_equal(result, exp) result = idx._nat_new(box=False) exp = np.array([tslib.iNaT] * 5, dtype=np.int64) tm.assert_numpy_array_equal(result, exp) def test_shift(self): # GH 9903 idx = pd.TimedeltaIndex([], name='xxx') tm.assert_index_equal(idx.shift(0, freq='H'), idx) tm.assert_index_equal(idx.shift(3, freq='H'), idx) idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx') tm.assert_index_equal(idx.shift(0, freq='H'), idx) exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx') tm.assert_index_equal(idx.shift(3, freq='H'), exp) exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx') tm.assert_index_equal(idx.shift(-3, freq='H'), exp) tm.assert_index_equal(idx.shift(0, freq='T'), idx) exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'], name='xxx') tm.assert_index_equal(idx.shift(3, freq='T'), exp) exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'], name='xxx') tm.assert_index_equal(idx.shift(-3, freq='T'), exp) def test_repeat(self): index = pd.timedelta_range('1 days', periods=2, freq='D') exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days']) for res in [index.repeat(2), np.repeat(index, 2)]: tm.assert_index_equal(res, exp) self.assertIsNone(res.freq) index = 
TimedeltaIndex(['1 days', 'NaT', '3 days']) exp = TimedeltaIndex(['1 days', '1 days', '1 days', 'NaT', 'NaT', 'NaT', '3 days', '3 days', '3 days']) for res in [index.repeat(3), np.repeat(index, 3)]: tm.assert_index_equal(res, exp) self.assertIsNone(res.freq) def test_nat(self): self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT) self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT) idx = pd.TimedeltaIndex(['1 days', '2 days']) self.assertTrue(idx._can_hold_na) tm.assert_numpy_array_equal(idx._isnan, np.array([False, False])) self.assertFalse(idx.hasnans) tm.assert_numpy_array_equal(idx._nan_idxs, np.array([], dtype=np.intp)) idx = pd.TimedeltaIndex(['1 days', 'NaT']) self.assertTrue(idx._can_hold_na) tm.assert_numpy_array_equal(idx._isnan, np.array([False, True])) self.assertTrue(idx.hasnans) tm.assert_numpy_array_equal(idx._nan_idxs, np.array([1], dtype=np.intp)) def test_equals(self): # GH 13107 idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT']) self.assertTrue(idx.equals(idx)) self.assertTrue(idx.equals(idx.copy())) self.assertTrue(idx.equals(idx.asobject)) self.assertTrue(idx.asobject.equals(idx)) self.assertTrue(idx.asobject.equals(idx.asobject)) self.assertFalse(idx.equals(list(idx))) self.assertFalse(idx.equals(pd.Series(idx))) idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT']) self.assertFalse(idx.equals(idx2)) self.assertFalse(idx.equals(idx2.copy())) self.assertFalse(idx.equals(idx2.asobject)) self.assertFalse(idx.asobject.equals(idx2)) self.assertFalse(idx.asobject.equals(idx2.asobject)) self.assertFalse(idx.equals(list(idx2))) self.assertFalse(idx.equals(pd.Series(idx2))) class TestPeriodIndexOps(Ops): def setUp(self): super(TestPeriodIndexOps, self).setUp() mask = lambda x: (isinstance(x, DatetimeIndex) or isinstance(x, PeriodIndex)) self.is_valid_objs = [o for o in self.objs if mask(o)] self.not_valid_objs = [o for o in self.objs if not mask(o)] def test_ops_properties(self): self.check_ops_properties( ['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear', 'week', 'dayofweek', 'dayofyear', 'quarter']) self.check_ops_properties(['qyear'], lambda x: isinstance(x, PeriodIndex)) def test_asobject_tolist(self): idx = pd.period_range(start='2013-01-01', periods=4, freq='M', name='idx') expected_list = [pd.Period('2013-01-31', freq='M'), pd.Period('2013-02-28', freq='M'), pd.Period('2013-03-31', freq='M'), pd.Period('2013-04-30', freq='M')] expected = pd.Index(expected_list, dtype=object, name='idx') result = idx.asobject self.assertTrue(isinstance(result, Index)) self.assertEqual(result.dtype, object) self.assert_index_equal(result, expected) self.assertEqual(result.name, expected.name) self.assertEqual(idx.tolist(), expected_list) idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT', '2013-01-04'], freq='D', name='idx') expected_list = [pd.Period('2013-01-01', freq='D'), pd.Period('2013-01-02', freq='D'), pd.Period('NaT', freq='D'), pd.Period('2013-01-04', freq='D')] expected = pd.Index(expected_list, dtype=object, name='idx') result = idx.asobject self.assertTrue(isinstance(result, Index)) self.assertEqual(result.dtype, object) tm.assert_index_equal(result, expected) for i in [0, 1, 3]: self.assertEqual(result[i], expected[i]) self.assertIs(result[2], pd.NaT) self.assertEqual(result.name, expected.name) result_list = idx.tolist() for i in [0, 1, 3]: self.assertEqual(result_list[i], expected_list[i]) self.assertIs(result_list[2], pd.NaT) def test_minmax(self): # monotonic idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02', '2011-01-03'], freq='D') 
self.assertTrue(idx1.is_monotonic) # non-monotonic idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03', '2011-01-02', pd.NaT], freq='D') self.assertFalse(idx2.is_monotonic) for idx in [idx1, idx2]: self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D')) self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D')) self.assertEqual(idx1.argmin(), 1) self.assertEqual(idx2.argmin(), 0) self.assertEqual(idx1.argmax(), 3) self.assertEqual(idx2.argmax(), 2) for op in ['min', 'max']: # Return NaT obj = PeriodIndex([], freq='M') result = getattr(obj, op)() self.assertIs(result, tslib.NaT) obj = PeriodIndex([pd.NaT], freq='M') result = getattr(obj, op)() self.assertIs(result, tslib.NaT) obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M') result = getattr(obj, op)() self.assertIs(result, tslib.NaT) def test_numpy_minmax(self): pr = pd.period_range(start='2016-01-15', end='2016-01-20') self.assertEqual(np.min(pr), Period('2016-01-15', freq='D')) self.assertEqual(np.max(pr), Period('2016-01-20', freq='D')) errmsg = "the 'out' parameter is not supported" tm.assertRaisesRegexp(ValueError, errmsg, np.min, pr, out=0) tm.assertRaisesRegexp(ValueError, errmsg, np.max, pr, out=0) self.assertEqual(np.argmin(pr), 0) self.assertEqual(np.argmax(pr), 5) if not _np_version_under1p10: errmsg = "the 'out' parameter is not supported" tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, pr, out=0) tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, pr, out=0) def test_representation(self): # GH 7601 idx1 = PeriodIndex([], freq='D') idx2 = PeriodIndex(['2011-01-01'], freq='D') idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D') idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D') idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A') idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H') idx7 = pd.period_range('2013Q1', periods=1, freq="Q") idx8 = pd.period_range('2013Q1', periods=2, freq="Q") idx9 = pd.period_range('2013Q1', periods=3, freq="Q") idx10 = PeriodIndex(['2011-01-01', '2011-02-01'], freq='3D') exp1 = """PeriodIndex([], dtype='period[D]', freq='D')""" exp2 = """PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')""" exp3 = ("PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', " "freq='D')") exp4 = ("PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], " "dtype='period[D]', freq='D')") exp5 = ("PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', " "freq='A-DEC')") exp6 = ("PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], " "dtype='period[H]', freq='H')") exp7 = ("PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', " "freq='Q-DEC')") exp8 = ("PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', " "freq='Q-DEC')") exp9 = ("PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], " "dtype='period[Q-DEC]', freq='Q-DEC')") exp10 = ("PeriodIndex(['2011-01-01', '2011-02-01'], " "dtype='period[3D]', freq='3D')") for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9, idx10], [exp1, exp2, exp3, exp4, exp5, exp6, exp7, exp8, exp9, exp10]): for func in ['__repr__', '__unicode__', '__str__']: result = getattr(idx, func)() self.assertEqual(result, expected) def test_representation_to_series(self): # GH 10971 idx1 = PeriodIndex([], freq='D') idx2 = PeriodIndex(['2011-01-01'], freq='D') idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D') idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D') idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A') idx6 = PeriodIndex(['2011-01-01 09:00', 
'2012-02-01 10:00', 'NaT'], freq='H') idx7 = pd.period_range('2013Q1', periods=1, freq="Q") idx8 = pd.period_range('2013Q1', periods=2, freq="Q") idx9 = pd.period_range('2013Q1', periods=3, freq="Q") exp1 = """Series([], dtype: object)""" exp2 = """0 2011-01-01 dtype: object""" exp3 = """0 2011-01-01 1 2011-01-02 dtype: object""" exp4 = """0 2011-01-01 1 2011-01-02 2 2011-01-03 dtype: object""" exp5 = """0 2011 1 2012 2 2013 dtype: object""" exp6 = """0 2011-01-01 09:00 1 2012-02-01 10:00 2 NaT dtype: object""" exp7 = """0 2013Q1 dtype: object""" exp8 = """0 2013Q1 1 2013Q2 dtype: object""" exp9 = """0 2013Q1 1 2013Q2 2 2013Q3 dtype: object""" for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9], [exp1, exp2, exp3, exp4, exp5, exp6, exp7, exp8, exp9]): result = repr(pd.Series(idx)) self.assertEqual(result, expected) def test_summary(self): # GH9116 idx1 = PeriodIndex([], freq='D') idx2 = PeriodIndex(['2011-01-01'], freq='D') idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D') idx4 = PeriodIndex( ['2011-01-01', '2011-01-02', '2011-01-03'], freq='D') idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A') idx6 = PeriodIndex( ['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H') idx7 = pd.period_range('2013Q1', periods=1, freq="Q") idx8 = pd.period_range('2013Q1', periods=2, freq="Q") idx9 = pd.period_range('2013Q1', periods=3, freq="Q") exp1 = """PeriodIndex: 0 entries Freq: D""" exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01 Freq: D""" exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02 Freq: D""" exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03 Freq: D""" exp5 = """PeriodIndex: 3 entries, 2011 to 2013 Freq: A-DEC""" exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT Freq: H""" exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1 Freq: Q-DEC""" exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2 Freq: Q-DEC""" exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3 Freq: Q-DEC""" for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9], [exp1, exp2, exp3, exp4, exp5, exp6, exp7, exp8, exp9]): result = idx.summary() self.assertEqual(result, expected) def test_resolution(self): for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U'], ['day', 'day', 'day', 'day', 'hour', 'minute', 'second', 'millisecond', 'microsecond']): idx = pd.period_range(start='2013-04-01', periods=30, freq=freq) self.assertEqual(idx.resolution, expected) def test_union(self): # union rng1 = pd.period_range('1/1/2000', freq='D', periods=5) other1 = pd.period_range('1/6/2000', freq='D', periods=5) expected1 = pd.period_range('1/1/2000', freq='D', periods=10) rng2 = pd.period_range('1/1/2000', freq='D', periods=5) other2 = pd.period_range('1/4/2000', freq='D', periods=5) expected2 = pd.period_range('1/1/2000', freq='D', periods=8) rng3 = pd.period_range('1/1/2000', freq='D', periods=5) other3 = pd.PeriodIndex([], freq='D') expected3 = pd.period_range('1/1/2000', freq='D', periods=5) rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5) other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5) expected4 = pd.PeriodIndex(['2000-01-01 09:00', '2000-01-01 10:00', '2000-01-01 11:00', '2000-01-01 12:00', '2000-01-01 13:00', '2000-01-02 09:00', '2000-01-02 10:00', '2000-01-02 11:00', '2000-01-02 12:00', '2000-01-02 13:00'], freq='H') rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03', '2000-01-01 09:05'], freq='T') other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05' '2000-01-01 09:08'], 
freq='T') expected5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03', '2000-01-01 09:05', '2000-01-01 09:08'], freq='T') rng6 = pd.period_range('2000-01-01', freq='M', periods=7) other6 = pd.period_range('2000-04-01', freq='M', periods=7) expected6 = pd.period_range('2000-01-01', freq='M', periods=10) rng7 = pd.period_range('2003-01-01', freq='A', periods=5) other7 = pd.period_range('1998-01-01', freq='A', periods=8) expected7 = pd.period_range('1998-01-01', freq='A', periods=10) for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2), (rng3, other3, expected3), (rng4, other4, expected4), (rng5, other5, expected5), (rng6, other6, expected6), (rng7, other7, expected7)]: result_union = rng.union(other) tm.assert_index_equal(result_union, expected) def test_add_iadd(self): rng = pd.period_range('1/1/2000', freq='D', periods=5) other = pd.period_range('1/6/2000', freq='D', periods=5) # previously performed setop union, now raises TypeError (GH14164) with tm.assertRaises(TypeError): rng + other with tm.assertRaises(TypeError): rng += other # offset # DateOffset rng = pd.period_range('2014', '2024', freq='A') result = rng + pd.offsets.YearEnd(5) expected = pd.period_range('2019', '2029', freq='A') tm.assert_index_equal(result, expected) rng += pd.offsets.YearEnd(5) tm.assert_index_equal(rng, expected) for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(), np.timedelta64(365, 'D'), timedelta(365), Timedelta(days=365)]: msg = ('Input has different freq(=.+)? ' 'from PeriodIndex\\(freq=A-DEC\\)') with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg): rng + o rng = pd.period_range('2014-01', '2016-12', freq='M') result = rng + pd.offsets.MonthEnd(5) expected = pd.period_range('2014-06', '2017-05', freq='M') tm.assert_index_equal(result, expected) rng += pd.offsets.MonthEnd(5) tm.assert_index_equal(rng, expected) for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(), np.timedelta64(365, 'D'), timedelta(365), Timedelta(days=365)]: rng = pd.period_range('2014-01', '2016-12', freq='M') msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)' with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg): rng + o # Tick offsets = [pd.offsets.Day(3), timedelta(days=3), np.timedelta64(3, 'D'), pd.offsets.Hour(72), timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'), Timedelta('72:00:00')] for delta in offsets: rng = pd.period_range('2014-05-01', '2014-05-15', freq='D') result = rng + delta expected = pd.period_range('2014-05-04', '2014-05-18', freq='D') tm.assert_index_equal(result, expected) rng += delta tm.assert_index_equal(rng, expected) for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(), np.timedelta64(4, 'h'), timedelta(hours=23), Timedelta('23:00:00')]: rng = pd.period_range('2014-05-01', '2014-05-15', freq='D') msg = 'Input has different freq(=.+)? 
from PeriodIndex\\(freq=D\\)' with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg): rng + o offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'), pd.offsets.Minute(120), timedelta(minutes=120), np.timedelta64(120, 'm'), Timedelta(minutes=120)] for delta in offsets: rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H') result = rng + delta expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00', freq='H') tm.assert_index_equal(result, expected) rng += delta tm.assert_index_equal(rng, expected) for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30), np.timedelta64(30, 's'), Timedelta(seconds=30)]: rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H') msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)' with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg): result = rng + delta with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg): rng += delta # int rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10) result = rng + 1 expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10) tm.assert_index_equal(result, expected) rng += 1 tm.assert_index_equal(rng, expected) def test_difference(self): # diff rng1 = pd.period_range('1/1/2000', freq='D', periods=5) other1 = pd.period_range('1/6/2000', freq='D', periods=5) expected1 = pd.period_range('1/1/2000', freq='D', periods=5) rng2 = pd.period_range('1/1/2000', freq='D', periods=5) other2 = pd.period_range('1/4/2000', freq='D', periods=5) expected2 = pd.period_range('1/1/2000', freq='D', periods=3) rng3 = pd.period_range('1/1/2000', freq='D', periods=5) other3 = pd.PeriodIndex([], freq='D') expected3 = pd.period_range('1/1/2000', freq='D', periods=5) rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5) other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5) expected4 = rng4 rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03', '2000-01-01 09:05'], freq='T') other5 = pd.PeriodIndex( ['2000-01-01 09:01', '2000-01-01 09:05'], freq='T') expected5 = pd.PeriodIndex(['2000-01-01 09:03'], freq='T') rng6 = pd.period_range('2000-01-01', freq='M', periods=7) other6 = pd.period_range('2000-04-01', freq='M', periods=7) expected6 = pd.period_range('2000-01-01', freq='M', periods=3) rng7 = pd.period_range('2003-01-01', freq='A', periods=5) other7 = pd.period_range('1998-01-01', freq='A', periods=8) expected7 = pd.period_range('2006-01-01', freq='A', periods=2) for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2), (rng3, other3, expected3), (rng4, other4, expected4), (rng5, other5, expected5), (rng6, other6, expected6), (rng7, other7, expected7), ]: result_union = rng.difference(other) tm.assert_index_equal(result_union, expected) def test_sub_isub(self): # previously performed setop, now raises TypeError (GH14164) # TODO needs to wait on #13077 for decision on result type rng = pd.period_range('1/1/2000', freq='D', periods=5) other = pd.period_range('1/6/2000', freq='D', periods=5) with tm.assertRaises(TypeError): rng - other with tm.assertRaises(TypeError): rng -= other # offset # DateOffset rng = pd.period_range('2014', '2024', freq='A') result = rng - pd.offsets.YearEnd(5) expected = pd.period_range('2009', '2019', freq='A') tm.assert_index_equal(result, expected) rng -= pd.offsets.YearEnd(5) tm.assert_index_equal(rng, expected) for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(), np.timedelta64(365, 'D'), timedelta(365)]: rng = 
pd.period_range('2014', '2024', freq='A') msg = ('Input has different freq(=.+)? ' 'from PeriodIndex\\(freq=A-DEC\\)') with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg): rng - o rng = pd.period_range('2014-01', '2016-12', freq='M') result = rng - pd.offsets.MonthEnd(5) expected = pd.period_range('2013-08', '2016-07', freq='M') tm.assert_index_equal(result, expected) rng -= pd.offsets.MonthEnd(5) tm.assert_index_equal(rng, expected) for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(), np.timedelta64(365, 'D'), timedelta(365)]: rng = pd.period_range('2014-01', '2016-12', freq='M') msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)' with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg): rng - o # Tick offsets = [pd.offsets.Day(3), timedelta(days=3), np.timedelta64(3, 'D'), pd.offsets.Hour(72), timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h')] for delta in offsets: rng = pd.period_range('2014-05-01', '2014-05-15', freq='D') result = rng - delta expected = pd.period_range('2014-04-28', '2014-05-12', freq='D') tm.assert_index_equal(result, expected) rng -= delta tm.assert_index_equal(rng, expected) for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(), np.timedelta64(4, 'h'), timedelta(hours=23)]: rng = pd.period_range('2014-05-01', '2014-05-15', freq='D') msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)' with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg): rng - o offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'), pd.offsets.Minute(120), timedelta(minutes=120), np.timedelta64(120, 'm')] for delta in offsets: rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H') result = rng - delta expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00', freq='H') tm.assert_index_equal(result, expected) rng -= delta tm.assert_index_equal(rng, expected) for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30), np.timedelta64(30, 's')]: rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H') msg = 'Input has different freq(=.+)? 
from PeriodIndex\\(freq=H\\)' with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg): result = rng + delta with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg): rng += delta # int rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10) result = rng - 1 expected = pd.period_range('2000-01-01 08:00', freq='H', periods=10) tm.assert_index_equal(result, expected) rng -= 1 tm.assert_index_equal(rng, expected) def test_comp_nat(self): left = pd.PeriodIndex([pd.Period('2011-01-01'), pd.NaT, pd.Period('2011-01-03')]) right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period('2011-01-03')]) for l, r in [(left, right), (left.asobject, right.asobject)]: result = l == r expected = np.array([False, False, True]) tm.assert_numpy_array_equal(result, expected) result = l != r expected = np.array([True, True, False]) tm.assert_numpy_array_equal(result, expected) expected = np.array([False, False, False]) tm.assert_numpy_array_equal(l == pd.NaT, expected) tm.assert_numpy_array_equal(pd.NaT == r, expected) expected = np.array([True, True, True]) tm.assert_numpy_array_equal(l != pd.NaT, expected) tm.assert_numpy_array_equal(pd.NaT != l, expected) expected = np.array([False, False, False]) tm.assert_numpy_array_equal(l < pd.NaT, expected) tm.assert_numpy_array_equal(pd.NaT > l, expected) def test_value_counts_unique(self): # GH 7735 idx = pd.period_range('2011-01-01 09:00', freq='H', periods=10) # create repeated values, 'n'th element is repeated by n+1 times idx = PeriodIndex(np.repeat(idx.values, range(1, len(idx) + 1)), freq='H') exp_idx = PeriodIndex(['2011-01-01 18:00', '2011-01-01 17:00', '2011-01-01 16:00', '2011-01-01 15:00', '2011-01-01 14:00', '2011-01-01 13:00', '2011-01-01 12:00', '2011-01-01 11:00', '2011-01-01 10:00', '2011-01-01 09:00'], freq='H') expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64') for obj in [idx, Series(idx)]: tm.assert_series_equal(obj.value_counts(), expected) expected = pd.period_range('2011-01-01 09:00', freq='H', periods=10) tm.assert_index_equal(idx.unique(), expected) idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 09:00', '2013-01-01 09:00', '2013-01-01 08:00', '2013-01-01 08:00', pd.NaT], freq='H') exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00'], freq='H') expected = Series([3, 2], index=exp_idx) for obj in [idx, Series(idx)]: tm.assert_series_equal(obj.value_counts(), expected) exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00', pd.NaT], freq='H') expected = Series([3, 2, 1], index=exp_idx) for obj in [idx, Series(idx)]: tm.assert_series_equal(obj.value_counts(dropna=False), expected) tm.assert_index_equal(idx.unique(), exp_idx) def test_drop_duplicates_metadata(self): # GH 10115 idx = pd.period_range('2011-01-01', '2011-01-31', freq='D', name='idx') result = idx.drop_duplicates() self.assert_index_equal(idx, result) self.assertEqual(idx.freq, result.freq) idx_dup = idx.append(idx) # freq will not be reset result = idx_dup.drop_duplicates() self.assert_index_equal(idx, result) self.assertEqual(idx.freq, result.freq) def test_drop_duplicates(self): # to check Index/Series compat base = pd.period_range('2011-01-01', '2011-01-31', freq='D', name='idx') idx = base.append(base[:5]) res = idx.drop_duplicates() tm.assert_index_equal(res, base) res = Series(idx).drop_duplicates() tm.assert_series_equal(res, Series(base)) res = idx.drop_duplicates(keep='last') exp = base[5:].append(base[:5]) tm.assert_index_equal(res, exp) res = Series(idx).drop_duplicates(keep='last') tm.assert_series_equal(res, Series(exp, 
index=np.arange(5, 36))) res = idx.drop_duplicates(keep=False) tm.assert_index_equal(res, base[5:]) res = Series(idx).drop_duplicates(keep=False) tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31))) def test_order_compat(self): def _check_freq(index, expected_index): if isinstance(index, PeriodIndex): self.assertEqual(index.freq, expected_index.freq) pidx = PeriodIndex(['2011', '2012', '2013'], name='pidx', freq='A') # for compatibility check iidx = Index([2011, 2012, 2013], name='idx') for idx in [pidx, iidx]: ordered = idx.sort_values() self.assert_index_equal(ordered, idx) _check_freq(ordered, idx) ordered = idx.sort_values(ascending=False) self.assert_index_equal(ordered, idx[::-1]) _check_freq(ordered, idx[::-1]) ordered, indexer = idx.sort_values(return_indexer=True) self.assert_index_equal(ordered, idx) self.assert_numpy_array_equal(indexer, np.array([0, 1, 2]), check_dtype=False) _check_freq(ordered, idx) ordered, indexer = idx.sort_values(return_indexer=True, ascending=False) self.assert_index_equal(ordered, idx[::-1]) self.assert_numpy_array_equal(indexer, np.array([2, 1, 0]), check_dtype=False) _check_freq(ordered, idx[::-1]) pidx = PeriodIndex(['2011', '2013', '2015', '2012', '2011'], name='pidx', freq='A') pexpected = PeriodIndex( ['2011', '2011', '2012', '2013', '2015'], name='pidx', freq='A') # for compatibility check iidx = Index([2011, 2013, 2015, 2012, 2011], name='idx') iexpected = Index([2011, 2011, 2012, 2013, 2015], name='idx') for idx, expected in [(pidx, pexpected), (iidx, iexpected)]: ordered = idx.sort_values() self.assert_index_equal(ordered, expected) _check_freq(ordered, idx) ordered = idx.sort_values(ascending=False) self.assert_index_equal(ordered, expected[::-1]) _check_freq(ordered, idx) ordered, indexer = idx.sort_values(return_indexer=True) self.assert_index_equal(ordered, expected) exp = np.array([0, 4, 3, 1, 2]) self.assert_numpy_array_equal(indexer, exp, check_dtype=False) _check_freq(ordered, idx) ordered, indexer = idx.sort_values(return_indexer=True, ascending=False) self.assert_index_equal(ordered, expected[::-1]) exp = np.array([2, 1, 3, 4, 0]) self.assert_numpy_array_equal(indexer, exp, check_dtype=False) _check_freq(ordered, idx) pidx = PeriodIndex(['2011', '2013', 'NaT', '2011'], name='pidx', freq='D') result = pidx.sort_values() expected = PeriodIndex(['NaT', '2011', '2011', '2013'], name='pidx', freq='D') self.assert_index_equal(result, expected) self.assertEqual(result.freq, 'D') result = pidx.sort_values(ascending=False) expected = PeriodIndex( ['2013', '2011', '2011', 'NaT'], name='pidx', freq='D') self.assert_index_equal(result, expected) self.assertEqual(result.freq, 'D') def test_order(self): for freq in ['D', '2D', '4D']: idx = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq=freq, name='idx') ordered = idx.sort_values() self.assert_index_equal(ordered, idx) self.assertEqual(ordered.freq, idx.freq) ordered = idx.sort_values(ascending=False) expected = idx[::-1] self.assert_index_equal(ordered, expected) self.assertEqual(ordered.freq, expected.freq) self.assertEqual(ordered.freq, freq) ordered, indexer = idx.sort_values(return_indexer=True) self.assert_index_equal(ordered, idx) self.assert_numpy_array_equal(indexer, np.array([0, 1, 2]), check_dtype=False) self.assertEqual(ordered.freq, idx.freq) self.assertEqual(ordered.freq, freq) ordered, indexer = idx.sort_values(return_indexer=True, ascending=False) expected = idx[::-1] self.assert_index_equal(ordered, expected) self.assert_numpy_array_equal(indexer, 
np.array([2, 1, 0]), check_dtype=False) self.assertEqual(ordered.freq, expected.freq) self.assertEqual(ordered.freq, freq) idx1 = PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05', '2011-01-02', '2011-01-01'], freq='D', name='idx1') exp1 = PeriodIndex(['2011-01-01', '2011-01-01', '2011-01-02', '2011-01-03', '2011-01-05'], freq='D', name='idx1') idx2 = PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05', '2011-01-02', '2011-01-01'], freq='D', name='idx2') exp2 = PeriodIndex(['2011-01-01', '2011-01-01', '2011-01-02', '2011-01-03', '2011-01-05'], freq='D', name='idx2') idx3 = PeriodIndex([pd.NaT, '2011-01-03', '2011-01-05', '2011-01-02', pd.NaT], freq='D', name='idx3') exp3 = PeriodIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03', '2011-01-05'], freq='D', name='idx3') for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]: ordered = idx.sort_values() self.assert_index_equal(ordered, expected) self.assertEqual(ordered.freq, 'D') ordered = idx.sort_values(ascending=False) self.assert_index_equal(ordered, expected[::-1]) self.assertEqual(ordered.freq, 'D') ordered, indexer = idx.sort_values(return_indexer=True) self.assert_index_equal(ordered, expected) exp = np.array([0, 4, 3, 1, 2]) self.assert_numpy_array_equal(indexer, exp, check_dtype=False) self.assertEqual(ordered.freq, 'D') ordered, indexer = idx.sort_values(return_indexer=True, ascending=False) self.assert_index_equal(ordered, expected[::-1]) exp = np.array([2, 1, 3, 4, 0]) self.assert_numpy_array_equal(indexer, exp, check_dtype=False) self.assertEqual(ordered.freq, 'D') def test_getitem(self): idx1 = pd.period_range('2011-01-01', '2011-01-31', freq='D', name='idx') for idx in [idx1]: result = idx[0] self.assertEqual(result, pd.Period('2011-01-01', freq='D')) result = idx[-1] self.assertEqual(result, pd.Period('2011-01-31', freq='D')) result = idx[0:5] expected = pd.period_range('2011-01-01', '2011-01-05', freq='D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) self.assertEqual(result.freq, 'D') result = idx[0:10:2] expected = pd.PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05', '2011-01-07', '2011-01-09'], freq='D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) self.assertEqual(result.freq, 'D') result = idx[-20:-5:3] expected = pd.PeriodIndex(['2011-01-12', '2011-01-15', '2011-01-18', '2011-01-21', '2011-01-24'], freq='D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) self.assertEqual(result.freq, 'D') result = idx[4::-1] expected = PeriodIndex(['2011-01-05', '2011-01-04', '2011-01-03', '2011-01-02', '2011-01-01'], freq='D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) self.assertEqual(result.freq, 'D') def test_take(self): # GH 10295 idx1 = pd.period_range('2011-01-01', '2011-01-31', freq='D', name='idx') for idx in [idx1]: result = idx.take([0]) self.assertEqual(result, pd.Period('2011-01-01', freq='D')) result = idx.take([5]) self.assertEqual(result, pd.Period('2011-01-06', freq='D')) result = idx.take([0, 1, 2]) expected = pd.period_range('2011-01-01', '2011-01-03', freq='D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, 'D') self.assertEqual(result.freq, expected.freq) result = idx.take([0, 2, 4]) expected = pd.PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05'], freq='D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, 
expected.freq) self.assertEqual(result.freq, 'D') result = idx.take([7, 4, 1]) expected = pd.PeriodIndex(['2011-01-08', '2011-01-05', '2011-01-02'], freq='D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) self.assertEqual(result.freq, 'D') result = idx.take([3, 2, 5]) expected = PeriodIndex(['2011-01-04', '2011-01-03', '2011-01-06'], freq='D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) self.assertEqual(result.freq, 'D') result = idx.take([-3, 2, 5]) expected = PeriodIndex(['2011-01-29', '2011-01-03', '2011-01-06'], freq='D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) self.assertEqual(result.freq, 'D') def test_nat_new(self): idx = pd.period_range('2011-01', freq='M', periods=5, name='x') result = idx._nat_new() exp = pd.PeriodIndex([pd.NaT] * 5, freq='M', name='x') tm.assert_index_equal(result, exp) result = idx._nat_new(box=False) exp = np.array([tslib.iNaT] * 5, dtype=np.int64) tm.assert_numpy_array_equal(result, exp) def test_shift(self): # GH 9903 idx = pd.PeriodIndex([], name='xxx', freq='H') with tm.assertRaises(TypeError): # period shift doesn't accept freq idx.shift(1, freq='H') tm.assert_index_equal(idx.shift(0), idx) tm.assert_index_equal(idx.shift(3), idx) idx = pd.PeriodIndex(['2011-01-01 10:00', '2011-01-01 11:00' '2011-01-01 12:00'], name='xxx', freq='H') tm.assert_index_equal(idx.shift(0), idx) exp = pd.PeriodIndex(['2011-01-01 13:00', '2011-01-01 14:00' '2011-01-01 15:00'], name='xxx', freq='H') tm.assert_index_equal(idx.shift(3), exp) exp = pd.PeriodIndex(['2011-01-01 07:00', '2011-01-01 08:00' '2011-01-01 09:00'], name='xxx', freq='H') tm.assert_index_equal(idx.shift(-3), exp) def test_repeat(self): index = pd.period_range('2001-01-01', periods=2, freq='D') exp = pd.PeriodIndex(['2001-01-01', '2001-01-01', '2001-01-02', '2001-01-02'], freq='D') for res in [index.repeat(2), np.repeat(index, 2)]: tm.assert_index_equal(res, exp) index = pd.period_range('2001-01-01', periods=2, freq='2D') exp = pd.PeriodIndex(['2001-01-01', '2001-01-01', '2001-01-03', '2001-01-03'], freq='2D') for res in [index.repeat(2), np.repeat(index, 2)]: tm.assert_index_equal(res, exp) index = pd.PeriodIndex(['2001-01', 'NaT', '2003-01'], freq='M') exp = pd.PeriodIndex(['2001-01', '2001-01', '2001-01', 'NaT', 'NaT', 'NaT', '2003-01', '2003-01', '2003-01'], freq='M') for res in [index.repeat(3), np.repeat(index, 3)]: tm.assert_index_equal(res, exp) def test_nat(self): self.assertIs(pd.PeriodIndex._na_value, pd.NaT) self.assertIs(pd.PeriodIndex([], freq='M')._na_value, pd.NaT) idx = pd.PeriodIndex(['2011-01-01', '2011-01-02'], freq='D') self.assertTrue(idx._can_hold_na) tm.assert_numpy_array_equal(idx._isnan, np.array([False, False])) self.assertFalse(idx.hasnans) tm.assert_numpy_array_equal(idx._nan_idxs, np.array([], dtype=np.intp)) idx = pd.PeriodIndex(['2011-01-01', 'NaT'], freq='D') self.assertTrue(idx._can_hold_na) tm.assert_numpy_array_equal(idx._isnan, np.array([False, True])) self.assertTrue(idx.hasnans) tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp)
numpy.array
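The completion for this row is a bare numpy.array call with dtype=np.intp, the platform-sized integer that NumPy (and pandas, for positional indices such as _nan_idxs) expects. A minimal, standalone sketch of that construction — illustrative only, not taken from the dataset row:

import numpy as np

# np.intp is the platform integer (int64 on most 64-bit systems); pandas uses it
# for arrays of positions, which is why the test compares _nan_idxs against it.
nan_positions = np.array([1], dtype=np.intp)
print(nan_positions, nan_positions.dtype)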
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from scipy import optimize
import pint
import math

u = pint.UnitRegistry()

# Constants
n_crew = 4
n_passengers = 50
n_people = n_passengers + n_crew
weight_per_passenger = 100 * u.kg
Swet_Sref = 6
AR = 8
K_ld = 15.5
Rcr = 2000 * u.km
Eltr = 45 * u.minutes
Rdiv = 370 * u.km
# Speed of sound = 589 knots at 35,000 feet
Vcr = 589 * 0.75 * u.knots

# Roskam, page 57 for inefficiencies of c_j
# cj_cr = 0.5 * u.lb / u.lb / u.hr
# cj_ltr = 0.6 * u.lb / u.lb / u.hr
# cj_diversion = 0.9 * u.lb / u.lb / u.hr
g = 9.81 * u.m / (u.s ** 2)
cj_cr = (19.8 * u.milligram / u.newton / u.s) * g
# TODO: Look into better methods for this
cj_ltr = 1.2 * cj_cr
cj_diversion = 1.8 * cj_cr

# Calculate fuel fractions
f_W_to = 0.99 * 0.99 * 0.995
f_W_climb = 0.98
f_W_descent = 0.99
f_W_shutdown = 0.992

# Range equation fractions (x2)
LDmax = 20.67

# Actual calculations now
W_PL = n_people * weight_per_passenger

# Weight fractions...
# Cruise
f_W_cr = 1/math.exp(Rcr / ((Vcr/cj_cr) * (LDmax * 0.867)))
# Loiter
f_W_ltr = 1/math.exp(Eltr / ((1/cj_ltr) * (LDmax)))
# Diversion cruise
f_W_div = 1/math.exp(Rdiv / ((275 * u.knot/cj_diversion) * (LDmax)))
# No penalty for diversion climb/land - only to about 10,000 ft
W =
np.array([1, f_W_to, f_W_climb, f_W_cr, f_W_descent, 1, f_W_div, 1, f_W_ltr, f_W_shutdown])
numpy.array
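This row's completion assembles the per-segment weight fractions into a single NumPy vector W. A short sketch of how such a vector is typically used in the Roskam-style fuel-fraction method the prompt cites: the product of the segment fractions gives the mission fuel fraction. The numbers below are illustrative placeholders, not values from the row.

import numpy as np

# Hypothetical segment fractions W_i / W_(i-1): start, takeoff, climb, cruise,
# descent, diversion climb, diversion cruise, diversion descent, loiter, shutdown.
segment_fractions = np.array([1.0, 0.975, 0.98, 0.77, 0.99, 1.0, 0.985, 1.0, 0.994, 0.992])
Mff = np.prod(segment_fractions)   # overall W_end / W_takeoff
fuel_used = 1.0 - Mff              # share of takeoff weight burned as fuel
print(f"Mff = {Mff:.3f}, fuel fraction = {fuel_used:.3f}")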
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn

import torchvision
import torchvision.transforms as transforms

import os
import argparse
import numpy as np
import itertools

from models import *
from db_quant_utils import rel_index


parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--k', default=64, type=int, help='depth of model')
parser.add_argument('--noise_rate', default=0.2, type=float, help='label noise')
parser.add_argument('--asym', action='store_true')
parser.add_argument('--resolution', default=500, type=float, help='resolution for plot')
parser.add_argument('--range_l', default=0.5, type=float, help='how far `left` to go in the plot')
parser.add_argument('--range_r', default=0.5, type=float, help='how far `right` to go in the plot')
parser.add_argument('--plot_method', default='train', type=str)
parser.add_argument('--mixup', action='store_true')
parser.add_argument('--active_log', action='store_true')
parser.add_argument('--num_samples', default=100, type=int)
parser.add_argument('--set_seed', default=1, type=int)
parser.add_argument('--set_data_seed', default=1, type=int)
args = parser.parse_args()

device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(device)
args.eval = True

if args.mixup:
    args.model_path = './checkpoint/mixup'
else:
    args.model_path = f'./checkpoint/{args.set_seed}/{args.set_data_seed}'

if args.active_log:
    import wandb
    if not args.mixup:
        wandb.init(project="dd_fragmentation", name=f'frag_{args.noise_rate}noise_{args.k}k_{args.set_seed}')
    else:
        wandb.init(project="dd_fragmentation", name=f'frag_{args.noise_rate}noise_{args.k}k__{args.set_seed}_wmixup')
    wandb.config.update(args)

transform_train = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

torch.manual_seed(args.set_data_seed)
from data_noisy import cifar10Nosiy
trainset_noisy = cifar10Nosiy(root='./data', train=True, transform=transform_train,
                              download=True, asym=args.asym, nosiy_rate=0.2)

if args.noise_rate > 0:
    from data_noisy import cifar10Nosiy
    trainset = cifar10Nosiy(root='./data', train=True, transform=transform_train,
                            download=True, asym=args.asym, nosiy_rate=args.noise_rate)
else:
    trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True,
                                            transform=transform_train)

trainloader = torch.utils.data.DataLoader(
    trainset, batch_size=128, shuffle=True, num_workers=2)

testset = torchvision.datasets.CIFAR10(
    root='./data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(
    testset, batch_size=100, shuffle=False, num_workers=2)

l_mis = np.where(np.array(trainset_noisy.targets) != np.array(trainset_noisy.true_targets))[0]   # mislabeled images in noisy case
l_corr = np.where(np.array(trainset_noisy.targets) == np.array(trainset_noisy.true_targets))[0]  # correctly labeled images in noisy case
l_all = np.arange(len(trainset.targets))
l_test = np.arange(len(testset.targets))

from db.data import make_planeloader
from db.evaluation import decision_boundary
from db.utils import connected_components

def num_connected_components(dlist1, dlist2, loader1, loader2, num_samples, net, device, args):
    cc_list = []
    for i in range(num_samples):
        # import ipdb; ipdb.set_trace()
        dirlist =
np.random.choice(dlist1, 2)
numpy.random.choice
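The sampled call here draws two indices from dlist1, which the surrounding code evidently uses to pick a pair of images that define a plane for the decision-boundary plots. One caveat worth noting when reusing it: np.random.choice samples with replacement by default, so the two indices can coincide. A minimal sketch with distinct draws, using a stand-in index list rather than data from the row:

import numpy as np

np.random.seed(0)                        # reproducible draw for the sketch
dlist1 = np.arange(100)                  # stand-in for the mislabeled-image index list
dirlist = np.random.choice(dlist1, 2, replace=False)  # two distinct indices
print(dirlist)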
import numpy as np from astropy.io import fits import os import re import glob import copy from vorbin.voronoi_2d_binning import voronoi_2d_binning import matplotlib.pyplot as plt from scipy import interpolate, stats, optimize import gc from matplotlib import gridspec, animation try: import tqdm except: tqdm = None from joblib import Parallel, delayed plt.style.use('dark_background') def read_muse_ifu(fits_file,z=0): """ Read in a MUSE-formatted IFU cube :param fits_file: str File path to the FITS IFU cube :param z: float, optional The redshift of the spectrum, since MUSE cubes often do not provide this information :return nx: int x-dimension (horizontal axis) of the cube :return ny: int y-dimension (vertical axis) of the cube :return nz: int z-dimension (wavelength axis) of the cube :return ra: float Right ascension :return dec: float Declination :return museid: str The MUSE ID of the observation :return wave: array 1-D Wavelength array with dimension (nz,) :return flux: array 3-D flux array with dimensions (nz, ny, nx) :return ivar: array 3-D inverse variance array with dimensions (nz, ny, nx) :return specres: array 1-D spectral resolution ("R") array with dimension (nz,) :return mask: array 3-D mask array with dimensions (nz, ny, nx) :return object_name: str The name of the object, if provided in the FITS header """ # Load the file # https://www.eso.org/rm/api/v1/public/releaseDescriptions/78 with fits.open(fits_file) as hdu: # First axis is wavelength, then 2nd and 3rd are image x/y try: nx, ny, nz = hdu[1].header['NAXIS1'], hdu[1].header['NAXIS2'], hdu[1].header['NAXIS3'] ra = hdu[0].header['RA'] dec = hdu[0].header['DEC'] except: # ra = hdu[0].header['ESO ADA GUID RA'] # dec = hdu[0].header['ESO ADA GUID DEC'] nx, ny, nz = hdu[0].header['NAXIS1'], hdu[0].header['NAXIS2'], hdu[0].header['NAXIS3'] ra = hdu[0].header['CRVAL1'] dec = hdu[0].header['CRVAL2'] primary = hdu[0].header try: object_name = primary['OBJECT'] except: object_name = None i = 1 museid = [] while True: try: museid.append(primary['OBID'+str(i)]) i += 1 except: break # Get unit of flux, assuming 10^-x erg/s/cm2/Angstrom/spaxel # unit = hdu[0].header['BUNIT'] # power = int(re.search('10\*\*(\(?)(.+?)(\))?\s', unit).group(2)) # scale = 10**(-17) / 10**power try: # 3d rectified cube in units of 10(-20) erg/s/cm2/Angstrom/spaxel [NX x NY x NWAVE], convert to 10(-17) flux = hdu[1].data # Variance (sigma2) for the above [NX x NY x NWAVE], convert to 10(-17) var = hdu[2].data # Wavelength vector must be reconstructed, convert from nm to angstroms header = hdu[1].header wave = np.array(header['CRVAL3'] + header['CD3_3']*np.arange(header['NAXIS3'])) # wave = np.linspace(primary['WAVELMIN'], primary['WAVELMAX'], nz) * 10 # Median spectral resolution at (wavelmin + wavelmax)/2 # dlambda = cwave / primary['SPEC_RES'] # specres = wave / dlambda # Default behavior for MUSE data cubes using https://www.aanda.org/articles/aa/pdf/2017/12/aa30833-17.pdf equation 7 dlambda = 5.835e-8 * wave**2 - 9.080e-4 * wave + 5.983 specres = wave / dlambda # Scale by the measured spec_res at the central wavelength spec_cent = primary['SPEC_RES'] cwave = np.nanmedian(wave) c_dlambda = 5.835e-8 * cwave**2 - 9.080e-4 * cwave + 5.983 scale = 1 + (spec_cent - cwave/c_dlambda) / spec_cent specres *= scale except: flux = hdu[0].data var = (0.1 * flux)**2 wave = np.arange(primary['CRVAL3'], primary['CRVAL3']+primary['CDELT3']*(nz-1), primary['CDELT3']) # specres = wave / 2.6 dlambda = 5.835e-8 * wave**2 - 9.080e-4 * wave + 5.983 specres = wave / dlambda ivar 
= 1/var mask = np.zeros_like(flux) return nx,ny,nz,ra,dec,museid,wave,flux,ivar,specres,mask,object_name def read_manga_ifu(fits_file,z=0): """ Read in a MANGA-formatted IFU cube :param fits_file: str File path to the FITS IFU cube :param z: float, optional The redshift of the spectrum, this is unused. :return nx: int x-dimension (horizontal axis) of the cube :return ny: int y-dimension (vertical axis) of the cube :return nz: int z-dimension (wavelength axis) of the cube :return ra: float Right ascension :return dec: float Declination :return mangaid: str The MANGA ID of the observation :return wave: array 1-D Wavelength array with dimension (nz,) :return flux: array 3-D flux array with dimensions (nz, ny, nx) :return ivar: array 3-D inverse variance array with dimensions (nz, ny, nx) :return specres: array 1-D spectral resolution ("R") array with dimension (nz,) :return mask: array 3-D mask array with dimensions (nz, ny, nx) :return None: To mirror the output length of read_muse_ifu """ # Load the file # https://data.sdss.org/datamodel/files/MANGA_SPECTRO_REDUX/DRPVER/PLATE4/stack/manga-CUBE.html#hdu1 with fits.open(fits_file) as hdu: # First axis is wavelength, then 2nd and 3rd are image x/y nx, ny, nz = hdu[1].header['NAXIS1'], hdu[1].header['NAXIS2'], hdu[1].header['NAXIS3'] try: ra = hdu[0].header['OBJRA'] dec = hdu[0].header['OBJDEC'] except: ra = hdu[1].header['IFURA'] dec = hdu[1].header['IFUDEC'] primary = hdu[0].header ebv = primary['EBVGAL'] mangaid = primary['MANGAID'] # 3d rectified cube in units of 10(-17) erg/s/cm2/Angstrom/spaxel [NX x NY x NWAVE] flux = hdu[1].data # Inverse variance (1/sigma2) for the above [NX x NY x NWAVE] ivar = hdu[2].data # Pixel mask [NX x NY x NWAVE]. Defined values are set in sdssMaskbits.par mask = hdu[3].data # Wavelength vector [NWAVE] wave = hdu[6].data # Median spectral resolution as a function of wavelength for the fibers in this IFU [NWAVE] specres = hdu[7].data # ebv = hdu[0].header['EBVGAL'] return nx,ny,nz,ra,dec,mangaid,wave,flux,ivar,specres,mask,None def prepare_ifu(fits_file,z,format,aperture=None,voronoi_binning=True,fixed_binning=False,targetsn=None,cvt=True,voronoi_plot=True,quiet=True,wvt=False, maxbins=800,snr_threshold=0.5,fixed_bin_size=10,use_and_mask=True,nx=None,ny=None,nz=None,ra=None,dec=None,dataid=None,wave=None,flux=None,ivar=None, specres=None,mask=None,objname=None): """ Deconstruct an IFU cube into individual spaxel files for fitting with BADASS :param fits_file: str The file path to the IFU FITS file; if format == 'user', this field may be left as None, '', or any other filler value :param z: float The redshift of the spectrum :param aperture: array, optional The lower-left and upper-right corners of a square aperture, formatted as [y0, y1, x0, x1] :param voronoi_binning: bool Whether or not to bin spaxels using the voronoi method (grouping to read a certain SNR threshold). Default True. Mutually exclusive with fixed_binning. :param fixed_binning: bool Whether or not to bin spaxels using a fixed size. Default False. Mutually exclusive with voronoi_binning. :param targetsn: float, optional The target SNR to bin by, if using voronoi binning. :param cvt: bool Vorbin CVT option (see the vorbin package docs). Default True. :param voronoi_plot: bool Whether or not to plot the voronoi bin structure. Default True. :param quiet: bool Vorbin quiet option (see the vorbin package docs). Default True. :param wvt: bool Vorbin wvt option (see the vorbin package docs). Default False. 
:param maxbins: int If no target SNR is provided for voronoi binning, maxbins may be specified, which will automatically calculate the target SNR required to reach the number of bins desired. Default 800. :param snr_threshold: float Minimum SNR threshold, below which spaxel data will be removed and not fit. :param fixed_bin_size: int If using fixed binning, this is the side length of the square bins, in units of spaxels. :param use_and_mask: bool Whether or not to save the and_mask data. :param nx: int, optional x-dimension of the cube, only required if format == 'user' :param ny: int, optional y-dimension of the cube, only required if format == 'user' :param nz: int, optional z-dimension of the cube, only required if format == 'user' :param ra: float, optional Right ascension of the cube, only required if format == 'user' :param dec: float, optional Declination of the cube, only required if format == 'user' :param dataid: str, optional ID of the cube, only required if format == 'user' :param wave: array, optional 1-D wavelength array with shape (nz,), only required if format == 'user' :param flux: array, optional 3-D flux array with shape (nz, ny, nx), only required if format == 'user' :param ivar: array, optional 3-D inverse variance array with shape (nz, ny, nx), only required if format == 'user' :param specres: array, optional 1-D spectral resolution ("R") array with shape (nz,), only required if format == 'user' :param mask: array, optional 3-D mask array with shape (nz, ny, nx), only required if format == 'user' :param objname: str, optional The name of the object, only required if format == 'user' :return wave: array 1-D wavelength array with shape (nz,) :return flux: array 3-D masked flux array with shape (nz, ny, nx) :return ivar: array 3-D masked inverse variance array with shape (nz, ny, nx) :return mask: array 3-D mask array with shape (nz, ny, nx) :return fwhm_res: array 1-D FWHM resolution array with shape (nz,) :return binnum: array Bin number array that specifies which spaxels are in each bin (see the vorbin docs) :return npixels: array Number of spaxels in each bin (see the vorbin docs) :return xpixbin: array The x positions of spaxels in each bin :return ypixbin: array The y positions of spaxels in each bin :return z: float The redshift :return dataid: str The data ID :return objname: str The object name """ assert format in ('manga', 'muse', 'user'), "format must be either 'manga' or 'muse'; no others currently supported!" # Read the FITS file using the appropriate parsing function # no more eval 🥲 if format == 'manga': nx,ny,nz,ra,dec,dataid,wave,flux,ivar,specres,mask,objname = read_manga_ifu(fits_file,z) elif format == 'muse': nx,ny,nz,ra,dec,dataid,wave,flux,ivar,specres,mask,objname = read_muse_ifu(fits_file,z) else: # wave array shape = (nz,) # flux, ivar array shape = (nz, ny, nx) # specres can be a single value or an array of shape (nz,) # VALIDATE THAT USER INPUTS ARE IN THE CORRECT FORMAT for value in (nx, ny, nz, ra, dec, wave, flux, specres): assert value is not None, "For user spec, all of (nx, ny, nz, ra, dec, wave, flux, specres) must be specified!" if ivar is None: print("WARNING: No ivar was input. 
Defaulting to sqrt(flux).") ivar = np.sqrt(flux) if mask is None: mask = np.zeros(flux.shape, dtype=int) assert wave.shape == (nz,), "Wave array shape should be (nz,)" assert flux.shape == (nz, ny, nx), "Flux array shape should be (nz, ny, nx)" assert ivar.shape == (nz, ny, nx), "Ivar array shape should be (nz, ny, nx)" assert mask.shape == (nz, ny, nx), "Mask array shape should be (nz, ny, nx)" assert (type(specres) in (int, float, np.int_, np.float_)) or (specres.shape == (nz,)), "Specres should be a float or an array of shape (nz,)" loglam = np.log10(wave) # FWHM Resolution in angstroms: fwhm_res = wave / specres # dlambda = lambda / R; R = lambda / dlambda if not use_and_mask: mask = np.zeros(flux.shape, dtype=int) # Converting to wdisp -- so that 2.355*wdisp*dlam_gal = fwhm_res # if format == 'manga': # c = 299792.458 # speed of light in km/s # frac = wave[1]/wave[0] # Constant lambda fraction per pixel # dlam_gal = (frac-1)*wave # Size of every pixel in Angstrom # vdisp = c / (2.355*specres) # delta v = c / R in km/s # velscale = np.log(frac) * c # Constant velocity scale in km/s per pixel # wdisp = vdisp / velscale # Intrinsic dispersion of every pixel, in pixels units minx, maxx = 0, nx miny, maxy = 0, ny if aperture: miny, maxy, minx, maxx = aperture maxy += 1 maxx += 1 x = np.arange(minx, maxx, 1) y = np.arange(miny, maxy, 1) # Create x/y grid for the voronoi binning X, Y = np.meshgrid(x, y) _x, _y = X.ravel(), Y.ravel() if voronoi_binning: # Average along the wavelength axis so each spaxel has one s/n value # Note to self: Y AXIS IS ALWAYS FIRST ON NUMPY ARRAYS signal = np.nanmean(flux[:, miny:maxy, minx:maxx], axis=0) noise = np.sqrt(1 / np.nanmean(ivar[:, miny:maxy, minx:maxx], axis=0)) sr = signal.ravel() nr = noise.ravel() good = np.where(np.isfinite(sr) & np.isfinite(nr) & (sr > 0) & (nr > 0))[0] # Target S/N ratio to bin for. If none, defaults to value such that the highest pixel isnt binned # In general this isn't a great choice. Should want to maximize resolution without sacrificing too much # computation time. if not targetsn: # binnum = np.array([maxbins+1]) targetsn0 = np.max([np.sort((sr / nr)[good], kind='quicksort')[-1] / 16, 10]) def objective(targetsn, return_data=False): vplot = voronoi_plot if return_data else False qt = quiet if return_data else True try: binnum, xbin, ybin, xbar, ybar, sn, npixels, scale = voronoi_2d_binning(_x[good], _y[good], sr[good], nr[good], targetsn, cvt=cvt, pixelsize=1, plot=vplot, quiet=qt, wvt=wvt) except ValueError: return np.inf if return_data: return binnum, xbin, ybin, xbar, ybar, sn, npixels, scale return (np.max(binnum)+1 - maxbins)**2 print(f'Performing S/N optimization to reach {maxbins} bins. This may take a while...') soln = optimize.minimize(objective, [targetsn0], method='Nelder-Mead', bounds=[(1, X.size)]) targetsn = soln.x[0] binnum, xbin, ybin, xbar, ybar, SNR, npixels, scale = objective(targetsn, return_data=True) else: binnum, xbin, ybin, xbar, ybar, SNR, npixels, scale = voronoi_2d_binning(_x[good], _y[good], sr[good], nr[good], targetsn, cvt=cvt, pixelsize=1, plot=voronoi_plot, quiet=quiet, wvt=wvt) print(f'Voronoi binning successful with target S/N = {targetsn}! 
Created {np.max(binnum)+1} bins.') if voronoi_plot: # For some reason voronoi makes the plot but doesnt save it or anything filename = os.path.join(os.path.dirname(fits_file), 'voronoi_binning.pdf') plt.savefig(filename, bbox_inches='tight', dpi=300) plt.close() _x = _x[good] _y = _y[good] # Create output arrays for flux, ivar, mask out_flux = np.zeros((flux.shape[0], np.nanmax(binnum)+1)) out_ivar = np.zeros((ivar.shape[0], np.nanmax(binnum)+1)) out_mask = np.zeros((mask.shape[0], np.nanmax(binnum)+1)) xpixbin = np.full(np.nanmax(binnum)+1, fill_value=np.nan, dtype=object) ypixbin = np.full(np.nanmax(binnum)+1, fill_value=np.nan, dtype=object) for j in range(xpixbin.size): xpixbin[j] = [] ypixbin[j] = [] # Average flux/ivar in each bin for i, bin in enumerate(binnum): # there is probably a better way to do this, but I'm lazy xi, yi = _x[i], _y[i] out_flux[:, bin] += flux[:, yi, xi] out_ivar[:, bin] += ivar[:, yi, xi] out_mask[:, bin] += mask[:, yi, xi] xpixbin[bin].append(xi) ypixbin[bin].append(yi) out_flux /= npixels out_ivar /= npixels irange = np.nanmax(binnum)+1 for bin in binnum: if SNR[bin] < snr_threshold: flux[:, np.asarray(ypixbin[bin]), np.asarray(xpixbin[bin])] = np.nan ivar[:, np.asarray(ypixbin[bin]), np.asarray(xpixbin[bin])] = np.nan mask[:, np.asarray(ypixbin[bin]), np.asarray(xpixbin[bin])] = 1 elif fixed_binning: print(f'Performing binning with fixed bin size of {fixed_bin_size}') # Create square bins of a fixed size binnum = np.zeros((maxy-miny, maxx-minx), dtype=int) wy = int(np.ceil((maxy-miny)/fixed_bin_size)) wx = int(np.ceil((maxx-minx)/fixed_bin_size)) indx = 0 nbins = wy*wx out_flux = np.zeros((flux.shape[0], nbins)) out_ivar = np.zeros((ivar.shape[0], nbins)) out_mask = np.zeros((mask.shape[0], nbins)) xpixbin = np.full(nbins, fill_value=np.nan, dtype=object) ypixbin = np.full(nbins, fill_value=np.nan, dtype=object) npixels = np.zeros((nbins,), dtype=int) SNR = np.zeros((nbins,)) for iy in range(wy): for ix in range(wx): # Relative axes indices ylo = iy*fixed_bin_size yhi = np.min([(iy+1)*fixed_bin_size, binnum.shape[0]]) xlo = ix*fixed_bin_size xhi = np.min([(ix+1)*fixed_bin_size, binnum.shape[1]]) binnum[ylo:yhi, xlo:xhi] = indx # Shift axes limits by the aperture ylo += miny yhi += miny xlo += minx xhi += minx ybin, xbin = np.meshgrid(np.arange(ylo, yhi, 1), np.arange(xlo, xhi, 1)) ypixbin[indx] = ybin.flatten().tolist() xpixbin[indx] = xbin.flatten().tolist() out_flux[:, indx] = np.apply_over_axes(np.nanmean, flux[:, ylo:yhi, xlo:xhi], (1,2)).flatten() out_ivar[:, indx] = np.apply_over_axes(np.nanmean, ivar[:, ylo:yhi, xlo:xhi], (1,2)).flatten() out_mask[:, indx] = np.apply_over_axes(np.nansum, mask[:, ylo:yhi, xlo:xhi], (1,2)).flatten() npixels[indx] = len(ybin) signal = np.nanmean(flux[:, ylo:yhi, xlo:xhi], axis=0) noise = np.sqrt(1/np.nanmean(ivar[:, ylo:yhi, xlo:xhi], axis=0)) SNR[indx] = np.nansum(signal) / np.sqrt(np.nansum(noise**2)) if SNR[indx] < snr_threshold: flux[:, ylo:yhi, xlo:xhi] = np.nan ivar[:, ylo:yhi, xlo:xhi] = np.nan mask[:, ylo:yhi, xlo:xhi] = 1 indx += 1 binnum = binnum.flatten() irange = nbins print(f'Fixed binning successful, created {nbins} bins') else: xpixbin = None ypixbin = None out_flux = flux[:, miny:maxy, minx:maxx].reshape(nz, (maxx-minx)*(maxy-miny)) out_ivar = ivar[:, miny:maxy, minx:maxx].reshape(nz, (maxx-minx)*(maxy-miny)) out_mask = mask[:, miny:maxy, minx:maxx].reshape(nz, (maxx-minx)*(maxy-miny)) binnum = np.zeros((maxx-minx)*(maxy-miny)) npixels = np.ones((maxx-minx)*(maxy-miny)) * (maxx-minx)*(maxy-miny) irange = 
(maxx-minx)*(maxy-miny) signal = np.nanmean(flux, axis=0) noise = np.sqrt(1 / np.nanmean(ivar, axis=0)) SNR = signal / noise flux[:, SNR < snr_threshold] = np.nan ivar[:, SNR < snr_threshold] = np.nan mask[:, SNR < snr_threshold] = 1 for i in range(irange): # Unpack the spaxel galaxy_spaxel = out_flux[:,i] # observed flux ivar_spaxel = out_ivar[:,i] # 1-sigma spectral noise mask_spaxel = out_mask[:,i] # bad pixels if voronoi_binning or fixed_binning: xi = xpixbin[i] # x and y pixel position yi = ypixbin[i] snr_thresh = SNR[i] >= snr_threshold # make sure bin has an overall SNR greater than the threshold else: xi = [_x[i]] yi = [_y[i]] snr_thresh = SNR[_y[i], _x[i]] >= snr_threshold # make sure spaxel has an SNR greater than the threshold binnum_i = 0 if (not voronoi_binning) and (not fixed_binning) else i # Voronoi bin index that this pixel belongs to # Package into a FITS file -- but only if the SNR is high enough, otherwise throw out the data if snr_thresh: primaryhdu = fits.PrimaryHDU() primaryhdu.header.append(("FORMAT", format.upper(), "Data format"), end=True) if type(dataid) is list: for j, did in enumerate(dataid): primaryhdu.header.append((f'{format.upper()}ID{j}', did, f'{"MANGA" if format == "manga" else "MUSE"} ID number'), end=True) else: primaryhdu.header.append((f'{format.upper()}ID', dataid, f'{"MANGA" if format == "manga" else "MUSE"} ID number'), end=True) primaryhdu.header.append(('OBJNAME', objname, 'Object Name'), end=True) primaryhdu.header.append(('RA', ra, 'Right ascension'), end=True) primaryhdu.header.append(('DEC', dec, 'Declination'), end=True) primaryhdu.header.append(('BINNUM', binnum_i, 'bin index of the spaxel (Voronoi)'), end=True) primaryhdu.header.append(('NX', nx, 'x dimension of the full MANGA cube'), end=True) primaryhdu.header.append(('NY', ny, 'y dimension of the full MANGA cube'), end=True) coadd = fits.BinTableHDU.from_columns(fits.ColDefs([ fits.Column(name='flux', array=galaxy_spaxel, format='D'), fits.Column(name='loglam', array=loglam, format='D'), fits.Column(name='ivar', array=ivar_spaxel, format='D'), fits.Column(name='and_mask', array=mask_spaxel, format='D'), fits.Column(name='fwhm_res', array=fwhm_res, format='D') ])) specobj = fits.BinTableHDU.from_columns(fits.ColDefs([ fits.Column(name='z', array=np.array([z]), format='D'), # fits.Column(name='ebv', array=np.array([ebv]), format='E') ])) specobj.header.append(('PLUG_RA', ra, 'Right ascension'), end=True) specobj.header.append(('PLUG_DEC', dec, 'Declination'), end=True) binobj = fits.BinTableHDU.from_columns(fits.ColDefs([ fits.Column(name='spaxelx', array=np.array(xi), format='E'), fits.Column(name='spaxely', array=np.array(yi), format='E') ])) out_hdu = fits.HDUList([primaryhdu, coadd, specobj, binobj]) # Save output to sub-folder if voronoi_binning or fixed_binning: tag = '_'.join(['spaxel', 'bin', str(binnum_i)]) else: tag = '_'.join(['spaxel', str(xi[0]), str(yi[0])]) outdir = os.path.join(os.path.dirname(fits_file), fits_file.split(os.sep)[-1].replace('.fits',''), tag) if not os.path.exists(outdir): os.makedirs(outdir) outfile = os.path.join(outdir, tag+'.fits') out_hdu.writeto(outfile, overwrite=True) # else: # for xx, yy in zip(xi, yi): # flux[:, yy, xx] = np.nan # ivar[:, yy, xx] = np.nan # mask[:, yy, xx] = 1 return wave,flux,ivar,mask,fwhm_res,binnum,npixels,xpixbin,ypixbin,z,dataid,objname def plot_ifu(fits_file,wave,flux,ivar,mask,binnum,npixels,xpixbin,ypixbin,z,dataid,aperture=None,object_name=None): """ Plot a binned IFU cube and aperture. 
:param fits_file: str The file path to the FITS IFU file. :param wave: array 1-D wavelength array with shape (nz,) :param flux: array 3-D masked flux array with shape (nz, ny, nx) :param ivar: array 3-D masked inverse variance array with shape (nz, ny, nx) :param mask: array 3-D mask array with shape (nz, ny, nx) :param binnum: array Bin number array that specifies which spaxels are in each bin (see the vorbin docs) :param npixels: array Number of spaxels in each bin (see the vorbin docs) :param xpixbin: array The x positions of spaxels in each bin :param ypixbin: array The y positions of spaxels in each bin :param z: float The redshift :param dataid: str The data ID :param aperture: array The lower-left and upper-right corners of a square aperture, formatted as [y0, y1, x0, x1] :param objname: str The object name :return None: """ # fig = plt.figure(figsize=(14,4)) fig = plt.figure(figsize=(14, 10)) gs = gridspec.GridSpec(nrows=8, ncols=8) ax1 = fig.add_subplot(gs[0:5, 0:4]) ax2 = fig.add_subplot(gs[0:5, 4:8]) ax3 = fig.add_subplot(gs[5:8, 0:8]) fig.subplots_adjust(wspace=0.1, hspace=0.5) ny, nx = flux.shape[1:] # breakpoint() center = (nx / 2, ny / 2) minx, maxx = 0, nx miny, maxy = 0, ny if aperture: miny, maxy, minx, maxx = aperture maxy += 1 maxx += 1 flux_sum = np.nansum(flux, axis=0) # flux_sum[flux_sum==0] = np.nan flux_avg = flux_sum / flux.shape[0] noise_sum = np.nanmedian(np.sqrt(1/ivar), axis=0) flux_max_unbinned = np.nanmax(flux, axis=0) noise_max_unbinned = np.nanmax(np.sqrt(1/ivar), axis=0) if np.any(binnum): flux_bin = np.zeros(np.nanmax(binnum)+1) noise_bin = np.zeros(np.nanmax(binnum)+1) # flux_max = np.zeros(np.nanmax(binnum)+1) # noise_max = np.zeros(np.nanmax(binnum)+1) for bin in range(np.nanmax(binnum)+1): _x = xpixbin[bin] _y = ypixbin[bin] for i in range(len(_x)): flux_bin[bin] += flux_avg[_y[i], _x[i]] noise_bin[bin] += noise_sum[_y[i], _x[i]] # flux_max[bin] = np.nanmax([flux_max[bin], np.nanmax(flux[:, _y[i], _x[i]])]) # noise_max[bin] = np.nanmax([noise_max[bin], np.nanmax(np.sqrt(1/ivar)[:, _y[i], _x[i]])]) flux_bin /= npixels noise_bin /= npixels for bin in range(np.nanmax(binnum)+1): _x = xpixbin[bin] _y = ypixbin[bin] for i in range(len(_x)): flux_avg[_y[i], _x[i]] = flux_bin[bin] noise_sum[_y[i], _x[i]] = noise_bin[bin] # flux_max_unbinned[_y[i], _x[i]] = flux_max[bin] # noise_max_unbinned[_y[i], _x[i]] = noise_max[bin] # This is rapidly making me lose the will to live base = 10 cbar_data = ax1.imshow(np.log(flux_avg*base+1)/np.log(base), origin='lower', cmap='cubehelix') cbar_noise = ax2.imshow(np.log(noise_sum*base+1)/np.log(base), origin='lower', cmap='cubehelix') cbar = plt.colorbar(cbar_data, ax=ax1, label=r'$\log_{10}{(f_{\lambda,max})}$ ($10^{-17}$ erg s$^{-1}$ cm$^{-2}$ spaxel$^{-1}$)') cbar2 = plt.colorbar(cbar_noise, ax=ax2, label=r'$\log_{10}{(\Sigma\sigma)}$ ($10^{-17}$ erg s$^{-1}$ cm$^{-2}$ spaxel$^{-1}$)') if aperture: aper = plt.Rectangle((aperture[2]-.5, aperture[0]-.5), aperture[3]-aperture[2]+1, aperture[1]-aperture[0]+1, color='red', fill=False, linewidth=2) ax1.add_patch(aper) aper = plt.Rectangle((aperture[2]-.5, aperture[0]-.5), aperture[3]-aperture[2]+1, aperture[1]-aperture[0]+1, color='red', fill=False, linewidth=2) ax2.add_patch(aper) # Oh you're a python coder? Name every numpy function. 
coadd = np.nansum(np.nansum(flux, axis=2), axis=1) / (flux.shape[1]*flux.shape[2]) coadd_noise = np.nansum(np.nansum(np.sqrt(1/ivar), axis=2), axis=1) / (ivar.shape[1]*ivar.shape[2]) fontsize = 14 ax3.plot(wave, coadd, linewidth=0.5, color='xkcd:bright aqua', label='Coadded Flux') ax3.plot(wave, coadd_noise, linewidth=0.5, color='xkcd:bright orange', label='$1\sigma$ uncertainty') ax3.axhline(0.0, color='white', linewidth=0.5, linestyle='--') ax3.set_ylabel(r'$f_\lambda$ ($10^{-17}$ erg cm$^{-2}$ s$^{-1}$ $\mathrm{\AA}^{-1}$)', fontsize=fontsize) # ax4.plot(wave, fwhm) # ax4.set_ylabel(r'$\Delta\lambda = \lambda/R (\AA)$', fontsize=fontsize) ax3.set_xlabel(r'$\lambda_{\rm{obs}}$ ($\mathrm{\AA}$)', fontsize=fontsize) ax3.legend(loc='best') fig.suptitle(f'OBJECT ID: {dataid}, z={z}' if object_name is None else f'{object_name}, z={z}', fontsize=fontsize) plt.tight_layout() filepath = os.path.join(os.path.dirname(fits_file), 'fitting_aperture.pdf') plt.savefig(filepath) ax1.clear() ax2.clear() ax3.clear() fig.clear() plt.close(fig) def reconstruct_ifu(fits_file,mcmc_label=None): """ Reconstruct an IFU cube using the fit MCMC data from BADASS :param fits_file: str The file path to the original IFU FITS file :param mcmc_label: int, optional The index of the MCMC_output_* files that should be used in the reconstruction. Defaults to the largest one found. :return par_out: FITS HDUList FITS-formatted HDUList mirroring the par_table format from BADASS, but arranged in the cube shape Each HDU in the list corresponds to one parameter, mapped with a shape (ny, nx) :return bmc_out: FITS HDUList FITS-formatted HDUList mirroring the best_model_components format from BADASS, but arranged in the cube shape Each HDU in the list corresponds to a model component, mapped with a shape (nz, ny, nx) :return last_mcmc+1: int The index of the output MCMC_output_* file for the overall cube (independent of the individual MCMC_output_* folders for each spaxel) """ # Make sure outputs exist path = fits_file.replace('.fits', '') + os.sep if not os.path.exists(path): raise NotADirectoryError(f"The unpacked folders for {fits_file} do not exist! Fit before calling reconstruct") subdirs = glob.glob(path + 'spaxel_*_*') voronoi = subdirs[0].split('_')[1] == 'bin' subdirs.sort() if len(subdirs) == 0: raise NotADirectoryError(f"The unpacked folders for {fits_file} do not exist! Fit before calling reconstruct") # Get number of bins if voronoi: nbins = max([int(subdir.split('_')[-1]) for subdir in subdirs]) + 1 else: nbins = len(subdirs) xpixbin = np.full(nbins, fill_value=np.nan, dtype=object) ypixbin = np.full(nbins, fill_value=np.nan, dtype=object) i = 0 subdir = subdirs[0] # Find each MCMC output if mcmc_label is None: most_recent_mcmc = glob.glob(subdir + os.sep + 'MCMC_output_*') if len(most_recent_mcmc) == 0: raise NotADirectoryError(f"The unpacked folders for {fits_file} do not exist! Fit before calling reconstruct") most_recent_mcmc = sorted(most_recent_mcmc)[-1] else: most_recent_mcmc = glob.glob(subdir + os.sep + f"MCMC_output_{mcmc_label}") if len(most_recent_mcmc) == 0: raise NotADirectoryError(f"The unpacked folders for {fits_file}, MCMC_output{mcmc_label} do not exist! 
Fit before calling reconstruct") most_recent_mcmc = most_recent_mcmc[0] par_table = sorted(glob.glob(os.path.join(most_recent_mcmc, 'log', '*par_table.fits'))) best_model_components = sorted(glob.glob(os.path.join(most_recent_mcmc, 'log', '*best_model_components.fits'))) test_stats = sorted(glob.glob(os.path.join(most_recent_mcmc, 'log', 'test_stats.fits'))) if len(par_table) < 1 or len(best_model_components) < 1: raise FileNotFoundError( f"The FITS files for {most_recent_mcmc} do not exist! Fit before calling reconstruct") par_table = par_table[0] best_model_components = best_model_components[0] # Load in the FITS files with fits.open(par_table) as parhdu, fits.open(best_model_components) as bmchdu: # Get the bin number and x/y coord(s) hdr = parhdu[0].header data1 = parhdu[1].data data2 = parhdu[2].data bdata = bmchdu[1].data if len(test_stats) > 0: test_stats = test_stats[0] with fits.open(test_stats) as tshdu: tdata = tshdu[1].data else: tdata = None binnum = copy.deepcopy(hdr['binnum']) if voronoi else i xpixbin[binnum] = copy.deepcopy(data2['spaxelx']) ypixbin[binnum] = copy.deepcopy(data2['spaxely']) # if it's the first iteration, create the arrays based on the proper shape parameters = data1['parameter'] if tdata is not None: parameters = np.concatenate((parameters, tdata['parameter'])) parvals = np.full(shape=(nbins,), fill_value=np.nan, dtype=[ (param, float) for param in np.unique(parameters) ]) parvals_low = copy.deepcopy(parvals) parvals_upp = copy.deepcopy(parvals) bmcparams = np.array(bdata.columns.names, dtype=str) bmcvals = np.full(shape=(bdata.size, nbins), fill_value=np.nan, dtype=[ (param, float) for param in np.unique(bmcparams) ]) # Set the par table parameters mcmc = 'sigma_low' in data1.names and 'sigma_upp' in data1.names for param in parameters: w = np.where(data1['parameter'] == param)[0] if w.size > 0: w = w[0] parvals[param][binnum] = copy.deepcopy(data1['best_fit'][w]) if mcmc: parvals_low[param][binnum] = copy.deepcopy(data1['sigma_low'][w]) parvals_upp[param][binnum] = copy.deepcopy(data1['sigma_upp'][w]) elif tdata is not None: w2 = np.where(tdata['parameter'] == param)[0] if w2.size > 0: parvals[param][binnum] = copy.deepcopy(tdata['best_fit'][w2]) parvals_low[param][binnum] = copy.deepcopy(tdata['sigma_low'][w2]) parvals_upp[param][binnum] = copy.deepcopy(tdata['sigma_upp'][w2]) # Set the best model components for param in bmcparams: bmcvals[param][:, binnum] = copy.deepcopy(bdata[param]) parsize = data1.size if tdata is not None: parsize += tdata.size bmcsize = bdata.size def append_spaxel(i, subdir): nonlocal parvals, parvals_low, parvals_upp, bmcvals, parameters, xpixbin, ypixbin, voronoi # Find each MCMC output if mcmc_label is None: most_recent_mcmc = glob.glob(subdir + os.sep + 'MCMC_output_*') if len(most_recent_mcmc) == 0: # raise NotADirectoryError( # f"The unpacked folders for {fits_file} do not exist! 
Fit before calling reconstruct") print(f"WARNING: MCMC folder for {subdir} not found!") return most_recent_mcmc = sorted(most_recent_mcmc)[-1] else: most_recent_mcmc = glob.glob(subdir + os.sep + f"MCMC_output_{mcmc_label}") if len(most_recent_mcmc) == 0: print(f"WARNING: MCMC folder for {subdir} not found!") return most_recent_mcmc = most_recent_mcmc[0] par_table = sorted(glob.glob(os.path.join(most_recent_mcmc, 'log', '*par_table.fits'))) best_model_components = sorted(glob.glob(os.path.join(most_recent_mcmc, 'log', '*best_model_components.fits'))) test_stats = sorted(glob.glob(os.path.join(most_recent_mcmc, 'log', 'test_stats.fits'))) if len(par_table) < 1 or len(best_model_components) < 1: # raise FileNotFoundError( # f"The FITS files for {most_recent_mcmc} do not exist! Fit before calling reconstruct") print(f"WARNING: FITS files for {most_recent_mcmc} not found!") return par_table = par_table[0] best_model_components = best_model_components[0] # Load in the FITS files with fits.open(par_table) as parhdu, fits.open(best_model_components) as bmchdu: # Get the bin number and x/y coord(s) hdr = parhdu[0].header data1 = parhdu[1].data data2 = parhdu[2].data bdata = bmchdu[1].data if len(test_stats) > 0: test_stats = test_stats[0] with fits.open(test_stats) as tshdu: tdata = tshdu[1].data else: tdata = None binnum = copy.deepcopy(hdr['binnum']) if voronoi else i xpixbin[binnum] = copy.deepcopy(data2['spaxelx']) ypixbin[binnum] = copy.deepcopy(data2['spaxely']) # Set the par table parameters mcmc = 'sigma_low' in data1.names and 'sigma_upp' in data1.names for param in parameters: w = np.where(data1['parameter'] == param)[0] if w.size > 0: w = w[0] parvals[param][binnum] = copy.deepcopy(data1['best_fit'][w]) if mcmc: parvals_low[param][binnum] = copy.deepcopy(data1['sigma_low'][w]) parvals_upp[param][binnum] = copy.deepcopy(data1['sigma_upp'][w]) elif tdata is not None: w2 = np.where(tdata['parameter'] == param)[0] if w2.size > 0: parvals[param][binnum] = copy.deepcopy(tdata['best_fit'][w2]) parvals_low[param][binnum] = copy.deepcopy(tdata['sigma_low'][w2]) parvals_upp[param][binnum] = copy.deepcopy(tdata['sigma_upp'][w2]) # Set the best model components for param in bmcparams: bmcvals[param][:, binnum] = copy.deepcopy(bdata[param]) iterable = enumerate(subdirs) if tqdm is None else tqdm.tqdm(enumerate(subdirs), total=len(subdirs)) Parallel(n_jobs=-1, require='sharedmem')(delayed(append_spaxel)(i, subdir) for i, subdir in iterable) for i in range(len(xpixbin)): if type(xpixbin[i]) in (float, np.float_) and np.isnan(xpixbin[i]): xpixbin[i] = [] if type(ypixbin[i]) in (float, np.float_) and np.isnan(ypixbin[i]): ypixbin[i] = [] maxx = -np.inf maxy = -np.inf minx = np.inf miny = np.inf for j in range(nbins): maxx = np.nanmax([maxx, np.nanmax(xpixbin[j]) if len(xpixbin[j]) > 0 else np.nan]) maxy = np.nanmax([maxy, np.nanmax(ypixbin[j]) if len(ypixbin[j]) > 0 else np.nan]) minx = np.nanmin([minx, np.nanmin(xpixbin[j]) if len(xpixbin[j]) > 0 else np.nan]) miny = np.nanmin([miny, np.nanmin(ypixbin[j]) if len(ypixbin[j]) > 0 else np.nan]) # Reconstruct original shape nx = int(maxx - minx + 1) ny = int(maxy - miny + 1) bmcvals_out = np.full(shape=(bmcparams.size, bmcsize, ny, nx), fill_value=np.nan, dtype=float) parvals_out = np.full(shape=(parsize, ny, nx), fill_value=np.nan, dtype=float) parvals_out_low = copy.deepcopy(parvals_out) parvals_out_upp = copy.deepcopy(parvals_out) binpix = np.zeros((nx*ny, 3), dtype=int) ii = 0 for n in range(nbins): for xi, yi in zip(xpixbin[n], ypixbin[n]): for j, 
param in enumerate(parameters): parvals_out[j, int(yi-miny), int(xi-minx)] = parvals[param][n] if mcmc: parvals_out_low[j, int(yi-miny), int(xi-minx)] = parvals_low[param][n] parvals_out_upp[j, int(yi-miny), int(xi-minx)] = parvals_upp[param][n] binpix[ii, :] = (int(xi-minx), int(yi-miny), n) ii += 1 for j, param in enumerate(bmcparams): for xi, yi in zip(xpixbin[n], ypixbin[n]): bmcvals_out[j, :, int(yi-miny), int(xi-minx)] = bmcvals[param][:, n] # Construct FITS outputs bmc_out = fits.HDUList() primary = fits.PrimaryHDU() primary.header.append(('ORIGINX', minx, 'x-coordinate of position (0,0) in full cube'), end=True) primary.header.append(('ORIGINY', miny, 'y-coordinate of position (0,0) in full cube'), end=True) primary.header.append(('NBINS', nbins, 'number of Voronoi bins'), end=True) primary2 = copy.deepcopy(primary) bininfo = fits.BinTableHDU.from_columns(fits.ColDefs([ fits.Column(name='x', array=binpix[:, 0], format='I'), fits.Column(name='y', array=binpix[:, 1], format='I'), fits.Column(name='bin', array=binpix[:, 2], format='I') ])) bininfo2 = copy.deepcopy(bininfo) bmc_out.append(primary) for k, name in enumerate(bmcparams): if name.upper() == 'WAVE': # good = np.where(np.isfinite(bmcvals_out[k, ...])) bmc_out.append( fits.BinTableHDU.from_columns(fits.ColDefs([ fits.Column(name='wave', array=bmcvals_out[k, :, ny//2, nx//2], format='E'), ]), name=name)) else: bmc_out.append( fits.ImageHDU(bmcvals_out[k, ...], name=name) ) bmc_out.append(bininfo) par_out = fits.HDUList() par_out.append(primary2) for k, name in enumerate(parameters): par_out.append( fits.ImageHDU(parvals_out[k, ...], name=name) ) if mcmc: par_out.append( fits.ImageHDU(parvals_out_low[k, ...], name=name + '_SIGMA_LOW') ) par_out.append( fits.ImageHDU(parvals_out_upp[k, ...], name=name + '_SIGMA_UPP') ) par_out.append(bininfo2) # Write outputs folders = os.listdir(os.path.dirname(fits_file)) mcmc_outputs = [int(fold.split('_')[-1]) for fold in folders if 'MCMC_output' in fold] if len(mcmc_outputs) >= 1: last_mcmc = max(mcmc_outputs) else: last_mcmc = 0 logdir = os.path.join(os.path.dirname(fits_file), 'MCMC_output_'+str(last_mcmc+1), 'log') if not os.path.exists(logdir): os.makedirs(logdir) bmc_out.writeto(logdir + os.sep + 'cube_best_model_components.fits', overwrite=True) par_out.writeto(logdir + os.sep + 'cube_par_table.fits', overwrite=True) return par_out, bmc_out, last_mcmc+1 def plot_reconstructed_cube(mcmc_output_dir, partable_to_plot=None, bmc_to_plot=None, animated=False): """ Make 2D maps and/or videos of the reconstructed par_table and best_model_components parameters :param mcmc_output_dir: str The folder to the overall MCMC_output_* folder for the whole cube (not individual spaxels) :param partable_to_plot: list, optional List of the par_table parameter names to plot. If None, plots them all. :param bmc_to_plot: list, optional List of best_model_components parameter names to plot. If None, plots them all. :param animated: bool Whether or not to make the best_model_components plots into videos. Required an installation of FFMpeg. 
:return None: """ # Get directories partable = os.path.join(mcmc_output_dir, 'log', 'cube_par_table.fits') bmc = os.path.join(mcmc_output_dir, 'log', 'cube_best_model_components.fits') if not os.path.isfile(partable) or not os.path.isfile(bmc): raise FileNotFoundError(f"Could not find cube_par_table.fits or cube_best_model_components.fits in" f"{mcmc_output_dir}/log/") # Load in data parhdu = fits.open(partable) bmchdu = fits.open(bmc) ox, oy = parhdu[0].header['ORIGINX'], parhdu[0].header['ORIGINY'] # First make 2D image maps for each parameter in par table if not os.path.exists(os.path.join(mcmc_output_dir, 'partable_plots')): os.mkdir(os.path.join(mcmc_output_dir, 'partable_plots')) if not os.path.exists(os.path.join(mcmc_output_dir, 'best_model_components_plots')): os.mkdir(os.path.join(mcmc_output_dir, 'best_model_components_plots')) if partable_to_plot is None: partable_to_plot = [p.name for p in parhdu[1:-1]] if bmc_to_plot is None: bmc_to_plot = [b.name for b in bmchdu[1:-1]] for imagehdu in parhdu[1:-1]: if imagehdu.name not in partable_to_plot: continue fig, ax = plt.subplots() data = imagehdu.data std = np.nanstd(data) mad = stats.median_absolute_deviation(data[np.isfinite(data)]) # data[np.abs(data - np.nanmedian(data)) > 10*std] = np.nan if "FLUX" in imagehdu.name and "SIGMA" not in imagehdu.name: mask = data >= 0 data[mask] = np.nan map_ = ax.imshow(data, origin='lower', cmap='cubehelix', vmin=np.nanpercentile(data, 1), vmax=np.nanpercentile(data, 99), extent=[ox-.5, ox+imagehdu.data.shape[0]-.5, oy-.5, oy+imagehdu.data.shape[1]-.5]) plt.colorbar(map_, ax=ax, label=imagehdu.name) ax.set_title(mcmc_output_dir.split(os.sep)[-1]) plt.savefig(os.path.join(mcmc_output_dir, 'partable_plots', f'{imagehdu.name}.pdf'), bbox_inches='tight', dpi=300) plt.close() # Now loop over and plot the model components, averaging/summing over wavelength if not animated: for imagehdu in bmchdu[1:-1]: if imagehdu.name.upper() == 'WAVE' or imagehdu.name not in bmc_to_plot: continue # Sum over the 1st axis, aka the wavelength axis datasum = np.nansum(imagehdu.data, axis=0) datasum[datasum == 0] = np.nan # datasum[np.abs(datasum) > 1e5] = np.nan dataavg = datasum / imagehdu.data.shape[0] std = np.nanstd(dataavg) # mad = stats.median_absolute_deviation(dataavg.flatten()[np.isfinite(dataavg.flatten())]) # dataavg[np.abs(dataavg - np.nanmedian(dataavg)) > 10*std] = np.nan fig, ax = plt.subplots() map_ = ax.imshow(dataavg, origin='lower', cmap='cubehelix', vmin=
np.nanpercentile(dataavg, 1)
numpy.nanpercentile
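# Illustrative sketch (not part of the BADASS pipeline above; the cube here is
# synthetic random data, used only to show the array shapes): prepare_ifu applies
# its S/N cut by collapsing the wavelength axis with nanmean, turning the mean
# inverse variance back into a noise estimate, and NaN-masking faint spaxels,
# while the plotting code uses np.nanpercentile for robust display limits.
import numpy as np

nz, ny, nx = 100, 4, 5
rng = np.random.default_rng(0)
flux = rng.normal(loc=1.0, scale=0.1, size=(nz, ny, nx))   # (nz, ny, nx) cube
ivar = np.full((nz, ny, nx), 100.0)                        # inverse variance, 1/sigma^2

signal = np.nanmean(flux, axis=0)               # per-spaxel mean flux, shape (ny, nx)
noise = np.sqrt(1 / np.nanmean(ivar, axis=0))   # per-spaxel noise, shape (ny, nx)
snr = signal / noise

snr_threshold = 0.5
flux[:, snr < snr_threshold] = np.nan           # drop low-S/N spaxels from the fit
print(np.nanpercentile(snr, [1, 99]))           # robust colour-scale limits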
""" Module containing functions to calculate the 3 most important wind turbines in a regular wind farm """ import numpy as np def calculate_distance(x, y): """ calculate the effective distance of a wind turbine assuming the streamwise direction is 10x more important than spanwise direction distance = sqrt(x^2 + 10*y^2) Parameters ---------- x : float The spanwise coordinate of the turbine y: float The streamwise coordinate of the turbine Returns ------- distance : float The effective distance """ distance = np.sqrt(x**2 + 10*y**2) distance[x < 0] = np.inf return distance def make_wind_farm_coords(S_x, S_y, S_off, theta): """ calculate the x, y coordinates of a wind farm with regular arrangement Parameters ---------- S_x: float The streamwise spacing of the wind farm S_y: float The sspanwise spacing of the wind farm S_off : float The spanwise offset of rows of turbines theta: float The angle of incoming wind above the x axis Returns ------- farm_coords: ndarray of shape (49, 3) (49, :3) The x, y coordinates of the closest 49 wind turbines (49, 3) The effective distance of the closest 49 wind turbines """ farm_coords = np.zeros((49, 3)) for n_y in np.arange(-3, 4): farm_coords[7*(n_y + 3):7*(n_y + 4), 0:2] = \ [calculate_turbine_coords(S_x, S_y, S_off, theta, n_x, n_y) for n_x in np.arange(-3, 4)] farm_coords[:, 2] = calculate_distance(farm_coords[:, 0], farm_coords[:, 1]) return farm_coords def calculate_turbine_coords(S_x, S_y, S_off, theta, n_x, n_y): """caulcate the x, y coordinates of a single wind turbine in a regular arrangment Parameters ---------- S_x: float The streamwise spacing of the wind farm S_y: float The spanwise spacing of the wind farm S_off : float The spanwise offset of row of turbines theta: float The angle of incoming wind above the x axis n_x: integer Turbine number in original streamwise direction n_y: integer Turbine number in the original spanwise direction Returns ------- turbine coords: tuple of shape (2,) The x, y coordinates of the wind turbine specified by the turbine numbers n_x, n_y """ x = np.cos(theta)*S_x*n_x + np.sin(theta)*S_off*n_x \ + np.sin(theta)*S_y*n_y y = - np.sin(theta)*S_x*n_x + np.cos(theta)*S_off*n_x \ +
np.cos(theta)
numpy.cos
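# Standalone illustration with invented numbers (the module above is truncated
# just before the last term of its y expression, so this is not its exact code):
# calculate_turbine_coords builds a wind-aligned frame from np.cos/np.sin terms
# in the usual 2-D rotation pattern, and calculate_distance weights the
# streamwise offset 10x and assigns infinite distance where x < 0.
import numpy as np

theta = np.deg2rad(15.0)                   # wind angle above the x axis
offsets = np.array([5.0, 2.0])             # raw grid offsets of one turbine
rot = np.array([[np.cos(theta),  np.sin(theta)],
                [-np.sin(theta), np.cos(theta)]])
x_rot, y_rot = rot @ offsets               # position in the wind-aligned frame

x = np.array([x_rot, -1.0])                # add a second turbine with x < 0
y = np.array([y_rot, 0.0])
distance = np.sqrt(x**2 + 10*y**2)         # streamwise weighted 10x, as above
distance[x < 0] = np.inf                   # excluded, as in calculate_distance
print(distance)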
import math import numpy as np import astropy.units as u DEG2RAD = math.pi / 180. class BeamError(Exception): """docstring for BeamError""" pass class InvalidBeamOperationError(Exception): pass class RadioBeamDeprecationWarning(Warning): pass def deconvolve_optimized(beamprops1, beamprops2, failure_returns_pointlike=False): """ An optimized, non-Quantity version of beam deconvolution. Because no unit conversions are handled, the inputs MUST be in degrees for the major, minor, and position angle. Parameters ---------- beamprops1: dict Dictionary with keys 'BMAJ', 'BMIN', and 'BPA' for the beam to deconvolve from. Can be produced with `~radio_beam.Beam.to_fits_keywords`. beamprops2: dict Same as `beamprops1` for the second beam. failure_returns_pointlike : bool, optional Return a point beam (zero area) when deconvolution fails. If `False`, this will instead raise a `~radio_beam.utils.BeamError` when deconvolution fails. Returns ------- new_major : float Deconvolved major FWHM. new_minor : float Deconvolved minor FWHM. new_pa : float Deconvolved position angle. """ # blame: https://github.com/pkgw/carma-miriad/blob/CVSHEAD/src/subs/gaupar.for # (githup checkin of MIRIAD, code by Sault) maj1 = beamprops1['BMAJ'] min1 = beamprops1['BMIN'] pa1 = beamprops1['BPA'] * DEG2RAD maj2 = beamprops2['BMAJ'] min2 = beamprops2['BMIN'] pa2 = beamprops2['BPA'] * DEG2RAD alpha = ((maj1 * math.cos(pa1))**2 + (min1 * math.sin(pa1))**2 - (maj2 * math.cos(pa2))**2 - (min2 * math.sin(pa2))**2) beta = ((maj1 * math.sin(pa1))**2 + (min1 * math.cos(pa1))**2 - (maj2 * math.sin(pa2))**2 - (min2 * math.cos(pa2))**2) gamma = 2 * ((min1**2 - maj1**2) * math.sin(pa1) * math.cos(pa1) - (min2**2 - maj2**2) * math.sin(pa2) * math.cos(pa2)) s = alpha + beta t = math.sqrt((alpha - beta)**2 + gamma**2) # Deal with floating point issues # This matches the arcsec**2 check for deconvolve below # Difference is we keep things in deg^2 here atol_t = np.finfo(np.float64).eps / 3600.**2 # To deconvolve, the beam must satisfy: # alpha < 0 alpha_cond = alpha + np.finfo(np.float64).eps < 0 # beta < 0 beta_cond = beta + np.finfo(np.float64).eps < 0 # s < t st_cond = s < t + atol_t if alpha_cond or beta_cond or st_cond: if failure_returns_pointlike: return 0., 0., 0. else: raise BeamError("Beam could not be deconvolved") else: new_major = math.sqrt(0.5 * (s + t)) new_minor = math.sqrt(0.5 * (s - t)) # absolute tolerance needs to be <<1 microarcsec atol = 1e-7 / 3600. if (math.sqrt(abs(gamma) + abs(alpha - beta))) < atol: new_pa = 0.0 else: new_pa = 0.5 * math.atan2(-1. * gamma, alpha - beta) # In the limiting case, the axes can be zero to within precision # Add the precision level onto each axis so a deconvolvable beam # is always has beam.isfinite == True new_major += np.finfo(np.float64).eps new_minor += np.finfo(np.float64).eps return new_major, new_minor, new_pa def deconvolve(beam, other, failure_returns_pointlike=False): """ Deconvolve a beam from another Parameters ---------- beam : `Beam` The defined beam. other : `Beam` The beam to deconvolve from this beam failure_returns_pointlike : bool Option to return a pointlike beam (i.e., one with major=minor=0) if the second beam is larger than the first. Otherwise, a ValueError will be raised Returns ------- new_beam : `Beam` The convolved Beam Raises ------ failure : ValueError If the second beam is larger than the first, the default behavior is to raise an exception. 
This can be overridden with failure_returns_pointlike """ # The header keywords handle the conversions to degree for BMAJ, BMIN, BPA. beamprops1 = beam.to_header_keywords() beamprops2 = other.to_header_keywords() return deconvolve_optimized(beamprops1, beamprops2, failure_returns_pointlike=failure_returns_pointlike) def convolve(beam, other): """ Convolve one beam with another. Parameters ---------- other : `Beam` The beam to convolve with Returns ------- new_beam : `Beam` The convolved Beam """ # blame: https://github.com/pkgw/carma-miriad/blob/CVSHEAD/src/subs/gaupar.for # (github checkin of MIRIAD, code by Sault) alpha = ((beam.major * np.cos(beam.pa))**2 + (beam.minor * np.sin(beam.pa))**2 + (other.major * np.cos(other.pa))**2 + (other.minor * np.sin(other.pa))**2) beta = ((beam.major * np.sin(beam.pa))**2 + (beam.minor * np.cos(beam.pa))**2 + (other.major * np.sin(other.pa))**2 + (other.minor * np.cos(other.pa))**2) gamma = (2 * ((beam.minor**2 - beam.major**2) * np.sin(beam.pa) * np.cos(beam.pa) + (other.minor**2 - other.major**2) * np.sin(other.pa) * np.cos(other.pa))) s = alpha + beta t = np.sqrt((alpha - beta)**2 + gamma**2) new_major = np.sqrt(0.5 * (s + t)) new_minor = np.sqrt(0.5 * (s - t)) # absolute tolerance needs to be <<1 microarcsec if np.isclose(((abs(gamma) + abs(alpha - beta))**0.5).to(u.arcsec).value, 1e-7): new_pa = 0.0 * u.deg else: new_pa = 0.5 * np.arctan2(-1. * gamma, alpha - beta) return new_major, new_minor, new_pa def transform_ellipse(major, minor, pa, x_scale, y_scale): ''' Transform an ellipse by scaling in the x and y axes. Parameters ---------- major : `~astropy.units.Quantity` Major axis. minor : `~astropy.units.Quantity` Minor axis. pa : `~astropy.units.Quantity` PA of the major axis. x_scale : float x axis scaling factor. y_scale : float y axis scaling factor. Returns ------- trans_major : `~astropy.units.Quantity` Major axis in the transformed frame. trans_minor : `~astropy.units.Quantity` Minor axis in the transformed frame. trans_pa : `~astropy.units.Quantity` PA of the major axis in the transformed frame. ''' # This code is based on the implementation in CASA: # https://open-bitbucket.nrao.edu/projects/CASA/repos/casa/browse/code/imageanalysis/ImageAnalysis/CasaImageBeamSet.cc major = major.to(u.arcsec) minor = minor.to(u.arcsec) pa = pa.to(u.rad) cospa = np.cos(pa) sinpa = np.sin(pa) cos2pa = cospa**2 sin2pa = sinpa**2 major2 = major**2 minor2 = minor**2 a = (cos2pa / major2) + (sin2pa / minor2) b = -2 * cospa * sinpa * (major2**-1 - minor2**-1) c = (sin2pa / major2) + (cos2pa / minor2) x2_scale = x_scale**2 y2_scale = y_scale**2 r = a / x2_scale s = b**2 / (4 * x2_scale * y2_scale) t = c / y2_scale udiff = r - t u2 = udiff**2 f1 = u2 + 4 * s f2 = np.sqrt(f1) * np.abs(udiff) j1 = (f2 + f1) / f1 / 2 j2 = (f1 - f2) / f1 / 2 k1 = (j1 * r + j1 * t - t) / (2 * j1 - 1) k2 = (j2 * r + j2 * t - t) / (2 * j2 - 1) c1 =
np.sqrt(k1)
numpy.sqrt
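# Minimal numeric check (an assumed example, not taken from the radio_beam test
# suite): for two circular Gaussian beams the convolve() formulas above reduce
# to adding the FWHMs in quadrature, which makes the algebra easy to verify.
import math

maj1 = min1 = 3.0    # arcsec, circular beam 1
maj2 = min2 = 4.0    # arcsec, circular beam 2
pa1 = pa2 = 0.0

alpha = ((maj1 * math.cos(pa1))**2 + (min1 * math.sin(pa1))**2
         + (maj2 * math.cos(pa2))**2 + (min2 * math.sin(pa2))**2)
beta = ((maj1 * math.sin(pa1))**2 + (min1 * math.cos(pa1))**2
        + (maj2 * math.sin(pa2))**2 + (min2 * math.cos(pa2))**2)
gamma = 2 * ((min1**2 - maj1**2) * math.sin(pa1) * math.cos(pa1)
             + (min2**2 - maj2**2) * math.sin(pa2) * math.cos(pa2))

s = alpha + beta
t = math.sqrt((alpha - beta)**2 + gamma**2)
new_major = math.sqrt(0.5 * (s + t))
new_minor = math.sqrt(0.5 * (s - t))
print(new_major, new_minor)   # both 5.0 = sqrt(3**2 + 4**2)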
import h5py
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
from sklearn.manifold import TSNE

h5file = h5py.File("96.jl.h5")
X = h5file["X"].value  # n_samples x n_features
assert X.shape[1] == 300

df = pd.read_csv("./countries.csv")
labels =
np.array(df.iloc[:, 1])
numpy.array
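# Assumed continuation sketch: the script above already imports TSNE and
# matplotlib, so a typical next step is a 2-D embedding plus a scatter plot.
# Synthetic 300-d vectors stand in here for the real "96.jl.h5" matrix and
# country labels, which are not available in this document.
import numpy as np
from matplotlib import pyplot as plt
from sklearn.manifold import TSNE

X_demo = np.random.rand(50, 300)      # 50 samples x 300 features, random stand-in
X_2d = TSNE(n_components=2, init="random", perplexity=10).fit_transform(X_demo)

plt.scatter(X_2d[:, 0], X_2d[:, 1], s=10)
plt.title("t-SNE projection (synthetic data)")
plt.savefig("tsne_demo.png")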
# -*- coding: utf-8 -*-
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Test operator sparsing."""

import numpy as np
from openfermion import get_sparse_operator
from openfermion.chem import MolecularData

from mindquantum.algorithm.nisq.chem.transform import Transform
from mindquantum.core.operators.utils import get_fermion_operator
from mindquantum.third_party.interaction_operator import InteractionOperator


def test_sparsing_operator():
    """
    Description: Test sparsing operator
    Expectation: success
    """
    molecular = "./tests/st/H4.hdf5"
    mol = MolecularData(filename=molecular)
    mol.load()
    ham_of = mol.get_molecular_hamiltonian()
    inter_ops = InteractionOperator(*ham_of.n_body_tensors.values())
    ham_hiq = get_fermion_operator(inter_ops)
    ham = Transform(ham_hiq).jordan_wigner()
    h = ham.to_openfermion()
    m1 = get_sparse_operator(h).toarray()
    m2 = ham.matrix().toarray()
    m3 = ham_hiq.matrix().toarray()
    v1 = np.real(np.linalg.eigvals(m1))
    v2 = np.real(np.linalg.eigvals(m2))
    v3 = np.real(
np.linalg.eigvals(m3)
numpy.linalg.eigvals
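# Sketch of the comparison the test above is building toward (assumed, written
# with plain numpy instead of mindquantum/openfermion): two different matrix
# representations of the same operator should agree in their eigenvalue spectra
# once the eigenvalues are sorted.
import numpy as np

pauli_z = np.diag([1.0, -1.0])
m_a = np.kron(pauli_z, np.eye(2))      # Z (x) I in one tensor-product ordering
m_b = np.kron(np.eye(2), pauli_z)      # I (x) Z: a different matrix, same spectrum

v_a = np.sort(np.real(np.linalg.eigvals(m_a)))
v_b = np.sort(np.real(np.linalg.eigvals(m_b)))
assert np.allclose(v_a, v_b)           # spectra match even though m_a != m_b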
import torch from torch.autograd import Variable import torch.nn as nn import numpy as np import pdb from lib.coarsening import lmax_L from lib.coarsening import rescale_L from torch.autograd import Variable import torch.nn.functional as F from lib.coarsening import perm_data_torch2 if torch.cuda.is_available(): print('cuda available') dtypeFloat = torch.cuda.FloatTensor dtypeLong = torch.cuda.LongTensor torch.cuda.manual_seed(1) else: print('cuda not available') dtypeFloat = torch.FloatTensor dtypeLong = torch.LongTensor torch.manual_seed(1) def isnan(x): return x != x class loss_with_consensus(torch.nn.Module): def __init__(self, loss_fn, config=None): super().__init__() self.existing_loss = loss_fn self.config = config self.num_c = config['cons'] self.lin = torch.nn.Linear(2*(2 * self.num_c - 1), 2) def forward(self, pred, labels): B, cons, plen = pred.shape pred = pred.view(B, cons * plen) # pdb.set_trace() fin_pred = self.lin(pred) loss = self.existing_loss(fin_pred, labels) return loss class loss_with_consensus2(torch.nn.Module): def __init__(self, loss_fn): super().__init__() self.existing_loss = loss_fn # self.kld = torch.nn.KLDivLoss() def forward(self, pred, labels): # Assume pred is of size: B x 5 x 2 # Labels is of size: B x 1 B, cons, plen = pred.shape pred_curr = pred[:, 0, :] loss = self.existing_loss(pred_curr, labels) pred_new = pred.view(B*cons, plen) pred_new_ls = F.log_softmax(pred_new, dim=1) pred_new_ls = pred_new_ls.view(B, cons, plen) pred_curr_ls = pred_new_ls[:, 0, :] for i in range(1, cons): pred_tmp = pred_new_ls[:, i, :] pred_tmp = pred_tmp.detach() loss += F.kl_div(pred_curr_ls, pred_tmp) # loss += self.kld(pred_tmp, pred_curr_ls) return loss class my_sparse_mm(torch.autograd.Function): """ Implementation of a new autograd function for sparse variables, called "my_sparse_mm", by subclassing torch.autograd.Function and implementing the forward and backward passes. 
""" def forward(self, W, x): # W is SPARSE self.save_for_backward(W, x) # pdb.set_trace() y = torch.mm(W, x) return y def backward(self, grad_output): W, x = self.saved_tensors grad_input = grad_output.clone() grad_input_dL_dW = torch.mm(grad_input, x.t()) grad_input_dL_dx = torch.mm(W.t(), grad_input) return grad_input_dL_dW, grad_input_dL_dx class gconv(torch.nn.Module): def __init__(self, in_c, out_c, kern): super().__init__() self.in_c = in_c self.out_c = out_c self.kern = kern self.lin = torch.nn.Linear(in_c * kern, out_c) return def forward(self, inp, L): B, V, Fin = inp.shape x0 = inp.permute(1, 2, 0).contiguous() x0 = x0.view([V, Fin*B]) x = x0.unsqueeze(0) if self.kern > 1: x1 = my_sparse_mm()(L, x0) # V x Fin*B x = torch.cat((x, x1.unsqueeze(0)), 0) # 2 x V x Fin*B for k in range(2, self.kern): x2 = 2 * my_sparse_mm()(L, x1) - x0 x = torch.cat((x, x2.unsqueeze(0)), 0) # M x Fin*B x0, x1 = x1, x2 x = x.view([self.kern, V, Fin, B]) # K x V x Fin x B x = x.permute(3, 1, 2, 0).contiguous() # B x V x Fin x K x = x.view([B*V, Fin*self.kern]) # B*V x Fin*K # pdb.set_trace() x = self.lin(x) x = x.view([B, V, self.out_c]) # B x V x Fout return x class Graph_ConvNet_LeNet5(nn.Module): def __init__(self, net_parameters): print('Graph ConvNet: LeNet5') super(Graph_ConvNet_LeNet5, self).__init__() # parameters D, Fin1, CL1_F, CL1_K, CL2_F, CL2_K, FC1_F, FC2_F = net_parameters p = 2 FC1Fin = CL2_F*(D//(p*p)) # pdb.set_trace() # graph CL1 self.cl1 = nn.Linear(CL1_K * Fin1, CL1_F) self.conv1_bn = torch.nn.BatchNorm1d(D, CL1_F) Fin = CL1_K Fout = CL1_F scale = np.sqrt(2.0 / (Fin+Fout)) self.cl1.weight.data.uniform_(-scale, scale) self.cl1.bias.data.fill_(0.0) self.CL1_K = CL1_K self.CL1_F = CL1_F # graph CL2 self.cl2 = nn.Linear(CL2_K*CL1_F, CL2_F) self.conv2_bn = torch.nn.BatchNorm1d(D//p, CL2_F) Fin = CL2_K * CL1_F Fout = CL2_F scale = np.sqrt(2.0 / (Fin+Fout)) self.cl2.weight.data.uniform_(-scale, scale) self.cl2.bias.data.fill_(0.0) self.CL2_K = CL2_K self.CL2_F = CL2_F # FC1 self.fc1 = nn.Linear(FC1Fin, FC1_F) Fin = FC1Fin Fout = FC1_F scale = np.sqrt(2.0 / (Fin+Fout)) self.fc1.weight.data.uniform_(-scale, scale) self.fc1.bias.data.fill_(0.0) # pdb.set_trace() self.FC1Fin = FC1Fin # FC2 self.fc2 = nn.Linear(FC1_F, FC2_F) Fin = FC1_F Fout = FC2_F scale = np.sqrt(2.0 / (Fin+Fout)) self.fc2.weight.data.uniform_(-scale, scale) self.fc2.bias.data.fill_(0.0) # nb of parameters nb_param = CL1_K * CL1_F + CL1_F # CL1 nb_param += CL2_K * CL1_F * CL2_F + CL2_F # CL2 nb_param += FC1Fin * FC1_F + FC1_F # FC1 nb_param += FC1_F * FC2_F + FC2_F # FC2 print('nb of parameters=', nb_param, '\n') def init_weights(self, W, Fin, Fout): scale = np.sqrt(2.0 / (Fin+Fout)) W.uniform_(-scale, scale) return W def graph_conv_cheby(self, x, cl, L, lmax, Fout, K): # parameters # B = batch size # V = nb vertices # Fin = nb input features # Fout = nb output features # K = Chebyshev order & support size # pdb.set_trace() B, V, Fin = x.size() B, V, Fin = int(B), int(V), int(Fin) # rescale Laplacian lmax = lmax_L(L) L = rescale_L(L, lmax) # convert scipy sparse matric L to pytorch L = L.tocoo() indices = np.column_stack((L.row, L.col)).T indices = indices.astype(np.int64) indices = torch.from_numpy(indices) indices = indices.type(torch.LongTensor) L_data = L.data.astype(np.float32) L_data = torch.from_numpy(L_data) L_data = L_data.type(torch.FloatTensor) L = torch.sparse.FloatTensor(indices, L_data, torch.Size(L.shape)) L = Variable(L, requires_grad=False) if torch.cuda.is_available(): L = L.cuda() # transform to Chebyshev 
basis x0 = x.permute(1, 2, 0).contiguous() # V x Fin x B # pdb.set_trace() x0 = x0.view([V, Fin*B]) # V x Fin*B x = x0.unsqueeze(0) # 1 x V x Fin*B def concat(x, x_): x_ = x_.unsqueeze(0) # 1 x V x Fin*B return torch.cat((x, x_), 0) # K x V x Fin*B if K > 1: x1 = my_sparse_mm()(L, x0) # V x Fin*B x = torch.cat((x, x1.unsqueeze(0)), 0) # 2 x V x Fin*B # pdb.set_trace() for k in range(2, K): x2 = 2 * my_sparse_mm()(L, x1) - x0 x = torch.cat((x, x2.unsqueeze(0)), 0) # M x Fin*B x0, x1 = x1, x2 x = x.view([K, V, Fin, B]) # K x V x Fin x B x = x.permute(3, 1, 2, 0).contiguous() # B x V x Fin x K x = x.view([B*V, Fin*K]) # B*V x Fin*K # Compose linearly Fin features to get Fout features # pdb.set_trace() x = cl(x) # B*V x Fout # pdb.set_trace() x = x.view([B, V, Fout]) # B x V x Fout # pdb.set_trace() return x # Max pooling of size p. Must be a power of 2. def graph_max_pool(self, x, p): if p > 1: x = x.permute(0, 2, 1).contiguous() # x = B x F x V x = nn.MaxPool1d(p)(x) # B x F x V/p x = x.permute(0, 2, 1).contiguous() # x = B x V/p x F return x else: return x def forward(self, x, d, L, lmax): # graph CL1 # pdb.set_trace() # x = x.squeeze() # x = x.unsqueeze(2) # B x V x Fin=1 # pdb.set_trace() # pdb.set_trace() x = self.graph_conv_cheby(x, self.cl1, L[0], lmax[0], self.CL1_F, self.CL1_K) # pdb.set_trace() x = F.relu(x) x = self.conv1_bn(x) x = self.graph_max_pool(x, 2) # graph CL2 x = self.graph_conv_cheby(x, self.cl2, L[1], lmax[1], self.CL2_F, self.CL2_K) x = F.relu(x) x = self.conv2_bn(x) x = self.graph_max_pool(x, 2) # FC1 x = x.view(-1, self.FC1Fin) x = self.fc1(x) x = F.relu(x) x = nn.Dropout(d)(x) # FC2 x = self.fc2(x) # pdb.set_trace() return x def loss(self, y, y_target, l2_regularization): # pdb.set_trace() loss = nn.CrossEntropyLoss()(y, y_target) l2_loss = 0.0 for param in self.parameters(): data = param * param l2_loss += data.sum() # loss += 0.5 * l2_regularization * l2_loss return loss def update(self, lr): update = torch.optim.SGD(self.parameters(), lr=lr, momentum=0.9) return update def update_learning_rate(self, optimizer, lr): for param_group in optimizer.param_groups: param_group['lr'] = lr return optimizer def evaluation(self, y_predicted, test_l): _, class_predicted = torch.max(y_predicted.data, 1) return 100.0 * (class_predicted == test_l).sum() / y_predicted.size(0) def calc_dim(din, f, s): return (din - f)//s + 1 class simple_net(torch.nn.Module): def __init__(self, D_in, inp_channels): super(simple_net, self).__init__() # For conv1d the params are N, C, L # N is the batch size # C is the number of channels # L is the len of the signal # For now keep the number of channels=1 f = 3 s = 1 self.conv1 = torch.nn.Conv1d(inp_channels, 6, f, stride=s) new_dim = calc_dim(D_in, f, s) // 2 self.conv1_bn = torch.nn.BatchNorm1d(6) self.conv2 = torch.nn.Conv1d(6, 16, f, stride=s) new_dim = calc_dim(new_dim, f, s) // 2 self.conv2_bn = torch.nn.BatchNorm1d(16) # self.conv3 = torch.nn.Conv1d(16, 32, f, stride=s) # new_dim = calc_dim(new_dim, f, s) // 2 self.lin1 = torch.nn.Linear(16*new_dim, 30) self.lin2 = torch.nn.Linear(30, 2) def forward(self, inp): # out = F.relu(F.max_pool1d(self.conv1_bn(self.conv1(inp)), 2)) layer_outs = dict() out = F.max_pool1d(self.conv1_bn(F.relu(self.conv1(inp))), 2) # out = F.dropout(out) # out = F.relu(F.max_pool1d(self.conv2_bn(self.conv2(out)), 2)) layer_outs['conv1'] = out out = F.max_pool1d(self.conv2_bn(F.relu(self.conv2(out))), 2) layer_outs['conv2'] = out # out = F.relu(self.conv2(out)) # out = F.dropout(out) # out = F.max_pool1d(out, 2) # out = 
F.relu(self.conv3(out)) # out = F.dropout(out) # out = F.max_pool1d(out, 2) out = out.view(out.size(0), -1) layer_outs['fc_inp'] = out out = F.relu(self.lin1(out)) layer_outs['fc1'] = out # out = F.dropout(out) out = self.lin2(out) layer_outs['fc2'] = out return out, layer_outs class complex_net(torch.nn.Module): def __init__(self, D_in, inp_channels): super(complex_net, self).__init__() # For conv1d the params are N, C, L # N is the batch size # C is the number of channels # L is the len of the signal # For now keep the number of channels=1 self.num_inp_channels = inp_channels f = 3 s = 1 self.conv1_list = torch.nn.ModuleList() # for i in range(inp_channels): # self.conv1_list.append(torch.nn.Conv1d(1, 6, f, stride=s)) new_dim = calc_dim(D_in, f, s) // 2 self.conv1_bn = torch.nn.BatchNorm1d(6) self.conv2_list = torch.nn.ModuleList() # for i in range(inp_channels): # self.conv2_list.append(torch.nn.Conv1d(6, 16, f, stride=s)) new_dim = calc_dim(new_dim, f, s) // 2 self.conv2_bn = torch.nn.BatchNorm1d(16) # self.conv3 = torch.nn.Conv1d(16, 32, f, stride=s) # new_dim = calc_dim(new_dim, f, s) // 2 self.lin1_list = torch.nn.ModuleList() # for i in range(inp_channels): # self.lin1_list.append(torch.nn.Linear(16*new_dim, 30)) self.lin2_list = torch.nn.ModuleList() for i in range(inp_channels): self.conv1_list.append(torch.nn.Conv1d(1, 6, f, stride=s)) self.conv2_list.append(torch.nn.Conv1d(6, 16, f, stride=s)) self.lin1_list.append(torch.nn.Linear(16*new_dim, 30)) self.lin2_list.append(torch.nn.Linear(30, 2)) def forward(self, inp): # out = F.relu(F.max_pool1d(self.conv1_bn(self.conv1(inp)), 2)) # pdb.set_trace() num_channels = inp.shape[1] channel_layer_outs = [] fin_outs = [] for i in range(num_channels): layer_outs = dict() # pdb.set_trace() # inp_chan = inp[:, ] out = F.max_pool1d(self.conv1_bn(F.relu(self.conv1_list[i](inp[:, [i], :]))), 2) layer_outs['conv1'] = out out = F.max_pool1d(self.conv2_bn(F.relu(self.conv2_list[i](out))), 2) layer_outs['conv2'] = out out = out.view(out.size(0), -1) layer_outs['fc_inp'] = out out = F.relu(self.lin1_list[i](out)) layer_outs['fc1'] = out out = self.lin2_list[i](out) layer_outs['fc2'] = out channel_layer_outs.append(layer_outs) fin_outs.append(out) # dict() # out = F.max_pool1d(self.conv1_bn(F.relu(self.conv1(inp))), 2) # out = F.dropout(out) # out = F.relu(F.max_pool1d(self.conv2_bn(self.conv2(out)), 2)) # layer_outs['conv1'] = out # out = F.max_pool1d(self.conv2_bn(F.relu(self.conv2(out))), 2) # layer_outs['conv2'] = out # out = F.relu(self.conv2(out)) # out = F.dropout(out) # out = F.max_pool1d(out, 2) # out = F.relu(self.conv3(out)) # out = F.dropout(out) # out = F.max_pool1d(out, 2) # out = out.view(out.size(0), -1) # layer_outs['fc_inp'] = out # out = F.relu(self.lin1(out)) # layer_outs['fc1'] = out # out = F.dropout(out) # out = self.lin2(out) # layer_outs['fc2'] = out # pdb.set_trace() # return out, channel_layer_outs # def to_cuda(self): # self.cuda() # for i in range(self.num_inp_channels): # self.conv1_list[i].cuda() # self.conv2_list[i].cuda() # self.lin1_list[i].cuda() # self.lin2_list[i].cuda() # class complex_net2(torch.nn.Module): # def __init__(self, D_in, inp_channels): # super(complex_net2, self).__init__() # # For conv1d the params are N, C, L # # N is the batch size # # C is the number of channels # # L is the len of the signal # # For now keep the number of channels=1 # self.num_inp_channels = inp_channels # f = 3 # s = 1 # # for i in range(inp_channels): # new_dim = calc_dim(D_in, f, s) // 2 # self.conv1_bn = 
torch.nn.BatchNorm1d(6) # new_dim = calc_dim(new_dim, f, s) // 2 # self.conv2_bn = torch.nn.BatchNorm1d(16) # self.basic_block_lenet5 = simple_net(D_in, 1) # def forward(self, inp): # # out = F.relu(F.max_pool1d(self.conv1_bn(self.conv1(inp)), 2)) # # pdb.set_trace() # num_channels = inp.shape[1] # channel_layer_outs = [] # fin_outs = [] # for i in range(num_channels): # layer_outs = dict() # # pdb.set_trace() # # inp_chan = inp[:, ] # out = F.max_pool1d(self.conv1_bn(F.relu(self.conv1_list[i](inp[:, [i], :]))), 2) # layer_outs['conv1'] = out # out = F.max_pool1d(self.conv2_bn(F.relu(self.conv2_list[i](out))), 2) # layer_outs['conv2'] = out # out = out.view(out.size(0), -1) # layer_outs['fc_inp'] = out # out = F.relu(self.lin1_list[i](out)) # layer_outs['fc1'] = out # out = self.lin2_list[i](out) # layer_outs['fc2'] = out # channel_layer_outs.append(layer_outs) # fin_outs.append(out) # # dict() # # out = F.max_pool1d(self.conv1_bn(F.relu(self.conv1(inp))), 2) # # out = F.dropout(out) # # out = F.relu(F.max_pool1d(self.conv2_bn(self.conv2(out)), 2)) # # layer_outs['conv1'] = out # # out = F.max_pool1d(self.conv2_bn(F.relu(self.conv2(out))), 2) # # layer_outs['conv2'] = out # # out = F.relu(self.conv2(out)) # # out = F.dropout(out) # # out = F.max_pool1d(out, 2) # # out = F.relu(self.conv3(out)) # # out = F.dropout(out) # # out = F.max_pool1d(out, 2) # # out = out.view(out.size(0), -1) # # layer_outs['fc_inp'] = out # # out = F.relu(self.lin1(out)) # # layer_outs['fc1'] = out # # out = F.dropout(out) # # out = self.lin2(out) # # layer_outs['fc2'] = out # return out, channel_layer_outs # def to_cuda(self): # self.cuda() # for i in range(self.num_inp_channels): # self.conv1_list[i].cuda() # self.conv2_list[i].cuda() # self.lin1_list[i].cuda() # self.lin2_list[i].cuda() class small_model(torch.nn.Module): def __init__(self, cnet_parameters): super(small_model, self).__init__() Din, num_inp_channels, f, s, c1o, c2o, fc1o = cnet_parameters self.conv1 = torch.nn.Conv1d(1, c1o, f, stride=s) new_dim = calc_dim(Din, f, s) // 2 self.conv2 = torch.nn.Conv1d(c1o, c2o, f, stride=s) new_dim = calc_dim(new_dim, f, s) // 2 self.lin1 = torch.nn.Linear(c2o*new_dim, fc1o) self.conv1_bn = torch.nn.BatchNorm1d(c1o) self.conv2_bn = torch.nn.BatchNorm1d(c2o) def forward(self, inp): out = F.max_pool1d(self.conv1_bn(F.relu(self.conv1(inp))), 2) out = F.max_pool1d(self.conv2_bn(F.relu(self.conv2(out))), 2) out = out.view(out.size(0), -1) out = F.relu(self.lin1(out)) return out class end_to_end_model(torch.nn.Module): def __init__(self, cnet_parameters, gnet_parameters): super(end_to_end_model, self).__init__() Din, num_inp_channels, f, s, c1o, c2o, fc1o = cnet_parameters self.Din = Din self.c1o = c1o self.c2o = c2o self.fc1o = fc1o self.f = f self.s = s self.num_inp_channels = num_inp_channels # self.small_model0 = small_model(cnet_parameters) # self.small_model1 = small_model(cnet_parameters) # self.small_model2 = small_model(cnet_parameters) # self.small_model3 = small_model(cnet_parameters) # self.small_model4 = small_model(cnet_parameters) # self.small_model5 = small_model(cnet_parameters) D, CL1_F, CL1_K, CL2_F, CL2_K, FC1_F, FC2_F = gnet_parameters p = 2 FC1Fin = CL2_F*(D//(p*p)) self.conv1_list = torch.nn.ModuleList() new_dim = calc_dim(Din, f, s) // 2 self.conv1_bn_list = torch.nn.ModuleList() # torch.nn.BatchNorm1d(c1o) self.conv2_list = torch.nn.ModuleList() new_dim = calc_dim(new_dim, f, s) // 2 self.conv2_bn_list = torch.nn.ModuleList() # = torch.nn.BatchNorm1d(c2o) self.lin1_list = torch.nn.ModuleList() 
self.lin2_list = torch.nn.ModuleList() for i in range(self.num_inp_channels): # Need to change 2->1. self.conv1_list.append(torch.nn.Conv1d(1, c1o, f, stride=s)) self.conv1_bn_list.append(torch.nn.BatchNorm1d(c1o)) self.conv2_list.append(torch.nn.Conv1d(c1o, c2o, f, stride=s)) self.conv2_bn_list.append(torch.nn.BatchNorm1d(c2o)) self.lin1_list.append(torch.nn.Linear(c2o*new_dim, fc1o)) self.lin2_list.append(torch.nn.Linear(fc1o, 2)) # self.cnet_module_list = torch.nn.ModuleList([self.conv1_list, # self.conv2_list, self.lin1_list]) self.cnet_module_list = torch.nn.ModuleList() for i in range(self.num_inp_channels): tmp_list = torch.nn.ModuleList([self.conv1_list[i], self.conv1_bn_list[i], self.conv2_list[i], self.conv2_bn_list[i], self.lin1_list[i], self.lin2_list[i]]) self.cnet_module_list.append(tmp_list) # FC1Fin = CL2_F*(D//(p*p)) # pdb.set_trace() # graph CL1 self.cl1 = nn.Linear(CL1_K * fc1o, CL1_F) self.gconv1_bn = torch.nn.BatchNorm1d(D, CL1_F) Fin = CL1_K Fout = CL1_F scale =
np.sqrt(2.0 / (Fin+Fout))
numpy.sqrt
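# Small standalone sketch (assumed example) of the weight initialisation used
# repeatedly in the model code above: a Glorot-style uniform range of
# +/- sqrt(2 / (Fin + Fout)), with the scale computed via np.sqrt and applied
# to a torch Linear layer.
import numpy as np
import torch

fin, fout = 128, 64
layer = torch.nn.Linear(fin, fout)
scale = np.sqrt(2.0 / (fin + fout))
layer.weight.data.uniform_(-scale, scale)
layer.bias.data.fill_(0.0)
print(bool(layer.weight.abs().max() <= scale))   # True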
"""Run Demonstration Image Classification Experiments. """ import sys,os sys.path.append('..') import numpy as np from models.BrokenModel import BrokenModel as BrokenModel import glob import tensorflow as tf import pandas as pd from timeit import default_timer as timer from .calloc import loadChannel,quantInit from .simmods import * from errConceal.caltec import * from errConceal.altec import * from errConceal.tc_algos import * import cv2 as cv2 from PIL import Image # ---------------------------------------------------------------------------- # def fnRunImgClassDemo(modelDict,splitLayerDict,ecDict,batch_size,path_base,transDict,outputDir): print('TensorFlow version') print(tf.__version__) model_path = modelDict['fullModel'] customObjects = modelDict['customObjects'] task = modelDict['task'] normalize = modelDict['normalize'] reshapeDims = modelDict['reshapeDims'] splitLayer = splitLayerDict['split'] mobile_model_path = splitLayerDict['MobileModel'] cloud_model_path = splitLayerDict['CloudModel'] rowsPerPacket = transDict['rowsperpacket'] quantization = transDict['quantization'] numberOfBits_1 = quantization[1]['numberOfBits'] numberOfBits_2 = quantization[2]['numberOfBits'] channel = transDict['channel'] res_data_dir = outputDir['resDataDir'] # directory for loss maps. sim_data_dir = outputDir['simDataDir'] # directory for simulation results. # ------------------------------------------------------------------------ # # tensorflow.keras deep model loading. loaded_model = tf.keras.models.load_model(os.path.join(model_path)) loaded_model_config = loaded_model.get_config() loaded_model_name = loaded_model_config['name'] # Check if mobile and cloud sub-models are already available: if os.path.isfile(mobile_model_path) and os.path.isfile(cloud_model_path): print(f'Sub-models of {loaded_model_name} split at {splitLayer} are available.') mobile_model = tf.keras.models.load_model(os.path.join(mobile_model_path)) cloud_model = tf.keras.models.load_model(os.path.join(cloud_model_path)) else: # if not, split the deep model. # Object for splitting a tf.keras model into a mobile sub-model and a cloud # sub-model at the chosen split layer 'splitLayer'. testModel = BrokenModel(loaded_model, splitLayer, customObjects) testModel.splitModel() mobile_model = testModel.deviceModel cloud_model = testModel.remoteModel # Save the mobile and cloud sub-model mobile_model.save(mobile_model_path) cloud_model.save(cloud_model_path) # ---------------------------------------------------------------------------- # # Create results directory if 'GilbertChannel' in channel: lossProbability = channel['GilbertChannel']['lossProbability'] burstLength = channel['GilbertChannel']['burstLength'] results_dir = os.path.join(sim_data_dir,path_base,loaded_model_name,'demo',splitLayer+'_lp_'+str(lossProbability)+'_Bl_'+str(burstLength)) channel_flag = 'GC' elif 'RandomLossChannel' in channel: lossProbability = channel['RandomLossChannel']['lossProbability'] results_dir = os.path.join(sim_data_dir,path_base,loaded_model_name,'demo',splitLayer+'_lp_'+str(lossProbability)) channel_flag = 'RL' elif 'ExternalChannel' in channel: print('External packet traces imported') results_dir = os.path.join(sim_data_dir,path_base,loaded_model_name,'demo',splitLayer+'_ext_trace') channel_flag = 'EX' num_channels = transDict['channel']['ExternalChannel']['num_channels'] ext_dir = os.path.join(res_data_dir,path_base,loaded_model_name,splitLayer) else: # No lossy channel. This means we are doing a quantization experiment. 
channel_flag = 'NC' results_dir = os.path.join(sim_data_dir,path_base,loaded_model_name,'demo',splitLayer+'_NoChannel') MC_runs = [0,1] # with no lossy channel, there's no need to do monte carlo runs because each monte carlo run would give the same results. if channel_flag in ['GC','RL','EX']: # Only load altec weights if we will be doing error concealment. tc_weights_path = ecDict['ALTeC']['weightspath'] altec_w_path = os.path.join(tc_weights_path,loaded_model_name,splitLayer,splitLayer+'_rpp_'+str(rowsPerPacket)+'_'+str(numberOfBits_1)+'Bits_tensor_weights.npy') altec_pkt_w = np.load(altec_w_path) print(f'Loaded ALTeC weights for splitLayer {splitLayer} and {rowsPerPacket} rows per packet. Shape {np.shape(altec_pkt_w)}') halrtc_iters = ecDict['HaLRTC']['numiters'] silrtc_iters = ecDict['SiLRTC']['numiters'] inpaint_radius = ecDict['InpaintNS']['radius'] os.makedirs(results_dir,exist_ok=True) res_filename = '_'+str(numberOfBits_1)+'Bits_'+str(numberOfBits_2)+'Bits_' # ------------------------------------------------------------------------ # # Objects for the channel, quantization. if channel_flag != 'EX': channel = loadChannel(channel) quant_tensor1 = quantInit(quantization,tensor_id = 1) quant_tensor2 = quantInit(quantization,tensor_id = 2) # ------------------------------------------------------------------------ # # Load the dataset dataset_x_files,dataset_y_labels,file_names = fn_Data_PreProcessing_ImgClass(path_base,reshapeDims,normalize) # ------------------------------------------------------------------------ # # Process the dataset. batched_y_labels = [dataset_y_labels[i:i + batch_size] for i in range(0, len(dataset_y_labels), batch_size)] batched_x_files = [dataset_x_files[i: i + batch_size] for i in range(0,len(dataset_x_files),batch_size)] if channel_flag == 'EX': loss_matrix_mc = [] print('Loading external packet traces') for i_mc in range(MC_runs[0],MC_runs[1]): # Load external packet traces as loss matrices. lossMap_list = [] for i_c in range(num_channels): df = pd.read_excel(os.path.join(ext_dir,'Rpp_'+str(rowsPerPacket)+'_MC_'+str(i_mc)+'.xlsx'),sheet_name=[str(i_c)],engine='openpyxl') lossMap_channel = (df[str(i_c)].to_numpy())[:,1:].astype(np.bool) lossMap_list.append(lossMap_channel) loss_matrix_all = np.dstack(lossMap_list) loss_matrix_ex = [loss_matrix_all[k_batch:k_batch+batch_size,:,:] for k_batch in range(0,np.shape(loss_matrix_all)[0],batch_size)] loss_matrix_mc.append(loss_matrix_ex) # lists to store results. true_labels = [] top1_pred_full_model = [] top1_pred_split_model = [] top5_pred_full_model = [] top5_pred_split_model = [] top1_pred_caltec = [] top5_pred_caltec = [] top1_pred_altec = [] top5_pred_altec = [] top1_pred_halrtc = [] top5_pred_halrtc = [] top1_pred_silrtc = [] top5_pred_silrtc = [] top1_pred_inpaint = [] top5_pred_inpaint = [] top1_conf_full = [] top1_conf_split = [] top1_conf_caltec = [] top1_conf_altec = [] top1_conf_halrtc = [] top1_conf_silrtc = [] top1_conf_inpaint = [] for i_b in range(len(batched_y_labels)): # Run through Monte Carlo experiments through each batch. print(f"Batch {i_b}") batch_labels = np.asarray(batched_y_labels[i_b],dtype=np.int64) true_labels.extend(batch_labels) batch_imgs = batched_x_files[i_b] batch_imgs_stacked = np.vstack([i[np.newaxis,...] 
for i in batch_imgs]) # ---------------------------------------------------------------- # full_model_out = loaded_model.predict(batch_imgs_stacked) batch_top1_predictions = np.argmax(full_model_out,axis=1) batch_confidence = np.max(full_model_out,axis=1) top1_pred_full_model.extend(batch_top1_predictions) top1_conf_full.extend(batch_confidence) for i_item in range(np.shape(full_model_out)[0]): item_top5_predictions = np.argpartition(-full_model_out[i_item,:],5)[:5] top5_pred_full_model.append(item_top5_predictions) # --------------------------------------------------------------- # deviceOut = mobile_model.predict(batch_imgs_stacked) print(f'Shape of device out tensor {np.shape(deviceOut)}') # ---------------------------------------------------------------- # devOut = [] if not isinstance(deviceOut, list): devOut.append(deviceOut) deviceOut = devOut # deviceOut is the output tensor for a batch of data. # Quantize the data quanParams_1 = [] quanParams_2 = [] # If quantization is required: if len(deviceOut) > 1: if quant_tensor1!= 'noQuant': print("Quantizing tensors") quant_tensor1.bitQuantizer(deviceOut[0]) deviceOut[0] = quant_tensor1.quanData quanParams_1.append(quant_tensor1.min) quanParams_1.append(quant_tensor1.max) quant_tensor2.bitQuantizer(deviceOut[1]) deviceOut[1] = quant_tensor2.quanData quanParams_2.append(quant_tensor2.min) quanParams_2.append(quant_tensor2.max) else: if quant_tensor1!= 'noQuant': print("Quantizing tensor.") quant_tensor1.bitQuantizer(deviceOut[0]) deviceOut[0] = quant_tensor1.quanData quanParams_1.append(quant_tensor1.min) quanParams_1.append(quant_tensor1.max) # Save quantized tensors as image. for i in range(len(deviceOut)): quant_tensor = deviceOut[i] for item_index in range(np.shape(quant_tensor)[0]): for i_c in range(np.shape(quant_tensor)[-1]): tensor_channel = Image.fromarray(quant_tensor[item_index,:,:,i_c].astype(np.uint8)) tensor_channel.save(os.path.join(results_dir,'original_batch_'+str(i_b)+'_item_'+str(item_index)+'_tensor_'+str(i)+'_channel_'+str(i_c)+res_filename+'.png')) # -------------------------------------------------------------------- # # Transmit the tensor deviceOut through the channel. if channel_flag in ['GC','RL']: # if a lossy channel has to be realized. # if mc_task == 'GenLossPatterns': # if we want to generate packet loss patterns. lossMatrix = [] receivedIndices = [] lostIndices = [] dOut = [] for i in range(len(deviceOut)): dO, lM, rI, lI = transmit(deviceOut[i], channel, rowsPerPacket) dOut.append(dO) lossMatrix.append(lM) receivedIndices.append(rI) lostIndices.append(lI) channel.lossMatrix = [] deviceOut = dOut # ---------------------------------------------------------------- # # packetize tensor. pkt_obj_list = [] for i in range(len(deviceOut)): pkt_obj_list.append(PacketModel(rows_per_packet=rowsPerPacket,data_tensor=np.copy(deviceOut[i].data_tensor))) # -------------------------------------------------------------------- # if channel_flag == 'EX': batch_loss_matrix = loss_matrix_mc[i_mc] loss_matrix = [batch_loss_matrix[i_b]] # -------------------------------------------------------------------- # if channel_flag in ['GC','RL','EX']: # ---------------------------------------------------------------- # # apply the loss matrix to the tensor. for i in range(len(pkt_obj_list)): loss_map = lossMatrix[i] #print(np.shape(loss_map)) channel_width = np.shape(pkt_obj_list[i].packet_seq)[3] # loop through items in batch. 
for item_index in range(np.shape(loss_map)[0]): item_lost_map = loss_map[item_index,:,:] lost_pkt_indices,lost_channel_indices = np.where(item_lost_map == False) if len(lost_pkt_indices) != 0: # drop packet in tensor. for k in range(len(lost_pkt_indices)): pkt_obj_list[i].packet_seq[item_index,lost_pkt_indices[k],:,:,lost_channel_indices[k]] = np.zeros([rowsPerPacket,channel_width]) for i in range(len(deviceOut)): quant_tensor = pkt_obj_list[i].data_tensor for item_index in range(
np.shape(quant_tensor)
numpy.shape
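For reference, numpy.shape (the API tagged above) is the functional counterpart of the .shape attribute and also accepts plain Python sequences. A small self-contained sketch, using an invented stand-in tensor rather than the demo's real packetized tensors:

import numpy as np

# Invented stand-in shaped like (batch, rows, cols, channels); not the demo's actual tensor.
quant_tensor = np.zeros((4, 8, 8, 3), dtype=np.uint8)

print(np.shape(quant_tensor))      # (4, 8, 8, 3), identical to quant_tensor.shape
print(np.shape([[1, 2], [3, 4]]))  # works on nested lists too: (2, 2)

# The prompt iterates over the batch dimension in the same way:
for item_index in range(np.shape(quant_tensor)[0]):
    pass  # per-item processing goes here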
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals
import unittest
import datetime
import warnings

import jsonpickle
from jsonpickle.compat import PY2, PY3, PY_MINOR

from helper import SkippableTest

try:
    import numpy as np
    import numpy.testing as npt
    from numpy.compat import asbytes
    from numpy.testing import assert_equal
except ImportError:
    np = None


class NumpyTestCase(SkippableTest):
    def setUp(self):
        if np is None:
            self.should_skip = True
            return
        self.should_skip = False
        import jsonpickle.ext.numpy
        jsonpickle.ext.numpy.register_handlers()

    def tearDown(self):
        if self.should_skip:
            return
        import jsonpickle.ext.numpy
        jsonpickle.ext.numpy.unregister_handlers()

    def roundtrip(self, obj):
        return jsonpickle.decode(jsonpickle.encode(obj))

    def test_dtype_roundtrip(self):
        if self.should_skip:
            return self.skip('numpy is not importable')
        dtypes = [
            np.int,
            np.float,
            np.complex,
            np.int32,
            np.str,
            np.object,
            np.unicode,
            np.dtype('f4,i4,f2,i1'),
            np.dtype(('f4', 'i4'), ('f2', 'i1')),
np.dtype('1i4', align=True)
numpy.dtype
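To make the intent of the truncated test above concrete, here is a minimal dtype round-trip using the same jsonpickle NumPy handlers the test registers in setUp; this sketch is illustrative and not part of the original test file.

import numpy as np
import jsonpickle
import jsonpickle.ext.numpy

jsonpickle.ext.numpy.register_handlers()

dt = np.dtype('f4,i4,f2,i1')  # one of the dtypes from the test's list
restored = jsonpickle.decode(jsonpickle.encode(dt))
assert restored == dt

jsonpickle.ext.numpy.unregister_handlers()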
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import scipy.signal

__all__ = ['calc_discount_sum_rewards', 'calc_gae', 'Scaler']

"""
The following code are copied or modified from:
https://github.com/pat-coady/trpo

Written by <NAME> (pat-coady.github.io)
"""


def calc_discount_sum_rewards(rewards, gamma):
    """ Calculate discounted forward sum of a sequence at each point """
    return scipy.signal.lfilter([1.0], [1.0, -gamma], rewards[::-1])[::-1]


def calc_gae(rewards, values, gamma, lam):
    """ Calculate generalized advantage estimator.
    See: https://arxiv.org/pdf/1506.02438.pdf
    """
    # temporal differences
    tds = rewards - values + np.append(values[1:] * gamma, 0)
    advantages = calc_discount_sum_rewards(tds, gamma * lam)
    return advantages


class Scaler(object):
    """ Generate scale and offset based on running mean and stddev along axis=0

        offset = running mean
        scale = 1 / (stddev + 0.1) / 3 (i.e. 3x stddev = +/- 1.0)
    """

    def __init__(self, obs_dim):
        """
        Args:
            obs_dim: dimension of axis=1
        """
        self.vars = np.zeros(obs_dim)
        self.means = np.zeros(obs_dim)
        self.cnt = 0
        self.first_pass = True

    def update(self, x):
        """ Update running mean and variance (this is an exact method)
        Args:
            x: NumPy array, shape = (N, obs_dim)

        see: https://stats.stackexchange.com/questions/43159/how-to-calculate-pooled-
             variance-of-two-groups-given-known-group-variances-mean
        """
        if self.first_pass:
            self.means = np.mean(x, axis=0)
            self.vars = np.var(x, axis=0)
            self.cnt = x.shape[0]
            self.first_pass = False
        else:
            n = x.shape[0]
            new_data_var = np.var(x, axis=0)
            new_data_mean = np.mean(x, axis=0)
            new_data_mean_sq = np.square(new_data_mean)
            new_means = (
                (self.means * self.cnt) + (new_data_mean * n)) / (self.cnt + n)
            self.vars = (((self.cnt * (self.vars + np.square(self.means))) +
                          (n * (new_data_var + new_data_mean_sq))) / (self.cnt + n) -
np.square(new_means)
numpy.square
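The Scaler.update branch above is the exact pooled-statistics update, and the completion (np.square(new_means)) is its final subtracted term: the combined second moment minus the square of the combined mean. Below is a short, self-contained check of that formula against a direct computation; the array shapes and variable names are illustrative only.

import numpy as np

rng = np.random.default_rng(1)
old = rng.normal(size=(50, 3))  # data already absorbed by the running scaler
new = rng.normal(size=(30, 3))  # the incoming batch

# Running statistics after the first pass (population variance, as np.var defaults to).
means, variances, cnt = old.mean(axis=0), old.var(axis=0), old.shape[0]

# Pooled update, mirroring Scaler.update():
n = new.shape[0]
new_means = (means * cnt + new.mean(axis=0) * n) / (cnt + n)
pooled_var = ((cnt * (variances + np.square(means)) +
               n * (new.var(axis=0) + np.square(new.mean(axis=0)))) / (cnt + n) -
              np.square(new_means))

# The update is exact: it matches statistics computed over all data at once.
both = np.vstack([old, new])
assert np.allclose(new_means, both.mean(axis=0))
assert np.allclose(pooled_var, both.var(axis=0))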
from __future__ import print_function, division, absolute_import import time import matplotlib matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis import numpy as np import six.moves as sm import cv2 import shapely import shapely.geometry import imgaug as ia from imgaug.testutils import reseed def main(): time_start = time.time() test_is_np_array() test_is_single_integer() test_is_single_float() test_is_single_number() test_is_iterable() test_is_string() test_is_single_bool() test_is_integer_array() test_is_float_array() test_is_callable() test_caller_name() test_seed() test_current_random_state() test_new_random_state() test_dummy_random_state() test_copy_random_state() test_derive_random_state() test_derive_random_states() test_forward_random_state() # test_quokka() # test_quokka_square() # test_angle_between_vectors() # test_draw_text() test_imresize_many_images() test_imresize_single_image() test_pad() test_compute_paddings_for_aspect_ratio() test_pad_to_aspect_ratio() test_pool() test_avg_pool() test_max_pool() test_draw_grid() # test_show_grid() # test_do_assert() # test_HooksImages_is_activated() # test_HooksImages_is_propagating() # test_HooksImages_preprocess() # test_HooksImages_postprocess() test_Keypoint() test_KeypointsOnImage() test_BoundingBox() test_BoundingBoxesOnImage() # test_HeatmapsOnImage_get_arr() # test_HeatmapsOnImage_find_global_maxima() test_HeatmapsOnImage_draw() test_HeatmapsOnImage_draw_on_image() test_HeatmapsOnImage_invert() test_HeatmapsOnImage_pad() # test_HeatmapsOnImage_pad_to_aspect_ratio() test_HeatmapsOnImage_avg_pool() test_HeatmapsOnImage_max_pool() test_HeatmapsOnImage_scale() # test_HeatmapsOnImage_to_uint8() # test_HeatmapsOnImage_from_uint8() # test_HeatmapsOnImage_from_0to1() # test_HeatmapsOnImage_change_normalization() # test_HeatmapsOnImage_copy() # test_HeatmapsOnImage_deepcopy() test_SegmentationMapOnImage_bool() test_SegmentationMapOnImage_get_arr_int() # test_SegmentationMapOnImage_get_arr_bool() test_SegmentationMapOnImage_draw() test_SegmentationMapOnImage_draw_on_image() test_SegmentationMapOnImage_pad() test_SegmentationMapOnImage_pad_to_aspect_ratio() test_SegmentationMapOnImage_scale() test_SegmentationMapOnImage_to_heatmaps() test_SegmentationMapOnImage_from_heatmaps() test_SegmentationMapOnImage_copy() test_SegmentationMapOnImage_deepcopy() test_Polygon___init__() test_Polygon_xx() test_Polygon_yy() test_Polygon_xx_int() test_Polygon_yy_int() test_Polygon_is_valid() test_Polygon_area() test_Polygon_project() test_Polygon__compute_inside_image_point_mask() test_Polygon_is_fully_within_image() test_Polygon_is_partly_within_image() test_Polygon_is_out_of_image() test_Polygon_cut_out_of_image() test_Polygon_clip_out_of_image() test_Polygon_shift() test_Polygon_draw_on_image() test_Polygon_extract_from_image() test_Polygon_to_shapely_polygon() test_Polygon_to_bounding_box() test_Polygon_from_shapely() test_Polygon_copy() test_Polygon_deepcopy() test_Polygon___repr__() test_Polygon___str__() # test_Batch() test_BatchLoader() # test_BackgroundAugmenter.get_batch() # test_BackgroundAugmenter._augment_images_worker() # test_BackgroundAugmenter.terminate() time_end = time.time() print("<%s> Finished without errors in %.4fs." 
% (__file__, time_end - time_start,)) def test_is_np_array(): class _Dummy(object): pass values_true = [ np.zeros((1, 2), dtype=np.uint8), np.zeros((64, 64, 3), dtype=np.uint8), np.zeros((1, 2), dtype=np.float32), np.zeros((100,), dtype=np.float64) ] values_false = [ "A", "BC", "1", True, False, (1.0, 2.0), [1.0, 2.0], _Dummy(), -100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4 ] for value in values_true: assert ia.is_np_array(value) is True for value in values_false: assert ia.is_np_array(value) is False def test_is_single_integer(): assert ia.is_single_integer("A") is False assert ia.is_single_integer(None) is False assert ia.is_single_integer(1.2) is False assert ia.is_single_integer(1.0) is False assert ia.is_single_integer(np.ones((1,), dtype=np.float32)[0]) is False assert ia.is_single_integer(1) is True assert ia.is_single_integer(1234) is True assert ia.is_single_integer(np.ones((1,), dtype=np.uint8)[0]) is True assert ia.is_single_integer(np.ones((1,), dtype=np.int32)[0]) is True def test_is_single_float(): assert ia.is_single_float("A") is False assert ia.is_single_float(None) is False assert ia.is_single_float(1.2) is True assert ia.is_single_float(1.0) is True assert ia.is_single_float(np.ones((1,), dtype=np.float32)[0]) is True assert ia.is_single_float(1) is False assert ia.is_single_float(1234) is False assert ia.is_single_float(np.ones((1,), dtype=np.uint8)[0]) is False assert ia.is_single_float(np.ones((1,), dtype=np.int32)[0]) is False def test_caller_name(): assert ia.caller_name() == 'test_caller_name' def test_is_single_number(): class _Dummy(object): pass values_true = [-100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4] values_false = ["A", "BC", "1", True, False, (1.0, 2.0), [1.0, 2.0], _Dummy(), np.zeros((1, 2), dtype=np.uint8)] for value in values_true: assert ia.is_single_number(value) is True for value in values_false: assert ia.is_single_number(value) is False def test_is_iterable(): class _Dummy(object): pass values_true = [ [0, 1, 2], ["A", "X"], [[123], [456, 789]], [], (1, 2, 3), (1,), tuple(), "A", "ABC", "", np.zeros((100,), dtype=np.uint8) ] values_false = [1, 100, 0, -100, -1, 1.2, -1.2, True, False, _Dummy()] for value in values_true: assert ia.is_iterable(value) is True, value for value in values_false: assert ia.is_iterable(value) is False def test_is_string(): class _Dummy(object): pass values_true = ["A", "BC", "1", ""] values_false = [-100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, True, False, (1.0, 2.0), [1.0, 2.0], _Dummy(), np.zeros((1, 2), dtype=np.uint8)] for value in values_true: assert ia.is_string(value) is True for value in values_false: assert ia.is_string(value) is False def test_is_single_bool(): class _Dummy(object): pass values_true = [False, True] values_false = [-100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, (1.0, 2.0), [1.0, 2.0], _Dummy(), np.zeros((1, 2), dtype=np.uint8), np.zeros((1,), dtype=bool)] for value in values_true: assert ia.is_single_bool(value) is True for value in values_false: assert ia.is_single_bool(value) is False def test_is_integer_array(): class _Dummy(object): pass values_true = [ np.zeros((1, 2), dtype=np.uint8), np.zeros((100,), dtype=np.uint8), np.zeros((1, 2), dtype=np.uint16), np.zeros((1, 2), dtype=np.int32), np.zeros((1, 2), dtype=np.int64) ] values_false = [ "A", "BC", "1", "", -100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, True, False, (1.0, 2.0), [1.0, 2.0], _Dummy(), np.zeros((1, 2), dtype=np.float16), np.zeros((100,), dtype=np.float32), np.zeros((1, 2), 
dtype=np.float64), np.zeros((1, 2), dtype=np.bool) ] for value in values_true: assert ia.is_integer_array(value) is True for value in values_false: assert ia.is_integer_array(value) is False def test_is_float_array(): class _Dummy(object): pass values_true = [ np.zeros((1, 2), dtype=np.float16), np.zeros((100,), dtype=np.float32), np.zeros((1, 2), dtype=np.float64) ] values_false = [ "A", "BC", "1", "", -100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, True, False, (1.0, 2.0), [1.0, 2.0], _Dummy(), np.zeros((1, 2), dtype=np.uint8), np.zeros((100,), dtype=np.uint8), np.zeros((1, 2), dtype=np.uint16), np.zeros((1, 2), dtype=np.int32), np.zeros((1, 2), dtype=np.int64), np.zeros((1, 2), dtype=np.bool) ] for value in values_true: assert ia.is_float_array(value) is True for value in values_false: assert ia.is_float_array(value) is False def test_is_callable(): def _dummy_func(): pass _dummy_func2 = lambda x: x class _Dummy1(object): pass class _Dummy2(object): def __call__(self): pass values_true = [_dummy_func, _dummy_func2, _Dummy2()] values_false = ["A", "BC", "1", "", -100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, True, False, (1.0, 2.0), [1.0, 2.0], _Dummy1(), np.zeros((1, 2), dtype=np.uint8)] for value in values_true: assert ia.is_callable(value) == True for value in values_false: assert ia.is_callable(value) == False def test_seed(): ia.seed(10017) rs = np.random.RandomState(10017) assert ia.CURRENT_RANDOM_STATE.randint(0, 1000*1000) == rs.randint(0, 1000*1000) reseed() def test_current_random_state(): assert ia.current_random_state() == ia.CURRENT_RANDOM_STATE def test_new_random_state(): seed = 1000 ia.seed(seed) rs_observed = ia.new_random_state(seed=None, fully_random=False) rs_expected = np.random.RandomState(np.random.RandomState(seed).randint(0, 10**6, 1)[0]) assert rs_observed.randint(0, 10**6) == rs_expected.randint(0, 10**6) rs_observed1 = ia.new_random_state(seed=None, fully_random=False) rs_observed2 = ia.new_random_state(seed=None, fully_random=False) assert rs_observed1.randint(0, 10**6) != rs_observed2.randint(0, 10**6) ia.seed(seed) np.random.seed(seed) rs_observed = ia.new_random_state(seed=None, fully_random=True) rs_not_expected = np.random.RandomState(np.random.RandomState(seed).randint(0, 10**6, 1)[0]) assert rs_observed.randint(0, 10**6) != rs_not_expected.randint(0, 10**6) rs_observed1 = ia.new_random_state(seed=None, fully_random=True) rs_observed2 = ia.new_random_state(seed=None, fully_random=True) assert rs_observed1.randint(0, 10**6) != rs_observed2.randint(0, 10**6) rs_observed1 = ia.new_random_state(seed=1234) rs_observed2 = ia.new_random_state(seed=1234) rs_expected = np.random.RandomState(1234) assert rs_observed1.randint(0, 10**6) == rs_observed2.randint(0, 10**6) == rs_expected.randint(0, 10**6) def test_dummy_random_state(): assert ia.dummy_random_state().randint(0, 10**6) == np.random.RandomState(1).randint(0, 10**6) def test_copy_random_state(): rs = np.random.RandomState(1017) rs_copy = ia.copy_random_state(rs) assert rs != rs_copy assert rs.randint(0, 10**6) == rs_copy.randint(0, 10**6) assert ia.copy_random_state(np.random) == np.random assert ia.copy_random_state(np.random, force_copy=True) != np.random def test_derive_random_state(): rs = np.random.RandomState(1017) rs_observed = ia.derive_random_state(np.random.RandomState(1017)) rs_expected = np.random.RandomState(np.random.RandomState(1017).randint(0, 10**6)) assert rs_observed.randint(0, 10**6) == rs_expected.randint(0, 10**6) def test_derive_random_states(): rs_observed1, 
rs_observed2 = ia.derive_random_states(np.random.RandomState(1017), n=2) seed = np.random.RandomState(1017).randint(0, 10**6) rs_expected1 = np.random.RandomState(seed+0) rs_expected2 = np.random.RandomState(seed+1) assert rs_observed1.randint(0, 10**6) == rs_expected1.randint(0, 10**6) assert rs_observed2.randint(0, 10**6) == rs_expected2.randint(0, 10**6) def test_forward_random_state(): rs1 = np.random.RandomState(1017) rs2 = np.random.RandomState(1017) ia.forward_random_state(rs1) rs2.uniform() assert rs1.randint(0, 10**6) == rs2.randint(0, 10**6) def test_imresize_many_images(): interpolations = [None, "nearest", "linear", "area", "cubic", cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC] for c in [1, 3]: image1 = np.zeros((16, 16, c), dtype=np.uint8) + 255 image2 = np.zeros((16, 16, c), dtype=np.uint8) image3 = np.pad( np.zeros((8, 8, c), dtype=np.uint8) + 255, ((4, 4), (4, 4), (0, 0)), mode="constant", constant_values=0 ) image1_small = np.zeros((8, 8, c), dtype=np.uint8) + 255 image2_small = np.zeros((8, 8, c), dtype=np.uint8) image3_small = np.pad( np.zeros((4, 4, c), dtype=np.uint8) + 255, ((2, 2), (2, 2), (0, 0)), mode="constant", constant_values=0 ) image1_large = np.zeros((32, 32, c), dtype=np.uint8) + 255 image2_large = np.zeros((32, 32, c), dtype=np.uint8) image3_large = np.pad( np.zeros((16, 16, c), dtype=np.uint8) + 255, ((8, 8), (8, 8), (0, 0)), mode="constant", constant_values=0 ) images = np.uint8([image1, image2, image3]) images_small = np.uint8([image1_small, image2_small, image3_small]) images_large = np.uint8([image1_large, image2_large, image3_large]) for images_this_iter in [images, list(images)]: # test for ndarray and list(ndarray) input for interpolation in interpolations: images_same_observed = ia.imresize_many_images(images_this_iter, (16, 16), interpolation=interpolation) for image_expected, image_observed in zip(images_this_iter, images_same_observed): diff = np.abs(image_expected.astype(np.int32) - image_observed.astype(np.int32)) assert np.sum(diff) == 0 for interpolation in interpolations: images_small_observed = ia.imresize_many_images(images_this_iter, (8, 8), interpolation=interpolation) for image_expected, image_observed in zip(images_small, images_small_observed): diff = np.abs(image_expected.astype(np.int32) - image_observed.astype(np.int32)) diff_fraction = np.sum(diff) / (image_observed.size * 255) assert diff_fraction < 0.5 for interpolation in interpolations: images_large_observed = ia.imresize_many_images(images_this_iter, (32, 32), interpolation=interpolation) for image_expected, image_observed in zip(images_large, images_large_observed): diff = np.abs(image_expected.astype(np.int32) - image_observed.astype(np.int32)) diff_fraction = np.sum(diff) / (image_observed.size * 255) assert diff_fraction < 0.5 # test size given as single int images = np.zeros((1, 4, 4, 3), dtype=np.uint8) observed = ia.imresize_many_images(images, 8) assert observed.shape == (1, 8, 8, 3) # test size given as single float images = np.zeros((1, 4, 4, 3), dtype=np.uint8) observed = ia.imresize_many_images(images, 2.0) assert observed.shape == (1, 8, 8, 3) images = np.zeros((1, 4, 4, 3), dtype=np.uint8) observed = ia.imresize_many_images(images, 0.5) assert observed.shape == (1, 2, 2, 3) # test size given as (float, float) images = np.zeros((1, 4, 4, 3), dtype=np.uint8) observed = ia.imresize_many_images(images, (2.0, 2.0)) assert observed.shape == (1, 8, 8, 3) images = np.zeros((1, 4, 4, 3), dtype=np.uint8) observed = ia.imresize_many_images(images, 
(0.5, 0.5)) assert observed.shape == (1, 2, 2, 3) images = np.zeros((1, 4, 4, 3), dtype=np.uint8) observed = ia.imresize_many_images(images, (2.0, 0.5)) assert observed.shape == (1, 8, 2, 3) images = np.zeros((1, 4, 4, 3), dtype=np.uint8) observed = ia.imresize_many_images(images, (0.5, 2.0)) assert observed.shape == (1, 2, 8, 3) # test size given as int+float or float+int images = np.zeros((1, 4, 4, 3), dtype=np.uint8) observed = ia.imresize_many_images(images, (11, 2.0)) assert observed.shape == (1, 11, 8, 3) images = np.zeros((1, 4, 4, 3), dtype=np.uint8) observed = ia.imresize_many_images(images, (2.0, 11)) assert observed.shape == (1, 8, 11, 3) # test no channels images = np.zeros((1, 4, 4), dtype=np.uint8) images_rs = ia.imresize_many_images(images, (2, 2)) assert images_rs.shape == (1, 2, 2) images = [np.zeros((4, 4), dtype=np.uint8)] images_rs = ia.imresize_many_images(images, (2, 2)) assert isinstance(images_rs, list) assert images_rs[0].shape == (2, 2) # test len 0 input observed = ia.imresize_many_images(np.zeros((0, 8, 8, 3), dtype=np.uint8), (4, 4)) assert ia.is_np_array(observed) assert observed.dtype.type == np.uint8 assert len(observed) == 0 observed = ia.imresize_many_images([], (4, 4)) assert isinstance(observed, list) assert len(observed) == 0 # test images with zero height/width images = [np.zeros((0, 4, 3), dtype=np.uint8)] got_exception = False try: _ = ia.imresize_many_images(images, sizes=(2, 2)) except Exception as exc: assert "Cannot resize images, because at least one image has a height and/or width of zero." in str(exc) got_exception = True assert got_exception images = [np.zeros((4, 0, 3), dtype=np.uint8)] got_exception = False try: _ = ia.imresize_many_images(images, sizes=(2, 2)) except Exception as exc: assert "Cannot resize images, because at least one image has a height and/or width of zero." in str(exc) got_exception = True assert got_exception images = [np.zeros((0, 0, 3), dtype=np.uint8)] got_exception = False try: _ = ia.imresize_many_images(images, sizes=(2, 2)) except Exception as exc: assert "Cannot resize images, because at least one image has a height and/or width of zero." in str(exc) got_exception = True assert got_exception # test invalid sizes sizes_all = [(-1, 2), (0, 2)] sizes_all = sizes_all\ + [(float(a), b) for a, b in sizes_all]\ + [(a, float(b)) for a, b in sizes_all]\ + [(float(a), float(b)) for a, b in sizes_all]\ + [(-a, -b) for a, b in sizes_all]\ + [(-float(a), -b) for a, b in sizes_all]\ + [(-a, -float(b)) for a, b in sizes_all]\ + [(-float(a), -float(b)) for a, b in sizes_all] sizes_all = sizes_all\ + [(b, a) for a, b in sizes_all] sizes_all = sizes_all\ + [-1.0, 0.0, -1, 0] for sizes in sizes_all: images = [np.zeros((4, 4, 3), dtype=np.uint8)] got_exception = False try: _ = ia.imresize_many_images(images, sizes=sizes) except Exception as exc: assert "value is zero or lower than zero." 
in str(exc) got_exception = True assert got_exception # test list input but all with same shape images = [np.zeros((8, 8, 3), dtype=np.uint8) for _ in range(2)] observed = ia.imresize_many_images(images, (4, 4)) assert isinstance(observed, list) assert all([image.shape == (4, 4, 3) for image in observed]) assert all([image.dtype.type == np.uint8 for image in observed]) def test_imresize_single_image(): for c in [-1, 1, 3]: image1 = np.zeros((16, 16, abs(c)), dtype=np.uint8) + 255 image2 = np.zeros((16, 16, abs(c)), dtype=np.uint8) image3 = np.pad( np.zeros((8, 8, abs(c)), dtype=np.uint8) + 255, ((4, 4), (4, 4), (0, 0)), mode="constant", constant_values=0 ) image1_small = np.zeros((8, 8, abs(c)), dtype=np.uint8) + 255 image2_small = np.zeros((8, 8, abs(c)), dtype=np.uint8) image3_small = np.pad( np.zeros((4, 4, abs(c)), dtype=np.uint8) + 255, ((2, 2), (2, 2), (0, 0)), mode="constant", constant_values=0 ) image1_large = np.zeros((32, 32, abs(c)), dtype=np.uint8) + 255 image2_large = np.zeros((32, 32, abs(c)), dtype=np.uint8) image3_large = np.pad( np.zeros((16, 16, abs(c)), dtype=np.uint8) + 255, ((8, 8), (8, 8), (0, 0)), mode="constant", constant_values=0 ) images = np.uint8([image1, image2, image3]) images_small = np.uint8([image1_small, image2_small, image3_small]) images_large = np.uint8([image1_large, image2_large, image3_large]) if c == -1: images = images[:, :, 0] images_small = images_small[:, :, 0] images_large = images_large[:, :, 0] interpolations = [None, "nearest", "linear", "area", "cubic", cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC] for interpolation in interpolations: for image in images: image_observed = ia.imresize_single_image(image, (16, 16), interpolation=interpolation) diff = np.abs(image.astype(np.int32) - image_observed.astype(np.int32)) assert np.sum(diff) == 0 for interpolation in interpolations: for image, image_expected in zip(images, images_small): image_observed = ia.imresize_single_image(image, (8, 8), interpolation=interpolation) diff = np.abs(image_expected.astype(np.int32) - image_observed.astype(np.int32)) diff_fraction = np.sum(diff) / (image_observed.size * 255) assert diff_fraction < 0.5 for interpolation in interpolations: for image, image_expected in zip(images, images_large): image_observed = ia.imresize_single_image(image, (32, 32), interpolation=interpolation) diff = np.abs(image_expected.astype(np.int32) - image_observed.astype(np.int32)) diff_fraction = np.sum(diff) / (image_observed.size * 255) assert diff_fraction < 0.5 def test_pad(): # ------- # uint8, int32 # ------- for dtype in [np.uint8, np.int32]: arr = np.zeros((3, 3), dtype=dtype) + 255 arr_pad = ia.pad(arr) assert arr_pad.shape == (3, 3) assert arr_pad.dtype.type == dtype assert np.array_equal(arr_pad, arr) arr_pad = ia.pad(arr, top=1) assert arr_pad.shape == (4, 3) assert arr_pad.dtype.type == dtype assert np.all(arr_pad[0, :] == 0) arr_pad = ia.pad(arr, right=1) assert arr_pad.shape == (3, 4) assert arr_pad.dtype.type == dtype assert np.all(arr_pad[:, -1] == 0) arr_pad = ia.pad(arr, bottom=1) assert arr_pad.shape == (4, 3) assert arr_pad.dtype.type == dtype assert np.all(arr_pad[-1, :] == 0) arr_pad = ia.pad(arr, left=1) assert arr_pad.shape == (3, 4) assert arr_pad.dtype.type == dtype assert np.all(arr_pad[:, 0] == 0) arr_pad = ia.pad(arr, top=1, right=2, bottom=3, left=4) assert arr_pad.shape == (3+(1+3), 3+(2+4)) assert arr_pad.dtype.type == dtype assert np.all(arr_pad[0, :] == 0) assert np.all(arr_pad[:, -2:] == 0) assert np.all(arr_pad[-3:, :] == 0) 
assert np.all(arr_pad[:, :4] == 0) arr_pad = ia.pad(arr, top=1, cval=10) assert arr_pad.shape == (4, 3) assert arr_pad.dtype.type == dtype assert np.all(arr_pad[0, :] == 10) arr = np.zeros((3, 3, 3), dtype=dtype) + 128 arr_pad = ia.pad(arr, top=1) assert arr_pad.shape == (4, 3, 3) assert arr_pad.dtype.type == dtype assert np.all(arr_pad[0, :, 0] == 0) assert np.all(arr_pad[0, :, 1] == 0) assert np.all(arr_pad[0, :, 2] == 0) arr = np.zeros((3, 3), dtype=dtype) + 128 arr[1, 1] = 200 arr_pad = ia.pad(arr, top=1, mode="maximum") assert arr_pad.shape == (4, 3) assert arr_pad.dtype.type == dtype assert arr_pad[0, 0] == 128 assert arr_pad[0, 1] == 200 assert arr_pad[0, 2] == 128 arr = np.zeros((3, 3), dtype=dtype) arr_pad = ia.pad(arr, top=1, mode="constant", cval=123) assert arr_pad.shape == (4, 3) assert arr_pad.dtype.type == dtype assert arr_pad[0, 0] == 123 assert arr_pad[0, 1] == 123 assert arr_pad[0, 2] == 123 assert arr_pad[1, 0] == 0 arr = np.zeros((1, 1), dtype=dtype) + 100 arr_pad = ia.pad(arr, top=4, mode="linear_ramp", cval=200) assert arr_pad.shape == (5, 1) assert arr_pad.dtype.type == dtype assert arr_pad[0, 0] == 200 assert arr_pad[1, 0] == 175 assert arr_pad[2, 0] == 150 assert arr_pad[3, 0] == 125 assert arr_pad[4, 0] == 100 # ------- # float32, float64 # ------- for dtype in [np.float32, np.float64]: arr = np.zeros((3, 3), dtype=dtype) + 1.0 arr_pad = ia.pad(arr) assert arr_pad.shape == (3, 3) assert arr_pad.dtype.type == dtype assert np.allclose(arr_pad, arr) arr_pad = ia.pad(arr, top=1) assert arr_pad.shape == (4, 3) assert arr_pad.dtype.type == dtype assert np.allclose(arr_pad[0, :], dtype([0, 0, 0])) arr_pad = ia.pad(arr, right=1) assert arr_pad.shape == (3, 4) assert arr_pad.dtype.type == dtype assert np.allclose(arr_pad[:, -1], dtype([0, 0, 0])) arr_pad = ia.pad(arr, bottom=1) assert arr_pad.shape == (4, 3) assert arr_pad.dtype.type == dtype assert np.allclose(arr_pad[-1, :], dtype([0, 0, 0])) arr_pad = ia.pad(arr, left=1) assert arr_pad.shape == (3, 4) assert arr_pad.dtype.type == dtype assert np.allclose(arr_pad[:, 0], dtype([0, 0, 0])) arr_pad = ia.pad(arr, top=1, right=2, bottom=3, left=4) assert arr_pad.shape == (3+(1+3), 3+(2+4)) assert arr_pad.dtype.type == dtype assert 0 - 1e-6 < np.max(arr_pad[0, :]) < 0 + 1e-6 assert 0 - 1e-6 < np.max(arr_pad[:, -2:]) < 0 + 1e-6 assert 0 - 1e-6 < np.max(arr_pad[-3, :]) < 0 + 1e-6 assert 0 - 1e-6 < np.max(arr_pad[:, :4]) < 0 + 1e-6 arr_pad = ia.pad(arr, top=1, cval=0.2) assert arr_pad.shape == (4, 3) assert arr_pad.dtype.type == dtype assert np.allclose(arr_pad[0, :], dtype([0.2, 0.2, 0.2])) arr = np.zeros((3, 3, 3), dtype=dtype) + 0.5 arr_pad = ia.pad(arr, top=1) assert arr_pad.shape == (4, 3, 3) assert arr_pad.dtype.type == dtype assert np.allclose(arr_pad[0, :, 0], dtype([0, 0, 0])) assert np.allclose(arr_pad[0, :, 1], dtype([0, 0, 0])) assert np.allclose(arr_pad[0, :, 2], dtype([0, 0, 0])) arr = np.zeros((3, 3), dtype=dtype) + 0.5 arr[1, 1] = 0.75 arr_pad = ia.pad(arr, top=1, mode="maximum") assert arr_pad.shape == (4, 3) assert arr_pad.dtype.type == dtype assert 0.50 - 1e-6 < arr_pad[0, 0] < 0.50 + 1e-6 assert 0.75 - 1e-6 < arr_pad[0, 1] < 0.75 + 1e-6 assert 0.50 - 1e-6 < arr_pad[0, 2] < 0.50 + 1e-6 arr = np.zeros((3, 3), dtype=dtype) arr_pad = ia.pad(arr, top=1, mode="constant", cval=0.4) assert arr_pad.shape == (4, 3) assert arr_pad.dtype.type == dtype assert 0.4 - 1e-6 < arr_pad[0, 0] < 0.4 + 1e-6 assert 0.4 - 1e-6 < arr_pad[0, 1] < 0.4 + 1e-6 assert 0.4 - 1e-6 < arr_pad[0, 2] < 0.4 + 1e-6 assert 0.0 - 1e-6 < arr_pad[1, 
0] < 0.0 + 1e-6 arr = np.zeros((1, 1), dtype=dtype) + 0.6 arr_pad = ia.pad(arr, top=4, mode="linear_ramp", cval=1.0) assert arr_pad.shape == (5, 1) assert arr_pad.dtype.type == dtype assert 1.0 - 1e-6 < arr_pad[0, 0] < 1.0 + 1e-6 assert 0.9 - 1e-6 < arr_pad[1, 0] < 0.9 + 1e-6 assert 0.8 - 1e-6 < arr_pad[2, 0] < 0.8 + 1e-6 assert 0.7 - 1e-6 < arr_pad[3, 0] < 0.7 + 1e-6 assert 0.6 - 1e-6 < arr_pad[4, 0] < 0.6 + 1e-6 def test_compute_paddings_for_aspect_ratio(): arr = np.zeros((4, 4), dtype=np.uint8) top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 1.0) assert top == 0 assert right == 0 assert bottom == 0 assert left == 0 arr = np.zeros((1, 4), dtype=np.uint8) top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 1.0) assert top == 2 assert right == 0 assert bottom == 1 assert left == 0 arr = np.zeros((4, 1), dtype=np.uint8) top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 1.0) assert top == 0 assert right == 2 assert bottom == 0 assert left == 1 arr = np.zeros((2, 4), dtype=np.uint8) top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 1.0) assert top == 1 assert right == 0 assert bottom == 1 assert left == 0 arr = np.zeros((4, 2), dtype=np.uint8) top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 1.0) assert top == 0 assert right == 1 assert bottom == 0 assert left == 1 arr = np.zeros((4, 4), dtype=np.uint8) top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 0.5) assert top == 2 assert right == 0 assert bottom == 2 assert left == 0 arr = np.zeros((4, 4), dtype=np.uint8) top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 2.0) assert top == 0 assert right == 2 assert bottom == 0 assert left == 2 def test_pad_to_aspect_ratio(): for dtype in [np.uint8, np.int32, np.float32]: # aspect_ratio = 1.0 arr = np.zeros((4, 4), dtype=dtype) arr_pad = ia.pad_to_aspect_ratio(arr, 1.0) assert arr_pad.dtype.type == dtype assert arr_pad.shape[0] == 4 assert arr_pad.shape[1] == 4 arr = np.zeros((1, 4), dtype=dtype) arr_pad = ia.pad_to_aspect_ratio(arr, 1.0) assert arr_pad.dtype.type == dtype assert arr_pad.shape[0] == 4 assert arr_pad.shape[1] == 4 arr = np.zeros((4, 1), dtype=dtype) arr_pad = ia.pad_to_aspect_ratio(arr, 1.0) assert arr_pad.dtype.type == dtype assert arr_pad.shape[0] == 4 assert arr_pad.shape[1] == 4 arr = np.zeros((2, 4), dtype=dtype) arr_pad = ia.pad_to_aspect_ratio(arr, 1.0) assert arr_pad.dtype.type == dtype assert arr_pad.shape[0] == 4 assert arr_pad.shape[1] == 4 arr = np.zeros((4, 2), dtype=dtype) arr_pad = ia.pad_to_aspect_ratio(arr, 1.0) assert arr_pad.dtype.type == dtype assert arr_pad.shape[0] == 4 assert arr_pad.shape[1] == 4 # aspect_ratio != 1.0 arr = np.zeros((4, 4), dtype=dtype) arr_pad = ia.pad_to_aspect_ratio(arr, 2.0) assert arr_pad.dtype.type == dtype assert arr_pad.shape[0] == 4 assert arr_pad.shape[1] == 8 arr = np.zeros((4, 4), dtype=dtype) arr_pad = ia.pad_to_aspect_ratio(arr, 0.5) assert arr_pad.dtype.type == dtype assert arr_pad.shape[0] == 8 assert arr_pad.shape[1] == 4 # 3d arr arr = np.zeros((4, 2, 3), dtype=dtype) arr_pad = ia.pad_to_aspect_ratio(arr, 1.0) assert arr_pad.dtype.type == dtype assert arr_pad.shape[0] == 4 assert arr_pad.shape[1] == 4 assert arr_pad.shape[2] == 3 # cval arr = np.zeros((4, 4), dtype=np.uint8) + 128 arr_pad = ia.pad_to_aspect_ratio(arr, 2.0) assert arr_pad.shape[0] == 4 assert arr_pad.shape[1] == 8 assert np.max(arr_pad[:, 0:2]) == 0 assert np.max(arr_pad[:, -2:]) == 0 assert np.max(arr_pad[:, 2:-2]) == 128 arr = 
np.zeros((4, 4), dtype=np.uint8) + 128 arr_pad = ia.pad_to_aspect_ratio(arr, 2.0, cval=10) assert arr_pad.shape[0] == 4 assert arr_pad.shape[1] == 8 assert np.max(arr_pad[:, 0:2]) == 10 assert np.max(arr_pad[:, -2:]) == 10 assert np.max(arr_pad[:, 2:-2]) == 128 arr = np.zeros((4, 4), dtype=np.float32) + 0.5 arr_pad = ia.pad_to_aspect_ratio(arr, 2.0, cval=0.0) assert arr_pad.shape[0] == 4 assert arr_pad.shape[1] == 8 assert 0 - 1e-6 <= np.max(arr_pad[:, 0:2]) <= 0 + 1e-6 assert 0 - 1e-6 <= np.max(arr_pad[:, -2:]) <= 0 + 1e-6 assert 0.5 - 1e-6 <= np.max(arr_pad[:, 2:-2]) <= 0.5 + 1e-6 arr = np.zeros((4, 4), dtype=np.float32) + 0.5 arr_pad = ia.pad_to_aspect_ratio(arr, 2.0, cval=0.1) assert arr_pad.shape[0] == 4 assert arr_pad.shape[1] == 8 assert 0.1 - 1e-6 <= np.max(arr_pad[:, 0:2]) <= 0.1 + 1e-6 assert 0.1 - 1e-6 <= np.max(arr_pad[:, -2:]) <= 0.1 + 1e-6 assert 0.5 - 1e-6 <= np.max(arr_pad[:, 2:-2]) <= 0.5 + 1e-6 # mode arr = np.zeros((4, 4), dtype=np.uint8) + 128 arr[1:3, 1:3] = 200 arr_pad = ia.pad_to_aspect_ratio(arr, 2.0, mode="maximum") assert arr_pad.shape[0] == 4 assert arr_pad.shape[1] == 8 assert np.max(arr_pad[0:1, 0:2]) == 128 assert np.max(arr_pad[1:3, 0:2]) == 200 assert np.max(arr_pad[3:, 0:2]) == 128 assert np.max(arr_pad[0:1, -2:]) == 128 assert np.max(arr_pad[1:3, -2:]) == 200 assert np.max(arr_pad[3:, -2:]) == 128 # TODO add tests for return_pad_values=True def test_pool(): # basic functionality with uint8, int32, float32 arr = np.uint8([ [0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15] ]) arr_pooled = ia.pool(arr, 2, np.average) assert arr_pooled.shape == (2, 2) assert arr_pooled.dtype == arr.dtype.type assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5])) assert arr_pooled[0, 1] == int(np.average([2, 3, 6, 7])) assert arr_pooled[1, 0] == int(np.average([8, 9, 12, 13])) assert arr_pooled[1, 1] == int(np.average([10, 11, 14, 15])) arr = np.int32([ [0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15] ]) arr_pooled = ia.pool(arr, 2, np.average) assert arr_pooled.shape == (2, 2) assert arr_pooled.dtype == arr.dtype.type assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5])) assert arr_pooled[0, 1] == int(np.average([2, 3, 6, 7])) assert arr_pooled[1, 0] == int(np.average([8, 9, 12, 13])) assert arr_pooled[1, 1] == int(np.average([10, 11, 14, 15])) arr = np.float32([ [0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15] ]) arr_pooled = ia.pool(arr, 2, np.average) assert arr_pooled.shape == (2, 2) assert arr_pooled.dtype == arr.dtype.type assert np.allclose(arr_pooled[0, 0], np.average([0, 1, 4, 5])) assert np.allclose(arr_pooled[0, 1], np.average([2, 3, 6, 7])) assert np.allclose(arr_pooled[1, 0], np.average([8, 9, 12, 13])) assert np.allclose(arr_pooled[1, 1], np.average([10, 11, 14, 15])) # preserve_dtype off arr = np.uint8([ [0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15] ]) arr_pooled = ia.pool(arr, 2, np.average, preserve_dtype=False) assert arr_pooled.shape == (2, 2) assert arr_pooled.dtype == np.float64 assert np.allclose(arr_pooled[0, 0], np.average([0, 1, 4, 5])) assert np.allclose(arr_pooled[0, 1], np.average([2, 3, 6, 7])) assert np.allclose(arr_pooled[1, 0], np.average([8, 9, 12, 13])) assert np.allclose(arr_pooled[1, 1], np.average([10, 11, 14, 15])) # maximum function arr = np.uint8([ [0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15] ]) arr_pooled = ia.pool(arr, 2, np.max) assert arr_pooled.shape == (2, 2) assert arr_pooled.dtype == arr.dtype.type assert arr_pooled[0, 0] == int(np.max([0, 1, 4, 5])) assert 
arr_pooled[0, 1] == int(np.max([2, 3, 6, 7])) assert arr_pooled[1, 0] == int(np.max([8, 9, 12, 13])) assert arr_pooled[1, 1] == int(np.max([10, 11, 14, 15])) # 3d array arr = np.uint8([ [0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15] ]) arr = np.tile(arr[..., np.newaxis], (1, 1, 3)) arr_pooled = ia.pool(arr, 2, np.average) assert arr_pooled.shape == (2, 2, 3) assert np.array_equal(arr_pooled[..., 0], arr_pooled[..., 1]) assert np.array_equal(arr_pooled[..., 1], arr_pooled[..., 2]) arr_pooled = arr_pooled[..., 0] assert arr_pooled.dtype == arr.dtype.type assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5])) assert arr_pooled[0, 1] == int(np.average([2, 3, 6, 7])) assert arr_pooled[1, 0] == int(np.average([8, 9, 12, 13])) assert arr_pooled[1, 1] == int(np.average([10, 11, 14, 15])) # block_size per axis arr = np.float32([ [0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15] ]) arr_pooled = ia.pool(arr, (2, 1), np.average) assert arr_pooled.shape == (2, 4) assert arr_pooled.dtype == arr.dtype.type assert np.allclose(arr_pooled[0, 0], np.average([0, 4])) assert np.allclose(arr_pooled[0, 1], np.average([1, 5])) assert np.allclose(arr_pooled[0, 2], np.average([2, 6])) assert np.allclose(arr_pooled[0, 3], np.average([3, 7])) assert np.allclose(arr_pooled[1, 0], np.average([8, 12])) assert np.allclose(arr_pooled[1, 1], np.average([9, 13])) assert np.allclose(arr_pooled[1, 2], np.average([10, 14])) assert np.allclose(arr_pooled[1, 3], np.average([11, 15])) # cval arr = np.uint8([ [0, 1, 2], [4, 5, 6], [8, 9, 10] ]) arr_pooled = ia.pool(arr, 2, np.average) assert arr_pooled.shape == (2, 2) assert arr_pooled.dtype == arr.dtype.type assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5])) assert arr_pooled[0, 1] == int(np.average([2, 0, 6, 0])) assert arr_pooled[1, 0] == int(np.average([8, 9, 0, 0])) assert arr_pooled[1, 1] == int(np.average([10, 0, 0, 0])) arr = np.uint8([ [0, 1], [4, 5] ]) arr_pooled = ia.pool(arr, (4, 1), np.average) assert arr_pooled.shape == (1, 2) assert arr_pooled.dtype == arr.dtype.type assert arr_pooled[0, 0] == int(np.average([0, 4, 0, 0])) assert arr_pooled[0, 1] == int(np.average([1, 5, 0, 0])) arr = np.uint8([ [0, 1, 2], [4, 5, 6], [8, 9, 10] ]) arr_pooled = ia.pool(arr, 2, np.average, cval=22) assert arr_pooled.shape == (2, 2) assert arr_pooled.dtype == arr.dtype.type assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5])) assert arr_pooled[0, 1] == int(np.average([2, 22, 6, 22])) assert arr_pooled[1, 0] == int(np.average([8, 9, 22, 22])) assert arr_pooled[1, 1] == int(np.average([10, 22, 22, 22])) def test_avg_pool(): # very basic test, as avg_pool() just calls pool(), which is tested in test_pool() arr = np.uint8([ [0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15] ]) arr_pooled = ia.avg_pool(arr, 2) assert arr_pooled.shape == (2, 2) assert arr_pooled.dtype == arr.dtype.type assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5])) assert arr_pooled[0, 1] == int(np.average([2, 3, 6, 7])) assert arr_pooled[1, 0] == int(np.average([8, 9, 12, 13])) assert arr_pooled[1, 1] == int(np.average([10, 11, 14, 15])) def test_max_pool(): # very basic test, as avg_pool() just calls pool(), which is tested in test_pool() arr = np.uint8([ [0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15] ]) arr_pooled = ia.max_pool(arr, 2) assert arr_pooled.shape == (2, 2) assert arr_pooled.dtype == arr.dtype.type assert arr_pooled[0, 0] == int(np.max([0, 1, 4, 5])) assert arr_pooled[0, 1] == int(np.max([2, 3, 6, 7])) assert arr_pooled[1, 0] == 
int(np.max([8, 9, 12, 13])) assert arr_pooled[1, 1] == int(np.max([10, 11, 14, 15])) def test_draw_grid(): image = np.zeros((2, 2, 3), dtype=np.uint8) image[0, 0] = 64 image[0, 1] = 128 image[1, 0] = 192 image[1, 1] = 256 grid = ia.draw_grid([image], rows=1, cols=1) assert np.array_equal(grid, image) grid = ia.draw_grid(np.uint8([image]), rows=1, cols=1) assert np.array_equal(grid, image) grid = ia.draw_grid([image, image, image, image], rows=2, cols=2) expected = np.vstack([ np.hstack([image, image]), np.hstack([image, image]) ]) assert np.array_equal(grid, expected) grid = ia.draw_grid([image, image], rows=1, cols=2) expected = np.hstack([image, image]) assert np.array_equal(grid, expected) grid = ia.draw_grid([image, image, image, image], rows=2, cols=None) expected = np.vstack([ np.hstack([image, image]), np.hstack([image, image]) ]) assert np.array_equal(grid, expected) grid = ia.draw_grid([image, image, image, image], rows=None, cols=2) expected = np.vstack([ np.hstack([image, image]), np.hstack([image, image]) ]) assert np.array_equal(grid, expected) grid = ia.draw_grid([image, image, image, image], rows=None, cols=None) expected = np.vstack([ np.hstack([image, image]), np.hstack([image, image]) ]) assert np.array_equal(grid, expected) def test_Keypoint(): eps = 1e-8 # x/y/x_int/y_int kp = ia.Keypoint(y=1, x=2) assert kp.y == 1 assert kp.x == 2 assert kp.y_int == 1 assert kp.x_int == 2 kp = ia.Keypoint(y=1.1, x=2.7) assert 1.1 - eps < kp.y < 1.1 + eps assert 2.7 - eps < kp.x < 2.7 + eps assert kp.y_int == 1 assert kp.x_int == 3 # project kp = ia.Keypoint(y=1, x=2) kp2 = kp.project((10, 10), (10, 10)) assert kp2.y == 1 assert kp2.x == 2 kp2 = kp.project((10, 10), (20, 10)) assert kp2.y == 2 assert kp2.x == 2 kp2 = kp.project((10, 10), (10, 20)) assert kp2.y == 1 assert kp2.x == 4 kp2 = kp.project((10, 10), (20, 20)) assert kp2.y == 2 assert kp2.x == 4 # shift kp = ia.Keypoint(y=1, x=2) kp2 = kp.shift(y=1) assert kp2.y == 2 assert kp2.x == 2 kp2 = kp.shift(y=-1) assert kp2.y == 0 assert kp2.x == 2 kp2 = kp.shift(x=1) assert kp2.y == 1 assert kp2.x == 3 kp2 = kp.shift(x=-1) assert kp2.y == 1 assert kp2.x == 1 kp2 = kp.shift(y=1, x=2) assert kp2.y == 2 assert kp2.x == 4 # __repr__ / __str_ kp = ia.Keypoint(y=1, x=2) assert kp.__repr__() == kp.__str__() == "Keypoint(x=2.00000000, y=1.00000000)" kp = ia.Keypoint(y=1.2, x=2.7) assert kp.__repr__() == kp.__str__() == "Keypoint(x=2.70000000, y=1.20000000)" def test_KeypointsOnImage(): eps = 1e-8 kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=4)] # height/width kpi = ia.KeypointsOnImage(keypoints=kps, shape=(10, 20, 3)) assert kpi.height == 10 assert kpi.width == 20 # image instead of shape kpi = ia.KeypointsOnImage(keypoints=kps, shape=np.zeros((10, 20, 3), dtype=np.uint8)) assert kpi.shape == (10, 20, 3) # on() kpi2 = kpi.on((10, 20, 3)) assert all([kp_i.x == kp_j.x and kp_i.y == kp_j.y for kp_i, kp_j in zip(kpi.keypoints, kpi2.keypoints)]) kpi2 = kpi.on((20, 40, 3)) assert kpi2.keypoints[0].x == 2 assert kpi2.keypoints[0].y == 4 assert kpi2.keypoints[1].x == 6 assert kpi2.keypoints[1].y == 8 kpi2 = kpi.on(np.zeros((20, 40, 3), dtype=np.uint8)) assert kpi2.keypoints[0].x == 2 assert kpi2.keypoints[0].y == 4 assert kpi2.keypoints[1].x == 6 assert kpi2.keypoints[1].y == 8 # draw_on_image kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3)) image = np.zeros((5, 5, 3), dtype=np.uint8) + 10 kps_mask = np.zeros(image.shape[0:2], dtype=np.bool) kps_mask[2, 1] = 1 kps_mask[4, 3] = 1 image_kps = kpi.draw_on_image(image, color=[0, 255, 0], 
size=1, copy=True, raise_if_out_of_image=False) assert np.all(image_kps[kps_mask] == [0, 255, 0]) assert np.all(image_kps[~kps_mask] == [10, 10, 10]) image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=3, copy=True, raise_if_out_of_image=False) kps_mask_size3 = np.copy(kps_mask) kps_mask_size3[2-1:2+1+1, 1-1:1+1+1] = 1 kps_mask_size3[4-1:4+1+1, 3-1:3+1+1] = 1 assert np.all(image_kps[kps_mask_size3] == [0, 255, 0]) assert np.all(image_kps[~kps_mask_size3] == [10, 10, 10]) image_kps = kpi.draw_on_image(image, color=[0, 0, 255], size=1, copy=True, raise_if_out_of_image=False) assert np.all(image_kps[kps_mask] == [0, 0, 255]) assert np.all(image_kps[~kps_mask] == [10, 10, 10]) image_kps = kpi.draw_on_image(image, color=255, size=1, copy=True, raise_if_out_of_image=False) assert np.all(image_kps[kps_mask] == [255, 255, 255]) assert np.all(image_kps[~kps_mask] == [10, 10, 10]) image2 = np.copy(image) image_kps = kpi.draw_on_image(image2, color=[0, 255, 0], size=1, copy=False, raise_if_out_of_image=False) assert np.all(image2 == image_kps) assert np.all(image_kps[kps_mask] == [0, 255, 0]) assert np.all(image_kps[~kps_mask] == [10, 10, 10]) assert np.all(image2[kps_mask] == [0, 255, 0]) assert np.all(image2[~kps_mask] == [10, 10, 10]) kpi = ia.KeypointsOnImage(keypoints=kps + [ia.Keypoint(x=100, y=100)], shape=(5, 5, 3)) image = np.zeros((5, 5, 3), dtype=np.uint8) + 10 kps_mask = np.zeros(image.shape[0:2], dtype=np.bool) kps_mask[2, 1] = 1 kps_mask[4, 3] = 1 image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=False) assert np.all(image_kps[kps_mask] == [0, 255, 0]) assert np.all(image_kps[~kps_mask] == [10, 10, 10]) kpi = ia.KeypointsOnImage(keypoints=kps + [ia.Keypoint(x=100, y=100)], shape=(5, 5, 3)) image = np.zeros((5, 5, 3), dtype=np.uint8) + 10 got_exception = False try: image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=True) assert np.all(image_kps[kps_mask] == [0, 255, 0]) assert np.all(image_kps[~kps_mask] == [10, 10, 10]) except Exception: got_exception = True assert got_exception kpi = ia.KeypointsOnImage(keypoints=kps + [ia.Keypoint(x=5, y=5)], shape=(5, 5, 3)) image = np.zeros((5, 5, 3), dtype=np.uint8) + 10 kps_mask = np.zeros(image.shape[0:2], dtype=np.bool) kps_mask[2, 1] = 1 kps_mask[4, 3] = 1 image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=False) assert np.all(image_kps[kps_mask] == [0, 255, 0]) assert np.all(image_kps[~kps_mask] == [10, 10, 10]) got_exception = False try: image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=True) assert np.all(image_kps[kps_mask] == [0, 255, 0]) assert np.all(image_kps[~kps_mask] == [10, 10, 10]) except Exception: got_exception = True assert got_exception # shift kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3)) kpi2 = kpi.shift(x=0, y=0) assert kpi2.keypoints[0].x == kpi.keypoints[0].x assert kpi2.keypoints[0].y == kpi.keypoints[0].y assert kpi2.keypoints[1].x == kpi.keypoints[1].x assert kpi2.keypoints[1].y == kpi.keypoints[1].y kpi2 = kpi.shift(x=1) assert kpi2.keypoints[0].x == kpi.keypoints[0].x + 1 assert kpi2.keypoints[0].y == kpi.keypoints[0].y assert kpi2.keypoints[1].x == kpi.keypoints[1].x + 1 assert kpi2.keypoints[1].y == kpi.keypoints[1].y kpi2 = kpi.shift(x=-1) assert kpi2.keypoints[0].x == kpi.keypoints[0].x - 1 assert kpi2.keypoints[0].y == kpi.keypoints[0].y assert kpi2.keypoints[1].x == kpi.keypoints[1].x - 1 assert 
kpi2.keypoints[1].y == kpi.keypoints[1].y kpi2 = kpi.shift(y=1) assert kpi2.keypoints[0].x == kpi.keypoints[0].x assert kpi2.keypoints[0].y == kpi.keypoints[0].y + 1 assert kpi2.keypoints[1].x == kpi.keypoints[1].x assert kpi2.keypoints[1].y == kpi.keypoints[1].y + 1 kpi2 = kpi.shift(y=-1) assert kpi2.keypoints[0].x == kpi.keypoints[0].x assert kpi2.keypoints[0].y == kpi.keypoints[0].y - 1 assert kpi2.keypoints[1].x == kpi.keypoints[1].x assert kpi2.keypoints[1].y == kpi.keypoints[1].y - 1 kpi2 = kpi.shift(x=1, y=2) assert kpi2.keypoints[0].x == kpi.keypoints[0].x + 1 assert kpi2.keypoints[0].y == kpi.keypoints[0].y + 2 assert kpi2.keypoints[1].x == kpi.keypoints[1].x + 1 assert kpi2.keypoints[1].y == kpi.keypoints[1].y + 2 # get_coords_array kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3)) observed = kpi.get_coords_array() expected = np.float32([ [1, 2], [3, 4] ]) assert np.allclose(observed, expected) # from_coords_array arr = np.float32([ [1, 2], [3, 4] ]) kpi = ia.KeypointsOnImage.from_coords_array(arr, shape=(5, 5, 3)) assert 1 - eps < kpi.keypoints[0].x < 1 + eps assert 2 - eps < kpi.keypoints[0].y < 2 + eps assert 3 - eps < kpi.keypoints[1].x < 3 + eps assert 4 - eps < kpi.keypoints[1].y < 4 + eps # to_keypoint_image kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3)) image = kpi.to_keypoint_image(size=1) image_size3 = kpi.to_keypoint_image(size=3) kps_mask = np.zeros((5, 5, 2), dtype=np.bool) kps_mask[2, 1, 0] = 1 kps_mask[4, 3, 1] = 1 kps_mask_size3 = np.zeros_like(kps_mask) kps_mask_size3[2-1:2+1+1, 1-1:1+1+1, 0] = 1 kps_mask_size3[4-1:4+1+1, 3-1:3+1+1, 1] = 1 assert np.all(image[kps_mask] == 255) assert np.all(image[~kps_mask] == 0) assert np.all(image_size3[kps_mask] == 255) assert np.all(image_size3[kps_mask_size3] >= 128) assert np.all(image_size3[~kps_mask_size3] == 0) # from_keypoint_image() kps_image = np.zeros((5, 5, 2), dtype=np.uint8) kps_image[2, 1, 0] = 255 kps_image[4, 3, 1] = 255 kpi2 = ia.KeypointsOnImage.from_keypoint_image(kps_image, nb_channels=3) assert kpi2.shape == (5, 5, 3) assert len(kpi2.keypoints) == 2 assert kpi2.keypoints[0].y == 2 assert kpi2.keypoints[0].x == 1 assert kpi2.keypoints[1].y == 4 assert kpi2.keypoints[1].x == 3 kps_image = np.zeros((5, 5, 2), dtype=np.uint8) kps_image[2, 1, 0] = 255 kps_image[4, 3, 1] = 10 kpi2 = ia.KeypointsOnImage.from_keypoint_image(kps_image, if_not_found_coords={"x": -1, "y": -2}, threshold=20, nb_channels=3) assert kpi2.shape == (5, 5, 3) assert len(kpi2.keypoints) == 2 assert kpi2.keypoints[0].y == 2 assert kpi2.keypoints[0].x == 1 assert kpi2.keypoints[1].y == -2 assert kpi2.keypoints[1].x == -1 kps_image = np.zeros((5, 5, 2), dtype=np.uint8) kps_image[2, 1, 0] = 255 kps_image[4, 3, 1] = 10 kpi2 = ia.KeypointsOnImage.from_keypoint_image(kps_image, if_not_found_coords=(-1, -2), threshold=20, nb_channels=3) assert kpi2.shape == (5, 5, 3) assert len(kpi2.keypoints) == 2 assert kpi2.keypoints[0].y == 2 assert kpi2.keypoints[0].x == 1 assert kpi2.keypoints[1].y == -2 assert kpi2.keypoints[1].x == -1 kps_image = np.zeros((5, 5, 2), dtype=np.uint8) kps_image[2, 1, 0] = 255 kps_image[4, 3, 1] = 10 kpi2 = ia.KeypointsOnImage.from_keypoint_image(kps_image, if_not_found_coords=None, threshold=20, nb_channels=3) assert kpi2.shape == (5, 5, 3) assert len(kpi2.keypoints) == 1 assert kpi2.keypoints[0].y == 2 assert kpi2.keypoints[0].x == 1 got_exception = False try: kps_image = np.zeros((5, 5, 2), dtype=np.uint8) kps_image[2, 1, 0] = 255 kps_image[4, 3, 1] = 10 _ = 
ia.KeypointsOnImage.from_keypoint_image(kps_image, if_not_found_coords="exception-please", threshold=20, nb_channels=3) except Exception as exc: assert "Expected if_not_found_coords to be" in str(exc) got_exception = True assert got_exception # copy() kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=4)] kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3)) kpi2 = kpi.copy() assert kpi2.keypoints[0].x == 1 assert kpi2.keypoints[0].y == 2 assert kpi2.keypoints[1].x == 3 assert kpi2.keypoints[1].y == 4 kps[0].x = 100 assert kpi2.keypoints[0].x == 100 assert kpi2.keypoints[0].y == 2 assert kpi2.keypoints[1].x == 3 assert kpi2.keypoints[1].y == 4 # deepcopy() kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=4)] kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3)) kpi2 = kpi.deepcopy() assert kpi2.keypoints[0].x == 1 assert kpi2.keypoints[0].y == 2 assert kpi2.keypoints[1].x == 3 assert kpi2.keypoints[1].y == 4 kps[0].x = 100 assert kpi2.keypoints[0].x == 1 assert kpi2.keypoints[0].y == 2 assert kpi2.keypoints[1].x == 3 assert kpi2.keypoints[1].y == 4 # repr/str kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=4)] kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3)) expected = "KeypointsOnImage([Keypoint(x=1.00000000, y=2.00000000), Keypoint(x=3.00000000, y=4.00000000)], " \ + "shape=(5, 5, 3))" assert kpi.__repr__() == kpi.__str__() == expected def test_BoundingBox(): eps = 1e-8 # properties with ints bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None) assert bb.y1_int == 10 assert bb.x1_int == 20 assert bb.y2_int == 30 assert bb.x2_int == 40 assert bb.width == 40 - 20 assert bb.height == 30 - 10 center_x = bb.x1 + (bb.x2 - bb.x1)/2 center_y = bb.y1 + (bb.y2 - bb.y1)/2 assert center_x - eps < bb.center_x < center_x + eps assert center_y - eps < bb.center_y < center_y + eps # wrong order of y1/y2, x1/x2 bb = ia.BoundingBox(y1=30, x1=40, y2=10, x2=20, label=None) assert bb.y1_int == 10 assert bb.x1_int == 20 assert bb.y2_int == 30 assert bb.x2_int == 40 # properties with floats bb = ia.BoundingBox(y1=10.1, x1=20.1, y2=30.9, x2=40.9, label=None) assert bb.y1_int == 10 assert bb.x1_int == 20 assert bb.y2_int == 31 assert bb.x2_int == 41 assert bb.width == 40.9 - 20.1 assert bb.height == 30.9 - 10.1 center_x = bb.x1 + (bb.x2 - bb.x1)/2 center_y = bb.y1 + (bb.y2 - bb.y1)/2 assert center_x - eps < bb.center_x < center_x + eps assert center_y - eps < bb.center_y < center_y + eps # area bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None) assert bb.area == (30-10) * (40-20) # project bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None) bb2 = bb.project((10, 10), (10, 10)) assert 10 - eps < bb2.y1 < 10 + eps assert 20 - eps < bb2.x1 < 20 + eps assert 30 - eps < bb2.y2 < 30 + eps assert 40 - eps < bb2.x2 < 40 + eps bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None) bb2 = bb.project((10, 10), (20, 20)) assert 10*2 - eps < bb2.y1 < 10*2 + eps assert 20*2 - eps < bb2.x1 < 20*2 + eps assert 30*2 - eps < bb2.y2 < 30*2 + eps assert 40*2 - eps < bb2.x2 < 40*2 + eps bb2 = bb.project((10, 10), (5, 5)) assert 10*0.5 - eps < bb2.y1 < 10*0.5 + eps assert 20*0.5 - eps < bb2.x1 < 20*0.5 + eps assert 30*0.5 - eps < bb2.y2 < 30*0.5 + eps assert 40*0.5 - eps < bb2.x2 < 40*0.5 + eps bb2 = bb.project((10, 10), (10, 20)) assert 10*1 - eps < bb2.y1 < 10*1 + eps assert 20*2 - eps < bb2.x1 < 20*2 + eps assert 30*1 - eps < bb2.y2 < 30*1 + eps assert 40*2 - eps < bb2.x2 < 40*2 + eps bb2 = bb.project((10, 10), (20, 10)) assert 10*2 - eps < bb2.y1 < 10*2 + eps assert 20*1 - eps < bb2.x1 
< 20*1 + eps assert 30*2 - eps < bb2.y2 < 30*2 + eps assert 40*1 - eps < bb2.x2 < 40*1 + eps # extend bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None) bb2 = bb.extend(all_sides=1) assert bb2.y1 == 10-1 assert bb2.y2 == 30+1 assert bb2.x1 == 20-1 assert bb2.x2 == 40+1 bb2 = bb.extend(all_sides=-1) assert bb2.y1 == 10-(-1) assert bb2.y2 == 30+(-1) assert bb2.x1 == 20-(-1) assert bb2.x2 == 40+(-1) bb2 = bb.extend(top=1) assert bb2.y1 == 10-1 assert bb2.y2 == 30+0 assert bb2.x1 == 20-0 assert bb2.x2 == 40+0 bb2 = bb.extend(right=1) assert bb2.y1 == 10-0 assert bb2.y2 == 30+0 assert bb2.x1 == 20-0 assert bb2.x2 == 40+1 bb2 = bb.extend(bottom=1) assert bb2.y1 == 10-0 assert bb2.y2 == 30+1 assert bb2.x1 == 20-0 assert bb2.x2 == 40+0 bb2 = bb.extend(left=1) assert bb2.y1 == 10-0 assert bb2.y2 == 30+0 assert bb2.x1 == 20-1 assert bb2.x2 == 40+0 # intersection bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None) bb2 = ia.BoundingBox(y1=10, x1=39, y2=30, x2=59, label=None) bb_inter = bb1.intersection(bb2) assert bb_inter.x1 == 39 assert bb_inter.x2 == 40 assert bb_inter.y1 == 10 assert bb_inter.y2 == 30 bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None) bb2 = ia.BoundingBox(y1=10, x1=41, y2=30, x2=61, label=None) bb_inter = bb1.intersection(bb2, default=False) assert bb_inter is False # union bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None) bb2 = ia.BoundingBox(y1=10, x1=39, y2=30, x2=59, label=None) bb_union = bb1.union(bb2) assert bb_union.x1 == 20 assert bb_union.x2 == 59 assert bb_union.y1 == 10 assert bb_union.y2 == 30 # iou bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None) bb2 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None) iou = bb1.iou(bb2) assert 1.0 - eps < iou < 1.0 + eps bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None) bb2 = ia.BoundingBox(y1=10, x1=41, y2=30, x2=61, label=None) iou = bb1.iou(bb2) assert 0.0 - eps < iou < 0.0 + eps bb1 = ia.BoundingBox(y1=10, x1=10, y2=20, x2=20, label=None) bb2 = ia.BoundingBox(y1=15, x1=15, y2=25, x2=25, label=None) iou = bb1.iou(bb2) area_union = 10 * 10 + 10 * 10 - 5 * 5 area_intersection = 5 * 5 iou_expected = area_intersection / area_union assert iou_expected - eps < iou < iou_expected + eps # is_fully_within_image bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None) assert bb.is_fully_within_image((100, 100, 3)) is True assert bb.is_fully_within_image((20, 100, 3)) is False assert bb.is_fully_within_image((100, 30, 3)) is False assert bb.is_fully_within_image((1, 1, 3)) is False # is_partly_within_image bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None) assert bb.is_partly_within_image((100, 100, 3)) is True assert bb.is_partly_within_image((20, 100, 3)) is True assert bb.is_partly_within_image((100, 30, 3)) is True assert bb.is_partly_within_image((1, 1, 3)) is False # is_out_of_image() bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None) assert bb.is_out_of_image((100, 100, 3), partly=True, fully=True) is False assert bb.is_out_of_image((100, 100, 3), partly=False, fully=True) is False assert bb.is_out_of_image((100, 100, 3), partly=True, fully=False) is False assert bb.is_out_of_image((20, 100, 3), partly=True, fully=True) is True assert bb.is_out_of_image((20, 100, 3), partly=False, fully=True) is False assert bb.is_out_of_image((20, 100, 3), partly=True, fully=False) is True assert bb.is_out_of_image((100, 30, 3), partly=True, fully=True) is True assert bb.is_out_of_image((100, 30, 3), partly=False, fully=True) is False assert 
bb.is_out_of_image((100, 30, 3), partly=True, fully=False) is True assert bb.is_out_of_image((1, 1, 3), partly=True, fully=True) is True assert bb.is_out_of_image((1, 1, 3), partly=False, fully=True) is True assert bb.is_out_of_image((1, 1, 3), partly=True, fully=False) is False # cut_out_of_image bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None) bb_cut = bb.cut_out_of_image((100, 100, 3)) eps = np.finfo(np.float32).eps assert bb_cut.y1 == 10 assert bb_cut.x1 == 20 assert bb_cut.y2 == 30 assert bb_cut.x2 == 40 bb_cut = bb.cut_out_of_image(np.zeros((100, 100, 3), dtype=np.uint8)) assert bb_cut.y1 == 10 assert bb_cut.x1 == 20 assert bb_cut.y2 == 30 assert bb_cut.x2 == 40 bb_cut = bb.cut_out_of_image((20, 100, 3)) assert bb_cut.y1 == 10 assert bb_cut.x1 == 20 assert 20 - 2*eps < bb_cut.y2 < 20 assert bb_cut.x2 == 40 bb_cut = bb.cut_out_of_image((100, 30, 3)) assert bb_cut.y1 == 10 assert bb_cut.x1 == 20 assert bb_cut.y2 == 30 assert 30 - 2*eps < bb_cut.x2 < 30 # shift bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None) bb_top = bb.shift(top=0) bb_right = bb.shift(right=0) bb_bottom = bb.shift(bottom=0) bb_left = bb.shift(left=0) assert bb_top.y1 == 10 assert bb_top.x1 == 20 assert bb_top.y2 == 30 assert bb_top.x2 == 40 assert bb_right.y1 == 10 assert bb_right.x1 == 20 assert bb_right.y2 == 30 assert bb_right.x2 == 40 assert bb_bottom.y1 == 10 assert bb_bottom.x1 == 20 assert bb_bottom.y2 == 30 assert bb_bottom.x2 == 40 assert bb_left.y1 == 10 assert bb_left.x1 == 20 assert bb_left.y2 == 30 assert bb_left.x2 == 40 bb_top = bb.shift(top=1) bb_right = bb.shift(right=1) bb_bottom = bb.shift(bottom=1) bb_left = bb.shift(left=1) assert bb_top.y1 == 10+1 assert bb_top.x1 == 20 assert bb_top.y2 == 30+1 assert bb_top.x2 == 40 assert bb_right.y1 == 10 assert bb_right.x1 == 20-1 assert bb_right.y2 == 30 assert bb_right.x2 == 40-1 assert bb_bottom.y1 == 10-1 assert bb_bottom.x1 == 20 assert bb_bottom.y2 == 30-1 assert bb_bottom.x2 == 40 assert bb_left.y1 == 10 assert bb_left.x1 == 20+1 assert bb_left.y2 == 30 assert bb_left.x2 == 40+1 bb_top = bb.shift(top=-1) bb_right = bb.shift(right=-1) bb_bottom = bb.shift(bottom=-1) bb_left = bb.shift(left=-1) assert bb_top.y1 == 10-1 assert bb_top.x1 == 20 assert bb_top.y2 == 30-1 assert bb_top.x2 == 40 assert bb_right.y1 == 10 assert bb_right.x1 == 20+1 assert bb_right.y2 == 30 assert bb_right.x2 == 40+1 assert bb_bottom.y1 == 10+1 assert bb_bottom.x1 == 20 assert bb_bottom.y2 == 30+1 assert bb_bottom.x2 == 40 assert bb_left.y1 == 10 assert bb_left.x1 == 20-1 assert bb_left.y2 == 30 assert bb_left.x2 == 40-1 bb_mix = bb.shift(top=1, bottom=2, left=3, right=4) assert bb_mix.y1 == 10+1-2 assert bb_mix.x1 == 20+3-4 assert bb_mix.y2 == 30+3-4 assert bb_mix.x2 == 40+1-2 # draw_on_image() image = np.zeros((10, 10, 3), dtype=np.uint8) bb = ia.BoundingBox(y1=1, x1=1, y2=3, x2=3, label=None) bb_mask = np.zeros(image.shape[0:2], dtype=np.bool) bb_mask[1:3+1, 1] = True bb_mask[1:3+1, 3] = True bb_mask[1, 1:3+1] = True bb_mask[3, 1:3+1] = True image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False) assert np.all(image_bb[bb_mask] == [255, 255, 255]) assert np.all(image_bb[~bb_mask] == [0, 0, 0]) assert np.all(image == 0) image_bb = bb.draw_on_image(image, color=[255, 0, 0], alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False) assert np.all(image_bb[bb_mask] == [255, 0, 0]) assert np.all(image_bb[~bb_mask] == [0, 0, 0]) image_bb = bb.draw_on_image(image, color=128, alpha=1.0, 
thickness=1, copy=True, raise_if_out_of_image=False) assert np.all(image_bb[bb_mask] == [128, 128, 128]) assert np.all(image_bb[~bb_mask] == [0, 0, 0]) image_bb = bb.draw_on_image(image+100, color=[200, 200, 200], alpha=0.5, thickness=1, copy=True, raise_if_out_of_image=False) assert np.all(image_bb[bb_mask] == [150, 150, 150]) assert np.all(image_bb[~bb_mask] == [100, 100, 100]) image_bb = bb.draw_on_image((image+100).astype(np.float32), color=[200, 200, 200], alpha=0.5, thickness=1, copy=True, raise_if_out_of_image=False) assert np.sum(np.abs((image_bb - [150, 150, 150])[bb_mask])) < 0.1 assert np.sum(np.abs((image_bb - [100, 100, 100])[~bb_mask])) < 0.1 image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=False, raise_if_out_of_image=False) assert np.all(image_bb[bb_mask] == [255, 255, 255]) assert np.all(image_bb[~bb_mask] == [0, 0, 0]) assert np.all(image[bb_mask] == [255, 255, 255]) assert np.all(image[~bb_mask] == [0, 0, 0]) image = np.zeros_like(image) bb = ia.BoundingBox(y1=-1, x1=-1, y2=2, x2=2, label=None) bb_mask = np.zeros(image.shape[0:2], dtype=np.bool) bb_mask[2, 0:3] = True bb_mask[0:3, 2] = True image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False) assert np.all(image_bb[bb_mask] == [255, 255, 255]) assert np.all(image_bb[~bb_mask] == [0, 0, 0]) bb = ia.BoundingBox(y1=1, x1=1, y2=3, x2=3, label=None) bb_mask = np.zeros(image.shape[0:2], dtype=np.bool) bb_mask[0:5, 0:5] = True bb_mask[2, 2] = False image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=2, copy=True, raise_if_out_of_image=False) assert np.all(image_bb[bb_mask] == [255, 255, 255]) assert np.all(image_bb[~bb_mask] == [0, 0, 0]) bb = ia.BoundingBox(y1=-1, x1=-1, y2=1, x2=1, label=None) bb_mask = np.zeros(image.shape[0:2], dtype=np.bool) bb_mask[0:1+1, 1] = True bb_mask[1, 0:1+1] = True image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False) assert np.all(image_bb[bb_mask] == [255, 255, 255]) assert np.all(image_bb[~bb_mask] == [0, 0, 0]) bb = ia.BoundingBox(y1=-1, x1=-1, y2=1, x2=1, label=None) got_exception = False try: _ = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=True) except Exception: got_exception = True assert got_exception is False bb = ia.BoundingBox(y1=-5, x1=-5, y2=-1, x2=-1, label=None) got_exception = False try: _ = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=True) except Exception: got_exception = True assert got_exception is True # extract_from_image() image = np.random.RandomState(1234).randint(0, 255, size=(10, 10, 3)) bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None) image_sub = bb.extract_from_image(image) assert np.array_equal(image_sub, image[1:3, 1:3, :]) image = np.random.RandomState(1234).randint(0, 255, size=(10, 10)) bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None) image_sub = bb.extract_from_image(image) assert np.array_equal(image_sub, image[1:3, 1:3]) image = np.random.RandomState(1234).randint(0, 255, size=(10, 10)) bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None) image_sub = bb.extract_from_image(image) assert np.array_equal(image_sub, image[1:3, 1:3]) image = np.random.RandomState(1234).randint(0, 255, size=(10, 10, 3)) image_pad = np.pad(image, ((0, 1), (0, 1), (0, 0)), mode="constant", constant_values=0) bb = ia.BoundingBox(y1=8, y2=11, x1=8, x2=11, 
label=None) image_sub = bb.extract_from_image(image) assert np.array_equal(image_sub, image_pad[8:11, 8:11, :]) image = np.random.RandomState(1234).randint(0, 255, size=(10, 10, 3)) image_pad = np.pad(image, ((1, 0), (1, 0), (0, 0)), mode="constant", constant_values=0) bb = ia.BoundingBox(y1=-1, y2=3, x1=-1, x2=4, label=None) image_sub = bb.extract_from_image(image) assert np.array_equal(image_sub, image_pad[0:4, 0:5, :]) # to_keypoints() bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None) kps = bb.to_keypoints() assert kps[0].y == 1 assert kps[0].x == 1 assert kps[1].y == 1 assert kps[1].x == 3 assert kps[2].y == 3 assert kps[2].x == 3 assert kps[3].y == 3 assert kps[3].x == 1 # copy() bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label="test") bb2 = bb.copy() assert bb2.y1 == 1 assert bb2.y2 == 3 assert bb2.x1 == 1 assert bb2.x2 == 3 assert bb2.label == "test" bb2 = bb.copy(y1=10, x1=20, y2=30, x2=40, label="test2") assert bb2.y1 == 10 assert bb2.x1 == 20 assert bb2.y2 == 30 assert bb2.x2 == 40 assert bb2.label == "test2" # deepcopy() bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=["test"]) bb2 = bb.deepcopy() assert bb2.y1 == 1 assert bb2.y2 == 3 assert bb2.x1 == 1 assert bb2.x2 == 3 assert bb2.label[0] == "test" # BoundingBox_repr() bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None) assert bb.__repr__() == "BoundingBox(x1=1.0000, y1=1.0000, x2=3.0000, y2=3.0000, label=None)" # test_BoundingBox_str() bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None) assert bb.__str__() == "BoundingBox(x1=1.0000, y1=1.0000, x2=3.0000, y2=3.0000, label=None)" def test_BoundingBoxesOnImage(): reseed() # test height/width bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None) bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45, label=None) bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3)) assert bbsoi.height == 40 assert bbsoi.width == 50 bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None) bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45, label=None) bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=np.zeros((40, 50, 3), dtype=np.uint8)) assert bbsoi.height == 40 assert bbsoi.width == 50 # on() bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None) bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45, label=None) bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=np.zeros((40, 50, 3), dtype=np.uint8)) bbsoi_projected = bbsoi.on((40, 50)) assert bbsoi_projected.bounding_boxes[0].y1 == 10 assert bbsoi_projected.bounding_boxes[0].x1 == 20 assert bbsoi_projected.bounding_boxes[0].y2 == 30 assert bbsoi_projected.bounding_boxes[0].x2 == 40 assert bbsoi_projected.bounding_boxes[1].y1 == 15 assert bbsoi_projected.bounding_boxes[1].x1 == 25 assert bbsoi_projected.bounding_boxes[1].y2 == 35 assert bbsoi_projected.bounding_boxes[1].x2 == 45 bbsoi_projected = bbsoi.on((40*2, 50*2, 3)) assert bbsoi_projected.bounding_boxes[0].y1 == 10*2 assert bbsoi_projected.bounding_boxes[0].x1 == 20*2 assert bbsoi_projected.bounding_boxes[0].y2 == 30*2 assert bbsoi_projected.bounding_boxes[0].x2 == 40*2 assert bbsoi_projected.bounding_boxes[1].y1 == 15*2 assert bbsoi_projected.bounding_boxes[1].x1 == 25*2 assert bbsoi_projected.bounding_boxes[1].y2 == 35*2 assert bbsoi_projected.bounding_boxes[1].x2 == 45*2 bbsoi_projected = bbsoi.on(np.zeros((40*2, 50*2, 3), dtype=np.uint8)) assert bbsoi_projected.bounding_boxes[0].y1 == 10*2 assert bbsoi_projected.bounding_boxes[0].x1 == 20*2 assert bbsoi_projected.bounding_boxes[0].y2 == 30*2 assert bbsoi_projected.bounding_boxes[0].x2 == 40*2 assert 
bbsoi_projected.bounding_boxes[1].y1 == 15*2 assert bbsoi_projected.bounding_boxes[1].x1 == 25*2 assert bbsoi_projected.bounding_boxes[1].y2 == 35*2 assert bbsoi_projected.bounding_boxes[1].x2 == 45*2 # draw_on_image() bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None) bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45, label=None) bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3)) image = bbsoi.draw_on_image(np.zeros(bbsoi.shape, dtype=np.uint8), color=[0, 255, 0], alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False) assert np.all(image[10-1, 20-1, :] == [0, 0, 0]) assert np.all(image[10-1, 20-0, :] == [0, 0, 0]) assert np.all(image[10-0, 20-1, :] == [0, 0, 0]) assert np.all(image[10-0, 20-0, :] == [0, 255, 0]) assert np.all(image[10+1, 20+1, :] == [0, 0, 0]) assert np.all(image[30-1, 40-1, :] == [0, 0, 0]) assert np.all(image[30+1, 40-0, :] == [0, 0, 0]) assert np.all(image[30+0, 40+1, :] == [0, 0, 0]) assert np.all(image[30+0, 40+0, :] == [0, 255, 0]) assert np.all(image[30+1, 40+1, :] == [0, 0, 0]) assert np.all(image[15-1, 25-1, :] == [0, 0, 0]) assert np.all(image[15-1, 25-0, :] == [0, 0, 0]) assert np.all(image[15-0, 25-1, :] == [0, 0, 0]) assert np.all(image[15-0, 25-0, :] == [0, 255, 0]) assert np.all(image[15+1, 25+1, :] == [0, 0, 0]) assert np.all(image[35-1, 45-1, :] == [0, 0, 0]) assert np.all(image[35+1, 45+0, :] == [0, 0, 0]) assert np.all(image[35+0, 45+1, :] == [0, 0, 0]) assert np.all(image[35+0, 45+0, :] == [0, 255, 0]) assert np.all(image[35+1, 45+1, :] == [0, 0, 0]) # remove_out_of_image() bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None) bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None) bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3)) bbsoi_slim = bbsoi.remove_out_of_image(fully=True, partly=True) assert len(bbsoi_slim.bounding_boxes) == 1 assert bbsoi_slim.bounding_boxes[0] == bb1 # cut_out_of_image() bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None) bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None) bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3)) eps = np.finfo(np.float32).eps bbsoi_cut = bbsoi.cut_out_of_image() assert len(bbsoi_cut.bounding_boxes) == 2 assert bbsoi_cut.bounding_boxes[0].y1 == 10 assert bbsoi_cut.bounding_boxes[0].x1 == 20 assert bbsoi_cut.bounding_boxes[0].y2 == 30 assert bbsoi_cut.bounding_boxes[0].x2 == 40 assert bbsoi_cut.bounding_boxes[1].y1 == 15 assert bbsoi_cut.bounding_boxes[1].x1 == 25 assert bbsoi_cut.bounding_boxes[1].y2 == 35 assert 50 - 2*eps < bbsoi_cut.bounding_boxes[1].x2 < 50 # shift() bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None) bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None) bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3)) bbsoi_shifted = bbsoi.shift(right=1) assert len(bbsoi_cut.bounding_boxes) == 2 assert bbsoi_shifted.bounding_boxes[0].y1 == 10 assert bbsoi_shifted.bounding_boxes[0].x1 == 20 - 1 assert bbsoi_shifted.bounding_boxes[0].y2 == 30 assert bbsoi_shifted.bounding_boxes[0].x2 == 40 - 1 assert bbsoi_shifted.bounding_boxes[1].y1 == 15 assert bbsoi_shifted.bounding_boxes[1].x1 == 25 - 1 assert bbsoi_shifted.bounding_boxes[1].y2 == 35 assert bbsoi_shifted.bounding_boxes[1].x2 == 51 - 1 # copy() bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None) bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None) bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3)) bbsoi_copy = bbsoi.copy() assert len(bbsoi.bounding_boxes) == 2 assert bbsoi_copy.bounding_boxes[0].y1 == 
10 assert bbsoi_copy.bounding_boxes[0].x1 == 20 assert bbsoi_copy.bounding_boxes[0].y2 == 30 assert bbsoi_copy.bounding_boxes[0].x2 == 40 assert bbsoi_copy.bounding_boxes[1].y1 == 15 assert bbsoi_copy.bounding_boxes[1].x1 == 25 assert bbsoi_copy.bounding_boxes[1].y2 == 35 assert bbsoi_copy.bounding_boxes[1].x2 == 51 bbsoi.bounding_boxes[0].y1 = 0 assert bbsoi_copy.bounding_boxes[0].y1 == 0 # deepcopy() bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None) bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None) bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3)) bbsoi_copy = bbsoi.deepcopy() assert len(bbsoi.bounding_boxes) == 2 assert bbsoi_copy.bounding_boxes[0].y1 == 10 assert bbsoi_copy.bounding_boxes[0].x1 == 20 assert bbsoi_copy.bounding_boxes[0].y2 == 30 assert bbsoi_copy.bounding_boxes[0].x2 == 40 assert bbsoi_copy.bounding_boxes[1].y1 == 15 assert bbsoi_copy.bounding_boxes[1].x1 == 25 assert bbsoi_copy.bounding_boxes[1].y2 == 35 assert bbsoi_copy.bounding_boxes[1].x2 == 51 bbsoi.bounding_boxes[0].y1 = 0 assert bbsoi_copy.bounding_boxes[0].y1 == 10 # repr() / str() bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None) bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None) bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3)) bb1_expected = "BoundingBox(x1=20.0000, y1=10.0000, x2=40.0000, y2=30.0000, label=None)" bb2_expected = "BoundingBox(x1=25.0000, y1=15.0000, x2=51.0000, y2=35.0000, label=None)" expected = "BoundingBoxesOnImage([%s, %s], shape=(40, 50, 3))" % (bb1_expected, bb2_expected) assert bbsoi.__repr__() == bbsoi.__str__() == expected def test_HeatmapsOnImage_draw(): heatmaps_arr = np.float32([ [0.5, 0.0, 0.0, 0.5], [0.0, 1.0, 1.0, 0.0], [0.0, 1.0, 1.0, 0.0], [0.5, 0.0, 0.0, 0.5], ]) heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(4, 4, 3)) heatmaps_drawn = heatmaps.draw()[0] assert heatmaps_drawn.shape == (4, 4, 3) v1 = heatmaps_drawn[0, 1] v2 = heatmaps_drawn[0, 0] v3 = heatmaps_drawn[1, 1] for y, x in [(0, 1), (0, 2), (1, 0), (1, 3), (2, 0), (2, 3), (3, 1), (3, 2)]: assert np.allclose(heatmaps_drawn[y, x], v1) for y, x in [(0, 0), (0, 3), (3, 0), (3, 3)]: assert np.allclose(heatmaps_drawn[y, x], v2) for y, x in [(1, 1), (1, 2), (2, 1), (2, 2)]: assert np.allclose(heatmaps_drawn[y, x], v3) # size differs from heatmap array size heatmaps_arr = np.float32([ [0.0, 1.0], [0.0, 1.0] ]) heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(2, 2, 3)) heatmaps_drawn = heatmaps.draw(size=(4, 4))[0] assert heatmaps_drawn.shape == (4, 4, 3) v1 = heatmaps_drawn[0, 0] v2 = heatmaps_drawn[0, -1] for y in range(4): for x in range(2): assert np.allclose(heatmaps_drawn[y, x], v1) for y in range(4): for x in range(2, 4): assert np.allclose(heatmaps_drawn[y, x], v2) def test_HeatmapsOnImage_draw_on_image(): heatmaps_arr = np.float32([ [0.0, 1.0], [0.0, 1.0] ]) heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(2, 2, 3)) image = np.uint8([ [0, 0, 0, 255], [0, 0, 0, 255], [0, 0, 0, 255], [0, 0, 0, 255] ]) image = np.tile(image[..., np.newaxis], (1, 1, 3)) heatmaps_drawn = heatmaps.draw_on_image(image, alpha=0.5, cmap=None)[0] assert heatmaps_drawn.shape == (4, 4, 3) assert np.all(heatmaps_drawn[0:4, 0:2, :] == 0) assert np.all(heatmaps_drawn[0:4, 2:3, :] == 128) or np.all(heatmaps_drawn[0:4, 2:3, :] == 127) assert np.all(heatmaps_drawn[0:4, 3:4, :] == 255) or np.all(heatmaps_drawn[0:4, 3:4, :] == 254) image = np.uint8([ [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0] ]) image = np.tile(image[..., np.newaxis], (1, 1, 3)) heatmaps_drawn = 
heatmaps.draw_on_image(image, alpha=0.5, resize="image", cmap=None)[0] assert heatmaps_drawn.shape == (2, 2, 3) assert np.all(heatmaps_drawn[0:2, 0, :] == 0) assert np.all(heatmaps_drawn[0:2, 1, :] == 128) or np.all(heatmaps_drawn[0:2, 1, :] == 127) def test_HeatmapsOnImage_invert(): heatmaps_arr = np.float32([ [0.0, 5.0, 10.0], [-1.0, -2.0, 7.5] ]) expected = np.float32([ [8.0, 3.0, -2.0], [9.0, 10.0, 0.5] ]) # (H, W) heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(2, 3), min_value=-2.0, max_value=10.0) assert np.allclose(heatmaps.get_arr(), heatmaps_arr) assert np.allclose(heatmaps.invert().get_arr(), expected) # (H, W, 1) heatmaps = ia.HeatmapsOnImage(heatmaps_arr[..., np.newaxis], shape=(2, 3), min_value=-2.0, max_value=10.0) assert np.allclose(heatmaps.get_arr(), heatmaps_arr[..., np.newaxis]) assert np.allclose(heatmaps.invert().get_arr(), expected[..., np.newaxis]) def test_HeatmapsOnImage_pad(): heatmaps_arr = np.float32([ [0.0, 1.0], [0.0, 1.0] ]) heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(2, 2, 3)) heatmaps_padded = heatmaps.pad(top=1, right=2, bottom=3, left=4) assert heatmaps_padded.arr_0to1.shape == (2+(1+3), 2+(4+2), 1) assert np.allclose( heatmaps_padded.arr_0to1[:, :, 0], np.float32([ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ]) ) heatmaps_padded = heatmaps.pad(top=1, right=2, bottom=3, left=4, cval=0.5) assert heatmaps_padded.arr_0to1.shape == (2+(1+3), 2+(4+2), 1) assert np.allclose( heatmaps_padded.arr_0to1[:, :, 0], np.float32([ [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.0, 1.0, 0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.0, 1.0, 0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5] ]) ) heatmaps_padded = heatmaps.pad(top=1, right=2, bottom=3, left=4, mode="edge") assert heatmaps_padded.arr_0to1.shape == (2+(1+3), 2+(4+2), 1) assert np.allclose( heatmaps_padded.arr_0to1[:, :, 0], np.float32([ [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0] ]) ) def test_HeatmapsOnImage_avg_pool(): heatmaps_arr = np.float32([ [0.0, 0.0, 0.5, 1.0], [0.0, 0.0, 0.5, 1.0], [0.0, 0.0, 0.5, 1.0], [0.0, 0.0, 0.5, 1.0] ]) heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(4, 4, 3)) heatmaps_pooled = heatmaps.avg_pool(2) assert heatmaps_pooled.arr_0to1.shape == (2, 2, 1) assert np.allclose( heatmaps_pooled.arr_0to1[:, :, 0], np.float32([[0.0, 0.75], [0.0, 0.75]]) ) def test_HeatmapsOnImage_max_pool(): heatmaps_arr = np.float32([ [0.0, 0.0, 0.5, 1.0], [0.0, 0.0, 0.5, 1.0], [0.0, 0.0, 0.5, 1.0], [0.0, 0.0, 0.5, 1.0] ]) heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(4, 4, 3)) heatmaps_pooled = heatmaps.max_pool(2) assert heatmaps_pooled.arr_0to1.shape == (2, 2, 1) assert np.allclose( heatmaps_pooled.arr_0to1[:, :, 0], np.float32([[0.0, 1.0], [0.0, 1.0]]) ) def test_HeatmapsOnImage_scale(): heatmaps_arr = np.float32([ [0.0, 1.0] ]) heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(4, 4, 3)) heatmaps_scaled = heatmaps.scale((4, 4), interpolation="nearest") assert heatmaps_scaled.arr_0to1.shape == (4, 4, 1) assert heatmaps_scaled.arr_0to1.dtype.type == np.float32 assert np.allclose( 
heatmaps_scaled.arr_0to1[:, :, 0], np.float32([ [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0] ]) ) heatmaps_arr = np.float32([ [0.0, 1.0] ]) heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(4, 4, 3)) heatmaps_scaled = heatmaps.scale(2.0, interpolation="nearest") assert heatmaps_scaled.arr_0to1.shape == (2, 4, 1) assert heatmaps_scaled.arr_0to1.dtype.type == np.float32 assert np.allclose( heatmaps_scaled.arr_0to1[:, :, 0], np.float32([ [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0] ]) ) def test_SegmentationMapOnImage_bool(): # Test for #189 (boolean mask inputs into SegmentationMapOnImage not working) arr = np.array([ [0, 0, 0], [0, 1, 0], [0, 0, 0] ], dtype=bool) assert arr.dtype.type == np.bool_ segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3)) observed = segmap.get_arr_int() assert observed.dtype.type == np.int32 assert np.array_equal(arr, observed) arr = np.array([ [0, 0, 0], [0, 1, 0], [0, 0, 0] ], dtype=np.bool) assert arr.dtype.type == np.bool_ segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3)) observed = segmap.get_arr_int() assert observed.dtype.type == np.int32 assert np.array_equal(arr, observed) def test_SegmentationMapOnImage_get_arr_int(): arr = np.int32([ [0, 0, 1], [0, 2, 1], [1, 3, 1] ]) segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3), nb_classes=4) observed = segmap.get_arr_int() assert observed.dtype.type == np.int32 assert np.array_equal(arr, observed) arr_c0 = np.float32([ [0.1, 0.1, 0.1], [0.1, 0.9, 0.1], [0.0, 0.1, 0.0] ]) arr_c1 = np.float32([ [0.2, 1.0, 0.2], [0.2, 0.8, 0.2], [0.0, 0.0, 0.0] ]) arr_c2 = np.float32([ [0.0, 0.0, 0.0], [0.3, 0.7, 0.3], [0.1, 0.0, 0.0001] ]) arr = np.concatenate([ arr_c0[..., np.newaxis], arr_c1[..., np.newaxis], arr_c2[..., np.newaxis] ], axis=2) segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3)) observed = segmap.get_arr_int() expected = np.int32([ [2, 2, 2], [3, 1, 3], [3, 1, 0] ]) assert observed.dtype.type == np.int32 assert np.array_equal(observed, expected) got_exception = False try: _ = segmap.get_arr_int(background_class_id=2) except Exception as exc: assert "The background class id may only be changed if " in str(exc) got_exception = True assert got_exception observed = segmap.get_arr_int(background_threshold=0.21) expected = np.int32([ [0, 2, 0], [3, 1, 3], [0, 0, 0] ]) assert observed.dtype.type == np.int32 assert np.array_equal(observed, expected) def test_SegmentationMapOnImage_draw(): arr = np.int32([ [0, 1, 1], [0, 1, 1], [0, 1, 1] ]) segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3), nb_classes=2) # simple example with 2 classes observed = segmap.draw() col0 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[0] col1 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[1] expected = np.uint8([ [col0, col1, col1], [col0, col1, col1], [col0, col1, col1] ]) assert np.array_equal(observed, expected) # same example, with resizing to 2x the size observed = segmap.draw(size=(6, 6)) expected = ia.imresize_single_image(expected, (6, 6), interpolation="nearest") assert np.array_equal(observed, expected) # custom choice of colors col0 = (10, 10, 10) col1 = (50, 51, 52) observed = segmap.draw(colors=[col0, col1]) expected = np.uint8([ [col0, col1, col1], [col0, col1, col1], [col0, col1, col1] ]) assert np.array_equal(observed, expected) # background_threshold, background_class and foreground mask arr_c0 = np.float32([ [0, 0, 0], [1.0, 0, 0], [0, 0, 0] ]) arr_c1 = np.float32([ [0, 1, 1], [0, 1, 1], [0.1, 1, 1] ]) arr = np.concatenate([ arr_c0[..., np.newaxis], arr_c1[..., 
np.newaxis] ], axis=2) segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3)) observed, observed_fg = segmap.draw(background_threshold=0.01, return_foreground_mask=True) col0 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[0] col1 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[1] col2 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[2] expected = np.uint8([ [col0, col2, col2], [col1, col2, col2], [col2, col2, col2] ]) expected_fg = np.array([ [False, True, True], [True, True, True], [True, True, True] ], dtype=np.bool) assert np.array_equal(observed, expected) assert np.array_equal(observed_fg, expected_fg) # background_threshold, background_class and foreground mask # here with higher threshold so that bottom left pixel switches to background observed, observed_fg = segmap.draw(background_threshold=0.11, return_foreground_mask=True) col0 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[0] col1 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[1] col2 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[2] expected = np.uint8([ [col0, col2, col2], [col1, col2, col2], [col0, col2, col2] ]) expected_fg = np.array([ [False, True, True], [True, True, True], [False, True, True] ], dtype=np.bool) assert np.array_equal(observed, expected) assert np.array_equal(observed_fg, expected_fg) def test_SegmentationMapOnImage_draw_on_image(): arr = np.int32([ [0, 1, 1], [0, 1, 1], [0, 1, 1] ]) segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3), nb_classes=2) image = np.uint8([ [0, 10, 20], [30, 40, 50], [60, 70, 80] ]) image = np.tile(image[:, :, np.newaxis], (1, 1, 3)) # only image visible observed = segmap.draw_on_image(image, alpha=0) assert np.array_equal(observed, image) # only segmap visible observed = segmap.draw_on_image(image, alpha=1.0, draw_background=True) col0 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[0] col1 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[1] expected = np.uint8([ [col0, col1, col1], [col0, col1, col1], [col0, col1, col1] ]) assert
np.array_equal(observed, expected)
numpy.array_equal
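# --- Illustrative sketch (not part of the original test file) ---
# A minimal, hedged re-implementation of the IoU arithmetic that test_BoundingBox
# above checks for the boxes (y1=10, x1=10, y2=20, x2=20) and (y1=15, x1=15, y2=25, x2=25):
# they overlap in a 5x5 square, so iou = 25 / (10*10 + 10*10 - 5*5). The helper name
# _iou_sketch is hypothetical and is not imgaug's own BoundingBox.iou().
def _iou_sketch(y1a, x1a, y2a, x2a, y1b, x1b, y2b, x2b):
    # intersection rectangle; zero if the boxes do not overlap
    inter_h = max(0.0, min(y2a, y2b) - max(y1a, y1b))
    inter_w = max(0.0, min(x2a, x2b) - max(x1a, x1b))
    inter = inter_h * inter_w
    union = (y2a - y1a) * (x2a - x1a) + (y2b - y1b) * (x2b - x1b) - inter
    return inter / union

assert abs(_iou_sketch(10, 10, 20, 20, 15, 15, 25, 25) - 25.0 / 175.0) < 1e-8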
from __future__ import print_function import string import sys import os from collections import deque import pandas as pd import numpy as np import matplotlib.pyplot as plt plt.switch_backend('Agg') import tensorflow as tf import keras keras.backend.image_data_format() from keras import backend as K from keras import regularizers from keras.layers import Input, Dense, Reshape, Lambda, Conv1D, Flatten, MaxPooling1D, UpSampling1D, GlobalMaxPooling1D from keras.layers import LSTM, Bidirectional, BatchNormalization, Dropout, Concatenate, Embedding, Activation, Dot, dot from keras.models import Model, clone_model, Sequential from keras.optimizers import Adam from keras.callbacks import EarlyStopping,ModelCheckpoint from keras.constraints import unitnorm from keras_layer_normalization import LayerNormalization tf.keras.backend.set_floatx('float32') import sklearn as sk from sklearn.base import BaseEstimator, _pprint from sklearn.utils import check_array, check_random_state from sklearn.utils.validation import check_is_fitted from sklearn.preprocessing import StandardScaler from sklearn.manifold import LocallyLinearEmbedding, MDS, Isomap, TSNE from sklearn.decomposition import PCA, IncrementalPCA, KernelPCA, SparsePCA, TruncatedSVD, FastICA, NMF, MiniBatchDictionaryLearning from sklearn.random_projection import GaussianRandomProjection, SparseRandomProjection from sklearn.linear_model import LinearRegression, LogisticRegression from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import KFold, GroupKFold, train_test_split from sklearn.metrics import mean_squared_error, explained_variance_score, mean_absolute_error, median_absolute_error, r2_score from sklearn.metrics import average_precision_score, precision_score, recall_score, f1_score, roc_auc_score, matthews_corrcoef from sklearn.metrics import roc_curve, precision_recall_curve, RocCurveDisplay, PrecisionRecallDisplay from sklearn.metrics import roc_auc_score,accuracy_score,matthews_corrcoef from scipy import stats from scipy.stats import multivariate_normal, kurtosis, skew, pearsonr, spearmanr import processSeq from processSeq import load_seq_1, kmer_dict, load_signal_1, load_seq_2, load_seq_2_kmer, load_seq_altfeature import xgboost import pickle import os.path from optparse import OptionParser import time from timeit import default_timer as timer import utility_1 from utility_1 import mapping_Idx import h5py import json # generate sequences # idx_sel_list: chrom, serial # seq_list: relative positions def generate_sequences(idx_sel_list, gap_tol=5, region_list=[]): chrom = idx_sel_list[:,0] chrom_vec = np.unique(chrom) chrom_vec = np.sort(chrom_vec) seq_list = [] print(len(chrom),chrom_vec) for chrom_id in chrom_vec: b1 = np.where(chrom==chrom_id)[0] t_serial = idx_sel_list[b1,1] prev_serial = t_serial[0:-1] next_serial = t_serial[1:] distance = next_serial-prev_serial b2 = np.where(distance>gap_tol)[0] if len(b2)>0: if len(region_list)>0: # print('region_list',region_list,len(b2)) b_1 = np.where(region_list[:,0]==chrom_id)[0] # print(b2) t_serial = idx_sel_list[b2,1] if len(b_1)>0: # b2 = np.setdiff1d(b2,region_list[b_1,1]) # print(region_list,region_list[b_1,1],len(b2)) t_id1 = utility_1.mapping_Idx(t_serial,region_list[b_1,1]) t_id1 = t_id1[t_id1>=0] t_id2 = b2[t_id1] b2 = np.setdiff1d(b2,t_id2) # print(len(b2)) # print(idx_sel_list[b2]) # return # print('gap',len(b2)) if len(b2)>0: t_seq = list(np.vstack((b2[0:-1]+1,b2[1:])).T) t_seq.insert(0,np.asarray([0,b2[0]])) 
t_seq.append(np.asarray([b2[-1]+1,len(b1)-1])) else: t_seq = [np.asarray([0,len(b1)-1])] # print(t_seq) # print(chrom_id,len(t_seq),max(distance)) seq_list.extend(b1[np.asarray(t_seq)]) return np.asarray(seq_list) # select sample def sample_select2a1(x_mtx, y, idx_sel_list, seq_list, tol=5, L=5): num_sample = len(idx_sel_list) num1 = len(seq_list) size1 = 2*L+1 print(num_sample,num1,size1) feature_dim = x_mtx.shape[1] vec1_local = np.zeros((num_sample,size1),dtype=int) vec1_serial = np.zeros((num_sample,size1),dtype=int) feature_mtx = np.zeros((num_sample,size1,feature_dim),dtype=np.float32) signal_mtx = np.zeros((num_sample,size1)) ref_serial = idx_sel_list[:,1] id_vec = np.zeros(num_sample,dtype=np.int8) for i in range(0,num1): s1, s2 = seq_list[i][0], seq_list[i][1]+1 serial = ref_serial[s1:s2] id_vec[s1:s2] = 1 # print('start stop',s1,s2,serial) num2 = len(serial) t1 = np.outer(list(range(s1,s2)),np.ones(size1)) t2 = t1 + np.outer(np.ones(num2),list(range(-L,L+1))) t2[t2<s1] = s1 t2[t2>=s2] = s2-1 idx = np.int64(t2) # print(idx) vec1_local[s1:s2] = idx vec1_serial[s1:s2] = ref_serial[idx] feature_mtx[s1:s2] = x_mtx[idx] signal_mtx[s1:s2] = y[idx] # if i%10000==0: # print(i,num2,vec1_local[s1],vec1_serial[s1]) id1 = np.where(id_vec>0)[0] num2 = len(id1) if num2<num_sample: feature_mtx, signal_mtx = feature_mtx[id1], signal_mtx[id1] # vec1_serial, vec1_local = vec1_serial[id1], vec1_local[id1] vec1_serial = vec1_serial[id1] id_1 = -np.ones(sample_num,dtype=np.int64) id_1[id1] = np.arange(num2) vec1_local = id_1[vec1_local] b1 = np.where(vec1_local<0)[0] if len(b1)>0: print('error!',b1) return -1 # signal_mtx = signal_mtx[:,np.newaxis] signal_mtx = np.expand_dims(signal_mtx, axis=-1) # signal_mtx = np.expand_dims(signal_ntx, axis=-1) return feature_mtx, signal_mtx, vec1_serial, vec1_local def score_2a(y, y_predicted): score1 = mean_squared_error(y, y_predicted) score2 = pearsonr(y, y_predicted) score3 = explained_variance_score(y, y_predicted) score4 = mean_absolute_error(y, y_predicted) score5 = median_absolute_error(y, y_predicted) score6 = r2_score(y, y_predicted) score7, pvalue = spearmanr(y,y_predicted) # vec1 = [score1, score2[0], score2[1], score3, score4, score5, score6] vec1 = [score1, score2[0], score2[1], score3, score4, score5, score6, score7, pvalue] return vec1 def read_phyloP(species_name): path1 = './' filename1 = '%s/estimate_rt/estimate_rt_%s.txt'%(path1,species_name) # filename2a = 'test_seq_%s.1.txt'%(species_name) file1 = pd.read_csv(filename1,sep='\t') col1, col2, col3 = '%s.chrom'%(species_name), '%s.start'%(species_name), '%s.stop'%(species_name) chrom_ori, start_ori, stop_ori, serial_ori = np.asarray(file1[col1]), np.asarray(file1[col2]), np.asarray(file1[col3]), np.asarray(file1['serial']) num_sample = len(chrom_ori) chrom_vec = np.unique(chrom_ori) chrom_vec = ['chr22'] for chrom_id in chrom_vec: filename1 = '%s/phyloP/hg19.phyloP100way.%s.bedGraph'%(path1,chrom_id) data1 = pd.read_csv(filename1,header=None,sep='\t') chrom, start, stop, score = data1[0], data1[1], data1[2], data1[3] len1 = stop-start b = np.where(chrom_ori==chrom_id)[0] num_sample1 = len(b) vec1 = np.zeros((num_sample1,16)) print(chrom_id,len(chrom),len(b)) cnt = 0 b1 = [-1] for i in b: t1 = b1[-1]+1 b1 = np.where((start[t1:]>=start_ori[i])&(stop[t1:]<stop_ori[i]))[0]+t1 if len(b1)==0: b1 = [-1] continue t_len1, t_score = np.asarray(len1[b1]), np.asarray(score[b1]) s1 = 0 s2 = np.sum(t_len1) i1 = cnt for j in range(0,12): temp1 = (j-8)*2.5 b2 = np.where((t_score<temp1+2.5)&(t_score>=temp1))[0] 
print(b2) vec1[i1,j] = np.sum(t_len1[b2])*1.0/s2 s1 = s1+temp1*vec1[i1,j] vec1[i1,12] = s1 # average vec1[i1,13] = np.median(t_score) vec1[i1,14] = np.max(t_score) vec1[i1,15] = np.min(t_score) cnt += 1 if cnt%1000==0: print(cnt,len(b1),s2,vec1[i1,12:16]) break # dict1 = dict() # dict1['vec'], dict1['index'] = vec1,b # np.save('phyloP_%s'%(chrom_id),dict1,allow_pickle=True) fields = ['index'] for j in range(0,12): temp1 = (j-8)*2.5 fields.append('%s-%s'%(temp1,temp1+2.5)) fields.extend(range(0,4)) data1 = pd.DataFrame(data = np.hstack((b[:,np.newaxis],vec1)),columns=fields) data1.to_csv('phyloP_%s.txt'%(chrom_id),sep='\t',index=False) return vec1 def read_phyloP_1(ref_filename,header,file_path,chrom_vec,n_level=15,offset=10,magnitude=2): file1 = pd.read_csv(ref_filename,header=header,sep='\t') # col1, col2, col3 = '%s.chrom'%(species_name), '%s.start'%(species_name), '%s.stop'%(species_name) colnames = list(file1) col1, col2, col3, col4 = colnames[0], colnames[1], colnames[2], colnames[3] chrom_ori, start_ori, stop_ori, serial_ori = np.asarray(file1[col1]), np.asarray(file1[col2]), np.asarray(file1[col3]), np.asarray(file1[col4]) num_sample = len(chrom_ori) # chrom_vec = np.unique(chrom_ori) # chrom_vec = [chrom_id] # n_level, offset, magnitude = 15, 10, 2 score_max = (n_level-offset)*magnitude for chrom_id in chrom_vec: # filename1 = '%s/hg19.phyloP100way.%s.bedGraph'%(file_path,chrom_id) filename1 = '%s/chr%s.phyloP100way.bedGraph'%(file_path,chrom_id) data1 = pd.read_csv(filename1,header=None,sep='\t') chrom, start, stop, score = data1[0], data1[1], data1[2], data1[3] len1 = stop-start chrom_id1 = 'chr%s'%(chrom_id) b = np.where(chrom_ori==chrom_id1)[0] num_sample1 = len(b) vec1 = np.zeros((num_sample1,n_level+4)) print(chrom_id,len(chrom),len(b)) cnt = 0 m_idx = len(start)-1 start_idx = 0 print("number of regions", len(b)) for i in b: t_start, t_stop = start_ori[i], stop_ori[i] # position of zero region position = [t_start,t_stop] if start_idx<=m_idx: b1, start_idx = utility_1.search_region_include(position, start, stop, m_idx, start_idx) # print(count,t_start,t_stop,t_stop-t_start,start_idx,len(id3)) if len(b1)==0: continue t_len1, t_score = np.asarray(len1[b1]), np.asarray(score[b1]) t_score[t_score>score_max] = score_max-1e-04 s1 = 0 s2 = np.sum(t_len1) for j in range(0,n_level): temp1 = (j-offset)*magnitude b2 = np.where((t_score<temp1+magnitude)&(t_score>=temp1))[0] # print(b2) vec1[cnt,j] = np.sum(t_len1[b2])*1.0/s2 s1 = s1+temp1*vec1[cnt,j] vec1[cnt,n_level:n_level+4] = [s1,np.median(t_score),np.max(t_score),np.min(t_score)] cnt += 1 pre_b1 = b1 if cnt%1000==0: print(chrom_id,cnt,len(b1),s2,vec1[cnt,-4:]) # break # dict1 = dict() # dict1['vec'], dict1['index'] = vec1,b # np.save('phyloP_%s'%(chrom_id),dict1,allow_pickle=True) fields = ['index'] for j in range(0,n_level): temp1 = (j-offset)*magnitude fields.append('%s-%s'%(temp1,temp1+magnitude)) fields.extend(range(0,4)) idx = serial_ori[b] data1 = pd.DataFrame(data = np.hstack((idx[:,np.newaxis],vec1)),columns=fields) data1.to_csv('phyloP_%s.txt'%(chrom_id),sep='\t',index=False) return vec1 def read_motif_1(filename,output_filename=-1): data1 = pd.read_csv(filename,sep='\t') colnames = list(data1) col1, col2, col3 = colnames[0], colnames[1], colnames[2] chrom, start, stop = np.asarray(data1[col1]), np.asarray(data1[col2]), np.asarray(data1[col3]) region_len = stop-start m1, m2, median_len = np.max(region_len), np.min(region_len), np.median(region_len) b1 = np.where(region_len!=median_len)[0] print(m1,m2,median_len,len(b1)) 
bin_size = median_len motif_name = colnames[3:] mtx1 = np.asarray(data1.loc[:,motif_name]) mtx1 = mtx1*1000.0/np.outer(region_len,np.ones(mtx1.shape[1])) print('motif',len(motif_name)) print(mtx1.shape) print(np.max(mtx1),np.min(mtx1),np.median(mtx1)) if output_filename!=-1: fields = colnames data1 = pd.DataFrame(columns=fields) data1[colnames[0]], data1[colnames[1]], data1[colnames[2]] = chrom, start, stop num1 = len(fields)-3 for i in range(0,num1): data1[colnames[i+3]] = mtx1[:,i] data1.to_csv(output_filename,header=True,index=False,sep='\t') print(output_filename, data1.shape) return mtx1, chrom, start, stop, colnames def read_gc_1(ref_filename,header,filename,output_filename): sel_idx = [] file1 = pd.read_csv(ref_filename,header=header,sep='\t') f_list = load_seq_altfeature(filename,sel_idx) # col1, col2, col3 = '%s.chrom'%(species_name), '%s.start'%(species_name), '%s.stop'%(species_name) colnames = list(file1) col1, col2, col3, col4 = colnames[0], colnames[1], colnames[2], colnames[3] chrom_ori, start_ori, stop_ori, serial_ori = np.asarray(file1[col1]), np.asarray(file1[col2]), np.asarray(file1[col3]), np.asarray(file1[col4]) num_sample = len(chrom_ori) if num_sample!=f_list.shape[0]: print('error!',num_sample,f_list.shape[0]) fields = ['chrom','start','stop','serial','GC','GC_N','GC_skew'] file2 = pd.DataFrame(columns=fields) file2['chrom'], file2['start'], file2['stop'], file2['serial'] = chrom_ori, start_ori, stop_ori, serial_ori for i in range(0,3): file2[fields[i+4]] = f_list[:,i] file2.to_csv(output_filename,index=False,sep='\t') return f_list def generate_serial(filename1,chrom,start,stop): # chrom_vec = np.sort(np.unique(chrom)) # print(chrom_vec) chrom_vec = [] for i in range(1,23): chrom_vec.append('chr%d'%(i)) chrom_vec += ['chrX'] chrom_vec += ['chrY'] print(chrom_vec) # print(chrom) print(len(chrom)) data1 = pd.read_csv(filename1,header=None,sep='\t') ref_chrom, chrom_size = np.asarray(data1[0]), np.asarray(data1[1]) serial_start = 0 serial_vec = np.zeros(len(chrom)) bin_size = stop[1]-start[1] print(bin_size) for chrom_id in chrom_vec: b1 = np.where(ref_chrom==chrom_id)[0] t_size = chrom_size[b1[0]] b2 = np.where(chrom==chrom_id)[0] if len(b1)>0: size1 = int(np.ceil(t_size*1.0/bin_size)) serial = np.int64(start[b2]/bin_size)+serial_start serial_vec[b2] = serial print(chrom_id,b2,len(serial),serial_start,size1) serial_start = serial_start+size1 else: print("error!") return return np.int64(serial_vec) def generate_serial_local(filename1,chrom,start,stop,chrom_num): # chrom_vec = np.sort(np.unique(chrom)) # print(chrom_vec) chrom_vec = [] for i in range(1,chrom_num+1): chrom_vec.append('chr%d'%(i)) chrom_vec += ['chrX'] chrom_vec += ['chrY'] chrom_vec += ['chrM'] print(chrom_vec) print(chrom) print(len(chrom)) t_chrom = np.unique(chrom) data1 = pd.read_csv(filename1,header=None,sep='\t') ref_chrom, chrom_size = np.asarray(data1[0]), np.asarray(data1[1]) # serial_start = np.zeros(len(chrom)) serial_start = 0 serial_start_1 = dict() serial_vec = np.zeros(len(chrom)) bin_size = stop[1]-start[1] print(bin_size) for chrom_id in chrom_vec: b1 = np.where(ref_chrom==chrom_id)[0] t_size = chrom_size[b1[0]] serial_start_1[chrom_id] = serial_start size1 = int(np.ceil(t_size*1.0/bin_size)) serial_start = serial_start+size1 for chrom_id in t_chrom: b2 = np.where(chrom==chrom_id) serial = np.int64(start[b2]/bin_size)+serial_start_1[chrom_id] serial_vec[b2] = serial return np.int64(serial_vec) def generate_serial_start(filename1,chrom,start,stop,chrom_num=19): # chrom_vec = 
np.sort(np.unique(chrom)) # print(chrom_vec) chrom_vec = [] for i in range(1,chrom_num+1): chrom_vec.append('chr%d'%(i)) chrom_vec += ['chrX'] chrom_vec += ['chrY'] print(chrom_vec) print(chrom) print(len(chrom)) data1 = pd.read_csv(filename1,header=None,sep='\t') ref_chrom, chrom_size = np.asarray(data1[0]), np.asarray(data1[1]) serial_start = 0 serial_vec = -np.ones(len(chrom)) bin_size = stop[1]-start[1] print(bin_size) start_vec = dict() for chrom_id in chrom_vec: start_vec[chrom_id] = serial_start b1 = np.where(ref_chrom==chrom_id)[0] t_size = chrom_size[b1[0]] b2 = np.where(chrom==chrom_id)[0] if len(b1)>0: size1 = int(np.ceil(t_size*1.0/bin_size)) serial = np.int64(start[b2]/bin_size)+serial_start serial_vec[b2] = serial print(chrom_id,b2,len(serial),serial_start,size1) serial_start = serial_start+size1 else: print("error!") return return np.int64(serial_vec), start_vec def shuffle_array(vec): num1 = len(vec) idx = np.random.permutation(num1) vec = vec[idx] return vec, idx # input: estimated attention, type_id: training, validation, or test data # output: ranking of attention def select_region1_sub(filename,type_id): data1 = pd.read_csv(filename,sep='\t') colnames = list(data1) # chrom start stop serial signal predicted_signal predicted_attention chrom, start, serial = data1['chrom'], data1['start'], data1['serial'] chrom, start, serial = np.asarray(chrom), np.asarray(start), np.asarray(serial) predicted_attention = data1['predicted_attention'] predicted_attention = np.asarray(predicted_attention) ranking = stats.rankdata(predicted_attention,'average')/len(predicted_attention) rank1 = np.zeros((len(predicted_attention),2)) rank1[:,0] = ranking chrom_vec = np.unique(chrom) for t_chrom in chrom_vec: b1 = np.where(chrom==t_chrom)[0] t_attention = predicted_attention[b1] t_ranking = stats.rankdata(t_attention,'average')/len(t_attention) rank1[b1,1] = t_ranking data1['Q1'] = rank1[:,0] # rank across all the included chromosomes data1['Q2'] = rank1[:,1] # rank by each chromosome data1['typeId'] = np.int8(type_id*np.ones(len(rank1))) return data1,chrom_vec # merge estimated attention from different training/test splits # type_id1: chromosome order; type_id2: training: 0, test: 1, valid: 2 def select_region1_merge(filename_list,output_filename,type_id1=0,type_id2=1): list1 = [] chrom_numList = [] # b1 = np.where((self.chrom!='chrX')&(self.chrom!='chrY'))[0] # ref_chrom, ref_start, ref_serial = self.chrom[b1], self.start[b1], self.serial[b1] # num_sameple = len(ref_chrom) i = 0 serial1 = [] num1 = len(filename_list) vec1 = list(range(num1)) if type_id1==1: vec1 = list(range(num1-1,-1,-1)) for i in vec1: filename1 = filename_list[i] # data1: chrom, start, stop, serial, signal, predicted_signal, predicted_attention, Q1, Q2, typeId # typeId: training: 0, test: 1, valid: 2 data1, chrom_vec = select_region1_sub(filename1,type_id2) print(filename1,len(data1)) # list1.append(data1) # if i==0: # serial1 = np.asarray(data1['serial']) t_serial = np.asarray(data1['serial'],dtype=np.int64) t_serial2 = np.setdiff1d(t_serial,serial1) serial1 = np.union1d(serial1,t_serial) id1 = mapping_Idx(t_serial,t_serial2) colnames = list(data1) data1 = data1.loc[id1,colnames] list1.append(data1) chrom_numList.append(chrom_vec) data2 = pd.concat(list1, axis=0, join='outer', ignore_index=True, keys=None, levels=None, names=None, verify_integrity=False, copy=True) print('sort') data2 = data2.sort_values(by=['serial']) data2.to_csv(output_filename,index=False,sep='\t') return data2, chrom_numList class Reader(object): def 
__init__(self, ref_filename, feature_idvec = [1,1,1,1]): # Initializes RepliSeq self.ref_filename = ref_filename self.feature_idvec = feature_idvec def generate_serial(self,filename1,filename2,output_filename,header=None): data1 = pd.read_csv(filename2, header=header, sep='\t') colnames = list(data1) chrom, start, stop = np.asarray(data1[colnames[0]]), np.asarray(data1[colnames[1]]), np.asarray(data1[colnames[2]]) serial_vec, start_vec = generate_serial_start(filename1,chrom,start,stop) if output_filename!=None: colnames2 = colnames[0:3]+['serial']+colnames[3:] data2 = pd.DataFrame(columns=colnames2) data2['serial'] = serial_vec for colname1 in colnames: data2[colname1] = data1[colname1] flag = False if header!=None: flag = True data2.to_csv(output_filename,header=flag,index=False,sep='\t') return serial_vec, start_vec def load_motif(self,filename1,motif_filename,output_filename): # output_filename = None # ref_filename = 'hg38.5k.serial.bed' # motif_filename = 'hg38.motif.count.txt' # output_filename1 = None mtx1, chrom, start, stop, colnames = read_motif_1(motif_filename) serial_vec, start_vec = generate_serial_start(filename1,chrom,start,stop) if output_filename!=None: colnames2 = ['chrom','start','stop','serial'] data2 = pd.DataFrame(columns=colnames2) data2['chrom'], data2['start'], data2['stop'], data2['serial'] = chrom, start, stop, serial_vec data3 = pd.DataFrame(columns=colnames[3:],data=mtx1) data1 = pd.concat([data2,data3], axis=1, join='outer', ignore_index=True, keys=None, levels=None, names=None, verify_integrity=False, copy=True) data1.to_csv(output_filename,header=True,index=False,sep='\t') print('data1',data1.shape) return True class ConvergenceMonitor(object): _template = "{iter:>10d} {logprob:>16.4f} {delta:>+16.4f}" def __init__(self, tol, n_iter, verbose): self.tol = tol self.n_iter = n_iter self.verbose = verbose self.history = deque(maxlen=2) self.iter = 0 def __repr__(self): class_name = self.__class__.__name__ params = dict(vars(self), history=list(self.history)) return "{0}({1})".format( class_name, _pprint(params, offset=len(class_name))) def report(self, logprob): if self.verbose: delta = logprob - self.history[-1] if self.history else np.nan message = self._template.format( iter=self.iter + 1, logprob=logprob, delta=delta) print(message, file=sys.stderr) self.history.append(logprob) self.iter += 1 @property def converged(self): return (self.iter == self.n_iter or (len(self.history) == 2 and self.history[1] - self.history[0] < self.tol)) class _Base1(BaseEstimator): def __init__(self, file_path, species_id, resolution, run_id, generate, chromvec,test_chromvec, featureid,type_id,cell,method,ftype,ftrans,tlist, flanking,normalize, config, attention=1,feature_dim_motif=1, kmer_size=[6,5]): # Initializes RepliSeq self.run_id = run_id self.cell = cell self.generate = generate self.train_chromvec = chromvec self.chromosome = chromvec[0] print('train_chromvec',train_chromvec) print('test_chromvec',test_chromvec) self.test_chromvec = test_chromvec self.config = config self.n_epochs = config['n_epochs'] self.species_id = species_id self.type_id = type_id self.cell_type = cell self.cell_type1 = config['celltype_id'] self.method = method self.ftype = ftype self.ftrans = ftrans[0] self.ftrans1 = ftrans[1] self.t_list = tlist self.flanking = flanking self.flanking1 = 3 self.normalize = normalize self.batch_size = config['batch_size'] # config = dict(output_dim=hidden_unit,fc1_output_dim=fc1,fc2_output_dim=fc2,units1=units1[0], # 
units2=units1[1],n_epochs=n_epochs,batch_size=batch_size) # config['feature_dim_vec'] = units1[2:] self.tol = config['tol'] self.attention = attention self.attention_vec = [12,17,22,32,51,52,58,60] self.attention_vec1 = [1] self.lr = config['lr'] self.step = config['step'] self.feature_type = -1 self.kmer_size = kmer_size self.activation = config['activation'] self.min_delta = config['min_delta'] self.chromvec_sel = chromvec self.feature_dim_transform = config['feature_dim_transform'] feature_idvec = [1,1,1,1] # ref_filename = 'hg38_5k_serial.bed' if 'ref_filename' in config: ref_filename = config['ref_filename'] else: ref_filename = 'hg38_5k_serial.bed' self.reader = Reader(ref_filename, feature_idvec) self.predict_type_id = 0 self.method = method self.train = self.config['train_mode'] self.path = file_path self.model_path = '%s/test_%d.h5'%(self.path,run_id) self.pos_code = config['pos_code'] self.feature_dim_select1 = config['feature_dim_select'] self.method_vec = [[11,31],[22,32,52,17,51,58,60],[56,62]] self.resolution = resolution # if self.species_id=='mm10': # self.cell_type1 = config['cell_type1'] if 'cell_type1' in self.config: self.cell_type1 = config['cell_type1'] if ('load_type' in self.config) and (self.config['load_type']==1): self.load_type = 1 else: self.load_type = 0 if (method>10) and not(method in [56]) : self.predict_context = 1 else: self.predict_context = 0 if ftype[0]==-5: self.feature_idx1= -5 # full dimensions elif ftype[0]==-6: self.feature_idx1 = -6 # frequency dimensions else: self.feature_idx1 = ftype if 'est_attention_type1' in self.config: self.est_attention_type1 = self.config['est_attention_type1'] else: self.est_attention_type1 = 1 if 'est_attention_sel1' in self.config: self.est_attention_sel1 = self.config['est_attention_sel1'] else: self.est_attention_sel1 = 0 # self.feature_idx = [0,2] self.feature_idx = featureid self.x, self.y = dict(), dict() # feature matrix and signals self.vec = dict() # serial self.vec_local = dict() if self.species_id.find('hg')>=0: self.chrom_num = 22 elif self.species_id.find('mm')>=0: self.chrom_num = 19 else: self.chrom_num = -1 self.region_list_test, self.region_list_train, self.region_list_valid = [],[],[] if 'region_list_test' in config: self.region_list_test = config['region_list_test'] if 'region_list_train' in config: self.region_list_train = config['region_list_train'] if 'region_list_valid' in config: self.region_list_valid = config['region_list_valid'] flag = False if 'scale' in config: flag = True self.scale = config['scale'] else: self.scale = [0,1] if ('activation_basic' in config) and (config['activation_basic']=='tanh'): if (flag==True) and (self.scale[0]>=0): flag = False if flag==False: self.scale = [-1,1] self.region_boundary = [] self.serial_vec = [] self.f_mtx = [] print('scale',self.scale) print(self.test_chromvec) filename1 = '%s_chr%s-chr%s_chr%s-chr%s'%(self.cell_type, self.train_chromvec[0], self.train_chromvec[-1], self.test_chromvec[0], self.test_chromvec[-1]) self.filename_load = filename1 print(self.filename_load,self.method,self.predict_context,self.attention) self.set_generate(generate,filename1) def load_ref_serial(self, ref_filename, header=None): if header==None: file1 = pd.read_csv(ref_filename,header=header,sep='\t') else: file1 = pd.read_csv(ref_filename,sep='\t') colnames = list(file1) # col1, col2, col3 = '%s.chrom'%(species_name), '%s.start'%(species_name), '%s.stop'%(species_name) col1, col2, col3, col_serial = colnames[0], colnames[1], colnames[2], colnames[3] self.chrom_ori, 
self.start_ori, self.stop_ori, self.serial_ori = np.asarray(file1[col1]), np.asarray(file1[col2]), np.asarray(file1[col3]), np.asarray(file1[col_serial]) print('load ref serial', self.serial_ori.shape) return self.serial_ori # load local serial and signal def load_local_serial(self, filename1, header=None, region_list=[], type_id2=1, signal_normalize=1,region_list_1=[]): if header==None: file2 = pd.read_csv(filename1,header=header,sep='\t') else: file2 = pd.read_csv(filename1,sep='\t') colnames = list(file2) col1, col2, col3, col_serial = colnames[0], colnames[1], colnames[2], colnames[3] # sort the table by serial file2 = file2.sort_values(by=[col_serial]) self.chrom, self.start, self.stop, self.serial = np.asarray(file2[col1]), np.asarray(file2[col2]), np.asarray(file2[col3]), np.asarray(file2[col_serial]) b = np.where((self.chrom!='chrX')&(self.chrom!='chrY')&(self.chrom!='chrM'))[0] self.chrom, self.start, self.stop, self.serial = self.chrom[b], self.start[b], self.stop[b], self.serial[b] if self.chrom_num>0: chrom_num = self.chrom_num else: chrom_num = len(np.unique(self.chrom)) chrom_vec = [str(i) for i in range(1,chrom_num+1)] print('chrom_vec', chrom_vec) self.bin_size = self.stop[1]-self.start[1] scale = self.scale if len(colnames)>=5: col_signal = colnames[4] self.signal = np.asarray(file2[col_signal]) self.signal = self.signal[b] self.signal_pre = self.signal.copy() if signal_normalize==1: if self.run_id>10: # self.signal = signal_normalize(self.signal,[0,1]) # normalize signals self.signal_pre1, id1, signal_vec1 = self.signal_normalize_chrom(self.chrom,self.signal,chrom_vec,scale) if not('train_signal_update' in self.config) or (self.config['train_signal_update']==1): train_signal, id2, signal_vec2 = self.signal_normalize_chrom(self.chrom,self.signal,self.train_chromvec,scale) id_1 = mapping_Idx(id1,id2) self.signal = self.signal_pre1.copy() self.signal[id_1] = train_signal else: self.signal = self.signal_pre1.copy() else: print('signal_normalize_bychrom') self.signal, id1, signal_vec = self.signal_normalize_bychrom(self.chrom,self.signal,chrom_vec,scale) else: self.signal = np.ones(len(b)) # print(self.signal.shape) print('load local serial', self.serial.shape, self.signal.shape, np.max(self.signal), np.min(self.signal)) if 'tol_region_search' in self.config: tol = self.config['tol_region_search'] else: tol = 2 # only train or predict on some regions print('load_local_serial',len(self.chrom)) if len(region_list_1)>0: num1 = len(region_list_1) list1 = [] for i in range(num1): t_region = region_list_1[i] t_chrom, t_start, t_stop = 'chr%d'%(t_region[0]), t_region[1], t_region[2] t_id1 = np.where((self.chrom==t_chrom)&(self.start<t_stop)&(self.stop>t_start))[0] list1.extend(t_id1) b1 = np.asarray(list1) self.chrom, self.start, self.stop, self.serial = self.chrom[b1], self.start[b1], self.stop[b1], self.serial[b1] print('load_local_serial',num1,len(self.chrom)) print(region_list_1) if len(region_list)>0: # print('load_local_serial',region_list) # id1, region_list = self.region_search_1(chrom,start,stop,serial,region_list) id1, region_list = self.region_search_1(self.chrom,self.start,self.stop,self.serial,region_list,type_id2,tol) self.chrom, self.start, self.stop, self.serial, self.signal = self.chrom[id1], self.start[id1], self.stop[id1], self.serial[id1], self.signal[id1] id2 = self.region_search_boundary(self.chrom,self.start,self.stop,self.serial,region_list) # print('region_search_boundary', id2[:,0], self.start[id2[:,1:3]],self.stop[id2[:,1:3]]) self.region_boundary = id2 # 
print(self.serial[id2[:,1:3]]) print('region_boundary',id2) # return else: print('load_local_serial',region_list) # assert len(region_list)>0 # return return self.serial, self.signal # training, validation and test data index def prep_training_test(self,train_sel_list_ori): train_id1, test_id1, y_signal_train1, y_signal_test, train1_sel_list, test_sel_list = self.generate_train_test_1(train_sel_list_ori) self.idx_list = {'test':test_id1} self.y_signal = {'test':y_signal_test} if len(y_signal_test)>0: print('y_signal_test',np.max(y_signal_test),np.min(y_signal_test)) if len(y_signal_train1)>0: print('y_signal_train',np.max(y_signal_train1),np.min(y_signal_train1)) self.idx_list.update({'train':[],'valid':[]}) else: return # y_signal_test_ori = signal_normalize(y_signal_test,[0,1]) # shuffle array # x_test_trans, shuffle_id2 = shuffle_array(x_test_trans) # test_sel_list = test_sel_list[shuffle_id2] # x_train1_trans, shuffle_id1 = shuffle_array(x_train1_trans) # train_sel_list = train_sel_list[shuffle_id1] print(train1_sel_list[0:5]) # split training and validation data if 'ratio1' in self.config: ratio = self.config['ratio1'] else: ratio = 0.95 if 'type_id1' in self.config: type_id_1 = self.config['type_id1'] else: type_id_1 = 0 idx_train, idx_valid, idx_test = self.generate_index_1(train1_sel_list, test_sel_list, ratio, type_id_1) print('idx_train,idx_valid,idx_test', len(idx_train), len(idx_valid), len(idx_test)) if (len(self.region_list_train)>0) or (len(self.region_list_valid)>0): idx_train, idx_valid = self.generate_train_test_2(train1_sel_list,idx_train,idx_valid) print('idx_train,idx_valid', len(idx_train), len(idx_valid)) train_sel_list, val_sel_list = train1_sel_list[idx_train], train1_sel_list[idx_valid] self.idx_list.update({'train':train_id1[idx_train],'valid':train_id1[idx_valid]}) self.idx_train_val = {'train':idx_train,'valid':idx_valid} self.y_signal.update({'train':y_signal_train1[idx_train],'valid':y_signal_train1[idx_valid]}) return train_sel_list, val_sel_list, test_sel_list # prepare data from predefined features: kmer frequency feature and motif feature def prep_data_sub2(self,path1,file_prefix,type_id2,feature_dim1,feature_dim2,flag_1): species_id = self.species_id celltype_id = self.cell_type1 if species_id=='mm10': kmer_dim_ori, motif_dim_ori = 100, 50 filename1 = '%s/%s_%d_%d_%d.npy'%(path1,file_prefix,type_id2,kmer_dim_ori,motif_dim_ori) # filename2 = 'test_%s_genome%d_kmer7.h5'%(species_id,celltype_id) filename2 = '%s_%d_kmer7_0_200_trans.h5'%(species_id,celltype_id) else: kmer_dim_ori, motif_dim_ori = 50, 50 filename1 = '%s/%s_%d_%d_%d.npy'%(path1,file_prefix,type_id2,kmer_dim_ori,motif_dim_ori) # filename2 = 'test_%s_kmer7.h5'%(species_id) filename2 = '%s_kmer7_0_200_trans.h5'%(species_id) kmer_size1, kmer_size2, kmer_size3 = 5,6,7 x_train1_trans, train_sel_list_ori = [], [] flag1, flag2 = 0, 0 flag3 = True # if kmer_size2 in self.kmer_size: if flag3==True: if os.path.exists(filename1)==True: print("loading data...") data1 = np.load(filename1,allow_pickle=True) data_1 = data1[()] x_train1_trans_ori, train_sel_list_ori = np.asarray(data_1['x1']), np.asarray(data_1['idx']) print('train_sel_list',train_sel_list_ori.shape) print('x_train1_trans',x_train1_trans_ori.shape) if kmer_size2 in self.kmer_size: flag1 = 1 serial1 = train_sel_list_ori[:,1] dim1 = x_train1_trans_ori.shape[1] if (self.feature_dim_motif==0) or (flag_1==True): x_train1_trans = x_train1_trans_ori[:,0:-motif_dim_ori] else: # d1 = np.min((dim1-motif_dim_ori+feature_dim2,d1)) # d2 = 
dim1-motif_dim_ori # sel_id1 = list(range(21))+list(range(21,21+feature_dim1)) # x_train1_trans_1 = x_train1_trans[:,sel_id1] # x_train1_trans_2 = x_train1_trans[:,d2:d1] x_train1_trans_1 = x_train1_trans_ori[:,0:dim1-motif_dim_ori] x_train1_trans_2 = x_train1_trans_ori[:,dim1-motif_dim_ori:] else: print('data not found!') print(filename1) return x_train1_trans, trans_sel_list_ori if kmer_size3 in self.kmer_size: with h5py.File(filename2,'r') as fid: serial2 = fid["serial"][:] feature_mtx = fid["vec"][:] # feature_mtx = feature_mtx[:,0:kmer_dim_ori] print(serial2) print(len(serial2),feature_mtx.shape) flag2 = 1 if flag1==1: if flag2==1: t_serial = np.intersect1d(serial1,serial2) id1 = mapping_Idx(serial1,t_serial) id2 = mapping_Idx(serial2,t_serial) if 'feature_dim_transform_1' in self.config: sel_idx = self.config['feature_dim_transform_1'] sel_id1, sel_id2 = list(0,21)+list(range(sel_idx[0])), range(sel_idx[1]) else: sel_id1 = list(0,21)+list(range(10)) sel_id2 = range(feature_dim1-sel_idx1) if (self.feature_dim_motif==0) or (flag_1==True): x_train1_trans = np.hstack((x_train1_trans[id1,sel_id1],feature_mtx[id2,sel_id2])) else: x_train1_trans = np.hstack((x_train1_trans_1[id1,sel_id1],feature_mtx[id2,sel_id2],x_train1_trans_2[id1,0:feature_dim2])) train_sel_list_ori = train_sel_list_ori[id1] else: pass elif flag2==1: t_serial = np.intersect1d(serial1,serial2) id1 = mapping_Idx(serial1,t_serial) id2 = mapping_Idx(serial2,t_serial) x_train1_trans = np.hstack((x_train1_trans_ori[id1,0:2],feature_mtx[id2,0:feature_dim1])) train_sel_list_ori = train_sel_list_ori[id1] self.feature_dim_select1 = -1 if (self.feature_dim_motif==1) and (flag_1==False): x_train1_trans = np.hstack((x_train1_trans,x_train1_trans_2[id1,0:feature_dim2])) # id1 = mapping_Idx(self.serial_ori,serial2) # b1 = (id1>=0) # id1 = id1[b1] # serial2, feature_mtx = serial2[b1], feature_mtx[b1] # chrom1 = self.chrom_ori[id1] # chrom2 = np.zeros(len(serial2),dtype=np.int32) # chrom_vec = np.unique(chrom1) # for chrom_id in chrom_vec: # b2 = np.where(chrom1==chrom_id)[0] # chrom_id1 = int(chrom_id[3:]) # chrom2[b2] = chrom_id1 # x_train1_trans = feature_mtx[:,0:feature_dim1] # trans_sel_list_ori = np.vstack((chrom2,serial2)).T else: print('data not found!') return x_train1_trans, train_sel_list_ori # prepare data from predefined features def prep_data_sub1(self,path1,file_prefix,type_id2,feature_dim_transform,load_type=0): self.feature_dim_transform = feature_dim_transform # map_idx = mapping_Idx(serial_ori,serial) sub_sample_ratio = 1 shuffle = 0 normalize, flanking, attention, run_id = self.normalize, self.flanking, self.attention, self.run_id config = self.config vec2 = dict() tol = self.tol L = flanking # np.save(filename1) print("feature transform") # filename1 = '%s/%s_%d_%d_%d.npy'%(path1,file_prefix,type_id2,feature_dim_transform[0],feature_dim_transform[1]) print(self.species_id) t_featuredim1, t_featuredim2 = feature_dim_transform[0], feature_dim_transform[1] flag1 = False if self.species_id=='hg38': if 'motif_trans_typeid' in self.config: flag1 = True if (self.species_id=='mm10'): flag1 = True if (t_featuredim1>0) or (flag1==False): x_train1_trans, train_sel_list_ori = self.prep_data_sub2(path1,file_prefix,type_id2,t_featuredim1,t_featuredim2,flag1) if len(x_train1_trans)==0: print('data not found!') return -1 if t_featuredim2>0: print('train_sel_list',train_sel_list_ori.shape) print('x_train1_trans',x_train1_trans.shape) if (self.feature_dim_motif>=1) and (flag1==True): if self.species_id=='mm10': annot1 = 
'%s_%d_motif'%(self.species_id,self.cell_type1) else: annot1 = '%s_motif'%(self.species_id) motif_trans_typeid = self.config['motif_trans_typeid'] motif_featuredim = self.config['motif_featuredim'] motif_filename = '%s_%d_%d_trans.h5'%(annot1,motif_trans_typeid,motif_featuredim) if motif_featuredim<t_featuredim2: print('error! %d %d',motif_featuredim,t_featuredim2) t_featuredim2 = motif_featuredim with h5py.File(motif_filename,'r') as fid: serial_1 = fid["serial"][:] motif_data = fid["vec"][:] print(len(serial_1),motif_data.shape) serial1 = train_sel_list_ori[:,1] serial2 = serial_1 t_serial = np.intersect1d(serial1,serial2) id1 = mapping_Idx(serial1,t_serial) id2 = mapping_Idx(serial2,t_serial) x_train1_trans = np.hstack((x_train1_trans[id1],motif_data[id2,0:t_featuredim2])) train_sel_list_ori = train_sel_list_ori[id1] # train_sel_list_ori2 = serial_1[id2] else: print("data not found!") return x_train1_trans = self.feature_dim_select(x_train1_trans,feature_dim_transform) # feature loaded not specific to cell type if load_type==1: return x_train1_trans, train_sel_list_ori list1 = ['motif_feature','feature2'] for t_feature in list1: if (t_feature in self.config) and (self.config[t_feature]==1): if t_feature=='feature2': pre_config = self.config['pre_config'] if self.chrom_num>0: chrom_num = self.chrom_num else: chrom_num = len(np.unique(self.chrom)) chrom_vec = list(range(1,chrom_num+1)) feature_mtx2, serial_2 = self.prep_data_sequence_3(pre_config,chrom_vec) else: x = 1 x_train1_trans_ori1 = x_train1_trans.copy() train_sel_list_ori1 = train_sel_list_ori.copy() serial1 = train_sel_list_ori[:,1] serial2 = serial_2[:,1] t_serial = np.intersect1d(serial1,serial2) id1 = mapping_Idx(serial1,t_serial)[0] id2 = mapping_Idx(serial2,t_serial)[0] x_train1_trans = np.hstack((x_train1_trans[id1],feature_mtx2[id2])) train_sel_list_ori = train_sel_list_ori[id1] train_sel_list_ori2 = serial_2[id2] b1 = np.where(train_sel_list_ori[:,0]!=train_sel_list_ori2[:,0])[0] if len(b1)>0: print('error! 
train_sel_list_ori',len(b1)) if ('centromere' in self.config) and (self.config['centromere']==1): regionlist_filename = 'hg38.centromere.bed' serial1 = train_sel_list_ori[:,1] serial_list1, centromere_serial = self.select_region(serial1, regionlist_filename) id1 = mapping_Idx(serial1,serial_list1) id1 = id1[id1>=0] x_train1_trans = x_train1_trans[id1] train_sel_list_ori = train_sel_list_ori[id1] print(x_train1_trans.shape,train_sel_list_ori.shape) print('positional encoding', self.pos_code) print('feature dim',x_train1_trans.shape) self.feature_dim = x_train1_trans.shape[1] start = time.time() if self.pos_code ==1: x_train1_trans = self.positional_encoding1(x_train1_trans,train_sel_list_ori,self.feature_dim) print(x_train1_trans.shape) stop = time.time() print('positional encoding', stop-start) ## shuffle array if ('shuffle' in self.config) and (self.config['shuffle']==1): x_train1_trans, shuffle_id1 = shuffle_array(x_train1_trans) print('array shuffled') # np.random.shuffle(x_tran1_trans) # train_sel_list = train_sel_list[shuffle_id1] elif ('noise' in self.config) and (self.config['noise']>0): if self.config['noise']==1: x_train1_trans = np.zeros_like(x_train1_trans) print('x_train1_trans, noise 1', x_train1_trans[0:5]) elif self.config['noise']==2: x_train1_trans = np.random.uniform(0,1,x_train1_trans.shape) else: x_train1_trans = np.random.normal(0,1,x_train1_trans.shape) else: pass if 'sub_sample_ratio' in self.config: sub_sample_ratio = self.config['sub_sample_ratio'] num_sample = len(train_sel_list_ori) sub_sample = int(num_sample*sub_sample_ratio) train_sel_list_ori = train_sel_list_ori[0:sub_sample] x_train1_trans = x_train1_trans[0:sub_sample] # align train_sel_list_ori and serial print(train_sel_list_ori.shape,len(self.serial)) id1 = mapping_Idx(train_sel_list_ori[:,1],self.serial) id2 = (id1>=0) print('mapping',len(self.serial),np.sum(id2),len(self.serial),len(id2)) # self.chrom, self.start, self.stop, self.serial, self.signal = self.chrom[id2], self.start[id2], self.stop[id2], self.serial[id2], self.signal[id2] self.local_serial_1(id2) id1 = id1[id2] train_sel_list_ori = train_sel_list_ori[id1] x_train1_trans = x_train1_trans[id1] self.x_train1_trans = x_train1_trans self.train_sel_list = train_sel_list_ori return x_train1_trans, train_sel_list_ori def output_generate_sequences(self,idx_sel_list,seq_list): num1 = len(seq_list) t_serial1 = idx_sel_list[:,1] seq_list = np.asarray(seq_list) t_serial = t_serial1[seq_list] id1 = mapping_Idx(self.serial,t_serial[:,0]) chrom1, start1, stop1 = self.chrom[id1], self.start[id1], self.stop[id1] id2 = mapping_Idx(self.serial,t_serial[:,1]) chrom2, start2, stop2 = self.chrom[id2], self.start[id2], self.stop[id2] fields = ['chrom','start','stop','serial1','serial2'] data1 = pd.DataFrame(columns=fields) data1['chrom'], data1['start'], data1['stop'] = chrom1, start1, stop2 data1['serial1'], data1['serial2'] = t_serial[:,0], t_serial[:,1] data1['region_len'] = t_serial[:,1]-t_serial[:,0]+1 output_filename = 'test_seqList_%d_%d.txt'%(idx_sel_list[0][0],idx_sel_list[0][1]) data1.to_csv(output_filename,index=False,sep='\t') return True # prepare data from predefined features def prep_data(self,path1,file_prefix,type_id2,feature_dim_transform): x_train1_trans, train_sel_list_ori = self.prep_data_sub1(path1,file_prefix,type_id2,feature_dim_transform) train_sel_list, val_sel_list, test_sel_list = self.prep_training_test(train_sel_list_ori) # keys = ['train','valid','test'] keys = ['train','valid'] # self.idx_sel_list = 
{'train':train1_sel_list,'valid':val_sel_list,'test':test_sel_list} idx_sel_list = {'train':train_sel_list,'valid':val_sel_list,'test':test_sel_list} # self.idx_sel_list = idx_sel_list # seq_list_train, seq_list_valid: both locally calculated self.seq_list = dict() start = time.time() for i in keys: self.seq_list[i] = generate_sequences(idx_sel_list[i],region_list=self.region_boundary) print(len(self.seq_list[i])) self.output_generate_sequences(idx_sel_list[i],self.seq_list[i]) stop = time.time() print('generate_sequences', stop-start) # generate initial state index self.init_id = dict() self.init_index(keys) # training and validation data # x_train1_trans = self.x_train1_trans for i in keys: idx = self.idx_list[i] if self.method<5 or self.method in [56]: self.x[i] = x_train1_trans[idx] self.y[i] = self.y_signal[i] print(self.x[i].shape, self.y[i].shape) else: idx_sel_list = self.train_sel_list[idx] start = time.time() x, y, self.vec[i], self.vec_local[i] = sample_select2a1(x_train1_trans[idx],self.y_signal[i], idx_sel_list, self.seq_list[i], self.tol, self.flanking) stop = time.time() print('sample_select2a1',stop-start) # concate context for baseline methods if self.method<=10: # x_train, x_valid, y_train, y_valid = train_test_split(x_train1, y_train1, test_size=0.2, random_state=42) x = x.reshape(x.shape[0],x.shape[1]*x.shape[-1]) y = y[:,self.flanking] self.x[i], self.y[i] = x, y print(self.x[i].shape, self.y[i].shape) return True # prepare data from predefined features def prep_data_1(self,path1,file_prefix,type_id2,feature_dim_transform, n_fold=5, ratio=0.9, type_id=1): x_train1_trans, train_sel_list_ori = self.prep_data_sub1(path1,file_prefix,type_id2,feature_dim_transform) print(train_sel_list_ori) id1 = mapping_Idx(train_sel_list_ori[:,1],self.serial) id2 = (id1>=0) print('mapping',len(self.serial),np.sum(id2)) self.chrom, self.start, self.stop, self.serial, self.signal = self.chrom[id2], self.start[id2], self.stop[id2], self.serial[id2], self.signal[id2] id1 = id1[id2] train_sel_list_ori = train_sel_list_ori[id1] self.x_train1_trans = self.x_train1_trans[id1] print(train_sel_list_ori.shape,self.x_train1_trans.shape) id_vec = self.generate_index_2(train_sel_list_ori, n_fold=n_fold, ratio=ratio, type_id=type_id) return id_vec def find_serial_ori_1_local(self,chrom_vec,type_id2=1): # filename1 = 'mm10_%d_%s_encoded1.h5'%(self.config['cell_type1'],chrom_id1) self.species_id = 'mm10' self.cell_type1 = self.config['cell_type1'] file_path1 = '/work/magroup/yy3/data1/replication_timing3/mouse' # filename1 = '%s/mm10_5k_seq_genome%d_1.txt'%(file_path1,self.config['cell_type1']) chrom_id1 = 'chr1' filename1 = '%s_%d_%s_encoded1.h5'%(self.species_id,self.cell_type1,chrom_id1) list1, list2 = [], [] serial_vec = [] print(filename1) if os.path.exists(filename1)==False: # prepare data from predefined features # one hot encoded feature vectors for each chromosome self.prep_data_sequence_ori() print('prep_data_sequence_ori',filename1) for chrom_id in chrom_vec: # if chrom_id<22: # continue chrom_id1 = 'chr%s'%(chrom_id) # if self.config['species_id']==0: # filename2 = 'mm10_%d_%s_encoded1.h5'%(self.config['cell_type1'],chrom_id1) # else: # filename2 = '%s_%s_encoded1.h5'%(self.species_id,chrom_id1) filename2 = '%s_%d_%s_encoded1.h5'%(self.species_id,self.cell_type1,chrom_id1) with h5py.File(filename2,'r') as fid: serial1 = fid["serial"][:] if type_id2==1: seq1 = fid["vec"][:] list2.extend(seq1) list1.extend([chrom_id]*len(serial1)) serial_vec.extend(serial1) print(chrom_id,len(serial1)) list1, 
serial_vec = np.asarray(list1), np.asarray(serial_vec) serial_vec = np.hstack((list1[:,np.newaxis],serial_vec)) f_mtx = np.asarray(list2) # data_1 = pd.read_csv(filename1,sep='\t') # colnames = list(data_1) # local_serial = np.asarray(data_1['serial']) # local_seq = np.asarray(data_1['seq']) # print('local_seq', local_seq.shape) # serial_vec = local_serial # f_mtx = local_seq # filename2 = '%s/mm10_5k_serial.bed'%(file_path1) # file2 = pd.read_csv(filename2,header=None,sep='\t') # ref_chrom, ref_start, ref_stop, ref_serial = np.asarray(file2[0]), np.asarray(file2[1]), np.asarray(file2[2]), np.asarray(file2[3]) # # assert list(local_serial==list(ref_serial)) # id_vec1 = [] # for chrom_id in chrom_vec: # # if chrom_id<22: # # continue # # chrom_id1 = 'chr%s'%(chrom_id) # id1 = np.where(ref_chrom=='chr%d'%(chrom_id))[0] # id_vec1.extend(id1) # print(chrom_id,len(id1)) # id_vec1 = np.asarray(id_vec1) # ref_chrom_1, ref_serial_1 = ref_chrom[id_vec1], ref_serial[id_vec1] # print('ref chrom local', len(ref_chrom_1), len(ref_serial_1)) # id1 = utility_1.mapping_Idx(ref_serial_1,local_serial) # id2 = np.where(id1>=0)[0] # id1 = id1[id2] # # assert len(id2)==len(id1) # chrom1 = ref_chrom_1[id1] # local_chrom = [int(chrom1[3:]) for chrom1 in ref_chrom_1] # local_chrom = np.asarray(local_chrom) # local_serial, local_seq = local_serial[id2], local_seq[id2] # serial_vec = np.column_stack((local_chrom,local_serial)) # f_mtx = np.asarray(local_seq) return serial_vec, f_mtx # find serial and feature vectors # input: type_id1: load sequence feature or kmer frequency feature, motif feature # type_id2: load serial or feature vectors def find_serial_ori_1(self,file_path,file_prefix,chrom_vec,type_id1=0,type_id2=0,select_config={}): # load the sequences if type_id1==0: # list2 = np.zeros((interval,region_unit_size,4),dtype=np.int8) filename1 = '%s_serial_2.txt'%(self.species_id) list1, list2 = [], [] serial_vec = [] if (os.path.exists(filename1)==False) or (type_id2==1): if self.config['species_id']==0: serial_vec, list2 = self.find_serial_ori_1_local(chrom_vec) else: for chrom_id in chrom_vec: # if chrom_id<22: # continue chrom_id1 = 'chr%s'%(chrom_id) filename2 = '%s_%s_encoded1.h5'%(self.species_id,chrom_id1) with h5py.File(filename2,'r') as fid: serial1 = fid["serial"][:] if type_id2==1: seq1 = fid["vec"][:] list2.extend(seq1) list1.extend([chrom_id]*len(serial1)) serial_vec.extend(serial1) print(chrom_id,len(serial1)) list1, serial_vec = np.asarray(list1), np.asarray(serial_vec) serial_vec = np.hstack((list1[:,np.newaxis],serial_vec)) np.savetxt(filename1,serial_vec,fmt='%d',delimiter='\t') else: serial_vec = np.loadtxt(filename1,dtype=np.int64) if serial_vec.shape[-1]>2: cnt1 = serial_vec[:,-1] b1 = np.where(cnt1>0)[0] ratio1 = len(b1)/len(serial_vec) print('sequence with N', len(b1),len(serial_vec),ratio1) # serial_vec = serial_vec[:,0] f_mtx = np.asarray(list2) elif type_id1==2: filename1 = select_config['input_filename1'] layer_name = select_config['layer_name'] with h5py.File(filename1,'r') as fid: f_mtx = np.asarray(fid[layer_name][:],dtype=np.float32) print(f_mtx.shape) serial_vec = fid["serial"][:] assert len(serial_vec )==f_mtx.shape[0] print(serial_vec[0:5]) else: # load kmer frequency features and motif features load_type_id2 = 0 x_train1_trans, train_sel_list_ori = self.prep_data_sub1(file_path,file_prefix,load_type_id2,self.feature_dim_transform,load_type=1) # serial_vec = train_sel_list_ori[:,1] serial_vec = np.asarray(train_sel_list_ori) f_mtx = np.asarray(x_train1_trans) return serial_vec, 
f_mtx def find_serial_ori(self,file_path,file_prefix,type_id1=0,type_id2=0,select_config={}): chrom_vec = np.unique(self.chrom) chrom_vec1 = [] for chrom_id in chrom_vec: try: id1 = chrom_id.find('chr') if id1>=0: chrom_id1 = int(chrom_id[3:]) chrom_vec1.append(chrom_id1) except: continue chrom_vec1 = np.sort(chrom_vec1) serial_vec, f_mtx = self.find_serial_ori_1(file_path,file_prefix,chrom_vec1, type_id1=type_id1,type_id2=type_id2, select_config=select_config) self.serial_vec = serial_vec self.f_mtx = f_mtx # list2 = np.zeros((interval,region_unit_size,4),dtype=np.int8) print(len(self.chrom),len(self.serial)) # cnt1 = serial_vec[:,1] # b1 = np.where(cnt1>0)[0] # ratio1 = len(b1)/len(serial_vec) # print(len(b1),len(serial_vec),ratio1) id1 = mapping_Idx(serial_vec[:,1],self.serial) b1 = np.where(id1>=0)[0] self.local_serial_1(b1,type_id=0) print(len(self.chrom),len(self.serial)) return True def prep_data_2(self,file_path,file_prefix,seq_len_thresh=50): self.find_serial_ori(file_path,file_prefix) chrom_vec = np.unique(self.chrom) chrom_vec1 = [] for chrom_id in chrom_vec: try: id1 = chrom_id.find('chr') if id1>=0: chrom_id1 = int(chrom_id[3:]) chrom_vec1.append(chrom_id1) except: continue chrom_vec1 = np.sort(chrom_vec1) sample_num = len(self.chrom) idx_sel_list = -np.ones((sample_num,2),dtype=np.int64) for chrom_id in chrom_vec1: chrom_id1 = 'chr%d'%(chrom_id) b1 = np.where(self.chrom==chrom_id1)[0] idx_sel_list[b1,0] = [chrom_id]*len(b1) idx_sel_list[b1,1] = self.serial[b1] id1 = idx_sel_list[:,0]>=0 idx_sel_list = idx_sel_list[id1] sample_num = len(id1) y = self.signal[id1] x_mtx = idx_sel_list[id1] seq_list = generate_sequences(idx_sel_list, gap_tol=5, region_list=[]) seq_len = seq_list[:,1]-seq_list[:,0]+1 thresh1 = seq_len_thresh b1 = np.where(seq_len>thresh1)[0] print(len(seq_list),len(b1)) seq_list = seq_list[b1] seq_len1 = seq_list[:,1]-seq_list[:,0]+1 print(sample_num,np.sum(seq_len1),seq_list.shape,np.max(seq_len),np.min(seq_len),np.median(seq_len),
np.max(seq_len1)
numpy.max
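The prompt above repeatedly calls a mapping_Idx(ref, query) helper that appears to be imported from the project's utilities rather than defined in the snippet; judging from how its result is filtered with id1>=0 before indexing, it returns the position of each query value in ref and -1 for misses. A minimal sketch of such a helper under that assumption (the name mapping_idx_sketch and the example serials are illustrative, not from the repository):

import numpy as np

def mapping_idx_sketch(ref, query):
    """Return, for each element of `query`, its index in `ref`, or -1 if absent.

    Assumes `ref` holds unique values; mirrors how the snippet filters the
    result with `id1 >= 0` before using it to index arrays.
    """
    ref = np.asarray(ref)
    query = np.asarray(query)
    order = np.argsort(ref)
    pos = np.searchsorted(ref, query, sorter=order)
    pos = np.clip(pos, 0, len(ref) - 1)
    idx = order[pos]
    idx[ref[idx] != query] = -1
    return idx

# usage: map genomic bin serials onto a reference serial list
ref_serial = np.array([10, 11, 12, 15, 20])
query_serial = np.array([12, 20, 99])
print(mapping_idx_sketch(ref_serial, query_serial))  # [ 2  4 -1]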
# -*- coding: utf-8 -*-
"""
Created on Tue May 5 17:11:19 2020

@author: TOMAS (UDEA-FAU)
"""
import numpy as np
from scipy.fftpack import dct
from scipy.signal import lfilter


### UTILITY FUNCTIONS ###
def erb_space(low_freq=50, high_freq=8000, n=64):
    ear_q = 9.26449
    min_bw = 24.7
    cf_array = -(ear_q * min_bw) + np.exp(np.linspace(1,n,n) * (-np.log(high_freq + ear_q * min_bw) + np.log(low_freq + ear_q * min_bw)) / n) \
                * (high_freq + ear_q * min_bw)
    return cf_array

def powerspec(X, n_padded):
    # Fourier transform
    # Y = np.fft.rfft(X, n=n_padded)
    Y = np.fft.fft(X, n=n_padded)
    Y = np.absolute(Y)
    # non-redundant part
    m = int(n_padded / 2) + 1
    Y = Y[:, :m]
    return np.abs(Y) ** 2, n_padded


### GAMMATONE IMPULSE RESPONSE ###
def gammatone_impulse_response(samplerate_hz, length_in_samples, center_freq_hz, p):
    # Generate a Glasberg&Moore parametrized gammatone filter
    erb = 24.7 + (center_freq_hz/9.26449)  # equivalent rectangular bandwidth
    an = (np.pi * np.math.factorial(2*p-2) * np.power(2, float(-(2*p-2)))) / np.square(np.math.factorial(p-1))
    b = erb/an  # bandwidth parameter
    a = 1  # amplitude; this is varied later by the normalization process
    t = np.linspace(1./samplerate_hz, length_in_samples/samplerate_hz, length_in_samples)
    gammatone_ir = a * np.power(t, p-1) * np.exp(-2*np.pi*b*t) * np.cos(2*np.pi*center_freq_hz*t)
    return gammatone_ir


### MP-GTF CONSTRUCTION ###
def generate_filterbank(fs, fmax, L, N, p=4):
    """
    L: Size of the signal measured in samples
    N: Number of filters
    p: Order of the Gammatone impulse response
    """
    # Center frequencies
    if fs == 8000:
        fmax = 4000
    center_freqs = erb_space(50, fmax, N)
    center_freqs = np.flip(center_freqs)
    n_center_freqs = len(center_freqs)
    # Initialize variables
    filterbank =
np.zeros((N, L))
numpy.zeros
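For context on where the truncated generate_filterbank prompt is heading: once the (N, L) filterbank matrix is allocated, a natural continuation is to fill each row with one gammatone impulse response per ERB-spaced center frequency and then analyze a signal with the bank. The sketch below is an assumed usage built on the erb_space and gammatone_impulse_response functions defined in the prompt, not the repository's actual continuation; the unit-energy normalization and the convolution-based analysis are illustrative choices.

import numpy as np

def build_gammatone_bank(fs, fmax, L, N, p=4):
    # Hypothetical completion: one impulse response per ERB-spaced center frequency.
    center_freqs = np.flip(erb_space(50, fmax, N))
    bank = np.zeros((N, L))
    for i, cf in enumerate(center_freqs):
        ir = gammatone_impulse_response(fs, L, cf, p)
        bank[i] = ir / np.linalg.norm(ir)  # unit-energy normalization (assumption)
    return bank

# usage: analyze 100 ms of a 1 kHz tone sampled at 16 kHz with 32 filters
fs, L, N = 16000, 1600, 32
t = np.arange(L) / fs
x = np.sin(2 * np.pi * 1000 * t)
bank = build_gammatone_bank(fs, fs // 2, L, N)
subbands = np.array([np.convolve(x, h, mode="same") for h in bank])
print(subbands.shape)  # (32, 1600)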
import numpy as np from multiagent.core import World, Agent, Landmark from multiagent.scenario import BaseScenario import cmath, math, os, time from UTILS.tensor_ops import np_normalize_last_dim, dir3d_rad def Norm(x): # 求长度 return np.linalg.norm(x) def Unit(m): return m * 0.05 def ToMeter(x): return x / 0.05 def assert_and_break(cond): if cond: return else: print("fail!") class ScenarioConfig(object): hunter_num = 15 invader_num = 5 num_landmarks = 6 num_dummy_coordinate = 4 num_entity = invader_num + num_landmarks + num_dummy_coordinate num_subject_in_obs = (hunter_num + invader_num) + num_landmarks + num_dummy_coordinate # 观测向量长度 uid_dictionary = { # need to contain at least 'agent_uid' and 'entity_uid' 'invader_uid' : range(0, invader_num), 'agent_uid' : range(invader_num, invader_num+hunter_num), 'landmark_uid': range(invader_num+hunter_num, invader_num+hunter_num+num_landmarks), 'dummy_uid': range(invader_num+hunter_num+num_landmarks, invader_num+hunter_num+num_landmarks+num_dummy_coordinate), } uid_dictionary['entity_uid'] = list(uid_dictionary['invader_uid']) + list(uid_dictionary['landmark_uid']) + list(uid_dictionary['dummy_uid']) N_TEAM = 2 N_AGENT_EACH_TEAM = [invader_num, hunter_num] AGENT_ID_EACH_TEAM = [list(range(0,invader_num)), list(range(invader_num, invader_num+hunter_num))] TEAM_NAMES = ['script-team', 'rl-team'] obs_vec_length = 8 obs_vec_dictionary = { 'pos' : (0,1,2), 'vel' : (3,4,5), 'alive': (6), 'other': (7), } discrete_action = True MaxEpisodeStep = 140 arena_size = Unit(m=140) num_MPE_agent = hunter_num + invader_num nest_center_pos = np.array([Unit(m=0), Unit(m=0), Unit(m=0)]) hunter_spawn_pos_lim = Unit(m=25) landmark_spawn_limit = Unit(m=20) invader_spawn_limit = Unit(m=80) distance_dectection = Unit(m=25) intercept_hunter_needed = 3 hunter_affect_range = Unit(m=5.999) hunter_speed_pressure = Unit(m=5.999) Invader_Size = Unit(m=2) Invader_Accel = Unit(m=400) Invader_MaxSpeed = Unit(m=12) # 12 * 0.05=0.6 Hunter_Size = Unit(m=1.5) Hunter_Accel = Unit(m=400) Hunter_MaxSpeed = Unit(m=12) # 12 Landmark_Size = Unit(m=6) Invader_Kill_Range = Unit(m=5.999) # Invader_Spawn_Times = invader_num*2-invader_num invader_spawn_cd = 20 RewardAsUnity = True render = False extreme_sparse = True class Scenario(BaseScenario): def __init__(self, process_id=-1): self.invader_spawn_cd = ScenarioConfig.invader_spawn_cd self.num_agents = ScenarioConfig.num_MPE_agent self.arena_size = ScenarioConfig.arena_size self.discrete_action = ScenarioConfig.discrete_action self.cam_range = ScenarioConfig.arena_size * 1.2 self.hunter_spawn_pos_lim = ScenarioConfig.hunter_spawn_pos_lim self.nest_center_pos = ScenarioConfig.nest_center_pos self.invader_spawn_limit = ScenarioConfig.invader_spawn_limit self.distance_dectection = ScenarioConfig.distance_dectection self.Invader_MaxSpeed = ScenarioConfig.Invader_MaxSpeed self.hunter_affect_range = ScenarioConfig.hunter_affect_range self.hunter_speed_pressure = ScenarioConfig.hunter_speed_pressure self.intercept_hunter_needed = ScenarioConfig.intercept_hunter_needed self.num_subject_in_obs = ScenarioConfig.num_subject_in_obs self.Invader_Kill_Range = ScenarioConfig.Invader_Kill_Range self.Invader_Spawn_Times = ScenarioConfig.Invader_Spawn_Times self.obs_vec_length = ScenarioConfig.obs_vec_length self.invader_num = ScenarioConfig.invader_num self.Invader_To_Intercept = self.invader_num + self.Invader_Spawn_Times # self.thread_index = thread_index # thread_index = 0 是主进程 self.caught = 0 self.rew_other = 0 self.manual_render = None self.eval_mode = False 
self.show_off = False if process_id != 0 else ScenarioConfig.render def render(self): if not hasattr(self, 'threejs_bridge'): from VISUALIZE.mcom import mcom self.threejs_bridge = mcom(path='RECYCLE/v2d_logger/', digit=8, rapid_flush=False, draw_mode='Threejs') self.threejs_bridge.v2d_init() # self.threejs_bridge.set_style('star') # self.threejs_bridge.set_style('grid') self.threejs_bridge.set_style('background', color='Lavender') # self.threejs_bridge.geometry_rotate_scale_translate('monkey',0, 0, np.pi/2, 1, 1, 1, 0,0,0) self.threejs_bridge.geometry_rotate_scale_translate('box', 0, 0, 0, 0.5, 0.5, 1.5, 0,0,0) self.threejs_bridge.geometry_rotate_scale_translate('ball', 0, 0, 0, 1, 1, 1, 0,0,0) self.threejs_bridge.geometry_rotate_scale_translate('cone', 0, np.pi/2, 0, 1.2, 0.9, 0.9, -0.5,0,0) # x -> y -> z self.threejs_bridge.其他几何体之旋转缩放和平移('oct', 'OctahedronGeometry(1,0)', 0,0,0, 1,1,1, 0,0,0) # 八面体 self.threejs_bridge.advanced_geometry_material('oct', map='/wget/hex_texture.jpg') self.threejs_bridge.agent_alive_pos = {} self.threejs_bridge.agent_alive_time = {} for index, agent in enumerate(self.invaders + self.hunters): dir_1, dir_2 = dir3d_rad(agent.state.p_vel) # # Euler Angle y-x-z x = agent.state.p_pos[0]; y=agent.state.p_pos[1]; z=agent.state.p_pos[2] if agent.live: self.threejs_bridge.agent_alive_pos[index] = (x,y,z) if agent.IsInvader: color = 'Red' size = 0.04 if not agent.IsInvader: color = 'Blue' size = 0.01 opacity = 0.99 if index not in self.threejs_bridge.agent_alive_time: self.threejs_bridge.agent_alive_time[index] = 0 self.threejs_bridge.agent_alive_time[index] += 1 else: color = 'black' opacity = 0 self.threejs_bridge.agent_alive_time[index] = 0 self.threejs_bridge.v2dx( 'cone|%d|%s|%.2f'%(index, color, size), self.threejs_bridge.agent_alive_pos[index][0], self.threejs_bridge.agent_alive_pos[index][1], self.threejs_bridge.agent_alive_pos[index][2], ro_x=0, ro_y=-dir_2, ro_z=dir_1, # rotation label='', label_color='white', opacity=opacity if agent.live else 0, track_n_frame = max(min(self.threejs_bridge.agent_alive_time[index]-1, 10),0) ) x_ = self.threejs_bridge.agent_alive_pos[index][0] y_ = self.threejs_bridge.agent_alive_pos[index][1] z_ = self.threejs_bridge.agent_alive_pos[index][2] self.threejs_bridge.line3d( 'fat|%d|%s|%.3f'%(index+3999, color, 0.004), x_arr=np.array([x_, x_]), y_arr=np.array([y_, y_]), z_arr=np.array([0, z_]), dashScale=20, # to make dash denser, Increase this instead of decrease !! 
dashSize=1, gapSize=1, tension=0, opacity=1, ) for index, agent in enumerate(self.hunters): dir_1, dir_2 = dir3d_rad(agent.state.p_vel) # Euler Angle y-x-z dis2invader = self.distance[index, :] mindis2invader_id = np.argmin(dis2invader) if dis2invader[mindis2invader_id]<self.hunter_affect_range: self.threejs_bridge.flash('beam', src=index+len(self.invaders), dst=mindis2invader_id, dur=0.5, size=0.03, color='DeepSkyBlue') for index, agent in enumerate(self.landmarks): nearest_invader_dis = min(self.distance_landmark[:, index]) self.threejs_bridge.v2dx( 'oct|%d|LawnGreen|0.2'%(index+999), agent.state.p_pos[0], agent.state.p_pos[1], agent.state.p_pos[2], ro_x=0, ro_y=0, ro_z=0, # Euler Angle y-x-z label='Danger@ %.1f'%nearest_invader_dis, label_color='Black' if nearest_invader_dis>1 else 'red', opacity=1 ) self.threejs_bridge.v2dx( 'oct|%d|green|0.3'%(1999), 0, 2.5, -1.6, ro_x=0, ro_y=0, ro_z=0, # Euler Angle y-x-z label='Time %d/%d'%(self.step, ScenarioConfig.MaxEpisodeStep), label_color='BlueViolet', opacity=0 ) self.threejs_bridge.v2d_show() if (self.step)==0: self.threejs_bridge.set_env('clear_track') self.threejs_bridge.set_env('clear_flash') def scenario_step(self, agent, world): invaders = self.invaders hunters = self.hunters # 计算距离矩阵(invader2landmark) self.distance, self.distance_landmark = self.get_distance_landmark() # 二重循环,对出现在周围的invader造成减速 self.threat_clear = True # 检查invader是否被驱除出 for invader in invaders: invader.tracked_by = [] if np.linalg.norm(invader.state.p_pos) < ScenarioConfig.invader_spawn_limit*1.11: self.threat_clear = False for i, hunter in enumerate(hunters): hunter_index = i assert hunter.live for j, invader in enumerate(invaders): distance_i_j = self.distance[i, j] if distance_i_j > self.hunter_affect_range: continue invader.tracked_by.append(hunter_index) # push invaders to opposite direction for invader_index, invader in enumerate(invaders): if invader.state.previous_pos is None: continue # special situations if not hasattr(invader, 'force'): continue # special situations invader.state.p_vel = invader.state.previous_vel * (1 - world.damping) # read vel if (invader.force is not None): # use force to update vel invader.force_real = invader.force + (-invader.force) * len(invader.tracked_by) * 0.55 invader.state.p_vel += (invader.force_real / invader.mass) * world.dt # limit max speed if (invader.max_speed is not None): speed = np.linalg.norm(invader.state.p_vel) if speed > invader.max_speed: invader.state.p_vel = invader.state.p_vel / speed * invader.max_speed # update position invader.state.p_pos = invader.state.previous_pos + invader.state.p_vel * world.dt # 检查landmark是否被摧毁 self.distance_landmark [self.num_invaders, self.num_landmarks] if np.min(self.distance_landmark) <= self.Invader_Kill_Range: self.hunter_failed = True def observation(self, agent, world): if agent.iden == 0: # by now the agents has already moved according to action self.scenario_step(agent, world) # 第一步更新距离矩阵,更新智能体live的状态 self.joint_rewards = self.reward_forall(world) #第二步更新奖励 if self.show_off: self.render() # 第三步更新UI self.step += 1 self.obs_dimension = self.obs_vec_length*self.num_subject_in_obs self.obs_pointer = 0 self.obs = np.zeros(shape=(self.obs_dimension,)) self.load_obs( np.concatenate( [ np.concatenate( (entity.state.p_pos, entity.state.p_vel, [entity.live, agent.iden]) ) for entity in world.agents] ) ) self.load_obs( np.concatenate( [ np.concatenate( (entity.state.p_pos, entity.state.p_vel, [1, agent.iden]) ) for entity in world.landmarks] ) ) # dummy coordinate self.load_obs( 
np.concatenate( [ np.concatenate( ([0, 0, 0], [0,0,0], [world.steps, agent.iden]) ), np.concatenate( ([1, 0, 0], [0,0,0], [world.steps, agent.iden]) ), np.concatenate( ([0, 1, 0], [0,0,0], [world.steps, agent.iden]) ), np.concatenate( ([0, 0, 1], [0,0,0], [world.steps, agent.iden]) ), ] ) ) return self.obs.copy() def reward_forall(self, world): # 初始化奖励列表 hunter_reward = np.array([0.] * self.num_hunters) invader_reward = np.array([0.] * self.num_invaders) win_cond1 = world.steps >= world.MaxEpisodeStep win_cond2 = self.threat_clear win_cond = win_cond1 or win_cond2 loss_cond = self.hunter_failed if loss_cond: win_cond = False # lose condition has higher priority if ScenarioConfig.extreme_sparse: # 极度稀疏奖励 if win_cond: hunter_reward += 1 invader_reward -= 1 if loss_cond: hunter_reward -= 1 invader_reward += 1 else: # 一般型稀疏奖励 HUNT_INVDR_SUCCESSFUL_REWARD = 0.05 REWARD_WHEN_TRACKED_BY_N = 3 WIN_REWARD = 1 for invader_index, invader in enumerate(self.invaders): if len(invader.tracked_by) >= REWARD_WHEN_TRACKED_BY_N and (not invader.intercepted): invader.intercepted = True hunter_reward += HUNT_INVDR_SUCCESSFUL_REWARD invader_reward -= HUNT_INVDR_SUCCESSFUL_REWARD if win_cond: hunter_reward += WIN_REWARD invader_reward -= WIN_REWARD if loss_cond: hunter_reward -= WIN_REWARD invader_reward += WIN_REWARD self.reward_sample += hunter_reward[0] return invader_reward.tolist() + hunter_reward.tolist() def spawn_position(self, agent, world): if not agent.IsInvader: # 处理hunter # 初始化,随机地分布在一个正方形内 agent.state.p_pos = np.random.uniform(-self.hunter_spawn_pos_lim, self.hunter_spawn_pos_lim, world.dim_p) + self.nest_center_pos agent.state.previous_pos = agent.state.p_pos.copy() # 速度,初始化为0 agent.state.p_vel = np.zeros(world.dim_p) agent.state.previous_vel = agent.state.p_vel.copy() agent.state.c = np.zeros(world.dim_c) agent.live = True agent.movable = True agent.intercepted = False else: # 处理invader # spawn direction relative to nest self.process_invader_pos(agent) # 速度,初始化为0 agent.state.p_vel = np.zeros(world.dim_p) agent.state.previous_vel = agent.state.p_vel.copy() agent.state.c = np.zeros(world.dim_c) agent.live = True agent.movable = True agent.intercepted = False def process_invader_pos(self, agent): while True: theta = (np.random.rand() * 2 * np.pi - np.pi)*0.35 phi = (np.random.rand() * 2 * np.pi - np.pi) d = self.rand(low=1.0 * self.invader_spawn_limit, high=1.1 * self.invader_spawn_limit) agent.state.p_pos = d * np.array([ np.cos(theta)*np.cos(phi), np.cos(theta)*np.sin(phi), np.sin(theta) ]) + self.nest_center_pos agent.state.previous_pos = agent.state.p_pos.copy() x = agent.state.p_pos[0] y = agent.state.p_pos[1] z = agent.state.p_pos[2] if x < self.arena_size and x > -self.arena_size \ and y < self.arena_size and y > -self.arena_size \ and z < self.arena_size and z > -self.arena_size: break else: assert False def invader_revise(self, agent, world): self.spawn_position(agent, world) agent.live = True agent.movable = True agent.spawn_cd = -1 def reset_world(self, world): self.step = 0 for agent in world.agents: self.spawn_position(agent, world) self.spawn_vip_landmark(world) world.steps = 0 self.rew_other = 0 self.hunter_failed = False self.threat_clear = False self.reward_sample = 0 # self.invader_spawn_time_left = self.Invader_Spawn_Times # self.indader_left_to_hunt = self.Invader_To_Intercept if self.show_off: print('reset world') def spawn_vip_landmark(self, world): theta = np.pi/2 phi = np.random.rand() * 2 * np.pi - np.pi phi_dis = np.pi * 2 / self.num_landmarks for i, landmark in 
enumerate(world.landmarks): self.landmark_spawn_position(landmark, world, theta, phi + phi_dis * i) # 按顺序 return all agents that are not invaders def hunters(self, world): return [agent for agent in world.agents if not agent.IsInvader] # 按顺序 return all adversarial agents def invaders(self, world): return [agent for agent in world.agents if agent.IsInvader] def reward(self, agent, world): assert self.joint_rewards is not None reward = self.joint_rewards[agent.iden] if agent.iden == self.num_agents: self.joint_rewards = None return reward def get_distance_landmark(self): hunters = self.hunters invaders = self.invaders landmarks = self.landmarks distance = np.zeros(shape=(self.num_hunters, self.num_invaders), dtype=np.float32) distance_landmark = np.zeros(shape=(self.num_invaders, self.num_landmarks), dtype=np.float32) for b, B in enumerate(invaders): for a, A in enumerate(hunters): distance[a, b] = Norm(A.state.p_pos - B.state.p_pos) if B.live else np.inf for c, C in enumerate(landmarks): distance_landmark[b, c] = Norm(B.state.p_pos - C.state.p_pos) if B.live else np.inf return distance, distance_landmark def done(self, agent, world): win_cond1 = world.steps >= world.MaxEpisodeStep win_cond2 = self.threat_clear win_cond = win_cond1 or win_cond2 self.is_success = win_cond loss_cond = self.hunter_failed if loss_cond: win_cond = False # lose condition has higher priority done = win_cond or loss_cond if done: assert loss_cond != win_cond if agent.iden==0 and self.show_off and win_cond: print('hunt success') return done def load_obs(self, fragment): L = len(fragment) if isinstance(fragment, np.ndarray) else 1 # assert self.obs_pointer + L <= self.obs_dimension self.obs[self.obs_pointer:self.obs_pointer + L] = fragment # print('[%d ~ %d] filled / Total Length %d / total [0 ~ %d]'%(self.obs_pointer, self.obs_pointer + L -1, self.obs_pointer + L, self.obs_dimension-1)) self.obs_pointer = self.obs_pointer + L def check_obs(self): assert self.obs_pointer == self.obs_dimension def info(self, agent, world): return {'hunter_failed': self.hunter_failed, 'world_steps': world.steps, 'is_success': self.is_success} def make_world(self): self.num_good_agents = ScenarioConfig.hunter_num self.num_hunters = ScenarioConfig.hunter_num self.num_adversaries = ScenarioConfig.invader_num self.num_invaders = ScenarioConfig.invader_num self.num_landmarks = ScenarioConfig.num_landmarks world = World() # set any world properties first world.dim_c = 3 world.dim_p = 3 num_agents = self.num_agents # add agents, 包括 hunter 和 invader world.agents = [Agent(iden=i) for i in range(num_agents)] for i, agent in enumerate(world.agents): agent.name = 'agent %d' % i agent.collide = False # no collide any more agent.silent = True # 前面的 num_adversaries 是 Invader,剩下的是 Hunter, Invader is adversary if i < self.num_adversaries: agent.IsInvader = True agent.adversary = True agent.id_in_team = i agent.require_mannual_control = True else: agent.IsInvader = False agent.adversary = False agent.id_in_team = i - self.num_invaders agent.size = ScenarioConfig.Invader_Size if agent.IsInvader else ScenarioConfig.Hunter_Size # size 中的数值是半径 agent.accel = ScenarioConfig.Invader_Accel if agent.IsInvader else ScenarioConfig.Hunter_Accel agent.max_speed = self.Invader_MaxSpeed if agent.IsInvader else ScenarioConfig.Hunter_MaxSpeed agent.caught = False # agent.life = 1 agent.live = True agent.movable = True agent.live_adv = 1 agent.initial_mass = 14 # add landmarks world.landmarks = [Landmark() for i in range(self.num_landmarks)] for i, landmark in 
enumerate(world.landmarks): landmark.name = 'landmark %d' % i landmark.collide = False landmark.movable = False landmark.size = ScenarioConfig.Landmark_Size landmark.boundary = False # make initial conditions # self.reset_world(world) world.MaxEpisodeStep = ScenarioConfig.MaxEpisodeStep self.hunters = [agent for agent in world.agents if not agent.IsInvader] self.invaders = [agent for agent in world.agents if agent.IsInvader] self.landmarks = world.landmarks return world @staticmethod def rand(low, high): return np.random.rand() * (high - low) + low def landmark_spawn_position(self, landmark, world, theta=45 * np.pi / 180, phi = 45 * np.pi / 180): d = ScenarioConfig.landmark_spawn_limit offset = d * np.array([ np.sin(theta)*np.cos(phi), np.sin(theta)*np.sin(phi),
np.cos(theta)
numpy.cos
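The spawn helpers in this prompt place agents on a sphere of radius d around nest_center_pos by converting spherical angles to Cartesian offsets; note that process_invader_pos treats theta as an elevation angle (z uses sin(theta)), while landmark_spawn_position treats it as a polar angle measured from +z (z uses cos(theta)), which is why spawn_vip_landmark passes theta = pi/2 to put the landmarks in the horizontal plane. A small standalone check of both conventions, independent of the scenario classes (the function names are illustrative):

import numpy as np

def sphere_offset_elevation(d, theta, phi):
    # convention used in process_invader_pos: theta is elevation from the x-y plane
    return d * np.array([np.cos(theta) * np.cos(phi),
                         np.cos(theta) * np.sin(phi),
                         np.sin(theta)])

def sphere_offset_polar(d, theta, phi):
    # convention used in landmark_spawn_position: theta is the polar angle from +z
    return d * np.array([np.sin(theta) * np.cos(phi),
                         np.sin(theta) * np.sin(phi),
                         np.cos(theta)])

d, theta, phi = 4.0, np.pi / 6, np.pi / 3
for offset in (sphere_offset_elevation(d, theta, phi), sphere_offset_polar(d, theta, phi)):
    print(offset, np.linalg.norm(offset))  # both points lie on the radius-d sphere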
# coding: utf-8 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Dataset for 3D object detection on SUN RGB-D (with support of vote supervision). A sunrgbd oriented bounding box is parameterized by (cx,cy,cz), (l,w,h) -- (dx,dy,dz) in upright depth coord (Z is up, Y is forward, X is right ward), heading angle (from +X rotating to -Y) and semantic class Point clouds are in **upright_depth coordinate (X right, Y forward, Z upward)** Return heading class, heading residual, size class and size residual for 3D bounding boxes. Oriented bounding box is parameterized by (cx,cy,cz), (l,w,h), heading_angle and semantic class label. (cx,cy,cz) is in upright depth coordinate (l,h,w) are length of the object sizes The heading angle is a rotation rad from +X rotating towards -Y. (+X is 0, -Y is pi/2) Author: <NAME> Date: 2019 """ import os import sys import numpy as np from torch.utils.data import Dataset BASE_DIR = os.path.dirname(os.path.abspath(__file__)) ROOT_DIR = os.path.dirname(BASE_DIR) sys.path.append(BASE_DIR) sys.path.append(os.path.join(ROOT_DIR, 'utils')) import pc_util import sunrgbd_utils from sunrgbd_utils import extract_pc_in_box3d from model_util_sunrgbd import SunrgbdDatasetConfig DC = SunrgbdDatasetConfig() # dataset specific config MAX_NUM_OBJ = 64 # maximum number of objects allowed per scene MEAN_COLOR_RGB = np.array([0.5,0.5,0.5]) # sunrgbd color is in 0~1 DIST_THRESH = 0.1#0.2 VAR_THRESH = 5e-3 CENTER_THRESH = 0.1 LOWER_THRESH = 1e-6 NUM_POINT = 50 NUM_POINT_LINE = 10 LINE_THRESH = 0.1#0.2 MIND_THRESH = 0.1 NUM_POINT_SEM_THRESHOLD = 1 def check_upright(para_points): return (para_points[0][-1] == para_points[1][-1]) and (para_points[1][-1] == para_points[2][-1]) and (para_points[2][-1] == para_points[3][-1]) def check_z(plane_equ, para_points): return np.sum(para_points[:,2] + plane_equ[-1]) / 4.0 < LOWER_THRESH def clockwise2counter(angle): ''' @Args: angle: clockwise from x axis, from 0 to 2*pi, @Returns: theta: counter clockwise, -pi / 2 ~ pi / 2, +x~+y: (0, pi/2), +x~-y: (0, -pi/2) ''' return -((angle + np.pi / 2) % np.pi) + np.pi / 2; def point2line_dist(points, a, b): ''' @Args: points: (N, 3) a / b: (3,) @Returns: distance: (N,) ''' x = b - a t = np.dot(points - a, x) / np.dot(x, x) c = a + t[:, None] * np.tile(x, (t.shape[0], 1)) return np.linalg.norm(points - c, axis=1) def get_linesel(points, corners, direction): ''' corners: [[xmin, ymin, zmin], [xmin, ymin, zmax], [xmin, ymax, zmin], [xmin, ymax, zmax], [xmax, ymin, zmin], [xmax, ymin, zmax], [xmax, ymax, zmin], [xmax, ymax, zmax]] ''' if direction == 'lower': sel1 = point2line_dist(points, corners[0], corners[2]) < LINE_THRESH sel2 = point2line_dist(points, corners[4], corners[6]) < LINE_THRESH sel3 = point2line_dist(points, corners[0], corners[4]) < LINE_THRESH sel4 = point2line_dist(points, corners[2], corners[6]) < LINE_THRESH return sel1, sel2, sel3, sel4 elif direction == 'upper': sel1 = point2line_dist(points, corners[1], corners[3]) < LINE_THRESH sel2 = point2line_dist(points, corners[5], corners[7]) < LINE_THRESH sel3 = point2line_dist(points, corners[1], corners[5]) < LINE_THRESH sel4 = point2line_dist(points, corners[3], corners[7]) < LINE_THRESH return sel1, sel2, sel3, sel4 elif direction == 'left': sel1 = point2line_dist(points, corners[0], corners[1]) < LINE_THRESH sel2 = point2line_dist(points, corners[2], corners[3]) < LINE_THRESH return sel1, sel2 elif direction == 
'right': sel1 = point2line_dist(points, corners[4], corners[5]) < LINE_THRESH sel2 = point2line_dist(points, corners[6], corners[7]) < LINE_THRESH return sel1, sel2 else: AssertionError('direction = lower / upper / left') def get_linesel2(points, ymin, ymax, zmin, zmax, axis=0): #sel3 = sweep(points, axis, ymax, 2, zmin, zmax) #sel4 = sweep(points, axis, ymax, 2, zmin, zmax) sel3 = np.abs(points[:,axis] - ymin) < LINE_THRESH sel4 = np.abs(points[:,axis] - ymax) < LINE_THRESH return sel3, sel4 ''' ATTENTION: SUNRGBD, size_label is only half the actual size ''' def params2bbox(center, size, angle): ''' from bbox_center, angle and size to bbox @Args: center: (3,) size: (3,) angle: -pi ~ pi, +x~+y: (0, pi/2), +x~-y: (0, -pi/2) @Returns: bbox: 8 x 3, order: [[xmin, ymin, zmin], [xmin, ymin, zmax], [xmin, ymax, zmin], [xmin, ymax, zmax], [xmax, ymin, zmin], [xmax, ymin, zmax], [xmax, ymax, zmin], [xmax, ymax, zmax]] ''' xsize = size[0] ysize = size[1] zsize = size[2] vx = np.array([np.cos(angle), np.sin(angle), 0]) vy = np.array([-np.sin(angle), np.cos(angle), 0]) vx = vx * np.abs(xsize) / 2 vy = vy * np.abs(ysize) / 2 vz = np.array([0, 0, np.abs(zsize) / 2]) bbox = np.array([\ center - vx - vy - vz, center - vx - vy + vz, center - vx + vy - vz, center - vx + vy + vz, center + vx - vy - vz, center + vx - vy + vz, center + vx + vy - vz, center + vx + vy + vz]) return bbox class SunrgbdDetectionVotesDataset(Dataset): def __init__(self, data_path=None, split_set='train', num_points=20000, use_color=False, use_height=False, use_v1=False, augment=False, scan_idx_list=None): assert(num_points<=50000) self.use_v1 = use_v1 if use_v1: self.data_path = os.path.join(data_path, 'sunrgbd_pc_bbox_votes_50k_v1_' + split_set) # self.data_path = os.path.join('/scratch/cluster/yanght/Dataset/sunrgbd/sunrgbd_pc_bbox_votes_50k_v1_' + split_set) else: AssertionError("v2 data is not prepared") self.raw_data_path = os.path.join(ROOT_DIR, 'sunrgbd/sunrgbd_trainval') self.scan_names = sorted(list(set([os.path.basename(x)[0:6] \ for x in os.listdir(self.data_path)]))) if scan_idx_list is not None: self.scan_names = [self.scan_names[i] for i in scan_idx_list] self.num_points = num_points self.augment = augment self.use_color = use_color self.use_height = use_height def __len__(self): return len(self.scan_names) def __getitem__(self, idx): """ Returns a dict with following keys: point_clouds: (N,3+C) center_label: (MAX_NUM_OBJ,3) for GT box center XYZ heading_class_label: (MAX_NUM_OBJ,) with int values in 0,...,NUM_HEADING_BIN-1 heading_residual_label: (MAX_NUM_OBJ,) size_classe_label: (MAX_NUM_OBJ,) with int values in 0,...,NUM_SIZE_CLUSTER size_residual_label: (MAX_NUM_OBJ,3) sem_cls_label: (MAX_NUM_OBJ,) semantic class index box_label_mask: (MAX_NUM_OBJ) as 0/1 with 1 indicating a unique box vote_label: (N,9) with votes XYZ (3 votes: X1Y1Z1, X2Y2Z2, X3Y3Z3) if there is only one vote than X1==X2==X3 etc. vote_label_mask: (N,) with 0/1 with 1 indicating the point is in one of the object's OBB. 
scan_idx: int scan index in scan_names list max_gt_bboxes: unused """ scan_name = self.scan_names[idx] point_color_sem = np.load(os.path.join(self.data_path, scan_name)+'_pc.npz')['pc'] # Nx6 bboxes = np.load(os.path.join(self.data_path, scan_name)+'_bbox.npy') # K,8 point_votes = np.load(os.path.join(self.data_path, scan_name)+'_votes.npz')['point_votes'] # Nx10 semantics37 = point_color_sem[:, 6] semantics10 = np.array([DC.class37_2_class10[k] for k in semantics37]) semantics10_multi = [DC.class37_2_class10_multi[k] for k in semantics37] if not self.use_color: point_cloud = point_color_sem[:, 0:3] else: point_cloud = point_color_sem[:,0:6] point_cloud[:,3:6] = (point_color_sem[:,3:6]-MEAN_COLOR_RGB) if self.use_height: floor_height = np.percentile(point_cloud[:,2],0.99) height = point_cloud[:,2] - floor_height point_cloud = np.concatenate([point_cloud, np.expand_dims(height, 1)],1) # (N,4) or (N,7) # ------------------------------- DATA AUGMENTATION ------------------------------ if self.augment: if np.random.random() > 0.5: # Flipping along the YZ plane point_cloud[:,0] = -1 * point_cloud[:,0] bboxes[:,0] = -1 * bboxes[:,0] bboxes[:,6] = np.pi - bboxes[:,6] point_votes[:,[1,4,7]] = -1 * point_votes[:,[1,4,7]] # Rotation along up-axis/Z-axis rot_angle = (np.random.random()*np.pi/3) - np.pi/6 # -30 ~ +30 degree rot_mat = sunrgbd_utils.rotz(rot_angle) point_votes_end = np.zeros_like(point_votes) point_votes_end[:,1:4] = np.dot(point_cloud[:,0:3] + point_votes[:,1:4], np.transpose(rot_mat)) point_votes_end[:,4:7] = np.dot(point_cloud[:,0:3] + point_votes[:,4:7], np.transpose(rot_mat)) point_votes_end[:,7:10] = np.dot(point_cloud[:,0:3] + point_votes[:,7:10], np.transpose(rot_mat)) point_cloud[:,0:3] = np.dot(point_cloud[:,0:3], np.transpose(rot_mat)) bboxes[:,0:3] = np.dot(bboxes[:,0:3], np.transpose(rot_mat)) bboxes[:,6] -= rot_angle point_votes[:,1:4] = point_votes_end[:,1:4] - point_cloud[:,0:3] point_votes[:,4:7] = point_votes_end[:,4:7] - point_cloud[:,0:3] point_votes[:,7:10] = point_votes_end[:,7:10] - point_cloud[:,0:3] # Augment RGB color if self.use_color: rgb_color = point_cloud[:,3:6] + MEAN_COLOR_RGB rgb_color *= (1+0.4*np.random.random(3)-0.2) # brightness change for each channel rgb_color += (0.1*np.random.random(3)-0.05) # color shift for each channel rgb_color += np.expand_dims((0.05*np.random.random(point_cloud.shape[0])-0.025), -1) # jittering on each pixel rgb_color = np.clip(rgb_color, 0, 1) # randomly drop out 30% of the points' colors rgb_color *= np.expand_dims(np.random.random(point_cloud.shape[0])>0.3,-1) point_cloud[:,3:6] = rgb_color - MEAN_COLOR_RGB # Augment point cloud scale: 0.85x-1.15x scale_ratio = np.random.random()*0.3+0.85 scale_ratio = np.expand_dims(np.tile(scale_ratio,3),0) point_cloud[:,0:3] *= scale_ratio bboxes[:,0:3] *= scale_ratio bboxes[:,3:6] *= scale_ratio point_votes[:,1:4] *= scale_ratio point_votes[:,4:7] *= scale_ratio point_votes[:,7:10] *= scale_ratio if self.use_height: point_cloud[:,-1] *= scale_ratio[0,0] # ------------------------------- LABELS ------------------------------ box3d_centers = np.zeros((MAX_NUM_OBJ, 3)) box3d_sizes = np.zeros((MAX_NUM_OBJ, 3)) angle_classes = np.zeros((MAX_NUM_OBJ,)) angle_residuals = np.zeros((MAX_NUM_OBJ,)) size_classes = np.zeros((MAX_NUM_OBJ,)) size_residuals = np.zeros((MAX_NUM_OBJ, 3)) label_mask = np.zeros((MAX_NUM_OBJ)) label_mask[0:bboxes.shape[0]] = 1 max_bboxes = np.zeros((MAX_NUM_OBJ, 8)) max_bboxes[0:bboxes.shape[0],:] = bboxes # new items box3d_angles = np.zeros((MAX_NUM_OBJ,)) 
point_boundary_mask_z = np.zeros(self.num_points) point_boundary_mask_xy = np.zeros(self.num_points) point_boundary_offset_z = np.zeros([self.num_points, 3]) point_boundary_offset_xy = np.zeros([self.num_points, 3]) point_boundary_sem_z = np.zeros([self.num_points, 3+2+1]) point_boundary_sem_xy = np.zeros([self.num_points, 3+1+1]) point_line_mask = np.zeros(self.num_points) point_line_offset = np.zeros([self.num_points, 3]) point_line_sem = np.zeros([self.num_points, 3+1]) for i in range(bboxes.shape[0]): bbox = bboxes[i] semantic_class = bbox[7] box3d_center = bbox[0:3] angle_class, angle_residual = DC.angle2class(bbox[6]) # NOTE: The mean size stored in size2class is of full length of box edges, # while in sunrgbd_data.py data dumping we dumped *half* length l,w,h.. so have to time it by 2 here box3d_size = bbox[3:6]*2 size_class, size_residual = DC.size2class(box3d_size, DC.class2type[semantic_class]) box3d_centers[i,:] = box3d_center angle_classes[i] = angle_class angle_residuals[i] = angle_residual size_classes[i] = size_class size_residuals[i] = size_residual box3d_sizes[i,:] = box3d_size box3d_angles[i] = bbox[6] target_bboxes_mask = label_mask target_bboxes = np.zeros((MAX_NUM_OBJ, 6)) for i in range(bboxes.shape[0]): bbox = bboxes[i] corners_3d = sunrgbd_utils.my_compute_box_3d(bbox[0:3], bbox[3:6], bbox[6]) # compute axis aligned box xmin = np.min(corners_3d[:,0]) ymin = np.min(corners_3d[:,1]) zmin = np.min(corners_3d[:,2]) xmax = np.max(corners_3d[:,0]) ymax = np.max(corners_3d[:,1]) zmax = np.max(corners_3d[:,2]) target_bbox = np.array([(xmin+xmax)/2, (ymin+ymax)/2, (zmin+zmax)/2, xmax-xmin, ymax-ymin, zmax-zmin]) target_bboxes[i,:] = target_bbox point_cloud, choices = pc_util.random_sampling(point_cloud, self.num_points, return_choices=True) semantics37 = semantics37[choices] semantics10 = semantics10[choices] semantics10_multi = [semantics10_multi[i] for i in choices] point_votes_mask = point_votes[choices,0] point_votes = point_votes[choices,1:] # box angle is -pi to pi for i in range(bboxes.shape[0]): bbox = bboxes[i] corners = params2bbox(bbox[:3], 2 * bbox[3:6], clockwise2counter(bbox[6])) # corners_votenet = sunrgbd_utils.my_compute_box_3d(bbox[:3], bbox[3:6], bbox[6]) try: x_all_cls, ind_all_cls = extract_pc_in_box3d(point_cloud, corners) except: continue ind_all_cls = np.where(ind_all_cls)[0] # T/F to index # find point with same semantic as bbox, note semantics is 37 cls in sunrgbd # ind = ind_all_cls[np.where(semantics10[ind_all_cls] == bbox[7])[0]] ind = [] for j in ind_all_cls: if bbox[7] in semantics10_multi[j]: ind.append(j) ind = np.array(ind) if ind.shape[0] < NUM_POINT_SEM_THRESHOLD: pass else: x = point_cloud[ind, :3] ###Get bb planes and boundary points plane_lower_temp = np.array([0,0,1,-corners[6,-1]]) para_points = np.array([corners[1], corners[3], corners[5], corners[7]]) newd = np.sum(para_points * plane_lower_temp[:3], 1) if check_upright(para_points) and plane_lower_temp[0]+plane_lower_temp[1] < LOWER_THRESH: plane_lower = np.array([0,0,1,plane_lower_temp[-1]]) plane_upper = np.array([0,0,1,-np.mean(newd)]) else: import pdb;pdb.set_trace() print ("error with upright") if check_z(plane_upper, para_points) == False: import pdb;pdb.set_trace() ### Get the boundary points here #alldist = np.abs(np.sum(point_cloud[:,:3]*plane_lower[:3], 1) + plane_lower[-1]) alldist = np.abs(np.sum(x*plane_lower[:3], 1) + plane_lower[-1]) mind = np.min(alldist) #[count, val] = np.histogram(alldist, bins=20) #mind = val[np.argmax(count)] sel = np.abs(alldist - mind) < 
DIST_THRESH #sel = (np.abs(alldist - mind) < DIST_THRESH) & (point_cloud[:,0] >= xmin) & (point_cloud[:,0] <= xmax) & (point_cloud[:,1] >= ymin) & (point_cloud[:,1] <= ymax) ## Get lower four lines line_sel1, line_sel2, line_sel3, line_sel4 = get_linesel(x[sel], corners, 'lower') if np.sum(line_sel1) > NUM_POINT_LINE: point_line_mask[ind[sel][line_sel1]] = 1.0 linecenter = (corners[0] + corners[2]) / 2.0 point_line_offset[ind[sel][line_sel1]] = linecenter - x[sel][line_sel1] point_line_sem[ind[sel][line_sel1]] = np.array([linecenter[0], linecenter[1], linecenter[2], bbox[7]]) if np.sum(line_sel2) > NUM_POINT_LINE: point_line_mask[ind[sel][line_sel2]] = 1.0 linecenter = (corners[4] + corners[6]) / 2.0 point_line_offset[ind[sel][line_sel2]] = linecenter - x[sel][line_sel2] point_line_sem[ind[sel][line_sel2]] = np.array([linecenter[0], linecenter[1], linecenter[2], bbox[7]]) if np.sum(line_sel3) > NUM_POINT_LINE: point_line_mask[ind[sel][line_sel3]] = 1.0 linecenter = (corners[0] + corners[4]) / 2.0 point_line_offset[ind[sel][line_sel3]] = linecenter - x[sel][line_sel3] point_line_sem[ind[sel][line_sel3]] = np.array([linecenter[0], linecenter[1], linecenter[2], bbox[7]]) if np.sum(line_sel4) > NUM_POINT_LINE: point_line_mask[ind[sel][line_sel4]] = 1.0 linecenter = (corners[2] + corners[6]) / 2.0 point_line_offset[ind[sel][line_sel4]] = linecenter - x[sel][line_sel4] point_line_sem[ind[sel][line_sel4]] = np.array([linecenter[0], linecenter[1], linecenter[2], bbox[7]]) if np.sum(sel) > NUM_POINT and np.var(alldist[sel]) < VAR_THRESH: # center = np.array([(xmin+xmax)/2.0, (ymin+ymax)/2.0, np.mean(x[sel][:,2])]) center = (corners[0] + corners[6]) / 2.0 center[2] = np.mean(x[sel][:,2]) sel_global = ind[sel] point_boundary_mask_z[sel_global] = 1.0 point_boundary_sem_z[sel_global] = np.array([center[0], center[1], center[2], np.linalg.norm(corners[4] - corners[0]), np.linalg.norm(corners[2] - corners[0]), bbox[7]]) point_boundary_offset_z[sel_global] = center - x[sel] ''' ### Check for middle z surfaces [count, val] = np.histogram(alldist, bins=20) mind_middle = val[np.argmax(count)] sel_pre = np.copy(sel) sel = np.abs(alldist - mind_middle) < DIST_THRESH if np.abs(np.mean(x[sel_pre][:,2]) - np.mean(x[sel][:,2])) > MIND_THRESH: ### Do not use line for middle surfaces if np.sum(sel) > NUM_POINT and np.var(alldist[sel]) < VAR_THRESH: center = (corners[0] + corners[6]) / 2.0 center[2] = np.mean(x[sel][:,2]) # center = np.array([(xmin+xmax)/2.0, (ymin+ymax)/2.0, np.mean(x[sel][:,2])]) sel_global = ind[sel] point_boundary_mask_z[sel_global] = 1.0 point_boundary_sem_z[sel_global] = np.array([center[0], center[1], center[2], np.linalg.norm(corners[4] - corners[0]), np.linalg.norm(corners[2] - corners[0]), bbox[7]]) point_boundary_offset_z[sel_global] = center - x[sel] ''' ### Get the boundary points here alldist = np.abs(np.sum(x*plane_upper[:3], 1) + plane_upper[-1]) mind = np.min(alldist) #[count, val] = np.histogram(alldist, bins=20) #mind = val[np.argmax(count)] sel = np.abs(alldist - mind) < DIST_THRESH #sel = (np.abs(alldist - mind) < DIST_THRESH) & (point_cloud[:,0] >= xmin) & (point_cloud[:,0] <= xmax) & (point_cloud[:,1] >= ymin) & (point_cloud[:,1] <= ymax) ## Get upper four lines line_sel1, line_sel2, line_sel3, line_sel4 = get_linesel(x[sel], corners, 'upper') if np.sum(line_sel1) > NUM_POINT_LINE: point_line_mask[ind[sel][line_sel1]] = 1.0 linecenter = (corners[1] + corners[3]) / 2.0 point_line_offset[ind[sel][line_sel1]] = linecenter - x[sel][line_sel1] point_line_sem[ind[sel][line_sel1]] = 
np.array([linecenter[0], linecenter[1], linecenter[2], bbox[7]]) if np.sum(line_sel2) > NUM_POINT_LINE: point_line_mask[ind[sel][line_sel2]] = 1.0 linecenter = (corners[5] + corners[7]) / 2.0 point_line_offset[ind[sel][line_sel2]] = linecenter - x[sel][line_sel2] point_line_sem[ind[sel][line_sel2]] = np.array([linecenter[0], linecenter[1], linecenter[2], bbox[7]]) if np.sum(line_sel3) > NUM_POINT_LINE: point_line_mask[ind[sel][line_sel3]] = 1.0 linecenter = (corners[1] + corners[5]) / 2.0 point_line_offset[ind[sel][line_sel3]] = linecenter - x[sel][line_sel3] point_line_sem[ind[sel][line_sel3]] = np.array([linecenter[0], linecenter[1], linecenter[2], bbox[7]]) if np.sum(line_sel4) > NUM_POINT_LINE: point_line_mask[ind[sel][line_sel4]] = 1.0 linecenter = (corners[3] + corners[7]) / 2.0 point_line_offset[ind[sel][line_sel4]] = linecenter - x[sel][line_sel4] point_line_sem[ind[sel][line_sel4]] = np.array([linecenter[0], linecenter[1], linecenter[2], bbox[7]]) if np.sum(sel) > NUM_POINT and np.var(alldist[sel]) < VAR_THRESH: # center = np.array([(xmin+xmax)/2.0, (ymin+ymax)/2.0, np.mean(x[sel][:,2])]) center = (corners[1] + corners[7]) / 2.0 center[2] = np.mean(x[sel][:,2]) sel_global = ind[sel] point_boundary_mask_z[sel_global] = 1.0 point_boundary_sem_z[sel_global] = np.array([center[0], center[1], center[2], np.linalg.norm(corners[5] - corners[1]), np.linalg.norm(corners[3] - corners[1]), bbox[7]]) point_boundary_offset_z[sel_global] = center - x[sel] v1 = corners[3] - corners[2] v2 = corners[2] - corners[0] cp = np.cross(v1, v2) d = -np.dot(cp,corners[0]) a,b,c = cp plane_left_temp = np.array([a, b, c, d]) para_points = np.array([corners[4], corners[5], corners[6], corners[7]]) ### Normalize xy here plane_left_temp /= np.linalg.norm(plane_left_temp[:3]) newd = np.sum(para_points * plane_left_temp[:3], 1) if plane_left_temp[2] < LOWER_THRESH: plane_left = plane_left_temp#np.array([cls,res,tempsign,plane_left_temp[-1]]) plane_right = np.array([plane_left_temp[0], plane_left_temp[1], plane_left_temp[2], -np.mean(newd)]) else: import pdb;pdb.set_trace() print ("error with upright") ### Get the boundary points here alldist = np.abs(np.sum(x*plane_left[:3], 1) + plane_left[-1]) mind = np.min(alldist) #[count, val] = np.histogram(alldist, bins=20) #mind = val[np.argmax(count)] sel = np.abs(alldist - mind) < DIST_THRESH #sel = (np.abs(alldist - mind) < DIST_THRESH) & (point_cloud[:,2] >= zmin) & (point_cloud[:,2] <= zmax) & (point_cloud[:,1] >= ymin) & (point_cloud[:,1] <= ymax) ## Get upper four lines line_sel1, line_sel2 = get_linesel(x[sel], corners, 'left') if np.sum(line_sel1) > NUM_POINT_LINE: point_line_mask[ind[sel][line_sel1]] = 1.0 linecenter = (corners[0] + corners[1]) / 2.0 point_line_offset[ind[sel][line_sel1]] = linecenter - x[sel][line_sel1] point_line_sem[ind[sel][line_sel1]] = np.array([linecenter[0], linecenter[1], linecenter[2], bbox[7]]) if np.sum(line_sel2) > NUM_POINT_LINE: point_line_mask[ind[sel][line_sel2]] = 1.0 linecenter = (corners[2] + corners[3]) / 2.0 point_line_offset[ind[sel][line_sel2]] = linecenter - x[sel][line_sel2] point_line_sem[ind[sel][line_sel2]] = np.array([linecenter[0], linecenter[1], linecenter[2], bbox[7]]) if np.sum(sel) > NUM_POINT and np.var(alldist[sel]) < VAR_THRESH: # center = np.array([np.mean(x[sel][:,0]), np.mean(x[sel][:,1]), (zmin+zmax)/2.0]) center = np.array([np.mean(x[sel][:,0]), np.mean(x[sel][:,1]), (corners[0, 2] + corners[1, 2])/2.0]) sel_global = ind[sel] point_boundary_mask_xy[sel_global] = 1.0 # point_boundary_sem_xy[sel_global] = 
np.array([center[0], center[1], center[2], zmax - zmin, np.where(DC.nyu40ids == meta_vertices[ind[0],-1])[0][0]]) point_boundary_sem_xy[sel_global] = np.array([center[0], center[1], center[2], corners[1, 2] - corners[0, 2], bbox[7]]) point_boundary_offset_xy[sel_global] = center - x[sel] ''' [count, val] = np.histogram(alldist, bins=20) mind_middle = val[np.argmax(count)] #sel = (np.abs(alldist - mind) < DIST_THRESH) & (point_cloud[:,2] >= zmin) & (point_cloud[:,2] <= zmax) & (point_cloud[:,1] >= ymin) & (point_cloud[:,1] <= ymax) ## Get upper four lines sel_pre = np.copy(sel) sel = np.abs(alldist - mind_middle) < DIST_THRESH if np.abs(np.mean(x[sel_pre][:,0]) - np.mean(x[sel][:,0])) > MIND_THRESH: ### Do not use line for middle surfaces if np.sum(sel) > NUM_POINT and np.var(alldist[sel]) < VAR_THRESH: # center = np.array([np.mean(x[sel][:,0]), np.mean(x[sel][:,1]), (zmin+zmax)/2.0]) center = np.array([np.mean(x[sel][:,0]), np.mean(x[sel][:,1]), (corners[0, 2] + corners[1, 2])/2.0]) sel_global = ind[sel] point_boundary_mask_xy[sel_global] = 1.0 point_boundary_sem_xy[sel_global] = np.array([center[0], center[1], center[2], corners[1, 2] - corners[0, 2], bbox[7]]) point_boundary_offset_xy[sel_global] = center - x[sel] ''' ### Get the boundary points here alldist = np.abs(np.sum(x*plane_right[:3], 1) + plane_right[-1]) mind =
np.min(alldist)
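# ---------------------------------------------------------------------------
# Illustration only (not part of the dataset code above). The face/edge
# labelling in this loop repeatedly applies one primitive: compute each
# point's unsigned distance |a*x + b*y + c*z + d| to a box plane and keep
# the slab of points closest to that plane. A minimal sketch of that
# primitive, assuming `points` is an (N, 3) array and `plane` is [a, b, c, d]
# with a unit-length normal; `dist_thresh` plays the role of DIST_THRESH:
#
#   import numpy as np
#
#   def select_points_near_plane(points, plane, dist_thresh):
#       dist = np.abs(points @ plane[:3] + plane[3])    # point-to-plane distance
#       return np.abs(dist - dist.min()) < dist_thresh  # boolean mask of the nearest slab
# ---------------------------------------------------------------------------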
import cv2 import numpy as np import configparser from configparser import SafeConfigParser def frame_change(pos): global posicao, new posicao = pos new = True def sliders_update(val): global new new = True def main(): config = configparser.ConfigParser() config.read('config.ini') arquivo = config['default']['video'] font = cv2.FONT_HERSHEY_SIMPLEX posicao = 1 new = False capture = cv2.VideoCapture(arquivo) _, image = capture.read() image_line1 = np.hstack((image, image)) image_line2 = np.hstack((image, image)) image = np.vstack((image_line1, image_line2)) blur = int(config['default']['blur']) Bsize = int(config['default']['Bsize']) Hmin = int(config['default']['Hmin']) Hmax = int(config['default']['Hmax']) Smin = int(config['default']['Smin']) Smax = int(config['default']['Smax']) Vmin = int(config['default']['Vmin']) Vmax = int(config['default']['Vmax']) cv2.namedWindow("image") cv2.createTrackbar('Frame','image',0,int(capture.get(cv2.CAP_PROP_FRAME_COUNT)),frame_change) cv2.createTrackbar('Blur','image',blur,30,sliders_update) cv2.createTrackbar('Hmin','image',Hmin,100,sliders_update) cv2.createTrackbar('Hmax','image',Hmax,179,sliders_update) cv2.createTrackbar('Smin','image',Smin,255,sliders_update) cv2.createTrackbar('Smax','image',Smax,255,sliders_update) cv2.createTrackbar('Vmin','image',Vmin,255,sliders_update) cv2.createTrackbar('Vmax','image',Vmax,255,sliders_update) cv2.createTrackbar('Bsize','image',Bsize,50,sliders_update) rmin = Bsize - int(Bsize/3) # Raio minimo para ser considerado um objeto circular (em pixels) rmax = Bsize + int(Bsize/3) while True: if new: new = False capture.set(cv2.CAP_PROP_POS_FRAMES, posicao) _, image_raw = capture.read() blur = int(cv2.getTrackbarPos('Blur', 'image')) if blur%2 == 0: blur += 1 Hmin = int(cv2.getTrackbarPos('Hmin', 'image')) Hmax = int(cv2.getTrackbarPos('Hmax', 'image')) Smin = int(cv2.getTrackbarPos('Smin', 'image')) Smax = int(cv2.getTrackbarPos('Smax', 'image')) Vmin = int(cv2.getTrackbarPos('Vmin', 'image')) Vmax = int(cv2.getTrackbarPos('Vmax', 'image')) Bsize = int(cv2.getTrackbarPos('Bsize', 'image')) Hmax = int(cv2.getTrackbarPos('Hmax', 'image')) # TODO: max nao pode ser menor que min para H, S e V image_blur = cv2.blur(image_raw, (blur, blur)) image_hsv = cv2.cvtColor(image_blur, cv2.COLOR_BGR2HSV) image_thresh = cv2.inRange(image_hsv,np.array((Hmin, Smin, Vmin)),
np.array((Hmax, Smax, Vmax)))
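# ---------------------------------------------------------------------------
# Sketch (an assumption, not present in the script above): persisting the
# tuned slider values back to config.ini under the same 'default' keys the
# script reads at start-up, so a calibration session survives restarts.
#
#   def save_config(path='config.ini', **values):
#       cfg = configparser.ConfigParser()
#       cfg.read(path)                      # keeps the existing 'video' entry
#       for key, value in values.items():   # e.g. blur=5, Hmin=20, Hmax=40, ...
#           cfg['default'][key] = str(value)
#       with open(path, 'w') as f:
#           cfg.write(f)
# ---------------------------------------------------------------------------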
# This module has been generated automatically from space group information # obtained from the Computational Crystallography Toolbox # """ Space groups This module contains a list of all the 230 space groups that can occur in a crystal. The variable space_groups contains a dictionary that maps space group numbers and space group names to the corresponding space group objects. .. moduleauthor:: <NAME> <<EMAIL>> """ #----------------------------------------------------------------------------- # Copyright (C) 2013 The Mosaic Development Team # # Distributed under the terms of the BSD License. The full license is in # the file LICENSE.txt, distributed as part of this software. #----------------------------------------------------------------------------- import numpy as N class SpaceGroup(object): """ Space group All possible space group objects are created in this module. Other modules should access these objects through the dictionary space_groups rather than create their own space group objects. """ def __init__(self, number, symbol, transformations): """ :param number: the number assigned to the space group by international convention :type number: int :param symbol: the Hermann-Mauguin space-group symbol as used in PDB and mmCIF files :type symbol: str :param transformations: a list of space group transformations, each consisting of a tuple of three integer arrays (rot, tn, td), where rot is the rotation matrix and tn/td are the numerator and denominator of the translation vector. The transformations are defined in fractional coordinates. :type transformations: list """ self.number = number self.symbol = symbol self.transformations = transformations self.transposed_rotations = N.array([N.transpose(t[0]) for t in transformations]) self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2] for t in transformations])) def __repr__(self): return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol)) def __len__(self): """ :return: the number of space group transformations :rtype: int """ return len(self.transformations) def symmetryEquivalentMillerIndices(self, hkl): """ :param hkl: a set of Miller indices :type hkl: Scientific.N.array_type :return: a tuple (miller_indices, phase_factor) of two arrays of length equal to the number of space group transformations. miller_indices contains the Miller indices of each reflection equivalent by symmetry to the reflection hkl (including hkl itself as the first element). phase_factor contains the phase factors that must be applied to the structure factor of reflection hkl to obtain the structure factor of the symmetry equivalent reflection. 
:rtype: tuple """ hkls = N.dot(self.transposed_rotations, hkl) p = N.multiply.reduce(self.phase_factors**hkl, -1) return hkls, p space_groups = {} transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(1, 'P 1', transformations) space_groups[1] = sg space_groups['P 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(2, 'P -1', transformations) space_groups[2] = sg space_groups['P -1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(3, 'P 1 2 1', transformations) space_groups[3] = sg space_groups['P 1 2 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(4, 'P 1 21 1', transformations) space_groups[4] = sg space_groups['P 1 21 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(5, 'C 1 2 1', transformations) space_groups[5] = sg space_groups['C 1 2 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(6, 'P 1 m 1', transformations) space_groups[6] = sg space_groups['P 1 m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(7, 'P 1 c 1', transformations) space_groups[7] = sg space_groups['P 1 c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = 
N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(8, 'C 1 m 1', transformations) space_groups[8] = sg space_groups['C 1 m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(9, 'C 1 c 1', transformations) space_groups[9] = sg space_groups['C 1 c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(10, 'P 1 2/m 1', transformations) space_groups[10] = sg space_groups['P 1 2/m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(11, 'P 1 21/m 1', transformations) space_groups[11] = sg space_groups['P 1 21/m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(12, 'C 1 2/m 1', transformations) space_groups[12] = sg space_groups['C 1 2/m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(13, 'P 1 2/c 1', transformations) space_groups[13] = sg space_groups['P 1 2/c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(14, 'P 1 21/c 1', transformations) space_groups[14] = sg space_groups['P 1 21/c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) 
rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(15, 'C 1 2/c 1', transformations) space_groups[15] = sg space_groups['C 1 2/c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(16, 'P 2 2 2', transformations) space_groups[16] = sg space_groups['P 2 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(17, 'P 2 2 21', transformations) space_groups[17] = sg space_groups['P 2 2 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(18, 'P 21 21 2', transformations) space_groups[18] = sg space_groups['P 21 21 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(19, 'P 21 21 21', transformations) space_groups[19] = sg space_groups['P 21 21 21'] = sg transformations 
= [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(20, 'C 2 2 21', transformations) space_groups[20] = sg space_groups['C 2 2 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(21, 'C 2 2 2', transformations) space_groups[21] = sg space_groups['C 2 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(22, 'F 2 2 2', transformations) space_groups[22] = sg space_groups['F 2 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(23, 'I 2 2 2', transformations) space_groups[23] = sg space_groups['I 2 2 2'] = sg 
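# ---------------------------------------------------------------------------
# Illustrative helper (not part of the generated tables above): shows how the
# space_groups dictionary and symmetryEquivalentMillerIndices are intended to
# be used, as described in the class docstring. The symbol and reflection are
# arbitrary examples; 'P 21 21 21' is space group number 19, defined above.
def _example_equivalent_reflections(hkl=(1, 2, 3), symbol='P 21 21 21'):
    """Return the symmetry-equivalent Miller indices and their phase factors."""
    sg = space_groups[symbol]                 # same object as space_groups[19]
    return sg.symmetryEquivalentMillerIndices(N.array(hkl))
# ---------------------------------------------------------------------------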
transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(24, 'I 21 21 21', transformations) space_groups[24] = sg space_groups['I 21 21 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(25, 'P m m 2', transformations) space_groups[25] = sg space_groups['P m m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(26, 'P m c 21', transformations) space_groups[26] = sg space_groups['P m c 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(27, 'P c c 2', transformations) space_groups[27] = sg space_groups['P c c 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(28, 'P m a 2', transformations) space_groups[28] = sg space_groups['P m a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(29, 'P c a 21', transformations) space_groups[29] = sg space_groups['P c a 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(30, 'P n c 2', transformations) space_groups[30] = sg space_groups['P n c 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(31, 'P m n 21', transformations) space_groups[31] = sg space_groups['P m n 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(32, 'P b a 2', transformations) space_groups[32] = sg space_groups['P b a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(33, 'P n a 21', transformations) space_groups[33] = sg space_groups['P n a 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(34, 'P n n 2', transformations) space_groups[34] = sg space_groups['P n n 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(35, 'C m m 2', 
transformations) space_groups[35] = sg space_groups['C m m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(36, 'C m c 21', transformations) space_groups[36] = sg space_groups['C m c 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(37, 'C c c 2', transformations) space_groups[37] = sg space_groups['C c c 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) 
trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(38, 'A m m 2', transformations) space_groups[38] = sg space_groups['A m m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(39, 'A b m 2', transformations) space_groups[39] = sg space_groups['A b m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = 
N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(40, 'A m a 2', transformations) space_groups[40] = sg space_groups['A m a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(41, 'A b a 2', transformations) space_groups[41] = sg space_groups['A b a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = 
N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(42, 'F m m 2', transformations) space_groups[42] = sg space_groups['F m m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(43, 'F d d 2', transformations) space_groups[43] 
= sg space_groups['F d d 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(44, 'I m m 2', transformations) space_groups[44] = sg space_groups['I m m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(45, 'I b a 2', transformations) space_groups[45] = sg space_groups['I b a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) 
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(48, 'P n n n :2', transformations) space_groups[48] = sg space_groups['P n n n :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(49, 'P c c m', transformations) space_groups[49] = sg space_groups['P c c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(50, 'P b a n :2', transformations) space_groups[50] = sg space_groups['P b a n :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, 
trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(51, 'P m m a', transformations) space_groups[51] = sg space_groups['P m m a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(52, 'P n n a', transformations) space_groups[52] = sg space_groups['P n n a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(53, 'P m n a', transformations) space_groups[53] = sg space_groups['P m n a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(54, 'P c c a', transformations) space_groups[54] = sg space_groups['P c c a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(55, 'P b a m', transformations) space_groups[55] = sg space_groups['P b a m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(56, 'P c c n', transformations) space_groups[56] = sg space_groups['P c c n'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(57, 'P b c m', transformations) space_groups[57] = sg space_groups['P b c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, 
trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(58, 'P n n m', transformations) space_groups[58] = sg space_groups['P n n m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(59, 'P m m n :2', transformations) space_groups[59] = sg space_groups['P m m n :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(60, 'P b c n', transformations) space_groups[60] = sg space_groups['P b c n'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(61, 'P b c a', transformations) space_groups[61] = sg space_groups['P b c a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(62, 'P n m a', transformations) space_groups[62] = sg space_groups['P n m a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(63, 'C m c m', transformations) space_groups[63] = sg space_groups['C m c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = 
N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(64, 'C m c a', transformations) space_groups[64] = sg space_groups['C m c a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 
3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(65, 'C m m m', transformations) space_groups[65] = sg space_groups['C m m m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(66, 'C c c m', transformations) space_groups[66] = sg space_groups['C c c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 
3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(67, 'C m m a', transformations) space_groups[67] = sg space_groups['C m m a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(68, 'C c c a :2', transformations) space_groups[68] = sg space_groups['C c c a :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(69, 'F m m m', transformations) space_groups[69] = sg space_groups['F m m m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = 
N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,3,3]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([4,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([4,4,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,1,1]) trans_den = N.array([4,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,1,1]) trans_den = N.array([4,4,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([2,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([3,0,3]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,1,1]) trans_den = N.array([4,4,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,-1,1]) trans_den = N.array([2,4,4]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,-1,1]) trans_den = N.array([4,4,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([2,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([3,1,1]) trans_den = N.array([4,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,3,0]) trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([2,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([4,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(70, 'F d d d :2', transformations) space_groups[70] = sg space_groups['F d d d :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num 
= N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(71, 'I m m m', transformations) space_groups[71] = sg space_groups['I m m m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(72, 'I b a m', transformations) space_groups[72] = sg space_groups['I b a m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(73, 'I b c a', transformations) space_groups[73] = sg space_groups['I b c a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(74, 'I m m a', transformations) space_groups[74] = sg space_groups['I m m a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(75, 'P 4', transformations) space_groups[75] = sg space_groups['P 4'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,3]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(76, 'P 41', transformations) space_groups[76] = sg space_groups['P 41'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(77, 'P 42', transformations) space_groups[77] = sg space_groups['P 42'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,3]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(78, 'P 43', transformations) space_groups[78] = sg space_groups['P 43'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(79, 'I 4', transformations) space_groups[79] = sg space_groups['I 4'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(80, 'I 41', transformations) space_groups[80] = sg space_groups['I 41'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(81, 'P -4', transformations) space_groups[81] = sg space_groups['P -4'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(82, 'I -4', transformations) space_groups[82] = sg space_groups['I -4'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(83, 'P 4/m', transformations) space_groups[83] = sg space_groups['P 4/m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(84, 'P 42/m', transformations) space_groups[84] = sg space_groups['P 42/m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(85, 'P 4/n :2', transformations) space_groups[85] = sg space_groups['P 4/n :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(86, 'P 42/n :2', transformations) space_groups[86] = sg space_groups['P 42/n :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(87, 'I 4/m', transformations) space_groups[87] = sg space_groups['I 4/m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-3,-3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,5,5]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,-1,-1]) trans_den = 
N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(88, 'I 41/a :2', transformations) space_groups[88] = sg space_groups['I 41/a :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(89, 'P 4 2 2', transformations) space_groups[89] = sg space_groups['P 4 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(90, 'P 4 21 2', transformations) space_groups[90] = sg space_groups['P 4 21 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, 
trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,3]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,3]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(91, 'P 41 2 2', transformations) space_groups[91] = sg space_groups['P 41 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(92, 'P 41 21 2', transformations) space_groups[92] = sg space_groups['P 41 21 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(93, 'P 42 2 2', transformations) space_groups[93] = sg space_groups['P 42 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(94, 'P 42 21 2', transformations) space_groups[94] = sg space_groups['P 42 21 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,3]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,3]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(95, 'P 43 2 2', transformations) space_groups[95] = sg space_groups['P 43 2 2'] = sg 
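# ---------------------------------------------------------------------------
# Illustrative sketch only (assumptions flagged below): the tuples collected
# in each ``transformations`` list above appear to encode a symmetry
# operation on fractional coordinates as  x' = rot . x + trans_num/trans_den.
# The helper below relies only on that tuple layout and on ``N`` being the
# array package imported at the top of this module; it does not assume
# anything about the SpaceGroup class itself, which is defined elsewhere.
# The helper name is an illustrative choice, not part of the generated table.
def _apply_symmetry_operation(transformation, point):
    """Apply one (rot, trans_num, trans_den) tuple to a fractional point."""
    rot, trans_num, trans_den = transformation
    # Cast the translation to floating point so the division is exact for
    # integer numerator/denominator arrays.
    translation = N.array(trans_num, 'd') / N.array(trans_den, 'd')
    return N.dot(rot, point) + translation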
transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(96, 'P 43 21 2', transformations) space_groups[96] = sg space_groups['P 43 21 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = 
N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(97, 'I 4 2 2', transformations) space_groups[97] = sg space_groups['I 4 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(98, 'I 41 2 2', transformations) space_groups[98] = sg space_groups['I 41 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = 
N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(99, 'P 4 m m', transformations) space_groups[99] = sg space_groups['P 4 m m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(100, 'P 4 b m', transformations) space_groups[100] = sg space_groups['P 4 b m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = 
N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(101, 'P 42 c m', transformations) space_groups[101] = sg space_groups['P 42 c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(102, 'P 42 n m', transformations) space_groups[102] = sg space_groups['P 42 n m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(103, 'P 4 c c', transformations) space_groups[103] = sg space_groups['P 4 c c'] = sg 
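# ---------------------------------------------------------------------------
# Illustrative sketch building on _apply_symmetry_operation above: expand one
# fractional position into its orbit under a ``transformations`` list of the
# shape assembled in this file.  N.floor is used to wrap each image back into
# the [0, 1) unit cell; drop that step if unreduced coordinates are wanted.
# The helper name and the wrapping convention are illustrative choices, not
# part of the generated table.
def _equivalent_positions(transformations, point):
    """Apply every (rot, trans_num, trans_den) tuple to ``point``."""
    positions = []
    for transformation in transformations:
        image = _apply_symmetry_operation(transformation, point)
        positions.append(image - N.floor(image))
    return positions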
transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(104, 'P 4 n c', transformations) space_groups[104] = sg space_groups['P 4 n c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(105, 'P 42 m c', transformations) space_groups[105] = sg space_groups['P 42 m c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(106, 'P 42 b c', transformations) space_groups[106] = sg space_groups['P 42 b c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(107, 'I 4 m m', transformations) space_groups[107] = sg space_groups['I 4 m m'] = sg transformations = 
[] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(108, 'I 4 c m', transformations) space_groups[108] = sg space_groups['I 4 c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(109, 'I 41 m d', transformations) space_groups[109] = sg space_groups['I 41 m d'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = 
N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(110, 'I 41 c d', transformations) space_groups[110] = sg space_groups['I 41 c d'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(111, 'P -4 2 m', transformations) space_groups[111] = sg space_groups['P -4 2 m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num 
= N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(112, 'P -4 2 c', transformations) space_groups[112] = sg space_groups['P -4 2 c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(113, 'P -4 21 m', transformations) space_groups[113] = sg space_groups['P -4 21 m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(114, 'P -4 21 c', transformations) space_groups[114] = sg space_groups['P -4 21 c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = 
N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(115, 'P -4 m 2', transformations) space_groups[115] = sg space_groups['P -4 m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(116, 'P -4 c 2', transformations) space_groups[116] = sg space_groups['P -4 c 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) 
trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(117, 'P -4 b 2', transformations) space_groups[117] = sg space_groups['P -4 b 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(118, 'P -4 n 2', transformations) space_groups[118] = sg space_groups['P -4 n 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = 
N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(119, 'I -4 m 2', transformations) space_groups[119] = sg space_groups['I -4 m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(120, 'I -4 c 2', transformations) space_groups[120] = sg space_groups['I -4 c 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(121, 'I -4 2 m', transformations) space_groups[121] = sg space_groups['I -4 2 m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot 
= N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(122, 'I -4 2 d', transformations) space_groups[122] = sg space_groups['I -4 2 d'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(123, 'P 4/m m m', transformations) space_groups[123] = sg space_groups['P 4/m m m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) 
trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(124, 'P 4/m c c', transformations) space_groups[124] = sg space_groups['P 4/m c c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) 
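# --- Illustrative sketch (editor's addition; not part of the generated table) ---
# Each tuple appended to `transformations` is (rot, trans_num, trans_den): a
# 3x3 rotation matrix together with a fractional translation stored as an
# integer numerator and denominator.  Keeping numerator and denominator
# separate keeps the translation exact (1/2, 3/4, ...) rather than a rounded
# float.  Assuming N is the numpy/Numeric module imported at the top of this
# file, such an operation would typically act on a point given in fractional
# coordinates as rot * point + trans_num / trans_den.  The helper name below
# is hypothetical and used for demonstration only, e.g.
#     _apply_symmetry_operation(transformations[1], N.array([0.1, 0.2, 0.3]))
def _apply_symmetry_operation(transformation, point):
    """Return rot * point + trans_num / trans_den for a fractional-coordinate point."""
    rot, trans_num, trans_den = transformation
    # 1.0 * trans_num forces float division of the exact fraction
    return N.dot(rot, point) + (1.0 * trans_num) / trans_den
# --- end of illustrative sketch ---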
rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(125, 'P 4/n b m :2', transformations) space_groups[125] = sg space_groups['P 4/n b m :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(126, 'P 4/n n c :2', transformations) space_groups[126] = sg space_groups['P 4/n n c :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(127, 'P 4/m b m', transformations) space_groups[127] = sg space_groups['P 4/m b m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) 
trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(128, 'P 4/m n c', transformations) space_groups[128] = sg space_groups['P 4/m n c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(129, 'P 4/n m m :2', transformations) space_groups[129] = sg space_groups['P 4/n m m :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(130, 'P 4/n c c :2', transformations) space_groups[130] = sg 
space_groups['P 4/n c c :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(131, 'P 42/m m c', transformations) space_groups[131] = sg space_groups['P 42/m m c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) 
trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(132, 'P 42/m c m', transformations) space_groups[132] = sg space_groups['P 42/m c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) 
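# --- Illustrative sketch (editor's addition): the symmetry-equivalent images
# of a point under one of these space groups can be generated from its list of
# (rot, trans_num, trans_den) tuples and wrapped back into the unit cell.
# Function name and placement are illustrative only; N is assumed to be the
# numpy/Numeric module used throughout this file.
def _equivalent_positions(operations, point):
    """Return the images of `point` (fractional coordinates) under every operation."""
    images = []
    for rot, trans_num, trans_den in operations:
        pos = N.dot(rot, point) + (1.0 * trans_num) / trans_den
        images.append(pos - N.floor(pos))  # wrap each coordinate into [0, 1)
    return images
# --- end of illustrative sketch ---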
rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(133, 'P 42/n b c :2', transformations) space_groups[133] = sg space_groups['P 42/n b c :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) 
trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(134, 'P 42/n n m :2', transformations) space_groups[134] = sg space_groups['P 42/n n m :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(135, 'P 42/m b c', transformations) space_groups[135] = sg space_groups['P 42/m b c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) 
trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(136, 'P 42/m n m', transformations) space_groups[136] = sg space_groups['P 42/m n m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(137, 'P 42/n m c :2', transformations) space_groups[137] = sg space_groups['P 42/n m c :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = 
N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(138, 'P 42/n c m :2', transformations) space_groups[138] = sg space_groups['P 42/n c m :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot 
= N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(139, 'I 4/m m m', transformations) space_groups[139] = sg space_groups['I 4/m m m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(140, 'I 4/m c m', transformations) space_groups[140] = sg space_groups['I 4/m c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-3,-1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-3,-1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,5,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,3,5]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([3,5,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([3,3,5]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,-1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,-1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(141, 'I 41/a m d :2', transformations) space_groups[141] = sg space_groups['I 41/a m d :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 
3) trans_num = N.array([1,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-3,-1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-3,-3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,5,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,3,5]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([3,5,5]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([3,3,3]) trans_den = N.array([4,4,4]) 
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(148, 'R -3 :H', transformations) space_groups[148] = sg space_groups['R -3 :H'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(149, 'P 3 1 2', transformations) space_groups[149] = sg space_groups['P 3 1 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(150, 'P 3 2 1', transformations) space_groups[150] = sg space_groups['P 3 2 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(151, 'P 31 1 2', transformations) space_groups[151] = sg space_groups['P 31 1 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(152, 'P 31 2 1', transformations) space_groups[152] = sg space_groups['P 31 2 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) 
rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(153, 'P 32 1 2', transformations) space_groups[153] = sg space_groups['P 32 1 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(154, 'P 32 2 1', transformations) space_groups[154] = sg space_groups['P 32 2 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(155, 'R 3 2 :H', transformations) space_groups[155] = sg space_groups['R 3 2 :H'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(156, 'P 3 m 1', transformations) space_groups[156] = sg space_groups['P 3 m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(157, 'P 3 1 m', transformations) space_groups[157] = sg space_groups['P 3 1 m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = 
(3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(158, 'P 3 c 1', transformations) space_groups[158] = sg space_groups['P 3 c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(159, 'P 3 1 c', transformations) space_groups[159] = sg space_groups['P 3 1 c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = 
(3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(160, 'R 3 m :H', transformations) space_groups[160] = sg space_groups['R 3 m :H'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,7]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,7]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,7]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,5]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,5]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,5]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(161, 'R 3 c :H', transformations) space_groups[161] = sg space_groups['R 3 c :H'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(162, 'P -3 1 m', transformations) space_groups[162] = sg space_groups['P -3 1 m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) 
rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(163, 'P -3 1 c', transformations) space_groups[163] = sg space_groups['P -3 1 c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = 
N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(164, 'P -3 m 1', transformations) space_groups[164] = sg space_groups['P -3 m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(165, 'P -3 c 1', transformations) space_groups[165] = sg space_groups['P -3 c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) 
trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(166, 'R -3 m :H', transformations) space_groups[166] = sg space_groups['R -3 m :H'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den 
= N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,7]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,7]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,7]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,1]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,1]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,1]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,5]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,5]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,5]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) 
rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,-1]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,-1]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,-1]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(167, 'R -3 c :H', transformations) space_groups[167] = sg space_groups['R -3 c :H'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(168, 'P 6', transformations) space_groups[168] = sg space_groups['P 6'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,5]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(169, 'P 61', transformations) space_groups[169] = sg space_groups['P 61'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,5]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) 
trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(170, 'P 65', transformations) space_groups[170] = sg space_groups['P 65'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(171, 'P 62', transformations) space_groups[171] = sg space_groups['P 62'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(172, 'P 64', transformations) space_groups[172] = sg space_groups['P 64'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) 
transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(173, 'P 63', transformations) space_groups[173] = sg space_groups['P 63'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(174, 'P -6', transformations) space_groups[174] = sg space_groups['P -6'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(175, 'P 6/m', transformations) space_groups[175] = sg space_groups['P 6/m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot 
= N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(176, 'P 63/m', transformations) space_groups[176] = sg space_groups['P 63/m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(177, 'P 6 2 2', transformations) space_groups[177] = sg space_groups['P 6 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,5]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,5]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(178, 'P 61 2 2', transformations) space_groups[178] = sg space_groups['P 61 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,5]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = 
N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,5]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(179, 'P 65 2 2', transformations) space_groups[179] = sg space_groups['P 65 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(180, 'P 62 2 2', transformations) space_groups[180] = sg space_groups['P 62 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) 
trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(181, 'P 64 2 2', transformations) space_groups[181] = sg space_groups['P 64 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(182, 'P 63 2 2', transformations) space_groups[182] = sg space_groups['P 63 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(183, 'P 6 m m', transformations) space_groups[183] = sg space_groups['P 6 m m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(184, 'P 6 c c', transformations) space_groups[184] = sg space_groups['P 6 c c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(185, 'P 63 c m', transformations) space_groups[185] = sg space_groups['P 63 c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(186, 'P 63 m c', transformations) space_groups[186] = sg space_groups['P 63 m c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(187, 'P -6 m 2', transformations) space_groups[187] = sg space_groups['P -6 m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(188, 'P -6 c 2', transformations) space_groups[188] = sg space_groups['P -6 c 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = 
N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(189, 'P -6 2 m', transformations) space_groups[189] = sg space_groups['P -6 2 m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(190, 'P -6 2 c', transformations) space_groups[190] = sg space_groups['P -6 2 c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) 
trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, 
trans_den)) sg = SpaceGroup(191, 'P 6/m m m', transformations) space_groups[191] = sg space_groups['P 6/m m m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den 
= N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(192, 'P 6/m c c', transformations) space_groups[192] = sg space_groups['P 6/m c c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(193, 'P 63/m c m', transformations) space_groups[193] = sg space_groups['P 63/m c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = 
N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(194, 'P 63/m m c', transformations)
space_groups[194] = sg
space_groups['P 63/m m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(195, 'P 2 3', transformations)
space_groups[195] = sg
space_groups['P 2 3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
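# ---------------------------------------------------------------------------
# Illustrative aside (not part of the generated space-group table above).
# Each tuple appended to `transformations` is (rot, trans_num, trans_den):
# a 3x3 rotation matrix plus a fractional translation stored as integer
# numerators and denominators. The minimal sketch below shows how such an
# operation could be applied to a fractional coordinate. It assumes `N` is
# the numeric array module used throughout this file; the helper name
# `_apply_symmetry_op` is hypothetical and serves only as an illustration.
def _apply_symmetry_op(op, point):
    """Return rot * point + trans_num / trans_den for one symmetry operation."""
    rot, trans_num, trans_den = op
    rotated = N.dot(rot, point)
    translation = [float(n) / d for n, d in zip(trans_num, trans_den)]
    return [float(x) + t for x, t in zip(rotated, translation)]
# Example use (whether a SpaceGroup instance exposes its operations as
# `.transformations` depends on the SpaceGroup class defined elsewhere in
# this module, so treat that attribute access as an assumption):
#     sg = space_groups['P 2 3']
#     images = [_apply_symmetry_op(op, [0.1, 0.2, 0.3])
#               for op in sg.transformations]
# ---------------------------------------------------------------------------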
rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = 
N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(196, 'F 2 3', transformations) space_groups[196] = sg space_groups['F 2 3'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(197, 'I 2 3', transformations) space_groups[197] = sg space_groups['I 2 3'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(198, 'P 21 3', transformations) space_groups[198] = sg space_groups['P 21 3'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = 
N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(199, 'I 21 3', transformations) space_groups[199] = sg space_groups['I 21 3'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([0,1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(200, 'P m -3', transformations) space_groups[200] = sg space_groups['P m -3'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(201, 'P n -3 :2', transformations) space_groups[201] = sg space_groups['P n -3 :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = 
(3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = 
N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, 
trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(202, 'F m -3', transformations) space_groups[202] = sg space_groups['F m -3'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) 
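# --- Illustrative helper (not part of the generated space-group data) ---
# Each (rot, trans_num, trans_den) tuple appended to `transformations` is
# assumed to encode the symmetry operation
#     x' = rot . x + trans_num / trans_den
# acting on fractional coordinates, with the translation kept as exact
# integer numerator/denominator vectors.  The sketch below shows how such an
# operation could be applied; it assumes `N` (used throughout this file)
# exposes the usual NumPy/Numeric `dot` and array arithmetic, and
# `_apply_symmetry_op` is a hypothetical name introduced here only for
# illustration.
def _apply_symmetry_op(rot, trans_num, trans_den, point):
    """Apply one (rot, trans_num, trans_den) operation to fractional coordinates."""
    # Multiplying by 1.0 forces float division of the exact integer translation.
    return N.dot(rot, point) + (1.0 * trans_num) / trans_den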
trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([4,4,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,3,3]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([4,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([4,4,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([4,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,3,3]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,3,3]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([4,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([4,4,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([-1,1,1]) trans_den = N.array([4,4,2]) transformations.append((rot, 
trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([-1,1,1]) trans_den = N.array([4,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([-1,1,1]) trans_den = N.array([4,4,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([-1,1,1]) trans_den = N.array([4,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,1,1]) trans_den = N.array([4,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,1,1]) trans_den = N.array([4,4,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([3,1,1]) trans_den = N.array([4,4,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([2,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([3,0,3]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([3,1,1]) trans_den = N.array([4,4,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([3,0,3]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([2,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([2,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([3,0,3]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,1,1]) trans_den = N.array([4,4,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,-1,0]) rot.shape = (3, 
3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,-1,1]) trans_den = N.array([4,4,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,-1,1]) trans_den = N.array([2,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,-1,1]) trans_den = N.array([4,4,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,-1,1]) trans_den = N.array([2,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,-1,1]) trans_den = N.array([2,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,-1,1]) trans_den = N.array([4,4,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([3,3,0]) trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([2,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([3,1,1]) trans_den = N.array([4,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([3,3,0]) trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([3,1,1]) trans_den = N.array([4,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([2,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([2,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([3,1,1]) trans_den = N.array([4,2,4]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,3,0]) trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([2,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([4,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([4,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([2,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([2,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([4,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(203, 'F d -3 :2', transformations) space_groups[203] = sg space_groups['F d -3 :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) 
trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) 
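# NOTE (descriptive comment, not generated data): the operations that follow
# repeat the 24 point-group operations just listed with an added translation
# of (1/2, 1/2, 1/2) -- the body-centering vector of the I lattice -- before
# the block is registered a little further on as space group 204, 'I m -3'.
# A centred setting simply lists every operation once per centering vector.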
transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(204, 'I m -3', transformations) space_groups[204] = sg space_groups['I m -3'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = 
N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) 
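# NOTE (descriptive comment, not generated data): every block in this module
# ends the same way: the accumulated `transformations` list is wrapped in a
# SpaceGroup and registered under both its International Tables number and
# its Hermann-Mauguin symbol.  For example, the block finishing just below is
# stored as both space_groups[205] and space_groups['P a -3'], so callers can
# look a group up by either key:
#
#     sg_205 = space_groups['P a -3']   # same object as space_groups[205]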
transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(205, 'P a -3', transformations) space_groups[205] = sg space_groups['P a -3'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num 
= N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(206, 'I a -3', transformations) space_groups[206] = sg space_groups['I a -3'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = 
N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(207, 'P 4 3 2', transformations) space_groups[207] = sg space_groups['P 4 3 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(208, 'P 42 3 2', transformations) space_groups[208] = sg space_groups['P 42 3 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) 
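# NOTE (illustrative check, not part of the generated data): in the lattice
# basis the rotation parts collected in these blocks are integer matrices
# with determinant +1 or -1.  A quick consistency check over a finished block
# could look like the sketch below (assuming `N` provides a NumPy-compatible
# `linalg.det`, which may not hold for older Numeric-based builds):
#
#     for rot, trans_num, trans_den in transformations:
#         assert round(N.linalg.det(rot)) in (1, -1)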
transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([0,0,-1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) 
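# NOTE (illustrative sketch, not part of the generated data): the face-centred
# group completed a few lines below ('F 4 3 2') repeats the same rotations
# with the centering translations (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0).
# If a single 4x4 augmented (Seitz-style) matrix is more convenient than the
# (rot, trans_num, trans_den) triple, one could build it roughly as follows;
# `_seitz_matrix` is a hypothetical name and the code assumes `N` behaves
# like NumPy here (floating-point translations):
#
#     def _seitz_matrix(rot, trans_num, trans_den):
#         m = N.zeros((4, 4), float)
#         m[:3, :3] = rot
#         m[:3, 3] = (1.0 * trans_num) / trans_den
#         m[3, 3] = 1.0
#         return m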
trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([-1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = 
N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(209, 'F 4 3 2', transformations) space_groups[209] = sg space_groups['F 4 3 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) 
trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([3,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([3,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([3,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([3,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) 
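# Illustrative sketch (not part of the generated table): each
# (rot, trans_num, trans_den) triple appended above encodes one symmetry
# operation acting on fractional coordinates as x' = rot.x + trans_num/trans_den.
# The helper below is a hypothetical example of applying such a triple; it
# assumes N is the NumPy-compatible array module already imported by this file.
def _apply_symmetry_op(rot, trans_num, trans_den, point):
    # Rotate the fractional position, add the fractional translation,
    # and wrap the result back into the unit cell (modulo 1).
    return (N.dot(rot, point) + 1.0 * trans_num / trans_den) % 1.0
# Example use: _apply_symmetry_op(rot, trans_num, trans_den, N.array([0.1, 0.2, 0.3]))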
rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([3,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([3,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([3,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([3,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([3,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([3,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([3,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([3,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([3,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([3,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = 
N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([3,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([3,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([3,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([3,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([3,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([3,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(210, 'F 41 3 2', transformations) space_groups[210] = sg space_groups['F 41 3 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) 
trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(211, 'I 4 3 2', transformations) space_groups[211] = sg space_groups['I 4 3 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) 
rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([3,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([3,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([3,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([3,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(212, 'P 43 3 2', transformations) space_groups[212] = sg space_groups['P 43 3 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([3,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([3,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = 
N.array([3,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([3,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([3,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([3,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(213, 'P 41 3 2', transformations) space_groups[213] = sg space_groups['P 41 3 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([3,3,5]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([3,5,5]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([3,5,5]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([3,5,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,5,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,3,5]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) 
trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([3,5,5]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([3,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([3,3,5]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([3,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([3,5,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([3,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(214, 'I 41 3 2', transformations) space_groups[214] = sg space_groups['I 41 3 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(215, 'P -4 3 m', transformations) space_groups[215] = sg space_groups['P -4 3 m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([0,0,-1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) 
trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num 
= N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(216, 'F -4 3 m', transformations) space_groups[216] = sg space_groups['F -4 3 m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) 
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(217, 'I -4 3 m', transformations)
space_groups[217] = sg
space_groups['I -4 3 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(218, 'P -4 3 n', transformations) space_groups[218] = sg space_groups['P -4 3 n'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = 
N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, 
trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) 
trans_num = N.array([1,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([1,2,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(219, 'F -4 3 c', transformations) space_groups[219] = sg space_groups['F -4 3 c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = 
N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([3,3,5]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([3,5,5]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([3,5,5]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([3,5,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([3,5,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([3,3,5]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, 
trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,5,5]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([3,3,5]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([3,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([3,5,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([3,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(220, 'I -4 3 d', transformations) space_groups[220] = sg space_groups['I -4 3 d'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = 
N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([0,1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(221, 'P m -3 m', transformations) space_groups[221] = sg space_groups['P m -3 m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([0,0,-1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(222, 'P n -3 n :2', transformations) 
space_groups[222] = sg space_groups['P n -3 n :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot 
= N.array([0,0,-1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) 
trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(223, 'P m -3 n', transformations) space_groups[223] = sg space_groups['P m -3 n'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) 
rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,-1,0]) rot.shape = 
(3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(224, 'P n -3 m :2', transformations) space_groups[224] = sg space_groups['P n -3 m :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) 
trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, 
trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,-1,0]) rot.shape = 
(3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([0,0,1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,1,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,0,-1,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,0,-1,0,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,0,-1,0,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num =
N.array([1,0,1])
numpy.array
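A minimal, self-contained sketch (values taken from the row above; assuming N is the snippet's numpy alias, as the numpy.array api field indicates) of what the completed call builds: a fractional space-group translation stored as a numerator/denominator pair.

import numpy as N  # the space-group snippet imports numpy under the alias N

trans_num = N.array([1, 0, 1])       # numerators of the fractional translation
trans_den = N.array([2, 1, 2])       # matching denominators
translation = trans_num / trans_den  # -> array([0.5, 0. , 0.5])
print(translation)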
# -*- coding: utf-8 -*-
"""Copyright 2015 <NAME>. FilterPy library.
http://github.com/rlabbe/filterpy

Documentation at:
https://filterpy.readthedocs.org

Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python

This is licensed under an MIT license. See the readme.MD file
for more information.
"""

from __future__ import (absolute_import, division, print_function,
                        unicode_literals)
import numpy.random as random
import numpy as np
import matplotlib.pyplot as plt
from filterpy.common import Saver
from filterpy.kalman import KalmanFilter, InformationFilter

DO_PLOT = False


def test_1d_0P():
    global inf

    f = KalmanFilter(dim_x=2, dim_z=1)
    inf = InformationFilter(dim_x=2, dim_z=1)

    f.x = np.array([[2.], [0.]])            # initial state (location and velocity)
    f.F = (np.array([[1., 1.], [0., 1.]]))  # state transition matrix
    f.H = np.array([[1., 0.]])              # Measurement function
    f.R =
np.array([[5.]])
numpy.array
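A tiny illustration of the completed call: the test assigns a (1, 1) numpy.array as the measurement-noise matrix R, matching dim_z=1 in the filter construction above.

import numpy as np

R = np.array([[5.]])   # 1x1 measurement-noise covariance for the single scalar sensor
print(R.shape)         # (1, 1)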
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for optimizers with weight decay.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.contrib.opt.python.training import weight_decay_optimizers from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training import adam WEIGHT_DECAY = 0.01 def adamw_update_numpy(param, g_t, t, m, v, lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8): lr_t = lr * np.sqrt(1 - beta2**t) / (1 - beta1**t) m_t = beta1 * m + (1 - beta1) * g_t v_t = beta2 * v + (1 - beta2) * g_t * g_t param_t = (param - lr_t * m_t / (np.sqrt(v_t) + epsilon) - (param * WEIGHT_DECAY)) return param_t, m_t, v_t def momentumw_update_numpy(param, g_t, m, lr=0.001, momentum=0.9, **_): # v, t are not needed for momentum optimizer m = momentum * m + g_t param_t = param - lr * m - param * WEIGHT_DECAY return param_t, m, None class WeightDecayOptimizerTest(test.TestCase): def doTest(self, optimizer, update_fn, optimizer_name, slot_name, use_resource=False, do_sparse=False): for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]): with self.test_session(graph=ops.Graph()): # Initialize variables for numpy implementation. m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0 var0_np =
np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
numpy.array
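A hedged, self-contained sketch that re-runs the snippet's own adamw_update_numpy reference with the array literal the completion builds; the gradient values and t=1 step are made up for illustration.

import numpy as np

WEIGHT_DECAY = 0.01

# copy of the reference update from the snippet, repeated here for a standalone run
def adamw_update_numpy(param, g_t, t, m, v, lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8):
    lr_t = lr * np.sqrt(1 - beta2**t) / (1 - beta1**t)
    m_t = beta1 * m + (1 - beta1) * g_t
    v_t = beta2 * v + (1 - beta2) * g_t * g_t
    param_t = param - lr_t * m_t / (np.sqrt(v_t) + epsilon) - param * WEIGHT_DECAY
    return param_t, m_t, v_t

var0 = np.array([1.0, 2.0], dtype=np.float32)    # same literal the completion builds
grads0 = np.array([0.1, 0.1], dtype=np.float32)  # hypothetical gradients
var0, m0, v0 = adamw_update_numpy(var0, grads0, t=1, m=0.0, v=0.0)
print(var0)  # shrinks by both the Adam step and the decoupled weight decay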
import numpy as np import seaborn as sns import matplotlib.pyplot as plt from sympy.solvers import solve from sympy import Symbol from matplotlib import patches import matplotlib.patches as mpatches import scipy.io as sio from matplotlib.colors import LogNorm from matplotlib.colors import LinearSegmentedColormap from sympy.solvers import solve from sympy import Symbol from wolframclient.evaluation import WolframLanguageSession from wolframclient.language import wl, wlexpr session = WolframLanguageSession() ratio = 1.25 # plotting configuration figure_len, figure_width = 15*1.5, 12*1.5 font_size_1, font_size_2 = 36*ratio, 36*ratio legend_size = 18*ratio line_width, tick_len = 3*ratio, 10*ratio marker_size = 15*ratio plot_line_width = 5*ratio hfont = {'fontname': 'Arial'} sns.set(style='ticks') tau_e = 0.02 tau_i = 0.01 k = 0.1 l = 0.5 beta = 0.9 N = 2 e = 1/tau_e f = 1/tau_i l_a = np.arange(0, 201, 1) l_d = np.arange(0, 201, 1) uni_3D_mat = np.zeros((len(l_d), len(l_a))) * np.nan uni_4D_mat = np.zeros((len(l_d), len(l_a))) * np.nan example_global_unistable_x = (1.3*2*np.power(0.1619020544546218, 1/2))/tau_e example_global_unistable_y = (0.6*2*np.power(1.0145746970782272, 1/2))/tau_i example_global_multistable_x = (1.4*2*np.power(0.29638770180500046, 1/2))/tau_e example_global_multistable_y = (0.6*2*np.power(1.0396121792300297, 1/2))/tau_i for a_idx in np.arange(len(l_a)): a = l_a[a_idx] for d_idx in np.arange(len(l_d)): d = l_d[d_idx] bc = beta * a * d uni_3D_mat[d_idx, a_idx] = a - e - k * a if
np.power(a - e - k * a + N * d - N * l * d + f, 2)
numpy.power
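A quick check, using the snippet's parameter values and a hypothetical grid point (a, d), that the completed np.power(..., 2) is plain elementwise squaring of the stability expression.

import numpy as np

tau_e, tau_i, k, l, N = 0.02, 0.01, 0.1, 0.5, 2
e, f = 1 / tau_e, 1 / tau_i
a, d = 100.0, 50.0  # hypothetical grid point, for illustration only
lhs = np.power(a - e - k * a + N * d - N * l * d + f, 2)
assert np.isclose(lhs, (a - e - k * a + N * d - N * l * d + f) ** 2)
print(lhs)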
from pyscf.gto import eval_gto
from pyscf import gto
import numpy as np
import pyqmc


class J3:
    def __init__(self, mol):
        self.mol = mol
        randpos =
np.random.random((1,3))
numpy.random.random
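A small sketch of the completed draw: np.random.random((1, 3)) returns one row of three uniform [0, 1) coordinates. The seed call is only for reproducibility and is not part of the original row.

import numpy as np

np.random.seed(0)                   # illustrative only
randpos = np.random.random((1, 3))
print(randpos.shape)                # (1, 3): one point with three coordinates in [0, 1)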
#v2.3 import numpy as np from util import * L_SOFTPLUS = 0 L_RELU = 1 L_LINEAR = 2 L_SIGMOID = 3 L_DISCRETE = 4 L_BINARY_Z = 5 L_BINARY_N = 6 LS_REAL = [L_SOFTPLUS, L_RELU, L_LINEAR, L_SIGMOID] LS_DISCRETE = [L_DISCRETE, L_BINARY_Z, L_BINARY_N, ] ACT_F = {L_SOFTPLUS: softplus, L_RELU: relu, L_SIGMOID: sigmoid, L_LINEAR: lambda x: x, L_BINARY_Z: sigmoid, L_BINARY_N: lambda x: 2*sigmoid(x)-1, } ACT_D_F = {L_SOFTPLUS: sigmoid, L_RELU: relu_d, L_SIGMOID: sigmoid_d, L_LINEAR: lambda x: 1, L_BINARY_Z: sigmoid_d, L_BINARY_N: lambda x: 2*sigmoid_d(x), } class eq_prop_layer(): def __init__(self, name, input_size, output_size, optimizer, var, temp, l_type, unbiased=False): if l_type not in [L_SOFTPLUS, L_RELU, L_LINEAR, L_SIGMOID, L_DISCRETE, L_BINARY_Z, L_BINARY_N, ]: raise Exception('l_type (%d) not implemented' % l_type) self.name = name self.input_size = input_size self.output_size = output_size self.optimizer = optimizer self.l_type = l_type self.temp = temp if l_type in LS_DISCRETE else 1 self.unbiased = unbiased lim = np.sqrt(6 / (input_size + output_size)) if l_type == L_DISCRETE: output_size -= 1 self._w = np.random.uniform(-lim, lim, size=(input_size, output_size)) self._b = np.random.uniform(-1e-3, 1e-3, size=(output_size)) self._inv_var = np.full(output_size, 1/var) if var > 0 else None self.prev_layer = None # Set manually self.next_layer = None # Set manually self.values = np.zeros((1, output_size)) self.w_trace = np.zeros((1, input_size, output_size,)) self.b_trace = np.zeros((1, output_size,)) if self.unbiased: self.p_w_trace = np.zeros((1, input_size, output_size,)) self.p_b_trace = np.zeros((1, output_size,)) def sample(self, inputs, det=False): self.compute_pot_mean(inputs) if self.l_type in LS_REAL: if self._inv_var is None or det: self.values = self.mean else: sigma =
np.sqrt(1/self._inv_var)
numpy.sqrt
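A sketch of the sampling step this completion plausibly feeds; the continuation after sigma is an assumption, not shown in the row. The stored inverse variance is turned back into a standard deviation for Gaussian noise around the layer mean.

import numpy as np

inv_var = np.full(4, 1 / 0.25)   # the layer stores 1/var, like _inv_var above
mean = np.zeros((1, 4))          # pretend layer activations
sigma = np.sqrt(1 / inv_var)     # -> std of 0.5 per unit
values = mean + sigma * np.random.standard_normal(mean.shape)  # assumed continuation
print(sigma)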
import os import numpy as np # Tiles are 10x10 squares TILE_LENGTH = 10 # 2d array representing the seamonster SEA_MONSTER = np.array([ [" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "#", " "], ["#", " ", " ", " ", " ", "#", "#", " ", " ", " ", " ", "#", "#", " ", " ", " ", " ", "#", "#", "#"], [" ", "#", " ", " ", "#", " ", " ", "#", " ", " ", "#", " ", " ", "#", " ", " ", "#", " ", " ", " "], ]) # Possible values for flipX and flipY FLIP_VALUES = [False, True] class Tile: def __init__(self, number): self.number = number self.top = set() self.right = set() self.bottom = set() self.left = set() class Transformation: def __init__(self, flipX, flipY, numRot): self.flipX = flipX self.flipY = flipY self.numRot = numRot def areTilesAdjacent(tilePixels, otherTilePixels, getTargetArray, getComparisonArray): targetArray = getTargetArray(tilePixels) for flipX in FLIP_VALUES: for flipY in FLIP_VALUES: comparisonTile = np.copy(otherTilePixels) if flipX: comparisonTile = np.fliplr(comparisonTile) if flipY: comparisonTile = np.flipud(comparisonTile) # Rotate the tile 0, 90, 180, and 270 degrees numRot = 0 while numRot < 4: rotatedTile = np.rot90(comparisonTile, numRot) # Check other tile to see if it fits... if (targetArray == getComparisonArray(rotatedTile)).all(): return True, Transformation(flipX, flipY, numRot) numRot += 1 return False, Transformation(False, False, 0) def tileMatchesTop(tilePixels, otherTilePixels): return areTilesAdjacent(tilePixels, otherTilePixels, lambda tile: tile[0], lambda tile: tile[-1]) def tileMatchesRight(tilePixels, otherTilePixels): return areTilesAdjacent(tilePixels, otherTilePixels, lambda tile: tile[:, -1], lambda tile: tile[:, 0]) def tileMatchesBottom(tilePixels, otherTilePixels): return areTilesAdjacent(tilePixels, otherTilePixels, lambda tile: tile[-1], lambda tile: tile[0]) def tileMatchesLeft(tilePixels, otherTilePixels): return areTilesAdjacent(tilePixels, otherTilePixels, lambda tile: tile[:, 0], lambda tile: tile[:, -1]) def populateTileMatches(tileObjs): # Find the matching top, right, bottom, left tiles for each individual tile... 
tileIdx = 0 while tileIdx < len(tileObjs): tileObj = tileObjs[tileIdx] otherTileIdx = 0 while otherTileIdx < len(tileObjs): if tileIdx == otherTileIdx: otherTileIdx += 1 continue otherTileObj = tileObjs[otherTileIdx] # check if other tile matches top (tileMatches, _) = tileMatchesTop(tiles[tileObj.number], tiles[otherTileObj.number]) if tileMatches: tileObj.top.add(otherTileObj.number) # check if other tile matches right (tileMatches, _) = tileMatchesRight(tiles[tileObj.number], tiles[otherTileObj.number]) if tileMatches: tileObj.right.add(otherTileObj.number) # check if other tile matches bottom (tileMatches, _) = tileMatchesBottom(tiles[tileObj.number], tiles[otherTileObj.number]) if tileMatches: tileObj.bottom.add(otherTileObj.number) # check if other tile matches left (tileMatches, _) = tileMatchesLeft(tiles[tileObj.number], tiles[otherTileObj.number]) if tileMatches: tileObj.left.add(otherTileObj.number) otherTileIdx += 1 tileIdx += 1 def getCornerTiles(tileObjs): cornerTiles = [] for tileObj in tileObjs: # Determine if this is a corner tile hasAdjacentTopRight = (len(tileObj.top) == 0 and len(tileObj.right) == 0) hasAdjacentBottomRight = (len(tileObj.right) == 0 and len(tileObj.bottom) == 0) hasAdjacentBottomLeft = (len(tileObj.bottom) == 0 and len(tileObj.left) == 0) hasAdjacentTopLeft = (len(tileObj.left) == 0 and len(tileObj.top) == 0) if hasAdjacentTopRight or hasAdjacentBottomRight or hasAdjacentBottomLeft or hasAdjacentTopLeft: cornerTiles.append(tileObj) return cornerTiles def setPixelsOnImage(image, rowOffset, colOffset, tilePixels): for rowIdx, row in enumerate(tilePixels): for colIdx, pixel in enumerate(row): image[rowOffset + rowIdx, colOffset + colIdx] = pixel def getMatchingTile(matchFunction, currentTilePixels, tiles, usedTilesNumbers): matches = [] for candidateTileNumber in tiles: if candidateTileNumber in usedTilesNumbers: continue candidateTilePixels = tiles[candidateTileNumber] # Check if candidate tile can be placed as determined by the matchFunction (one of the tileMatches* functions defined above) isMatch, transformations = matchFunction(currentTilePixels, candidateTilePixels) if isMatch: if transformations.flipX: candidateTilePixels = np.fliplr(candidateTilePixels) if transformations.flipY: candidateTilePixels = np.flipud(candidateTilePixels) candidateTilePixels = np.rot90(candidateTilePixels, transformations.numRot) matches.append((candidateTileNumber, candidateTilePixels)) if len(matches) == 0: raise Exception("No matching tiles found.") return matches[0] def generateImageFromTopLeft(tiles, currentTileNumber, currentTilePixels): # Determine size of final image imageLength = int(np.ceil(np.sqrt(len(tiles)))) imagePixelLength = imageLength * TILE_LENGTH image = np.empty((imagePixelLength, imagePixelLength), str) imageNoBordersPixelLength = imageLength * (TILE_LENGTH - 2) imageNoBorders = np.empty((imageNoBordersPixelLength, imageNoBordersPixelLength), str) # Track used tiles usedTilesNumbers = [] rowOffset = 0 rowOffsetNoBorders = 0 while rowOffset < imagePixelLength: rowTilePixels = currentTilePixels colOffset = 0 colOffsetNoBorders = 0 while colOffset < imagePixelLength: setPixelsOnImage(image, rowOffset, colOffset, currentTilePixels) setPixelsOnImage(imageNoBorders, rowOffsetNoBorders, colOffsetNoBorders, currentTilePixels[1:-1, 1:-1]) usedTilesNumbers.append(currentTileNumber) colOffset += TILE_LENGTH colOffsetNoBorders += (TILE_LENGTH - 2) if colOffset < imagePixelLength: currentTileNumber, currentTilePixels = getMatchingTile(tileMatchesRight, 
currentTilePixels, tiles, usedTilesNumbers) rowOffset += TILE_LENGTH rowOffsetNoBorders += (TILE_LENGTH - 2) if rowOffset < imagePixelLength: currentTileNumber, currentTilePixels = getMatchingTile(tileMatchesBottom, rowTilePixels, tiles, usedTilesNumbers) return image, imageNoBorders def hasSeaMonster(imageSegment): for rowIdx, imageRow in enumerate(imageSegment): for colIdx, imagePixel in enumerate(imageRow): seaMonsterPixel = SEA_MONSTER[rowIdx, colIdx] if seaMonsterPixel == "#" and imagePixel != seaMonsterPixel: return False return True def countSeaMonsters(image): count = 0 imageRowIdx = 0 while imageRowIdx <= image.shape[0] - SEA_MONSTER.shape[0]: imageColIdx = 0 while imageColIdx <= image.shape[1] - SEA_MONSTER.shape[1]: if hasSeaMonster(image[imageRowIdx:imageRowIdx + SEA_MONSTER.shape[0], imageColIdx:imageColIdx + SEA_MONSTER.shape[1]]): count += 1 imageColIdx += 1 imageRowIdx += 1 return count def printImage(image, tileInterval): for rowIdx, row in enumerate(image): if rowIdx % tileInterval == 0: print("") outputLine = "" for colIdx, pixel in enumerate(row): if colIdx % tileInterval == 0: outputLine += " " if pixel == "": outputLine += " " else: outputLine += pixel print(outputLine) def part1(tiles): tileObjs = [Tile(tileNumber) for tileNumber in tiles] populateTileMatches(tileObjs) cornerTiles = getCornerTiles(tileObjs) productOfCornerTiles = np.prod([cornerTile.number for cornerTile in cornerTiles]) print(f"Part 1 - Solution: {productOfCornerTiles}") def part2(tiles): tileNumberToObjs = {} for tileNumber in tiles: tileObj = Tile(tileNumber) tileNumberToObjs[tileNumber] = tileObj populateTileMatches(list(tileNumberToObjs.values())) cornerTiles = getCornerTiles(list(tileNumberToObjs.values())) # Get corner tiles topRightTiles = [cornerTile for cornerTile in cornerTiles if (len(cornerTile.top) == 0 and len(cornerTile.right) == 0)] bottomRightTiles = [cornerTile for cornerTile in cornerTiles if (len(cornerTile.bottom) == 0 and len(cornerTile.right) == 0)] bottomLeftTiles = [cornerTile for cornerTile in cornerTiles if (len(cornerTile.bottom) == 0 and len(cornerTile.left) == 0)] topLeftTiles = [cornerTile for cornerTile in cornerTiles if (len(cornerTile.top) == 0 and len(cornerTile.left) == 0)] currentTileNumber = None currentTilePixels = None if len(topLeftTiles) == 1: currentTileNumber = topLeftTiles[0].number currentTilePixels = tiles[currentTileNumber] elif len(topRightTiles) == 1: currentTileNumber = topRightTiles[0].number currentTilePixels = tiles[currentTileNumber] # Make top right into top left... (flip X) currentTilePixels = np.fliplr(currentTilePixels) elif len(bottomRightTiles) == 1: currentTileNumber = bottomRightTiles[0].number currentTilePixels = tiles[currentTileNumber] # Make bottom right into top left... (flip X and flip Y) currentTilePixels =
np.fliplr(currentTilePixels)
numpy.fliplr
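A tiny demonstration of the completed np.fliplr on a toy tile: columns are mirrored left-to-right, which together with np.flipud is how the snippet re-orients corner tiles into the top-left position.

import numpy as np

tile = np.array([["#", ".", "."],
                 [".", "#", "."],
                 [".", ".", "#"]])
print(np.fliplr(tile))  # columns reversed: the '#' diagonal now runs the other way
print(np.flipud(tile))  # rows reversed, the other half of the corner re-orientation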
from cgitb import grey from matplotlib.pyplot import axis, close, hot import numpy as np from numpy.random import default_rng import random import copy import math class ImageManipulator: def __init__(self): self._rng = default_rng(seed=42) def salt_pepper_noise(self, gray_img, ratio): noise = self._rng.choice( [-1, 0, 255], size=gray_img.shape, p=[1 - ratio, ratio / 2, ratio / 2] ) np.copyto(noise, gray_img, where=noise == -1) gray_img = noise.astype(np.uint8) return gray_img def gaussian_noise(self, gray_img, mean, std): noise = self._rng.normal(loc=mean, scale=std, size=gray_img.shape) gray_img = gray_img + noise gray_img = np.clip(gray_img, 0, 255) gray_img = np.rint(gray_img) gray_img = gray_img.astype(np.uint8) return gray_img def calc_histogram(self, gray_img): hist = np.zeros(256) for i in range(len(hist)): hist[i] = np.sum(gray_img == i) hist = hist.astype(np.uint) return hist def avg_histograms(self, hist_list): hist_arr = np.array(hist_list) hist = np.mean(hist_arr, axis=0) hist =
np.rint(hist)
numpy.rint
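A short sketch of the rounding step in avg_histograms: the mean of integer histograms is float, and the completed np.rint rounds it back to whole counts. The final uint cast mirrors calc_histogram and is an assumption about the continuation.

import numpy as np

hist_list = [np.array([1, 4, 2]), np.array([2, 5, 3])]
hist = np.mean(np.array(hist_list), axis=0)  # -> [1.5, 4.5, 2.5]
hist = np.rint(hist)                         # -> [2., 4., 2.]  (halves round to even)
hist = hist.astype(np.uint)                  # assumed, as in calc_histogram
print(hist)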
import numpy as np import scipy as scipy from numpy.random import uniform import scipy.stats import cv2 np.set_printoptions(threshold=3) np.set_printoptions(suppress=True) # The code is copied from http://ros-developer.com/2019/04/10/parcticle-filter-explained-with-python-code-from-scratch/. def drawLines(img, points, b, g, r): cv2.polylines(img, [np.int32(points)], isClosed=False, color=(b, g, r)) def drawCross(img, center, b, g, r): d = 5 t = 2 LINE_AA = cv2.LINE_AA # LINE_AA = cv2.LINE_AA if cv2.__version__[0] == '3' else cv2.CV_AA color = (b, g, r) ctrx = center[0, 0] ctry = center[0, 1] cv2.line(img, (ctrx - d, ctry - d), (ctrx + d, ctry + d), color, t, LINE_AA) cv2.line(img, (ctrx + d, ctry - d), (ctrx - d, ctry + d), color, t, LINE_AA) def mouseCallback(event, x, y, flags, null): global center global trajectory global previous_x global previous_y global zs center = np.array([[x, y]]) trajectory = np.vstack((trajectory, np.array([x, y]))) # noise=sensorSigma * np.random.randn(1,2) + sensorMu if previous_x > 0: heading = np.arctan2(np.array([y - previous_y]), np.array([previous_x - x])) if heading > 0: heading = -(heading - np.pi) else: heading = -(np.pi + heading) distance = np.linalg.norm(np.array([[previous_x, previous_y]]) - np.array([[x, y]]), axis=1) std = np.array([2, 4]) u = np.array([heading, distance]) predict(particles, u, std, dt=1.) zs = (np.linalg.norm(landmarks - center, axis=1) + (np.random.randn(NL) * sensor_std_err)) # weights = np.ones(len(particles))/len(particles) update(particles, weights, z=zs, R=50, landmarks=landmarks) indexes = systematic_resample(weights) resample_from_index(particles, weights, indexes) previous_x = x previous_y = y WIDTH = 800 HEIGHT = 600 WINDOW_NAME = "Particle Filter" # sensorMu=0 # sensorSigma=3 sensor_std_err = 5 def create_uniform_particles(x_range, y_range, N): particles = np.empty((N, 2)) particles[:, 0] = uniform(x_range[0], x_range[1], size=N) particles[:, 1] = uniform(y_range[0], y_range[1], size=N) return particles def predict(particles, u, std, dt=1.): N = len(particles) dist = (u[1] * dt) + (np.random.randn(N) * std[1]) particles[:, 0] += np.cos(u[0]) * dist particles[:, 1] += np.sin(u[0]) * dist def update(particles, weights, z, R, landmarks): weights.fill(1.) for i, landmark in enumerate(landmarks): distance =
np.power((particles[:, 0] - landmark[0]) ** 2 + (particles[:, 1] - landmark[1]) ** 2, 0.5)
numpy.power
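A minimal check that the completed expression is the Euclidean particle-to-landmark distance, since np.power(x, 0.5) is an elementwise square root.

import numpy as np

particles = np.array([[0.0, 0.0], [3.0, 4.0]])
landmark = np.array([0.0, 0.0])
distance = np.power((particles[:, 0] - landmark[0]) ** 2 +
                    (particles[:, 1] - landmark[1]) ** 2, 0.5)
print(distance)  # [0. 5.]
assert np.allclose(distance, np.linalg.norm(particles - landmark, axis=1))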
# Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for lift metrics.""" from absl.testing import parameterized import apache_beam as beam from apache_beam.testing import util import numpy as np import tensorflow as tf from tensorflow_model_analysis.addons.fairness.metrics import lift from tensorflow_model_analysis.eval_saved_model import testutil from tensorflow_model_analysis.metrics import metric_types from tensorflow_model_analysis.metrics import metric_util from tensorflow_model_analysis.proto import config_pb2 class LiftTest(testutil.TensorflowModelAnalysisTest, parameterized.TestCase): def _assert_test(self, num_buckets, baseline_examples, comparison_examples, lift_metric_value, ignore_out_of_bound_examples=False): eval_config = config_pb2.EvalConfig( cross_slicing_specs=[config_pb2.CrossSlicingSpec()]) computations = lift.Lift( num_buckets=num_buckets, ignore_out_of_bound_examples=ignore_out_of_bound_examples).computations( eval_config=eval_config) histogram = computations[0] lift_metrics = computations[1] with beam.Pipeline() as pipeline: # pylint: disable=no-value-for-parameter baseline_result = ( pipeline | 'CreateB' >> beam.Create(baseline_examples) | 'ProcessB' >> beam.Map(metric_util.to_standard_metric_inputs) | 'AddSliceB' >> beam.Map(lambda x: ((), x)) | 'ComputeHistogramB' >> beam.CombinePerKey(histogram.combiner) ) # pyformat: ignore comparison_result = ( pipeline | 'CreateC' >> beam.Create(comparison_examples) | 'ProcessC' >> beam.Map(metric_util.to_standard_metric_inputs) | 'AddSliceC' >> beam.Map(lambda x: (('slice'), x)) | 'ComputeHistogramC' >> beam.CombinePerKey(histogram.combiner) ) # pyformat: ignore # pylint: enable=no-value-for-parameter merged_result = ((baseline_result, comparison_result) | 'MergePCollections' >> beam.Flatten()) def check_result(got): try: self.assertLen(got, 2) slice_1, metric_1 = got[0] slice_2, metric_2 = got[1] lift_value = None if not slice_1: lift_value = lift_metrics.cross_slice_comparison(metric_1, metric_2) else: lift_value = lift_metrics.cross_slice_comparison(metric_2, metric_1) self.assertDictElementsAlmostEqual( lift_value, { metric_types.MetricKey(name=f'lift@{num_buckets}'): lift_metric_value, }) except AssertionError as err: raise util.BeamAssertException(err) util.assert_that(merged_result, check_result, label='result') def testLift_continuousLabelsAndPredictions(self): baseline_examples = [{ 'labels': np.array([0.0]), 'predictions': np.array([0.1]), 'example_weights': np.array([3.0]), }, { 'labels': np.array([0.3]), 'predictions': np.array([0.5]), 'example_weights': np.array([5.0]), }, { 'labels': np.array([0.6]), 'predictions': np.array([0.8]), 'example_weights': np.array([2.0]), }, { 'labels': np.array([0.9]), 'predictions': np.array([0.3]), 'example_weights': np.array([8.0]), }, { 'labels': np.array([0.9]), 'predictions': np.array([0.9]), 'example_weights': np.array([3.0]), }] comparison_examples = [{ 'labels': np.array([0.0]), 'predictions': np.array([0.8]), 'example_weights': np.array([1.0]), }, { 
'labels': np.array([0.2]), 'predictions': np.array([0.3]), 'example_weights': np.array([2.0]), }, { 'labels': np.array([0.5]), 'predictions': np.array([0.5]), 'example_weights': np.array([5.0]), }, { 'labels': np.array([0.7]), 'predictions': np.array([0.4]), 'example_weights': np.array([2.0]), }, { 'labels': np.array([0.9]), 'predictions': np.array([0.3]), 'example_weights': np.array([3.0]), }] self._assert_test(3, baseline_examples, comparison_examples, -0.136013986) def testLift_baselineAndComparisonAreSame(self): baseline_examples = [{ 'labels': np.array([0.0]), 'predictions': np.array([0.1]), 'example_weights': np.array([3.0]), }, { 'labels': np.array([0.3]), 'predictions': np.array([0.5]), 'example_weights': np.array([5.0]), }, { 'labels': np.array([0.6]), 'predictions': np.array([0.8]), 'example_weights': np.array([2.0]), }, { 'labels': np.array([0.9]), 'predictions': np.array([0.3]), 'example_weights': np.array([8.0]), }, { 'labels': np.array([1.0]), 'predictions': np.array([0.9]), 'example_weights': np.array([3.0]), }] self._assert_test(3, baseline_examples, baseline_examples, 0.0) def testLift_ignoringOutOfBoundExamples(self): baseline_examples = [ { 'labels': np.array([0.0]), 'predictions': np.array([0.1]), 'example_weights': np.array([3.0]), }, { 'labels': np.array([0.3]), 'predictions': np.array([0.5]), 'example_weights': np.array([5.0]), }, { 'labels': np.array([0.6]), 'predictions': np.array([0.8]), 'example_weights': np.array([2.0]), }, { 'labels': np.array([0.9]), 'predictions': np.array([0.3]), 'example_weights': np.array([8.0]), }, { 'labels': np.array([-0.9]), # Ignore this example 'predictions': np.array([0.3]), 'example_weights': np.array([8.0]), }, { 'labels': np.array([0.9]), 'predictions': np.array([0.9]), 'example_weights': np.array([3.0]), } ] comparison_examples = [ { 'labels': np.array([0.0]), 'predictions': np.array([0.8]), 'example_weights': np.array([1.0]), }, { 'labels': np.array([0.2]), 'predictions': np.array([0.3]), 'example_weights': np.array([2.0]), }, { 'labels': np.array([0.5]), 'predictions': np.array([0.5]), 'example_weights': np.array([5.0]), }, { 'labels': np.array([0.7]), 'predictions': np.array([0.4]), 'example_weights': np.array([2.0]), }, { 'labels': np.array([1.9]), # Ignore this example 'predictions': np.array([0.3]), 'example_weights': np.array([8.0]), }, { 'labels': np.array([0.9]), 'predictions': np.array([0.3]), 'example_weights': np.array([3.0]), } ] self._assert_test( 3, baseline_examples, comparison_examples, -0.136013986, ignore_out_of_bound_examples=True) def testLift_binaryLabelsAndContinuousPredictions(self): baseline_examples = [{ 'labels': np.array([0.0]), 'predictions': np.array([0.1]), 'example_weights': np.array([3.0]), }, { 'labels': np.array([0.0]), 'predictions': np.array([0.5]), 'example_weights': np.array([5.0]), }, { 'labels': np.array([1.0]), 'predictions': np.array([0.8]), 'example_weights': np.array([2.0]), }, { 'labels': np.array([1.0]), 'predictions': np.array([0.3]), 'example_weights': np.array([8.0]), }, { 'labels': np.array([1.0]), 'predictions': np.array([0.9]), 'example_weights': np.array([3.0]), }] comparison_examples = [{ 'labels': np.array([0.0]), 'predictions': np.array([0.8]), 'example_weights': np.array([1.0]), }, { 'labels': np.array([0.0]), 'predictions': np.array([0.3]), 'example_weights': np.array([2.0]), }, { 'labels': np.array([0.0]), 'predictions': np.array([0.5]), 'example_weights': np.array([5.0]), }, { 'labels': np.array([1.0]), 'predictions': np.array([0.4]), 'example_weights': 
np.array([2.0]), }, { 'labels': np.array([1.0]), 'predictions': np.array([0.3]), 'example_weights': np.array([3.0]), }] self._assert_test(2, baseline_examples, comparison_examples, 0.01715976331) def testLift_binaryLabelsAndPredictions(self): baseline_examples = [{ 'labels': np.array([0.0]), 'predictions': np.array([1.0]), 'example_weights': np.array([3.0]), }, { 'labels':
np.array([0.0])
numpy.array
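A tiny sketch of the per-example dictionaries these lift tests build, where every field is a length-1 numpy.array like the completed np.array([0.0]) label; the concrete values here are illustrative only.

import numpy as np

example = {
    'labels': np.array([0.0]),
    'predictions': np.array([1.0]),
    'example_weights': np.array([3.0]),
}
print({k: v.shape for k, v in example.items()})  # every value is a (1,) float array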
# -*- coding: utf-8 -*-
"""
Spyder Editor

This is a temporary script file.
"""
import numpy as np


def subarray_multislice(array_ndim, fixed_axes, indices):
    '''
    Return tuple of slices that, if indexed into an array with the given number
    of dimensions, will return the subarray with the axes in fixed_axes held at
    the given indices
    '''
    indices = np.array(indices)
    colon = slice(None, None, None)
    multislice = ()
    for i in range(array_ndim):
        if i in fixed_axes:
            multislice = multislice + \
                (indices[
np.where(fixed_axes == i)
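# A quick check of the helper above; the array contents are arbitrary demo values.
# Fixing axes 0 and 2 of a 3-D array at indices 1 and 3 should reproduce a[1, :, 3].
if __name__ == '__main__':
    a = np.arange(2 * 4 * 5).reshape(2, 4, 5)
    ms = subarray_multislice(a.ndim, np.array([0, 2]), (1, 3))
    assert np.array_equal(a[ms], a[1, :, 3])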
# this script assumes Python 3.5 is in use

# imports
import numpy as np
from scipy import ndimage
from astropy.io import fits

#..........................................................................main

def main(file, scale):

    science = fits.open(file)  # open the background-subtracted science image
    image = science[0].data  # get the science data that will be used
    science.close()

    # smooth the original image by the Gaussian kernel
    smooth_image = ndimage.gaussian_filter(image, scale)

    return clumpiness(image, smooth_image)

#....................................................................clumpiness

def clumpiness(image, smoothed):

    numer = image - smoothed
    numer = np.ma.masked_where(numer < 0, numer)  # keep only positive residuals
    np.ma.set_fill_value(numer, 0)
    numer = numer.filled()

    denom = image

    # The source is truncated after np.sum(numer); dividing the residual flux by the
    # total flux and returning the ratio is the completion implied by `denom`.
    clumpy = np.sum(numer) / np.sum(denom)
    return clumpy
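# Minimal smoke test of clumpiness() on a synthetic image, so the statistic can be
# exercised without a FITS file; the image values and smoothing scale are arbitrary.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    fake_image = rng.rand(64, 64) + 10.0  # positive "flux" everywhere
    fake_smooth = ndimage.gaussian_filter(fake_image, 3)
    print('clumpiness =', clumpiness(fake_image, fake_smooth))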
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines the various functions for training the pre-defined MentorNet."""

import numpy as np
import scipy.stats as st
import tensorflow as tf


def logistic(inputs):
  """A baseline logistic model."""
  feat_dim = int(inputs.get_shape()[1])
  with tf.variable_scope('logistic'):
    layer_1 = tf.add(
        tf.matmul(inputs, tf.get_variable('W1', [feat_dim, 1])),
        tf.get_variable('b1', [1]))
  return layer_1


def mlp(inputs, num_hidden_nodes=10):
  """A baseline MLP model."""
  feat_dim = int(inputs.get_shape()[1])
  with tf.variable_scope('mlp'):
    layer_1 = tf.add(
        tf.matmul(inputs,
                  tf.Variable(tf.random_normal([feat_dim, num_hidden_nodes]))),
        tf.Variable(tf.random_normal([num_hidden_nodes])))
    layer_1 = tf.nn.tanh(layer_1)
    # Output layer has the linear activation. (In the flattened source the bias
    # variable was added to the weight matrix inside tf.matmul; adding it after
    # the matmul, mirroring logistic() above, is assumed to be the intent.)
    out_layer = tf.add(
        tf.matmul(layer_1,
                  tf.Variable(tf.random_normal([num_hidden_nodes, 1]))),
        tf.Variable(tf.random_normal([1])))
  return out_layer


def vstar_baseline(inbatch, **kwargs):
  """Variable v_star function for equally weighting every sample.

  Args:
    inbatch: a numpy array with the following columns:
      Index 0: Loss
      Index 1: Loss difference from the moving average
      Index 2: Label
      Index 3: Epoch
    **kwargs: hyper-parameter specified in vstar_gamma.

  Returns:
    v: [batch_size, 1] weight vector.
  """
  del kwargs  # Unused.
  v = np.ones(inbatch.shape[0])
  return v


def vstar_self_paced(inbatch, **kwargs):
  """Variable v_star function for self-paced learning.

  Args:
    inbatch: a numpy array with the following columns:
      Index 0: Loss
      Index 1: Loss difference from the moving average
      Index 2: Label
      Index 3: Epoch
    **kwargs: hyper-parameter specified in vstar_gamma.

  Returns:
    v: [batch_size, 1] weight vector.
  """
  del kwargs  # Unused.
  loss_diff = inbatch[:, 1]
  v = np.copy(loss_diff)
  v[np.where(loss_diff <= 0)] = 1
  v[np.where(loss_diff > 0)] = 0
  return v


def vstar_hard_example_mining(inbatch, **kwargs):
  """Variable v_star function for hard example mining.

  Args:
    inbatch: a numpy array with the following columns:
      Index 0: Loss
      Index 1: Loss difference from the moving average
      Index 2: Label
      Index 3: Epoch
    **kwargs: hyper-parameter specified in vstar_gamma.

  Returns:
    v: [batch_size, 1] weight vector.
  """
  del kwargs  # Unused.
  loss_diff = inbatch[:, 1]
  y = inbatch[:, 2]
  v = np.copy(loss_diff)
  v[np.where(loss_diff >= 0)] = 1
  v[np.where(loss_diff < 0)] = 0
  v[np.where(y > 0)] = 1  # Select all positive examples.
  return v


def vstar_focal_loss(inbatch, **kwargs):
  """Variable v_star function for focal loss.

  Args:
    inbatch: a numpy array with the following columns:
      Index 0: Loss
      Index 1: Loss difference from the moving average
      Index 2: Label
      Index 3: Epoch
    **kwargs: hyper-parameter specified in vstar_gamma.

  Returns:
    v: [batch_size, 1] weight vector.
  """
  if 'vstar_gamma' in kwargs:
    gamma = kwargs['vstar_gamma']
  else:
    gamma = 2
  assert gamma > 0
  loss = inbatch[:, 0]
  v = np.power((1 - np.exp(-1 * loss)), gamma)
  return v


def vstar_spcl_linear(inbatch, **kwargs):
  """Variable v_star function for self-paced curriculum learning (linear).

  Args:
    inbatch: a numpy array with the following columns:
      Index 0: Loss
      Index 1: Loss difference from the moving average
      Index 2: Label
      Index 3: Epoch
    **kwargs: hyper-parameter specified in vstar_gamma.

  Returns:
    v: [batch_size, 1] weight vector.
  """
  if 'vstar_gamma' in kwargs:
    gamma = kwargs['vstar_gamma']
  else:
    gamma = 1
  assert gamma != 0
  loss_diff = inbatch[:, 1]
  v = -1.0 / gamma * loss_diff + 1
  v = np.maximum(np.minimum(v, 1), 0)
  return v


def vstar_mentornet_pd(inbatch, **kwargs):
  """Variable v_star function for the pre-defined MentorNet (MentorNet PD).

  Args:
    inbatch: a numpy array with the following columns:
      Index 0: Loss
      Index 1: Loss difference from the moving average
      Index 2: Label
      Index 3: Epoch
    **kwargs: hyper-parameter specified in vstar_gamma.

  Returns:
    v: [batch_size, 1] weight vector.
  """
  epoch = inbatch[:, 3]
  v1 = vstar_self_paced(inbatch, **kwargs)
  v2 = vstar_spcl_linear(inbatch, **kwargs)
  v = np.zeros(len(epoch))
  ids = np.where(epoch >= 90)
  v[ids] = v2[ids]
  ids = np.where(epoch < 90)
  # The source is truncated after the np.where call above; applying the
  # self-paced weights to the earlier epochs and returning v is the natural
  # completion implied by the symmetry with the >= 90 branch.
  v[ids] = v1[ids]
  return v
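# Small illustrative check of the v_star weighting functions above on a toy batch.
# The column layout [loss, loss diff, label, epoch] follows the docstrings; the
# numbers themselves are made up for illustration only.
if __name__ == '__main__':
  toy_batch = np.array([
      [0.5, -0.2, 1.0, 10.0],  # easier than the moving average -> kept by self-paced
      [2.0, 0.7, 0.0, 10.0],   # harder than the moving average -> dropped by self-paced
      [1.0, 0.1, 1.0, 95.0],   # late epoch -> MentorNet PD uses the spcl_linear rule
  ])
  print('self_paced  :', vstar_self_paced(toy_batch))
  print('spcl_linear :', vstar_spcl_linear(toy_batch))
  print('mentornet_pd:', vstar_mentornet_pd(toy_batch))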