Dataset columns (all string-valued):

    column       min length   max length
    prompt       19           879k
    completion   3            53.8k
    api          8            59
# -*- coding: utf-8 -*- """ Created on Sun Nov 12 17:31:02 2017 @author: gianni """ import numpy as np class Flux0D(): '''Represents the computation of the flux when considering only a single direction''' def compute_flux_nu(self,tau_nu,source_function,solid_angle): '''Computes the observed flux in [W/m2/Hz], given the optical depth tau_nu, the source function and the solid angle of the source seen by the observer.''' return source_function*(1-np.exp(-tau_nu))*solid_angle class FluxUniformSphere(): '''Represents the computation of the flux from a uniform sphere''' min_tau_nu = 1e-2 def compute_flux_nu(self,tau_nu,source_function,solid_angle): '''Computes the observed flux in [W/m2/Hz], given the optical depth tau_nu and the source function.''' #see old Osterbrock book for this formula, appendix 2 #this is the flux per surface of the emitting region #convert to numpy array to avoid ZeroDivisionError: tau_nu = np.array(tau_nu) #for lower tau_nu, the Osterbrock formula becomes numerically unstable: stable_region = tau_nu > self.min_tau_nu with np.errstate(divide='ignore',invalid='ignore'): flux_nu = 2*np.pi*source_function/tau_nu**2\ *(tau_nu**2/2-1+(tau_nu+1)*np.exp(-tau_nu)) flux_nu_Taylor = 2*np.pi*source_function*(tau_nu/3-tau_nu**2/8+tau_nu**3/30 -tau_nu**4/144) #from Wolfram Alpha flux_nu = np.where(stable_region,flux_nu,flux_nu_Taylor) assert np.all(np.isfinite(flux_nu)) #observed flux = (flux at sphere surface)*4*pi*r**2/(4*pi*d**2) #=F_surface*Omega*4/(4*pi) return flux_nu*solid_angle/np.pi class TaylorEscapeProbability(): # use Taylor expansion if tau is below epsilon; this is to avoid numerical #problems with the anlytical formula to compute the escape probability tau_epsilon = 0.00005 min_tau = -1 def beta_analytical(self,tau_nu): raise NotImplementedError def beta_Taylor(self,tau_nu): raise NotImplementedError def beta(self,tau_nu): '''Computes the escape probability from the optical depth tau_nu.''' #with np.errstate(divide='ignore',invalid='ignore',over='ignore'): #the RADEX paper advises that negativ tau (inverted population) cannot be #treated correctly by a non-local code like RADEX, and that results for #lines with tau<~1 should be ignored #Moreover, negative tau can make the code crash: very negative tau #leads to very large transition rates, which makes the matrix of the #rate equations ill-conditioned. #thus, for tau < -1, I just take abs(tau). Note that this is different #from RADEX: they make something quite strange: the have different #approximations for positive large, normal and small tau. 
But then they #use abs(tau) to decide which function to use, but then use tau in that #function tau_nu = np.atleast_1d(np.array(tau_nu)) prob = np.ones_like(tau_nu)*np.inf normal_tau_region = tau_nu > self.tau_epsilon prob[normal_tau_region] = self.beta_analytical(tau_nu[normal_tau_region]) small_tau_region = np.abs(tau_nu) <= self.tau_epsilon #here I use tau even if tau < 0: prob[small_tau_region] = self.beta_Taylor(tau_nu[small_tau_region]) negative_tau_region = (tau_nu >= self.min_tau) & (tau_nu<-self.tau_epsilon) prob[negative_tau_region] = self.beta_analytical(tau_nu[negative_tau_region]) unreliable_negative_region = tau_nu < self.min_tau #here I just use abs(tau) to stabilize the code prob[unreliable_negative_region] = self.beta_analytical( np.abs(tau_nu[unreliable_negative_region])) assert np.all(np.isfinite(prob)) return prob class EscapeProbabilityUniformSphere(TaylorEscapeProbability): '''Represents the escape probability from a uniform spherical medium.''' def beta_analytical(self,tau_nu): '''Computes the escape probability analytically, given the optical depth tau_nu''' #see the RADEX manual for this formula; derivation is found in the old #Osterbrock (1974) book, appendix 2. Note that Osterbrock uses tau for #radius, while I use it for diameter #convert to numpy array to avoid ZeroDivisionError (numpy converts to inf #instead of raising an error) tau_nu = np.array(tau_nu) return 1.5/tau_nu*(1-2/tau_nu**2+(2/tau_nu+2/tau_nu**2)*np.exp(-tau_nu)) def beta_Taylor(self,tau_nu): '''Computes the escape probability using a Taylor expansion, given the optical depth tau_nu''' #Taylor expansion of beta for uniform sphere, easier to evaluate numerically #(for small tau_nu) #Series calculated using Wolfram Alpha; not so easy analytically, to #calculate the limit as tau->0, use rule of L'Hopital return (1 - 0.375*tau_nu + 0.1*tau_nu**2 - 0.0208333*tau_nu**3 + 0.00357143*tau_nu**4) class UniformSphere(EscapeProbabilityUniformSphere,FluxUniformSphere): '''Represents the escape probability and emerging flux from a uniform spherical medium''' pass class UniformSphereRADEX(EscapeProbabilityUniformSphere,Flux0D): """Represents the escape probability from a uniform sphere, but uses the single direction assumption to compute the emerging flux. This is what is done in the original RADEX code.""" pass class UniformFaceOnSlab(Flux0D): #Since I assume the source is in the far field, it is ok to calculate the flux #with the 0D formula """Represents the computation of the flux from a uniform slab that is seen face-on (think of a face-on disk, i.e. x-y-size of much larger than the z-size, where z is along the line of sight)""" theta = np.linspace(0,np.pi/2,200) tau_grid = np.logspace(-3,2,1000) min_tau_nu = np.min(tau_grid) def __init__(self): #the expression for the flux contains an integral term; #here I pre-compute this term so it can be interpolated to speed up the code self.integral_term_grid = np.array([self.integral_term(tau) for tau in self.tau_grid]) def integral_term(self,tau): return np.trapz((1-np.exp(-tau/np.cos(self.theta)))*
completion: np.cos(self.theta)
api: numpy.cos
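A minimal sketch of the expression this completion closes, pulled out of the class; the theta grid comes from the prompt, while the optical depth and the choice of integration variable are assumptions:

import numpy as np

theta = np.linspace(0, np.pi / 2, 200)             # angle grid defined in the prompt
tau = 0.5                                          # hypothetical optical depth
# the masked call supplies the cosine factor of the slab-flux integrand
integrand = (1 - np.exp(-tau / np.cos(theta))) * np.cos(theta)
value = np.trapz(integrand, theta)                 # theta as integration variable is assumed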
from medpy import metric import numpy as np from scipy import ndimage import time from .surface import Surface LARGE = 9001 def dice(input1, input2): return metric.dc(input1, input2) def detect_lesions(prediction_mask, reference_mask, min_overlap=0.5): """ Produces a mask for predicted lesions and a mask for reference lesions, with label IDs matching lesions together. Given a prediction and a reference mask, output a modified version of each where objects that overlap between the two mask share a label. This requires merging labels in the reference mask that are spanned by a single prediction and merging labels in the prediction mask that are spanned by a single reference. In cases where a label can be merged, separately, with more than one other label, a single merge option (label) is chosen to accord the greatest overlap between the reference and prediction objects. After merging and matching, objects in the reference are considered detected if their respective predictions overlap them by more than `min_overlap` (intersection over union). :param prediction_mask: numpy.array :param reference_mask: numpy.array :param min_overlap: float in range [0, 1.] :return: prediction mask (int), reference mask (int), num_detected """ # Initialize detected_mask = np.zeros(prediction_mask.shape, dtype=np.uint8) mod_reference_mask = np.copy(reference_mask) num_detected = 0 if not np.any(reference_mask): return detected_mask, num_detected, 0, 0, 0 if not min_overlap>0 and not min_overlap<=1: raise ValueError("min_overlap must be in [0, 1.]") # Get available IDs (excluding 0) # # To reduce computation time, check only those lesions in the prediction # that have any overlap with the ground truth. p_id_list = np.unique(prediction_mask[reference_mask.nonzero()]) if p_id_list[0]==0: p_id_list = p_id_list[1:] g_id_list = np.unique(reference_mask) if g_id_list[0]==0: g_id_list = g_id_list[1:] # To reduce computation time, get views into reduced size masks. reduced_prediction_mask = rpm = prediction_mask.copy() for p_id in np.unique(prediction_mask): if p_id not in p_id_list and p_id!=0: reduced_prediction_mask[(rpm==p_id).nonzero()] = 0 target_mask = np.logical_or(reference_mask, reduced_prediction_mask) bounding_box = ndimage.find_objects(target_mask)[0] r = reference_mask[bounding_box] p = prediction_mask[bounding_box] d = detected_mask[bounding_box] m = mod_reference_mask[bounding_box] # Compute intersection of predicted lesions with reference lesions. intersection_matrix = np.zeros((len(p_id_list), len(g_id_list)), dtype=np.int32) for i, p_id in enumerate(p_id_list): for j, g_id in enumerate(g_id_list): intersection = np.count_nonzero(np.logical_and(p==p_id, r==g_id)) intersection_matrix[i, j] = intersection def sum_dims(x, axis, dims): ''' Given an array x, collapses dimensions listed in dims along the specified axis, summing them together. Returns the reduced array. ''' x = np.array(x) if len(dims) < 2: return x # Initialize output new_shape = list(x.shape) new_shape[axis] -= len(dims)-1 x_ret = np.zeros(new_shape, dtype=x.dtype) # Sum over dims on axis sum_slices = [slice(None)]*x.ndim sum_slices[axis] = dims dim_sum = np.sum(x[sum_slices], axis=axis, keepdims=True) # Remove all but first dim in dims mask =
completion: np.ones(x.shape, dtype=np.bool)
api: numpy.ones
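A self-contained sketch of the mask-building step the completion performs; note that np.bool was removed in NumPy 1.24, so the modern spelling is plain bool (shapes here are hypothetical):

import numpy as np

x = np.arange(24).reshape(2, 3, 4)      # hypothetical input array
mask = np.ones(x.shape, dtype=bool)     # np.bool in the original; removed in NumPy 1.24+
mask[:, 1, :] = False                   # hide one slice along the collapsed axis
reduced = x[mask].reshape(2, 2, 4)      # x with that slice removed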
import gym
import random
import pybullet
import numpy as np
import pybullet_envs
import scipy.stats as ss
from copy import deepcopy
from multiprocessing import Pool

MAX_SEED = 2**16 - 1


def compute_weight_decay(weight_decay, model_param_list):
    """
    Compute weight decay penalty
    :param weight_decay: (float) weight decay coefficient
    :param model_param_list: (ndarray) weight parameters
    :return: (float) weight decay penalty
    """
    return -weight_decay * np.mean(np.square(model_param_list))


class FixedWeightModule:
    def __init__(self, input_dim, output_dim, bias=False, recurrent=False):
        self.bias = bias
        self.parameters = list()
        self.input_dim = input_dim
        self.output_dim = output_dim

        if self.bias:
            self.bias_param = np.zeros((1, self.output_dim))
            self.parameters.append((self.bias_param, "bias_param"))

        self.recurrent = recurrent
        if self.recurrent:
            self.r_weight =
completion: np.zeros((self.output_dim, self.output_dim))
api: numpy.zeros
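A self-contained sketch of the zero initialisation of the recurrent weights, with a hypothetical dimension and one assumed recurrent update:

import numpy as np

output_dim = 8                                  # hypothetical width
r_weight = np.zeros((output_dim, output_dim))   # the masked initialisation
state = np.ones((1, output_dim))
state = np.tanh(state @ r_weight)               # assumed recurrent step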
#!/usr/bin/env python # -*- coding: utf-8 -*- # File: Ampel-contrib-HU/ampel/contrib/hu/t2/T2LCQuality.py # License: BSD-3-Clause # Author: <EMAIL> # Date: 11.09.2018 # Last Modified Date: 06.06.2020 # Last Modified By: <NAME> <<EMAIL>> from typing import Any from astropy.table import Table from scipy.interpolate import interp1d from ampel.types import UBson from ampel.abstract.AbsLightCurveT2Unit import AbsLightCurveT2Unit from ampel.struct.UnitResult import UnitResult from ampel.view.LightCurve import LightCurve class T2LCQuality(AbsLightCurveT2Unit): """ determine the 'quality' of the light curve by computing ratios between the number of detection and that of upper limits. The LC 'quality' is measured by two numbers: * 'detection strenght' = n_det / n_obs * 'detection purity' = n_det / n_det + n_strong_ulims where: n_det: total number of detections n_obs: number of observations (detections + upper lims) computed from the time of the first detection. n_strong_ulims: number of upper limits which are below (higher magnitude) than what expected from a simple interpolation between the detections. That is, be interp_lc the function that iterpolates the detections (returning magnitude and accepting a time), an upper limit at time jd_ul of magnitude mag_ul is considered 'strong' if: interp_lc(jd_ul) < mag_ul NOTE that in the calculations of the strength, all the upper limits happening after the first detection are considered, while for the 'purity' metric, the default behaviour is to just consider ulims happening after the first, and before the last detection. This behaviour can be changed via the 'exclude_ulims' parameter of the run_config dictionary. """ filter_names: dict = {1: "g", 2: "r", 3: "i"} filter_ids: list[int] = [1, 2, 3] exclude_ulims_after: bool = True lc_filter: list[dict[str, Any]] = [ {"attribute": "isdiffpos", "operator": "!=", "value": "f"}, {"attribute": "isdiffpos", "operator": "!=", "value": "0"}, ] def count_strong_ulims(self, det_tab, ulim_tab): """ compute the number of strong upper limts in the light curve. This is defined as the number of upper limits which are below (higher magnitude) than what expected from a simple interpolation between the detections. """ # interpolate detections interp_lc = interp1d( det_tab["jd"], det_tab["magpsf"], kind="zero", fill_value="extrapolate" ) # loop on uls and count the strong ones n_strong = 0 for ul in ulim_tab: expected_mag = interp_lc(ul["jd"]) # self.logger.debug("upper limit at jd %f is at %f, source should be at %f"% # (ul['jd'], ul['mag'], expected_mag)) if ul["magpsf"] > expected_mag: n_strong += 1 return n_strong def compute_strength_purity(self, dets, ulims): """ given the detection and upper limit history, compute the strength and purity of the light curve. exclude_ul is a dict of the {'before': bool, 'after': bool} type, with flags can be used to mask out upper limts that happends before the first detections and/or after the last one. """ # compute time of first and last detection and mask out upper before first detection det_start, det_end = dets["jd"].min(), dets["jd"].max() ulims = ulims[ulims["jd"] > det_start] self.logger.debug( f"retained {len(ulims)} upper limits from start of detection (at {det_start} jd)" ) # if you don't have any upper limit to consider, easy if len(ulims) == 0: return 0, 1, 1 # compute number of detections, total observations, and upper limts. 
# for the strength, use all ulims from first detection on strength = float(len(dets)) / (len(dets) + len(ulims)) # for the strong upper limits, eventually exclude those which happends after the last detection if self.exclude_ulims_after: ulims = ulims[ulims["jd"] < det_end] n_strong_ulims = self.count_strong_ulims(dets, ulims) purity = float(len(dets)) / (len(dets) + n_strong_ulims) # return return n_strong_ulims, strength, purity def test_plot(self, dets, ulims, n_strong_ulims, purity, strength, fid): """ but useful for debugging """ import matplotlib.pyplot as plt import numpy as np interp_lc = interp1d( dets["jd"], dets["magpsf"], kind="zero", fill_value="extrapolate" ) min_t = min([dets["jd"].min(), ulims["jd"].min()]) max_t = max([dets["jd"].max(), ulims["jd"].max()]) jd_int =
completion: np.arange(min_t, max_t, 0.1)
api: numpy.arange
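A self-contained sketch of the masked time grid fed to the zero-order interpolator, with hypothetical detections:

import numpy as np
from scipy.interpolate import interp1d

jd = np.array([0.0, 1.0, 2.5, 4.0])           # hypothetical detection epochs
mag = np.array([19.0, 18.5, 18.7, 19.2])      # hypothetical magnitudes
interp_lc = interp1d(jd, mag, kind="zero", fill_value="extrapolate")
jd_int = np.arange(jd.min(), jd.max(), 0.1)   # the masked call
mag_int = interp_lc(jd_int)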
import numpy as np
import theano.tensor as tt
import pymc3 as pm
import starry
from starry._plotting import (
    get_moll_latitude_lines,
    get_moll_longitude_lines,
)
from matplotlib import pyplot as plt
from matplotlib import colors
from scipy import optimize

np.random.seed(42)
starry.config.lazy = True

ydeg = 20
map = starry.Map(ydeg)
lat, lon, Y2P, P2Y, Dx, Dy = map.get_pixel_transforms(oversample=4)
npix = Y2P.shape[0]

std_p = 1.62

with pm.Model() as model:
    p = pm.Exponential("p", 1 / std_p, shape=(npix,))
    x = tt.dot(P2Y, p)
    pm.Deterministic("x", x)
    p_back = tt.dot(Y2P, x)
    pm.Deterministic("p_back", p_back)
    trace_pp = pm.sample_prior_predictive(10)


# Convert lat, lon to x, y coordinates in Mollweide projection
def lon_lat_to_mollweide(lon, lat):
    lat *= np.pi / 180
    lon *= np.pi / 180

    f = lambda x: 2 * x + np.sin(2 * x) - np.pi * np.sin(lat)
    theta = optimize.newton(f, 0.3)

    x = 2 *
completion: np.sqrt(2)
api: numpy.sqrt
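The prompt stops right after `2 *`; the standard Mollweide formulas continue with the masked square root, as in this scalar sketch (inputs already in radians):

import numpy as np
from scipy import optimize

lon, lat = 0.5, 0.3                                # hypothetical point, radians
f = lambda x: 2 * x + np.sin(2 * x) - np.pi * np.sin(lat)
theta = optimize.newton(f, 0.3)                    # auxiliary angle, as in the prompt
x = 2 * np.sqrt(2) / np.pi * lon * np.cos(theta)   # standard Mollweide x
y = np.sqrt(2) * np.sin(theta)                     # standard Mollweide y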
import numpy as np
import torch, sys, os, pdb
import pandas as pd
from torch import optim
from torch.autograd import Variable
from .utils import DataLoaderCustom, ConfusionMatrixPlot, compute_ap, normalize
from sklearn.model_selection import train_test_split
from tqdm import tqdm
from .models import Classifier, Discriminator, ClassifierBig
from matplotlib import pyplot as plt
from sklearn.metrics import ConfusionMatrixDisplay, confusion_matrix
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler, MinMaxScaler


class JindLib:
    global MODEL_WIDTH, LDIM
    MODEL_WIDTH = 1500
    LDIM = 256

    def __init__(self, gene_mat, cell_labels, path):
        self.class2num = None
        self.num2class = None
        self.reduced_features = None
        self.reduce_method = None
        self.model = None
        self.preprocessed = False
        self.path = path
        os.system('mkdir -p {}'.format(self.path))

        self.raw_features = gene_mat.values
        self.cell_ids = list(gene_mat.index)
        self.gene_names = list(gene_mat.columns)

        classes = list(set(cell_labels))
        classes.sort()
        self.classes = classes
        self.n_classes = len(classes)
        self.class2num = class2num = {c: i for (i, c) in enumerate(classes)}
        self.class2num['Unassigned'] = self.n_classes
        self.num2class = num2class = {i: c for (i, c) in enumerate(classes)}
        self.num2class[self.n_classes] = 'Unassigned'
        self.labels =
completion: np.array([class2num[i] for i in cell_labels])
api: numpy.array
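A self-contained sketch of the masked label-encoding step, with hypothetical cell labels:

import numpy as np

cell_labels = ["B", "T", "B", "NK"]                     # hypothetical labels
classes = sorted(set(cell_labels))
class2num = {c: i for i, c in enumerate(classes)}
labels = np.array([class2num[i] for i in cell_labels])  # the masked call
# labels -> array([0, 2, 0, 1])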
import cv2 import numpy as np import copy import os from .seg import SegmentationModule, vis def naive_driver(info, continuous): if info['angle'] > 0.5 or (info['trackPos'] < -1 and info['angle'] > 0): return np.array([1.0, 0.1]) if continuous else 0 elif info['angle'] < -0.5 or (info['trackPos'] > 3 and info['angle'] < 0): return np.array([1.0, -0.1]) if continuous else 2 return np.array([1.0, 0.0]) if continuous else 1 class GTAWrapper: def __init__(self, env, args, imsize=(256, 256), random_reset=True, continuous=True): self.env = env self.imsize = imsize self.args = args self.random_reset = random_reset self.continuous = continuous self.epi_len = 0 self.coll_cnt = 0 self.speed_list = [] self.num_off = 0 self.obs = None self.model = SegmentationModule(256, 65) self.model = self.model.cuda().eval() self.timestep = 0 self.episode = 0 self.spath = None def get_segmentation(self): print('get segmentation') seg = self.model(self.obs) if True: img = np.concatenate([self.obs, vis(seg)], axis=1) segsave_path = os.path.join(self.spath, "{}.png".format(self.timestep)) cv2.imwrite(segsave_path, img) seg = cv2.resize(seg, self.imsize, interpolation=cv2.INTER_NEAREST) print('seg return') return seg def reset(self): self.episode += 1 self.spath = os.path.join(self.args.save_path, "segs/{}".format(self.episode)) if not os.path.isdir(self.spath): os.makedirs(self.spath) self.timestep = 0 self.env.done = True self.num_off = 0 obs, info = self.env.reset() print(obs.shape) self.epi_len = 0 self.coll_cnt = 0 self.speed_list = [] obs = obs[:-15,:,:] self.obs = cv2.resize(obs[30:740, :], (640, 352)) info['seg'] = self.get_segmentation() info['angle'] = 0.1 try: info['pos'] = info['location'] except: info['pos'] = [1, 420, 900] try: road_dir = np.arctan(info['roadinfo'][9]/info['roadinfo'][8]) velo_dir = np.arctan(info['roadinfo'][6]/info['roadinfo'][5]) road_velo_angle = road_dir-velo_dir angle_sign = 1 if road_velo_angle > 0 else -1 road_velo_angle = min(np.abs(road_velo_angle), np.abs(np.pi / 2 - np.abs(road_velo_angle))) except: road_velo_angle = np.pi/2.0+0.1 angle_sign = 1 info['angle'] = 0 #angle_sign * road_velo_angle off_flag = 1-int(info['roadinfo'][0]) try: road_center = 0.5*(np.array(info['roadinfo'][11:13])+np.array(info['roadinfo'][14:16])) road_width = np.sqrt(np.sum((np.array(info['roadinfo'][11:13]-np.array(info['roadinfo'][14:16])))**2.0)) trackPos = np.sqrt(np.sum((np.array(info['location'][:2])-road_center)**2.0)) left_dist = np.sqrt(np.sum((np.array(info['location'][:2])-np.array(info['roadinfo'][11:13]))**2.0)) right_dist = np.sqrt(np.sum((np.array(info['location'][:2])-np.array(info['roadinfo'][14:16]))**2.0)) if left_dist > right_dist: dist_sign = -1 else: dist_sign = 1 except: dist_sign = 1 road_width = 5 trackPos = 5 if off_flag else 2 info['trackPos'] = trackPos*dist_sign off_flag = False coll_flag = off_flag#int(abs(info['trackPos']) > 7.0) info['collision'] = bool(coll_flag) info['offroad'] = bool(off_flag) # dump place holder: info['collision_other'] = 0 info['collision_vehicles'] = 0 info['coll_with'] = 0 info['offlane'] = 0 info["bboxes"] = [] info["directions"] = [] return cv2.resize(obs, self.imsize), info def step(self, action): self.timestep += 1 real_action = copy.deepcopy(action) this_action = np.zeros(3) this_action[0] = real_action[0]*0.4 + 0.6 this_action[1] = 0 this_action[2] = real_action[1] print(this_action) self.epi_len += 1 obs, info, real_done = self.env.step(this_action) self.obs = cv2.resize(obs[30:740, :], (640, 352)) info['seg'] = self.get_segmentation() 
self.speed_list.append(info['speed']) if np.mean(self.speed_list[-10:]) < 0.5 and len(self.speed_list) > 30: done = True else: done = False info['angle'] = 0#0.1 try: info['pos'] = info['location'] except: info['pos'] = [1, 420, 900] if True: try: road_dir = np.arctan(info['roadinfo'][9]/info['roadinfo'][8]) velo_dir = np.arctan(info['roadinfo'][6]/info['roadinfo'][5]) road_velo_angle = road_dir-velo_dir angle_sign = 1 if road_velo_angle > 0 else -1 road_velo_angle = min(np.abs(road_velo_angle), np.abs(np.pi / 2 - np.abs(road_velo_angle))) except: road_velo_angle = np.pi/2.0#+0.1 angle_sign = 1 else: road_velo_angle = 0 angle_sign = 1 info['angle'] = angle_sign * road_velo_angle off_flag = 1-int(info['roadinfo'][0]) try: road_center = 0.5*(np.array(info['roadinfo'][11:13])+np.array(info['roadinfo'][14:16])) road_width = np.sqrt(np.sum((np.array(info['roadinfo'][11:13]-np.array(info['roadinfo'][14:16])))**2.0)) trackPos = np.sqrt(np.sum((
completion: np.array(info['location'][:2])
api: numpy.array
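A self-contained sketch of the track-position distance the masked call feeds, with hypothetical road-edge coordinates:

import numpy as np

location = [100.0, 50.0, 3.0]                  # hypothetical x, y, z position
road_left = np.array([98.0, 47.0])             # hypothetical lane edges
road_right = np.array([103.0, 52.0])
road_center = 0.5 * (road_left + road_right)
# distance of the vehicle from the road centre, as in the prompt
trackPos = np.sqrt(np.sum((np.array(location[:2]) - road_center) ** 2.0))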
import math import matplotlib.pyplot as plt import pandas as pd import numpy as np from sklearn.linear_model import Lasso from sklearn.linear_model import Ridge from sklearn.linear_model import LinearRegression from sklearn.feature_selection import RFE from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import mean_squared_error as MSE def main(): df, el, unit, dEl, coul, dCell, cell = get_data_h5('data.hdf5') print(df.dtypes) def do_feature_selection(name, data_file): """ Does lasso regression, ridge regression, recursive feature elimination, and random forrest modeling for each of the nine properties being modeled Inputs ------ name : String containing the name of the csv file containing the data to be analyzed (i.e. "CdTe" for CdTe.csv) data_file : String containing the name of the file the model statistics will be stored in, where the RMSE and R-Squared values for each model will be stored Outputs ------- cd_list : Contains four lists, one for each model type (Lasso, Ridge, Recursive Feature Elimination, and Random Forrest), each of which contains the coefficients for each descriptor used in the model to predict $\Delta$H(Cd-rich) mod_list : Contains four lists, one for each model type (Lasso, Ridge, Recursive Feature Elimination, and Random Forrest), each of which contains the coefficients for each descriptor used in the model to predict $\Delta$H(Mod) x_list : Contains four lists, one for each model type (Lasso, Ridge, Recursive Feature Elimination, and Random Forrest), each of which contains the coefficients for each descriptor used in the model to predict $\Delta$H(X-rich) plus_3_list : Contains four lists, one for each model type (Lasso, Ridge, Recursive Feature Elimination, and Random Forrest), each of which contains the coefficients for each descriptor used in the model to predict the (+3/+2) charge transfer state plus_2_list : Contains four lists, one for each model type (Lasso, Ridge, Recursive Feature Elimination, and Random Forrest), each of which contains the coefficients for each descriptor used in the model to predict the (+2/+1) charge transfer state plus_1_list : Contains four lists, one for each model type (Lasso, Ridge, Recursive Feature Elimination, and Random Forrest), each of which contains the coefficients for each descriptor used in the model to predict the (+1/0) charge transfer state minus_1_list : Contains four lists, one for each model type (Lasso, Ridge, Recursive Feature Elimination, and Random Forrest), each of which contains the coefficients for each descriptor used in the model to predict the (0/-1) charge transfer state minus_2_list : Contains four lists, one for each model type (Lasso, Ridge, Recursive Feature Elimination, and Random Forrest), each of which contains the coefficients for each descriptor used in the model to predict the (-1/-2) charge transfer state minus_3_list : Contains four lists, one for each model type (Lasso, Ridge, Recursive Feature Elimination, and Random Forrest), each of which contains the coefficients for each descriptor used in the model to predict the (-2/-3) charge transfer state """ #assert len(types) == len(count) CdS_df = get_data_csv(name) X = CdS_df[['Period', 'Group', 'Site', 'Delta Ion. Rad.', 'Delta At. Wt.', 'Delta Cov. Rad.', 'Delta Ion. En.', 'Delta At. Rad.', 'Delta EA', 'Delta EN', 'Delta At. 
Num.', 'Delta Val.', '# Cd Neighbors', '# X Neighbors', 'Corrected VBM (eV)', 'Corrected CBM (eV)', '∆H_uc(Cd-rich)', '∆H_uc(Mod)', '∆H_uc(X-rich)']] data_file.write(name) data_file.write('\n\t# rows: ' + str(CdS_df.shape[0])) data_file.write("\n\n\t∆H(Cd-rich):") Y = CdS_df[['∆H(Cd-rich)']] cd_las_coefs, cd_las_list = lasso_reg(X,Y,data_file) cd_rr_coefs, cd_rr_list = ridge_reg(X,Y,data_file) #cd_rfe_list = rfe_selection(X,Y.values.ravel(),data_file) cd_rfr_list = rf_reg(X,Y.values.ravel(),data_file) data_file.write("\n\n\t∆H(Mod):") Y = CdS_df[['∆H(Mod)']] mod_las_coefs, mod_las_list = lasso_reg(X,Y,data_file) mod_rr_coefs, mod_rr_list = ridge_reg(X,Y,data_file) #mod_rfe_list = rfe_selection(X,Y.values.ravel(),data_file) mod_rfr_list = rf_reg(X,Y.values.ravel(),data_file) data_file.write("\n\n\t∆H(X-rich):") Y = CdS_df[['∆H(X-rich)']] x_las_coefs, x_las_list = lasso_reg(X,Y,data_file) x_rr_coefs, x_rr_list = ridge_reg(X,Y,data_file) #x_rfe_list = rfe_selection(X,Y.values.ravel(),data_file) x_rfr_list = rf_reg(X,Y.values.ravel(),data_file) data_file.write("\n\n\t(+3/+2):") Y = CdS_df[['(+3/+2)']] plus_3_las_coefs, plus_3_las_list = lasso_reg(X,Y,data_file) plus_3_rr_coefs, plus_3_rr_list = ridge_reg(X,Y,data_file) #plus_3_rfe_list = rfe_selection(X,Y.values.ravel(),data_file) plus_3_rfr_list = rf_reg(X,Y.values.ravel(),data_file) data_file.write("\n\n\t(+2/+1):") Y = CdS_df[['(+2/+1)']] plus_2_las_coefs, plus_2_las_list = lasso_reg(X,Y,data_file) plus_2_rr_coefs, plus_2_rr_list = ridge_reg(X,Y,data_file) #plus_2_rfe_list = rfe_selection(X,Y.values.ravel(),data_file) plus_2_rfr_list = rf_reg(X,Y.values.ravel(),data_file) data_file.write("\n\n\t(+1/0):") Y = CdS_df[['(+1/0)']] plus_1_las_coefs, plus_1_las_list = lasso_reg(X,Y,data_file) plus_1_rr_coefs, plus_1_rr_list = ridge_reg(X,Y,data_file) #plus_1_rfe_list = rfe_selection(X,Y.values.ravel(),data_file) plus_1_rfr_list = rf_reg(X,Y.values.ravel(),data_file) data_file.write("\n\n\t(0/-1):") Y = CdS_df[['(0/-1)']] minus_1_las_coefs, minus_1_las_list = lasso_reg(X,Y,data_file) minus_1_rr_coefs, minus_1_rr_list = ridge_reg(X,Y,data_file) #minus_1_rfe_list = rfe_selection(X,Y.values.ravel(),data_file) minus_1_rfr_list = rf_reg(X,Y.values.ravel(),data_file) data_file.write("\n\n\t(-1/-2):") Y = CdS_df[['(-1/-2)']] minus_2_las_coefs, minus_2_las_list = lasso_reg(X,Y,data_file) minus_2_rr_coefs, minus_2_rr_list = ridge_reg(X,Y,data_file) #minus_2_rfe_list = rfe_selection(X,Y.values.ravel(),data_file) minus_2_rfr_list = rf_reg(X,Y.values.ravel(),data_file) data_file.write("\n\n\t(-2/-3):") Y = CdS_df[['(-2/-3)']] minus_3_las_coefs, minus_3_las_list = lasso_reg(X,Y,data_file) minus_3_rr_coefs, minus_3_rr_list = ridge_reg(X,Y,data_file) #minus_3_rfe_list = rfe_selection(X,Y.values.ravel(),data_file) minus_3_rfr_list = rf_reg(X,Y.values.ravel(),data_file) data_file.write("\n\n") cd_list = np.concatenate((cd_las_list, cd_rr_list, cd_rfr_list), axis=0) mod_list = np.concatenate((mod_las_list, mod_rr_list, mod_rfr_list), axis=0) x_list = np.concatenate((x_las_list, x_rr_list, x_rfr_list), axis=0) plus_3_list = np.concatenate((plus_3_las_list, plus_3_rr_list, plus_3_rfr_list), axis=0) plus_2_list = np.concatenate((plus_2_las_list, plus_2_rr_list, plus_2_rfr_list), axis=0) plus_1_list = np.concatenate((plus_1_las_list, plus_1_rr_list, plus_1_rfr_list), axis=0) minus_1_list = np.concatenate((minus_1_las_list, minus_1_rr_list, minus_1_rfr_list), axis=0) minus_2_list = np.concatenate((minus_2_las_list, minus_2_rr_list, minus_2_rfr_list), axis=0) 
minus_3_list = np.concatenate((minus_3_las_list, minus_3_rr_list, minus_3_rfr_list), axis=0) return cd_list, mod_list, x_list, \ plus_3_list, plus_2_list, plus_1_list, \ minus_1_list, minus_2_list, minus_3_list def rfe_selection(X,Y,data_file,p=False): """ Does recursive feature elimination on the data provided Inputs ------ X : Coulumns of the pandas dataframe that contains the data for each of the descriptors to be used Y : Column of the pandas dataframe that contains the values to be predicted data_file : String containing the name of the file the model statistics will be stored in, where the RMSE and R-Squared values for each model will be stored Outputs ------- coefs : Contains a list of the coefficient for each descriptor used """ n=len(X.columns) high_score=0 nof=0 score_list =[] temp = [] for n in range(len(nof_list)): X_train, X_test, y_train, y_test = train_test_split(X,Y, test_size = 0.3, random_state = 0) model = LinearRegression() rfe = RFE(model,nof_list[n]) X_train_rfe = rfe.fit_transform(X_train,y_train) X_test_rfe = rfe.transform(X_test) model.fit(X_train_rfe,y_train) score = model.score(X_test_rfe,y_test) score_list.append(score) if(n==0 or score>high_score): high_score = score nof = nof_list[n] coefs_ = model.coef_ temp = rfe.support_ count = 0 coefs = np.zeros(19) for i in range(len(temp)): if (temp[i] == True): coefs[i] = coefs_[count] count += 1 data_file.write('\n\t\tRFE Score with %d features: \t\t\t%f' % (nof, high_score)) if(p==True): print('\n\t\tRFE Score with %d features: \t\t\t%f' % (nof, high_score)) #Fix spacing return np.array([coefs]) def ridge_reg(X,Y,data_file,p=False): """ Does ridge regression on the data provided Inputs ------ X : Coulumns of the pandas dataframe that contains the data for each of the descriptors to be used Y : Column of the pandas dataframe that contains the values to be predicted data_file : String containing the name of the file the model statistics will be stored in, where the RMSE and R-Squared values for each model will be stored Outputs ------- coefs : Contains a list of the coefficient for each descriptor used """ X_train,X_test,y_train,y_test=train_test_split(X,Y,test_size=0.3,random_state=3) high_score = 0 alpha_ = 0 #coefs = np.zeros(19) rr0001 = Ridge(alpha=0.001) rr0001.fit(X_train, y_train) Ridge_train_score0001 = rr0001.score(X_train,y_train) Ridge_test_score0001 = rr0001.score(X_test, y_test) high_score = Ridge_test_score0001 alpha_ = 0.001 coefs = rr0001.coef_ pred = rr0001.predict(X_test) rmse = np.sqrt(MSE(y_test, pred)) rr001 = Ridge(alpha=0.01) rr001.fit(X_train, y_train) Ridge_train_score001 = rr001.score(X_train,y_train) Ridge_test_score001 = rr001.score(X_test, y_test) if(Ridge_test_score001 > high_score): high_score = Ridge_test_score001 alpha_ = 0.01 coefs = rr001.coef_ pred = rr001.predict(X_test) rmse = np.sqrt(MSE(y_test, pred)) rr01 = Ridge(alpha=0.1) rr01.fit(X_train, y_train) Ridge_train_score01 = rr01.score(X_train,y_train) Ridge_test_score01 = rr01.score(X_test, y_test) if(Ridge_test_score01 > high_score): high_score = Ridge_test_score01 alpha_ = 0.1 coefs = rr01.coef_ pred = rr01.predict(X_test) rmse = np.sqrt(MSE(y_test, pred)) rr10 = Ridge(alpha=10) rr10.fit(X_train, y_train) Ridge_train_score10 = rr10.score(X_train,y_train) Ridge_test_score10 = rr10.score(X_test, y_test) if(Ridge_test_score10 > high_score): high_score = Ridge_test_score10 alpha_ = 10 coefs = rr10.coef_ pred = rr10.predict(X_test) rmse = np.sqrt(MSE(y_test, pred)) rr100 = Ridge(alpha=100) rr100.fit(X_train, y_train) 
Ridge_train_score100 = rr100.score(X_train,y_train) Ridge_test_score100 = rr100.score(X_test, y_test) if(Ridge_test_score100 > high_score): high_score = Ridge_test_score100 alpha_ = 100 coefs = rr100.coef_ pred = rr100.predict(X_test) rmse = np.sqrt(MSE(y_test, pred)) rr1000 = Ridge(alpha=1000) rr1000.fit(X_train, y_train) Ridge_train_score1000 = rr1000.score(X_train,y_train) Ridge_test_score1000 = rr1000.score(X_test, y_test) if(Ridge_test_score1000 > high_score): high_score = Ridge_test_score1000 alpha_ = 1000 coefs = rr1000.coef_ pred = rr1000.predict(X_test) rmse = np.sqrt(MSE(y_test, pred)) data_file.write('\n\t\tRidge Regression Score with alpha=%f: \t%f' % (alpha_, high_score)) data_file.write('\n\t\t\tRMSE: \t\t%f' % (rmse)) if(p==True): print('\n\t\tRidge Regression Score with alpha=%f: \t%f' % (alpha_, high_score)) print('\n\t\tRMSE: \t\t%f' % (rmse)) return np.concatenate((rr001.coef_, rr10.coef_, rr100.coef_, rr1000.coef_), axis=0), np.array(coefs) def lasso_reg(X,Y,data_file,p=False): """ Does lasso regression on the data provided Inputs ------ X : Coulumns of the pandas dataframe that contains the data for each of the descriptors to be used Y : Column of the pandas dataframe that contains the values to be predicted data_file : String containing the name of the file the model statistics will be stored in, where the RMSE and R-Squared values for each model will be stored Outputs ------- coefs : Contains a list of the coefficient for each descriptor used """ X_train,X_test,y_train,y_test=train_test_split(X,Y, test_size=0.3, random_state=31) high_score = 0 alpha_ = 0 #coefs = np.zeros(19) lasso = Lasso() lasso.fit(X_train,y_train) train_score=lasso.score(X_train,y_train) test_score=lasso.score(X_test,y_test) coeff_used = np.sum(lasso.coef_!=0) high_score = test_score alpha_ = 1 coefs = lasso.coef_ pred = lasso.predict(X_test) rmse = np.sqrt(MSE(y_test, pred)) lasso01 = Lasso(alpha=0.1, max_iter=10e5) lasso01.fit(X_train,y_train) train_score01=lasso01.score(X_train,y_train) test_score01=lasso01.score(X_test,y_test) coeff_used01 =
completion: np.sum(lasso01.coef_!=0)
api: numpy.sum
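A self-contained sketch of the masked sparsity count on a fitted lasso, using synthetic data (newer scikit-learn requires an integer max_iter, hence the cast):

import numpy as np
from sklearn.linear_model import Lasso

rng = np.random.default_rng(0)
X = rng.normal(size=(50, 10))
y = X[:, 0] - 2 * X[:, 3] + rng.normal(scale=0.1, size=50)
lasso01 = Lasso(alpha=0.1, max_iter=int(10e5)).fit(X, y)
coeff_used01 = np.sum(lasso01.coef_ != 0)   # count of active coefficients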
# coding: utf-8

# In[2]:

'''
Statistical Computing for Scientists and Engineers
Homework 4
Problem 3 b1
Fall 2018
University of Notre Dame
'''

import numpy as np
import matplotlib.pyplot as plt
import math
import scipy.stats


# the true distribution
def f(v):
    return scipy.stats.gamma.pdf(v, a=4.3, scale=1/6.2)


# the proposal distribution
def q(v):
    return scipy.stats.gamma.pdf(v, a=5, scale=1/6)


# Initialization
numSamples = 50000
samp = np.zeros(numSamples)
samp[0] = 5

# Accept-reject algorithm (unused here): sample from the gamma proposal
# def accept_reject():
#     samples = np.random.gamma(4, 1/7, numSamples)
#     acceptanceProb = f(samples)/(M*q(samples))
#     unif_samp = np.random.rand(1, numSamples)
#     accepted = unif_samp < acceptanceProb
#     return samples, accepted, unif_samp

# all the samps, accepted, unif_samps
# samps, accepteds, unif_samps = accept_reject()

# Metropolis-Hastings loop with an independence proposal
for i in range(1, numSamples):
    y = scipy.stats.gamma.rvs(5, 0, scale=1/6)
    prob = min(1, q(samp[i-1])/q(y)*(f(y)/f(samp[i-1])))
    u = np.random.uniform()
    if (u <= prob):
        samp[i] = y
    else:
        samp[i] = samp[i-1]

# calculate the expectation
E =
completion: np.array([0.0]*numSamples)
api: numpy.array
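A self-contained sketch of the masked accumulator; the running-mean update after it is an assumption about how the expectation is computed:

import numpy as np

numSamples = 1000
samp = np.random.gamma(4.3, 1 / 6.2, numSamples)  # stand-in for the MH chain
E = np.array([0.0] * numSamples)                  # the masked call
E[0] = samp[0]
for i in range(1, numSamples):
    E[i] = (E[i - 1] * i + samp[i]) / (i + 1)     # assumed running expectation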
import numpy as np
import time
import cv2


def sample_angle():
    d = np.random.binomial(1, 0.5)
    theta = np.random.uniform(np.pi/12, np.pi - np.pi/12) * (-1)**d
    return theta


class Player():
    def __init__(self, x, board_size, bat_size, dtheta=np.pi/12, dy=3):
        self.x = x
        self.y = np.random.randint(bat_size + 1, board_size[0] - bat_size - 2)
        self.theta = np.random.uniform(0, 2*np.pi)
        self.dy = dy
        self.dtheta = dtheta
        self.y_max = board_size[0] - bat_size - 1
        self.y_min = bat_size
        self.score = 0
        self.theta_measured = 0

    def update(self, action):
        if action == 0:  # Move up
            self.y += self.dy
            if self.y > self.y_max:
                self.y = self.y_max
        elif action == 1:  # Move down
            self.y -= self.dy
            if self.y < self.y_min:
                self.y = self.y_min
        elif action == 3:  # Turret up
            self.theta += self.dtheta
        elif action == 4:  # Turret down
            self.theta -= self.dtheta


class Ball():
    def __init__(self, x, y, V, theta):
        self.V = V
        self.x = x
        self.y = y
        self.theta = theta
        self.quantum_hits = 0
        self.polarization = np.random.uniform(0.0, np.pi)
        self.visible = 255


class QPP():
    def __init__(self, n_players=1, board_size=(60, 60, 60), V=2, n_rounds=21, res=0.2, mode="quantum"):
        self.bat_size = 6
        self.board_size = board_size
        self.board = np.zeros((int(board_size[0]/res), int(board_size[1]/res)))
        self.res = res
        self.ball = Ball(x=board_size[0]/2, y=board_size[1]/2, V=V, theta=sample_angle())
        self.mode = mode
        self.left_player = Player(6, board_size, self.bat_size)
        self.right_player = Player(board_size[1] - 6, board_size, self.bat_size)
        self.done = False
        self.n_rounds = n_rounds
        self.round = 0
        self.n_steps = 0
        self.board_angle = np.arctan((board_size[0] - board_size[2])/board_size[1])
        self.quantum_hits = 0

    def direction_probability(self, pose):
        if pose == 'left':
            t1 = np.remainder(self.left_player.theta, 2*np.pi)
            t1 = np.min([t1, 2*np.pi - t1])
            t2 = np.remainder(self.ball.polarization, 2*np.pi)
            t2 = np.min([t2, 2*np.pi - t2])
            x = np.abs(t1 - t2)
            P = (1 + np.cos(2*x))/2
        elif pose == 'right':
            t1 = np.remainder(self.ball.polarization, 2*np.pi)
            t1 = np.min([t1, 2*np.pi - t1])
            t2 =
completion: np.remainder(self.right_player.theta,2*np.pi)
api: numpy.remainder
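A self-contained sketch of the masked angle wrapping and the Malus-law probability it feeds, with hypothetical angles:

import numpy as np

polarization = 7.5                            # hypothetical ball polarization
player_theta = 0.4                            # hypothetical turret angle
t1 = np.remainder(polarization, 2 * np.pi)    # wrap into [0, 2*pi)
t1 = np.min([t1, 2 * np.pi - t1])
t2 = np.remainder(player_theta, 2 * np.pi)    # the masked call, for the right player
t2 = np.min([t2, 2 * np.pi - t2])
P = (1 + np.cos(2 * np.abs(t1 - t2))) / 2     # detection probability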
"""Implementation of Receiver Operator Characteristic.""" import numpy as np from warnings import warn def _check(scores, true_labels): """Raise exceptions or warnings for wrong or questionable inputs.""" if scores.ndim != 1 or true_labels.ndim !=1: raise ValueError("Scores and labels must be one dimensional arrays") if scores.size != true_labels.size: raise ValueError("Scores and labels must have same number of entries") # test that labels are exclusively [0, 1] test_value = np.setdiff1d(np.array([0, 1]), true_labels).size test_value += np.setdiff1d(true_labels, np.array([0, 1])).size if test_value > 0: raise ValueError("True sample class labels\n" "must be either 0 or 1, exclusively.") if
completion: np.unique(scores)
api: numpy.unique
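The prompt is cut at the final `if`; one plausible (but purely guessed) continuation is a tie check on the scores:

import numpy as np
from warnings import warn

scores = np.array([0.1, 0.4, 0.4, 0.8])
# hypothetical continuation: tied scores make ROC points coincide
if np.unique(scores).size != scores.size:
    warn("Scores contain ties.")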
""" Reference: <NAME> et al. "Deep Neural Networks for YouTube Recommendations" (https://static.googleusercontent.com/media/research.google.com/zh-CN//pubs/archive/45530.pdf) author: massquantity """ import os from itertools import islice import numpy as np import tensorflow as tf2 from tensorflow.keras.initializers import ( zeros as tf_zeros, truncated_normal as tf_truncated_normal ) from .base import Base, TfMixin from ..data.data_generator import DataGenSequence from ..data.sequence import sparse_user_last_interacted from ..evaluation.evaluate import EvalMixin from ..utils.tf_ops import ( reg_config, dropout_config, dense_nn, lr_decay_config, multi_sparse_combine_embedding ) from ..utils.misc import time_block, colorize, assign_oov_vector, count_params tf = tf2.compat.v1 tf.disable_v2_behavior() class YouTuBeRetrieval(Base, TfMixin, EvalMixin): """ The model implemented mainly corresponds to the candidate generation phase based on the original paper. """ # user_variables = [] item_variables = ["item_interaction_features", "nce_weights", "nce_biases"] sparse_variables = ["sparse_features"] dense_variables = ["dense_features"] user_variables_np = ["user_vector"] item_variables_np = ["item_weights"] def __init__( self, task="ranking", data_info=None, embed_size=16, n_epochs=20, lr=0.01, lr_decay=False, reg=None, batch_size=256, num_sampled_per_batch=None, use_bn=True, dropout_rate=None, hidden_units="128,64,32", loss_type="nce", recent_num=10, random_num=None, multi_sparse_combiner="sqrtn", sampler="uniform", seed=42, lower_upper_bound=None, tf_sess_config=None ): Base.__init__(self, task, data_info, lower_upper_bound) TfMixin.__init__(self, tf_sess_config) EvalMixin.__init__(self, task, data_info) self.task = task self.data_info = data_info self.embed_size = embed_size self.n_epochs = n_epochs self.lr = lr self.lr_decay = lr_decay self.reg = reg_config(reg) self.batch_size = batch_size self.num_sampled_per_batch = ( num_sampled_per_batch if num_sampled_per_batch and num_sampled_per_batch > 0 else batch_size ) self.use_bn = use_bn self.dropout_rate = dropout_config(dropout_rate) self.hidden_units = list(map(int, hidden_units.split(","))) # the output of last DNN layer is user vector self.user_vector_size = self.hidden_units[-1] self.loss_type = loss_type self.n_users = data_info.n_users self.n_items = data_info.n_items ( self.interaction_mode, self.interaction_num ) = self._check_interaction_mode(recent_num, random_num) self.seed = seed self.user_vector = None self.item_weights = None self.sampler = sampler # self.item_biases = None self.user_consumed = data_info.user_consumed self.sparse = self._decide_sparse_indices(data_info) self.dense = self._decide_dense_values(data_info) if self.sparse: self.sparse_feature_size = self._sparse_feat_size(data_info) self.sparse_field_size = self._sparse_field_size(data_info) self.multi_sparse_combiner = self._check_multi_sparse( data_info, multi_sparse_combiner) self.true_sparse_field_size = self._true_sparse_field_size( data_info, self.sparse_field_size, self.multi_sparse_combiner) if self.dense: self.dense_field_size = self._dense_field_size(data_info) self.vector_infer = True self.all_args = locals() def _build_model(self): self.graph_built = True tf.set_random_seed(self.seed) # item_indices actually serve as labels in YouTuBeRetrieval model self.item_indices = tf.placeholder(tf.int64, shape=[None]) self.is_training = tf.placeholder_with_default(False, shape=[]) self.concat_embed = [] self._build_item_interaction() if self.sparse: 
self._build_sparse() if self.dense: self._build_dense() concat_features = tf.concat(self.concat_embed, axis=1) self.user_vector_repr = dense_nn(concat_features, self.hidden_units, use_bn=self.use_bn, dropout_rate=self.dropout_rate, is_training=self.is_training) count_params() def _build_item_interaction(self): self.item_interaction_indices = tf.placeholder( tf.int64, shape=[None, 2]) self.item_interaction_values = tf.placeholder(tf.int32, shape=[None]) self.modified_batch_size = tf.placeholder(tf.int32, shape=[]) item_interaction_features = tf.get_variable( name="item_interaction_features", shape=[self.n_items, self.embed_size], initializer=tf_truncated_normal(0.0, 0.01), regularizer=self.reg) sparse_item_interaction = tf.SparseTensor( self.item_interaction_indices, self.item_interaction_values, [self.modified_batch_size, self.n_items] ) pooled_embed = tf.nn.safe_embedding_lookup_sparse( item_interaction_features, sparse_item_interaction, sparse_weights=None, combiner="sqrtn", default_id=None ) # unknown user will return 0-vector self.concat_embed.append(pooled_embed) def _build_sparse(self): self.sparse_indices = tf.placeholder( tf.int32, shape=[None, self.sparse_field_size]) sparse_features = tf.get_variable( name="sparse_features", shape=[self.sparse_feature_size, self.embed_size], initializer=tf_truncated_normal(0.0, 0.01), regularizer=self.reg) if (self.data_info.multi_sparse_combine_info and self.multi_sparse_combiner in ("sum", "mean", "sqrtn")): sparse_embed = multi_sparse_combine_embedding( self.data_info, sparse_features, self.sparse_indices, self.multi_sparse_combiner, self.embed_size) else: sparse_embed = tf.nn.embedding_lookup( sparse_features, self.sparse_indices) sparse_embed = tf.reshape( sparse_embed, [-1, self.true_sparse_field_size * self.embed_size]) self.concat_embed.append(sparse_embed) def _build_dense(self): self.dense_values = tf.placeholder( tf.float32, shape=[None, self.dense_field_size]) dense_values_reshape = tf.reshape( self.dense_values, [-1, self.dense_field_size, 1]) batch_size = tf.shape(self.dense_values)[0] dense_features = tf.get_variable( name="dense_features", shape=[self.dense_field_size, self.embed_size], initializer=tf_truncated_normal(0.0, 0.01), regularizer=self.reg) dense_embed = tf.expand_dims(dense_features, axis=0) # B * F2 * K dense_embed = tf.tile(dense_embed, [batch_size, 1, 1]) dense_embed = tf.multiply(dense_embed, dense_values_reshape) dense_embed = tf.reshape( dense_embed, [-1, self.dense_field_size * self.embed_size]) self.concat_embed.append(dense_embed) def _build_train_ops(self, **kwargs): self.nce_weights = tf.get_variable( name="nce_weights", # n_classes, embed_size shape=[self.n_items, self.user_vector_size], initializer=tf_truncated_normal(0.0, 0.01), regularizer=self.reg ) self.nce_biases = tf.get_variable( name="nce_biases", shape=[self.n_items], initializer=tf_zeros, regularizer=self.reg, trainable=True ) # By default, `sampled_softmax_loss` and `nce_loss` in tensorflow # uses `log_uniform_candidate_sampler` to sample negative items, # which may not be suitable in recommendation scenarios. 
labels = tf.reshape(self.item_indices, [-1, 1]) sampled_values = tf.random.uniform_candidate_sampler( true_classes=labels, num_true=1, num_sampled=self.num_sampled_per_batch, unique=True, range_max=self.n_items, ) if self.sampler == "uniform" else None if self.loss_type == "nce": self.loss = tf.reduce_mean(tf.nn.nce_loss( weights=self.nce_weights, biases=self.nce_biases, labels=labels, inputs=self.user_vector_repr, num_sampled=self.num_sampled_per_batch, num_classes=self.n_items, num_true=1, sampled_values=sampled_values, remove_accidental_hits=True, partition_strategy="div") ) elif self.loss_type == "sampled_softmax": self.loss = tf.reduce_mean(tf.nn.sampled_softmax_loss( weights=self.nce_weights, biases=self.nce_biases, labels=labels, inputs=self.user_vector_repr, num_sampled=self.num_sampled_per_batch, num_classes=self.n_items, num_true=1, sampled_values=sampled_values, remove_accidental_hits=True, seed=self.seed, partition_strategy="div") ) else: raise ValueError("Loss type must either be 'nce' " "or 'sampled_softmax") if self.reg is not None: reg_keys = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) total_loss = self.loss + tf.add_n(reg_keys) else: total_loss = self.loss if self.lr_decay: n_batches = int(self.data_info.data_size / self.batch_size) self.lr, global_steps = lr_decay_config(self.lr, n_batches, **kwargs) else: global_steps = None optimizer = tf.train.AdamOptimizer(self.lr) optimizer_op = optimizer.minimize(total_loss, global_step=global_steps) update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) self.training_op = tf.group([optimizer_op, update_ops]) self.sess.run(tf.global_variables_initializer()) def fit(self, train_data, verbose=1, shuffle=True, eval_data=None, metrics=None, **kwargs): assert self.task == "ranking", ( "YouTube models is only suitable for ranking" ) self._check_item_col() self.show_start_time() if not self.graph_built: self._build_model() self._build_train_ops(**kwargs) data_generator = DataGenSequence( train_data, self.data_info, self.sparse, self.dense, mode=self.interaction_mode, num=self.interaction_num, class_name="YoutubeMatch", padding_idx=self.n_items ) for epoch in range(1, self.n_epochs + 1): with time_block(f"Epoch {epoch}", verbose): train_total_loss = [] for b, ii, iv, user, item, _, si, dv in data_generator( shuffle, self.batch_size): feed_dict = {self.modified_batch_size: b, self.item_interaction_indices: ii, self.item_interaction_values: iv, self.item_indices: item, self.is_training: True} if self.sparse: feed_dict.update({self.sparse_indices: si}) if self.dense: feed_dict.update({self.dense_values: dv}) train_loss, _ = self.sess.run( [self.loss, self.training_op], feed_dict) train_total_loss.append(train_loss) if verbose > 1: train_loss_str = "train_loss: " + str( round(float(np.mean(train_total_loss)), 4) ) print(f"\t {colorize(train_loss_str, 'green')}") # for evaluation self._set_latent_vectors() self.print_metrics(eval_data=eval_data, metrics=metrics, **kwargs) print("=" * 30) # for prediction and recommendation self._set_latent_vectors() assign_oov_vector(self) def predict(self, user, item, cold_start="average", inner_id=False): user, item = self.convert_id(user, item, inner_id) unknown_num, unknown_index, user, item = self._check_unknown(user, item) preds = np.sum( np.multiply(self.user_vector[user], self.item_weights[item]), axis=1) preds = 1 / (1 +
completion: np.exp(-preds)
api: numpy.exp
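A self-contained sketch of the masked sigmoid applied to the user-item dot products:

import numpy as np

user_vector = np.array([[0.2, -0.1, 0.5]])    # hypothetical user embedding
item_weights = np.array([[0.3, 0.4, -0.2]])   # hypothetical item embedding
preds = np.sum(np.multiply(user_vector, item_weights), axis=1)
preds = 1 / (1 + np.exp(-preds))              # the masked sigmoid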
#!/usr/bin/env python # encoding: utf-8 # The MIT License (MIT) # Copyright (c) 2018 CNRS # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # AUTHORS # <NAME> - http://herve.niderb.fr import numpy as np from tqdm import tqdm import torch from torch.autograd import Variable import torch.nn as nn import torch.nn.functional as F from pyannote.audio.generators.speaker import SpeechSegmentGenerator from pyannote.audio.checkpoint import Checkpoint from torch.optim import Adam from scipy.spatial.distance import pdist from .triplet_loss import TripletLoss from pyannote.metrics.binary_classification import det_curve class WTFTripletLoss(TripletLoss): """ Parameters ---------- variant : int, optional Loss variants. Defaults to 1. duration : float, optional Defautls to 3.2 seconds. margin: float, optional Margin factor. Defaults to 0.2. sampling : {'all', 'hard', 'negative'}, optional Triplet sampling strategy. per_label : int, optional Number of sequences per speaker in each batch. Defaults to 3. per_fold : int, optional If provided, sample triplets from groups of `per_fold` speakers at a time. Defaults to sample triplets from the whole speaker set. parallel : int, optional Number of prefetching background generators. Defaults to 1. Each generator will prefetch enough batches to cover a whole epoch. Set `parallel` to 0 to not use background generators. 
""" CONFIDENCE_PT = '{log_dir}/weights/{epoch:04d}.confidence.pt' def __init__(self, variant=1, duration=3.2, sampling='all', per_label=3, per_fold=None, parallel=1): super(WTFTripletLoss, self).__init__( duration=duration, metric='angular', clamp='sigmoid', sampling=sampling, per_label=per_label, per_fold=per_fold, parallel=parallel) self.variant = variant def fit(self, model, feature_extraction, protocol, log_dir, subset='train', epochs=1000, restart=0, gpu=False): import tensorboardX writer = tensorboardX.SummaryWriter(log_dir=log_dir) checkpoint = Checkpoint(log_dir=log_dir, restart=restart > 0) batch_generator = SpeechSegmentGenerator( feature_extraction, per_label=self.per_label, per_fold=self.per_fold, duration=self.duration, parallel=self.parallel) batches = batch_generator(protocol, subset=subset) batch = next(batches) batches_per_epoch = batch_generator.batches_per_epoch if restart > 0: weights_pt = checkpoint.WEIGHTS_PT.format( log_dir=log_dir, epoch=restart) model.load_state_dict(torch.load(weights_pt)) if gpu: model = model.cuda() model.internal = False parameters = list(model.parameters()) if self.variant in [2, 3, 4, 5, 6, 7, 8]: # norm batch-normalization self.norm_bn = nn.BatchNorm1d( 1, eps=1e-5, momentum=0.1, affine=True) if gpu: self.norm_bn = self.norm_bn.cuda() parameters += list(self.norm_bn.parameters()) if self.variant in [9]: # norm batch-normalization self.norm_bn = nn.BatchNorm1d( 1, eps=1e-5, momentum=0.1, affine=False) if gpu: self.norm_bn = self.norm_bn.cuda() parameters += list(self.norm_bn.parameters()) if self.variant in [5, 6, 7]: self.positive_bn = nn.BatchNorm1d( 1, eps=1e-5, momentum=0.1, affine=False) self.negative_bn = nn.BatchNorm1d( 1, eps=1e-5, momentum=0.1, affine=False) if gpu: self.positive_bn = self.positive_bn.cuda() self.negative_bn = self.negative_bn.cuda() parameters += list(self.positive_bn.parameters()) parameters += list(self.negative_bn.parameters()) if self.variant in [8, 9]: self.delta_bn = nn.BatchNorm1d( 1, eps=1e-5, momentum=0.1, affine=False) if gpu: self.delta_bn = self.delta_bn.cuda() parameters += list(self.delta_bn.parameters()) optimizer = Adam(parameters) if restart > 0: optimizer_pt = checkpoint.OPTIMIZER_PT.format( log_dir=log_dir, epoch=restart) optimizer.load_state_dict(torch.load(optimizer_pt)) if gpu: for state in optimizer.state.values(): for k, v in state.items(): if torch.is_tensor(v): state[k] = v.cuda() epoch = restart if restart > 0 else -1 while True: epoch += 1 if epoch > epochs: break loss_avg, tloss_avg, closs_avg = 0., 0., 0. if epoch % 5 == 0: log_positive = [] log_negative = [] log_delta = [] log_norm = [] desc = 'Epoch #{0}'.format(epoch) for i in tqdm(range(batches_per_epoch), desc=desc): model.zero_grad() batch = next(batches) X = batch['X'] if not getattr(model, 'batch_first', True): X = np.rollaxis(X, 0, 2) X = np.array(X, dtype=np.float32) X = Variable(torch.from_numpy(X)) if gpu: X = X.cuda() fX = model(X) # pre-compute pairwise distances distances = self.pdist(fX) # sample triplets triplets = getattr(self, 'batch_{0}'.format(self.sampling)) anchors, positives, negatives = triplets(batch['y'], distances) # compute triplet loss tlosses, deltas, pos_index, neg_index = self.triplet_loss( distances, anchors, positives, negatives, return_delta=True) tloss = torch.mean(tlosses) if self.variant == 1: closses = F.sigmoid( F.softsign(deltas) * torch.norm(fX[anchors], 2, 1, keepdim=True)) # if d(a, p) < d(a, n) (i.e. good case) # --> sign(delta) < 0 # --> loss decreases when norm increases. # i.e. 
encourages longer anchor # if d(a, p) > d(a, n) (i.e. bad case) # --> sign(delta) > 0 # --> loss increases when norm increases # i.e. encourages shorter anchor elif self.variant == 2: norms_ = torch.norm(fX, 2, 1, keepdim=True) norms_ = F.sigmoid(self.norm_bn(norms_)) confidence = (norms_[anchors] + norms_[positives] + norms_[negatives]) / 3 # if |x| is average # --> normalized |x| = 0 # --> confidence = 0.5 # if |x| is bigger than average # --> normalized |x| >> 0 # --> confidence = 1 # if |x| is smaller than average # --> normalized |x| << 0 # --> confidence = 0 correctness = F.sigmoid(-deltas / np.pi * 6) # if d(a, p) = d(a, n) (i.e. uncertain case) # --> correctness = 0.5 # if d(a, p) - d(a, n) = -𝛑 (i.e. best possible case) # --> correctness = 1 # if d(a, p) - d(a, n) = +𝛑 (i.e. worst possible case) # --> correctness = 0 closses = torch.abs(confidence - correctness) # small if (and only if) confidence & correctness agree elif self.variant == 3: norms_ = torch.norm(fX, 2, 1, keepdim=True) norms_ = F.sigmoid(self.norm_bn(norms_)) confidence = (norms_[anchors] * norms_[positives] * norms_[negatives]) / 3 correctness = F.sigmoid(-(deltas + np.pi / 4) / np.pi * 6) # correctness = 0.5 at delta == -pi/4 # correctness = 1 for delta == -pi # correctness = 0 for delta < 0 closses = torch.abs(confidence - correctness) elif self.variant == 4: norms_ = torch.norm(fX, 2, 1, keepdim=True) norms_ = F.sigmoid(self.norm_bn(norms_)) confidence = (norms_[anchors] * norms_[positives] * norms_[negatives]) ** 1/3 correctness = F.sigmoid(-(deltas + np.pi / 4) / np.pi * 6) # correctness = 0.5 at delta == -pi/4 # correctness = 1 for delta == -pi # correctness = 0 for delta < 0 # delta = pos - neg ... should be < 0 closses = torch.abs(confidence - correctness) elif self.variant == 5: norms_ = torch.norm(fX, 2, 1, keepdim=True) confidence = F.sigmoid(self.norm_bn(norms_)) confidence_pos = .5 * (confidence[anchors] + confidence[positives]) # low positive distance == high correctness correctness_pos = F.sigmoid( -self.positive_bn(distances[pos_index].view(-1, 1))) confidence_neg = .5 * (confidence[anchors] + confidence[negatives]) # high negative distance == high correctness correctness_neg = F.sigmoid( self.negative_bn(distances[neg_index].view(-1, 1))) closses = .5 * (torch.abs(confidence_pos - correctness_pos) \ + torch.abs(confidence_neg - correctness_neg)) elif self.variant == 6: norms_ = torch.norm(fX, 2, 1, keepdim=True) confidence = F.sigmoid(self.norm_bn(norms_)) confidence_pos = .5 * (confidence[anchors] + confidence[positives]) # low positive distance == high correctness correctness_pos = F.sigmoid( -self.positive_bn(distances[pos_index].view(-1, 1))) closses = torch.abs(confidence_pos - correctness_pos) elif self.variant == 7: norms_ = torch.norm(fX, 2, 1, keepdim=True) confidence = F.sigmoid(self.norm_bn(norms_)) confidence_neg = .5 * (confidence[anchors] + confidence[negatives]) # high negative distance == high correctness correctness_neg = F.sigmoid( self.negative_bn(distances[neg_index].view(-1, 1))) closses = torch.abs(confidence_neg - correctness_neg) elif self.variant in [8, 9]: norms_ = torch.norm(fX, 2, 1, keepdim=True) norms_ = F.sigmoid(self.norm_bn(norms_)) confidence = (norms_[anchors] * norms_[positives] * norms_[negatives]) / 3 correctness = F.sigmoid(-self.delta_bn(deltas)) closses = torch.abs(confidence - correctness) closs = torch.mean(closses) if epoch % 5 == 0: if gpu: fX_npy = fX.data.cpu().numpy() pdist_npy = distances.data.cpu().numpy() delta_npy = deltas.data.cpu().numpy() else: 
fX_npy = fX.data.numpy() pdist_npy = distances.data.numpy() delta_npy = deltas.data.numpy() log_norm.append(np.linalg.norm(fX_npy, axis=1)) same_speaker = pdist(batch['y'].reshape((-1, 1)), metric='chebyshev') < 1 log_positive.append(pdist_npy[np.where(same_speaker)]) log_negative.append(pdist_npy[np.where(~same_speaker)]) log_delta.append(delta_npy) # log loss if gpu: tloss_ = float(tloss.data.cpu().numpy()) closs_ = float(closs.data.cpu().numpy()) else: tloss_ = float(tloss.data.numpy()) closs_ = float(closs.data.numpy()) tloss_avg += tloss_ closs_avg += closs_ loss_avg += tloss_ + closs_ loss = tloss + closs loss.backward() optimizer.step() tloss_avg /= batches_per_epoch writer.add_scalar('tloss', tloss_avg, global_step=epoch) closs_avg /= batches_per_epoch writer.add_scalar('closs', closs_avg, global_step=epoch) loss_avg /= batches_per_epoch writer.add_scalar('loss', loss_avg, global_step=epoch) if epoch % 5 == 0: log_positive = np.hstack(log_positive) writer.add_histogram( 'embedding/pairwise_distance/positive', log_positive, global_step=epoch, bins=np.linspace(0, np.pi, 50)) log_negative = np.hstack(log_negative) writer.add_histogram( 'embedding/pairwise_distance/negative', log_negative, global_step=epoch, bins=np.linspace(0, np.pi, 50)) _, _, _, eer = det_curve( np.hstack([np.ones(len(log_positive)), np.zeros(len(log_negative))]), np.hstack([log_positive, log_negative]), distances=True) writer.add_scalar('eer', eer, global_step=epoch) log_norm = np.hstack(log_norm) writer.add_histogram( 'norm', log_norm, global_step=epoch, bins='doane') log_delta =
np.vstack(log_delta)
numpy.vstack
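# --- A minimal sketch of the confidence/correctness idea used by the variants
# above, assuming hypothetical inputs: `fX` are embeddings, (anchors,
# positives, negatives) are index tensors of a triplet batch, and
# deltas = d(a, p) - d(a, n) are angular margins in [-pi, pi]. Note that in
# variant 4 above `** 1/3` parses as `(x ** 1) / 3` because `**` binds tighter
# than `/`; a geometric mean would need `** (1. / 3)`. This sketch follows the
# variant-2 arithmetic mean instead.
import torch

norm_bn = torch.nn.BatchNorm1d(1)  # centres the embedding norms across the batch

def confidence_correctness_loss(fX, anchors, positives, negatives, deltas):
    norms_ = torch.norm(fX, 2, 1, keepdim=True)    # (batch, 1) embedding norms
    norms_ = torch.sigmoid(norm_bn(norms_))        # ~0.5 for an average norm
    # confidence of a triplet: mean normalised norm of its three members
    confidence = (norms_[anchors] + norms_[positives] + norms_[negatives]) / 3
    # correctness: 0.5 at delta = 0, -> 1 as delta -> -pi, -> 0 as delta -> +pi
    correctness = torch.sigmoid(-deltas / torch.pi * 6)
    # small if (and only if) confidence and correctness agree
    return torch.mean(torch.abs(confidence - correctness))

# toy usage with random embeddings and two triplets
fX = torch.randn(8, 16)
loss = confidence_correctness_loss(
    fX, torch.tensor([0, 1]), torch.tensor([2, 3]), torch.tensor([4, 5]),
    deltas=torch.empty(2, 1).uniform_(-torch.pi, torch.pi))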
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.4'
#     jupytext_version: 1.2.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <div style='background-image: url("../../share/images/header.svg") ; padding: 0px ; background-size: cover ; border-radius: 5px ; height: 250px'>
# <div style="float: right ; margin: 50px ; padding: 20px ; background: rgba(255 , 255 , 255 , 0.7) ; width: 50% ; height: 150px">
# <div style="position: relative ; top: 50% ; transform: translatey(-50%)">
# <div style="font-size: xx-large ; font-weight: 900 ; color: rgba(0 , 0 , 0 , 0.8) ; line-height: 100%">Inverse Problems</div>
# <div style="font-size: large ; padding-top: 20px ; color: rgba(0 , 0 , 0 , 0.5)">Probabilistic Inversion: Simulated annealing</div>
# </div>
# </div>
# </div>

# ##### Authors:
# * <NAME> ([@heinerigel](https://github.com/heinerigel))
#
# * <NAME> ([@KGessele](https://github.com/KGessele))
#
# * <NAME> ([@bvilacis](https://github.com/bvilacis))
#
# ---

# This notebook presents a technique for finding the maximum likelihood point of a multi-modal function while sampling it. Our function is a probability density function (pdf) and the method used is called simulated annealing, a Markov Chain Monte Carlo (MCMC) method.
#
# This method is inspired by the annealing process in solid state physics. Annealing is a physical process of heating a solid until thermal stresses are released, then cooling it very slowly until the crystals are perfectly arranged, achieving a minimum energy state. Depending on how fast or slowly the temperature is lowered, the quality of the result will be worse or better.
#
# ![alt text](crystal.jpg "Title")
# Photo: <NAME> - https://bit.ly/2IzAkcE
#
# The simulation of this process is known as Simulated Annealing. It is a mathematical analogy which can be used to sample highly nonlinear, multidimensional functions. The analogue of the slow temperature cooling is a decrease of the probability of accepting worse solutions as the space is explored; the probability of moving to a better solution is kept at 1. At high temperatures the system is allowed to explore the whole space, while at low temperatures the exploration is restricted. The decrease of the temperature has to be done carefully, because the aim is to find the global minimum state of energy. If cooling is too fast you can end up in a secondary minimum, and if it is too slow you will waste a lot of forward calculations.
#
# This analogy between the physical process of annealing and the mathematical problem of obtaining the global minimum of a function allows us to find the maximum likelihood point.
#
# First we will define an energy function:
# \begin{equation}
# S(m) = - T_0 \log\frac{\sigma_M(m)}{\rho_M(m)}
# \end{equation}
# where $m$ refers to the coordinates in the space, $T_0$ is a fixed positive number termed the ambient temperature (e.g. $T_0=1$), $\sigma_M(m)$ is the designed function and $\rho_M(m)$ is a parameter.
# We obtain the probability density function as a function of the temperature of the system:
# \begin{equation}
# \sigma_M(m,T)=\rho_M(m) \exp \bigg( -\frac{S(m)}{T} \bigg) = \rho_M(m) \exp \bigg( -\frac{-T_0 \log\frac{\sigma_M(m)}{\rho_M(m)}}{T} \bigg)
# \end{equation}

# + {"code_folding": []}
# Import Libraries (PLEASE RUN THIS CODE FIRST!)
# ----------------------------------------------
import numpy as np
import matplotlib
## Show Plot in The Notebook
matplotlib.use('nbagg')
import matplotlib.pyplot as plt
from IPython import display
from mpl_toolkits.mplot3d import Axes3D
# -

# **PARAMETER CONFIGURATION**
#
# The above expression helps us to understand the relationship between our pdf and the temperature. For this notebook we consider $\rho_M(m) = 1$ and $T_0 = 1$, so the expression becomes
# \begin{equation}
# \sigma_M(m,T)= \exp \bigg( -\frac{-\log(\sigma_M(m))}{T} \bigg)
# \end{equation}

# + {"code_folding": [0]}
# Parameter Configuration
# ----------------------
# Some global parameters
npp = 1000000   # number of tries to take
ns = 100        # number of samples to keep
T0 = 1.
T = T0
alpha = 0.99999 # cooling schedule
rho = 1         # kept constant
rho0 = 1        # kept constant
Tplots = 10     # initial temperature for the plots
stepT = 4       # how many times the temperature is evolved by a factor 0.1 for the plots
# -

# **GENERATION OF THE PDF**
#
# Monte Carlo sampling of a probability density function; for the purpose of this notebook we use the absolute value of Matlab's "peaks" function at each point. The corresponding section can simply be replaced with the probability from a proper inverse problem (e.g. hypocenter location). This function has the following expression
#
# \begin{equation}
# \sigma_M(m) = \sigma(x,y) = | 3(1-x)^2 e^{-x^2-(y+1)^2}-10 \big( \frac{x}{5} -x^3-y^5\big) e^{-x^2-y^2} - \frac{1}{3} e^{-(x+1)^2-y^2} |
# \end{equation}

# + {"code_folding": [0]}
# Generate a pdf
# the following steps generate a pdf; this is equivalent to the function "peaks(n)" in matlab
n = 100  # number of grid points per dimension
pdf = np.zeros([n,n])
sigma = np.zeros([n,n])
s = np.zeros([n,n])
x = -3.
for i in range(0,n):
    y = -3.
    for j in range(0,n):
        pdf[j,i]=3.*(1-x)**2*np.exp(-(x**2)-(y+1)**2)-10.*(x/5-x**3-y**5)*np.exp(-x**2-y**2)-1./3*np.exp(-(x+1)**2-y**2)
        if pdf[j,i] < 0:
            pdf[j,i] = pdf[j,i] * (-1)  # in contrast to the peaks function: all negative values are multiplied by (-1)
        y = y + 6./(n-1)
    x = x + 6./(n-1)
pdf = pdf / pdf.max()
s = -T0*np.log(pdf/rho0)

# + {"code_folding": [0]}
# Plot the 3D plot of pdf
# --------------------------
X = np.arange(0,100 + 100./(n-1), 100./(n-1))
Y = np.arange(0,100 + 100./(n-1), 100./(n-1))
fig0 = plt.figure()
ax = fig0.gca(projection='3d')
X, Y =
np.meshgrid(X, Y)
numpy.meshgrid
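# --- The markdown above describes the sampler, but the acceptance rule itself
# sits further down the notebook; this is a minimal standalone sketch of the
# Metropolis rule with geometric cooling, reusing the notebook's names (`s` is
# the energy grid, `alpha` the cooling factor). The random-neighbour move and
# the periodic wrap are assumptions of this sketch.
import numpy as np

def anneal(s, T0=1.0, alpha=0.99999, n_steps=200000, seed=None):
    """Walk the energy grid s; always accept improvements, accept worse
    states with probability exp(-dS/T), and cool T geometrically."""
    rng = np.random.default_rng(seed)
    n = s.shape[0]
    pos = rng.integers(0, n, size=2)                 # random starting cell
    best = pos.copy()
    T = T0
    for _ in range(n_steps):
        trial = (pos + rng.integers(-1, 2, size=2)) % n  # neighbouring cell
        dS = s[tuple(trial)] - s[tuple(pos)]
        if dS <= 0 or rng.random() < np.exp(-dS / T):
            pos = trial
            if s[tuple(pos)] < s[tuple(best)]:
                best = pos.copy()
        T *= alpha                                   # slow geometric cooling
    return best  # grid indices of the lowest-energy state visited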
from .ldft_model import LdftModel import numpy as np import scipy.optimize as op import matplotlib.pyplot as plt from functools import reduce class LG2dAOHighl(LdftModel): """This class describes a single component lattice gas in 2d with sticky next neighbour attractions on a simple cubic lattice. The description is done within the framework of lattice density functional theory (ldft). The free energy functional was constructed by translating the model to the Asakura-Oosawa (AO) model and then setting up the functional of the resulting colloid-polymer dispersion by the Highlander version of dft. Therefor this class works with three species instead of one, namely the species of the extended AO-model (colloid, polymer clusters species accounting for attraction in x-direction and polymer for the attraction in y-direction). The free energy functional is the one for the three species. It differs from the free energy functional of the AO-model just by a correction term accounting for the zero- and one-body interaction of the polymers. If one wants the free energy of the lattice gas, one would have to calculate the semi-grand potential of the previous free energy, where the polymer clusters are treated grand-canonically and the colloids canonically. In this class extra functions are supported for this. The colloids correspond to the species in the lattice gas. Parameters ---------- size : `tuple` of `int` Shape of the systems simulation box. Expects a `Tuple` of two integers, each for one dimensional axis. epsi : `float` Attraction strength of the lattice gas particles (multiplied with the inverse temperature to make it's dimension 1). From this the value of the chemical potential of the polymer clusters is calculated. mu_fix_c : `bool`, optional: default = False Determines whether or not the system is treated canonical or grand canonical. Meant is the lattice gas system. This parameter therefore only steers the colloid-species. The others are set `True` by default. `False` for canonical. mu_c : `float`, optional: default = `None` The chemical potential for the colloid species (multiplied with the inverse temperature to make it's dimension 1). Just required when ``mu_fix==True``. The chemical potential of the polymer clusters is determined by the value of ``epsi``. dens_c : `float`, optional: default = `None` The average density of the colloids. Just required when ``mu_fix``==`False`. The average density of the polymer clusters is not required, as for those ``mu_fix`` is set `True`. v_ext_c : `numpy.ndarray`, optional: default=`None` An external potential for the colloids. Shape must be of the same shape as chosen in ``size``. This class does not consider the possibility of sticky walls. Therefore the external potential of polymers is set zero by default. bound_cond : `string`, optional: default='periodic' The boundary condition. Supports 'periodic' for periodic boundary conditions and '11_if' for a 45° tilted system with respect to the lattice. The latter is for creating slab interface with (11) orientation. If '11_if' is chosen then one dimension has to be chosen twice as the other dimension in the ``size`` parameter e.g. (64, 128). Default value is 'periodic'. r : `List` of `np.array`; Optional: default = `None` Density profile for all three species arranged in a `List`. Choose `None` in case you hand over the ``r_hist``-parameter or in case you do not want to set the variable yet. r_hist : `List` of `List` of `np.array`; Optional: default = `None` Picard-history of a density profile. 
It contains the density profiles for certain picard-steps of a system which has already been evolved through picard iteration. Caution! Every entry is of the format of the ``_r``-instance variable, which is a list itself containing the profile for each species. Therefore in our case the list is of length one. Use `None` if the system has no history yet. err_hist : `List` of `Tuple` of `Float`; Optional: default = `None` Contains the error at the picard-steps corresponding to the entries of `r_hist`. The entries are tuples containing an error for every species. Use `None` if no history available. it_hist : `List`; Optional: default = `None` List of the picard-steps corresponding to the density profiles at the ``r_hist``-parameter. Use `None` if no history available. Note: if ``r_hist`` is given then also this argument should be assigned with an appropriate list. """ def __init__(self, size, epsi, mu_fix_c=False, mu_c=None,\ dens_c=None, v_ext_c=None, bound_cond='periodic', r=None,\ r_hist=None, err_hist=None, it_hist=None): mu_pc=self.translate_epsi_to_mu_pc(epsi) v_ext_pc = np.zeros(size) v_ext_c = v_ext_pc if type(v_ext_c)==type(None) else v_ext_c super().__init__(size=size, mu_fix=[mu_fix_c, True, True], mu=[mu_c, mu_pc, mu_pc], dens=[dens_c, None, None], v_ext=[v_ext_c, v_ext_pc, v_ext_pc], r=r, r_hist=r_hist, err_hist=err_hist, it_hist=it_hist, bound_cond=bound_cond) def __str__(self): descrLG2dHighl = 'This is a Lattice gas described with lattice'\ +' DFT. It was translated to the AO-model and the'\ +' functional was constructed by the Highlander method'\ +' It is an object of the Type \'LG2dAOHighl\' and has'\ +' the following properties:' epsiStr='{0:<40s}: {1}\n'.format('Attr. strength \'epsi\'',\ self.epsi) motherClass = 'It inherits from \'LdftModel\', with the'\ +' following properties:' descrLdftModel=super().__str__() return descrLG2dHighl+'\n\n'+epsiStr+'\n'+motherClass+\ '\n\n'+descrLdftModel #################################################################### #Protected descriptors for internal use. These are for a more #convenient addressing of the species specific instance variables. #Important to notice: do not override the protected variables of the #super class LdftModel. Otherwise the functionality of the instance #methods in LdftModel can not be secured. #################################################################### @property def _mu_c(self): """The chemical potential of the colloid species (times the inverse temperature to make its dimension 1) (`float`, read-only). """ return self._mu[0] @property def _mu_pc1(self): """The chemical potential of the polymer species in x-direction (times the inverse temperature to make its dimension 1). (`float`, read-only) """ return self._mu[1] @property def _mu_pc2(self): """The chemical potential of the polymer species in y-direction (times the inverse temperature to make its dimension 1). (`float`, read-only) """ return self._mu[2] @property def _dens_c(self): """The average density of the colloid species (`float`, read-only). """ return self._dens[0] @property def _dens_pc1(self): """The average density of the polymer species in x-direction (`float`, read-only). """ return self._dens[1] @property def _dens_pc2(self): """The average density of the polymer species in x-direction (`float`, read-only). 
""" return self._dens[2] @property def _v_ext_c(self): """The external potential acting on the colloids (`np.array`, read-only) """ return self._v_ext[0] @property def _v_ext_pc1(self): """The external potential acting on the polymer clusters in x-direction. (`np.array`, read-only) """ return self._v_ext[1] @property def _v_ext_pc2(self): """The external potential acting on the polymer clusters in y-direction. (`np.array`, read-only) """ return self._v_ext[2] @property def _r_c(self): """The density profile of the colloid species. (`numpy.ndarray`, read-only) """ return self._r[0] @property def _r_pc1(self): """The density profile of the polymer species in x-direction. (`numpy.ndarray`, read-only) """ return self._r[1] @property def _r_pc2(self): """The density profile of the polymer species in y-direction. (`numpy.ndarray`, read-only) """ return self._r[2] #################################################################### #Public descriptors. These are for the user to access the variables #of interest. Some are already defined in the super class. Some of #them are reused, but others are overwritten. #################################################################### @property def epsi(self): """The attraction strength between the lattice-particles of the lattice gas. (`Float`, read-only) """ return self.translate_mu_pc_to_epsi(self._mu_pc1) @property def mu_c(self): """The chemical potential of the colloids (times the inverse temperature to make its dimension 1). It is equals the chemical potential of the particles of the lattice gas. (`float`) """ return self._mu[0] @mu_c.setter def mu_c(self, mu_c): self._mu[0]=mu_c mu_pc1=_mu_pc1 """The chemical potential of the polymer-cluster in x-direction (times the inverse temperature to make its dimension 1). (`float`, read-only) """ mu_pc2=_mu_pc2 """The chemical potential of the polymer-cluster in y-direction (times the inverse temperature to make its dimension 1). (`float`, read-only) """ @LdftModel.mu.setter def mu(self, mu): print('This setter has been deactivated in favour for \`mu_c\`') @property def dens_c(self): """The average density of the colloids. It is equals the average density in the lattice gas. (`float`) """ return self._dens[0] dens_pc1=_dens_pc1 """The average density of the polymer clusters in x-direction. (`float`, read-only) """ dens_pc2=_dens_pc2 """The average density of the polymer clusters in x-direction. (`float`, read-only) """ @LdftModel.dens.setter def dens(self, dens): print('This setter has been deactivated in favour for \ \`dens_c\`') @property def mu_fix_c(self): """Flag which determines Wether the colloids (a.k. the particles of the lattice gas) are treated canonical (`False`) or grand canonical (`True`). (`Bool`) """ return self._mu_fix[0] @mu_fix_c.setter def mu_fix_c(self, mu_fix_c): self._mu_fix[0]=mu_fix_c @LdftModel.mu_fix.setter def mu_fix(self, mu_fix): print('This setter has been deactivated in favour for \ \`mu_fix_c\`') @property def v_ext_c(self): """External potential acting on the colloids (a.k. the particles of the lattice gas). (`np.array`) """ return self._v_ext[0] @v_ext_c.setter def v_ext_c(self, v_ext_c): self._v_ext[0]=v_ext_c @LdftModel.v_ext.setter def v_ext(self, v_ext): print('This setter has been deactivated in favour for \ \`v_ext_c\`') @property def r_c(self): """The density profile of the colloids (a.k. the particles of the lattice gas). (`np.array`, read-only) """ return self._r[0] r_pc1=_r_pc1 """The density profile of the polymer clusters in x-direction. 
(`np.array`, read-only) """ r_pc2=_r_pc2 """The density profile of the polymer clusters in y-direction. (`np.array`, read-only) """ @property def r_c_hist(self): """Iteration history of the density profile of the colloids (a.k. the particles of the lattice gas). (`List`, read-only) """ r_c_hist = [r[0] for r in self._r_hist] return r_c_hist @property def err_c_hist(self): """Iteration history of the picard-error at the colloidal density profile. (`List`, read-only) """ err_hist =[err[0] for err in self._err_hist] return err_hist #################################################################### #Map the lattice gas to the AO-model: #################################################################### @staticmethod def translate_epsi_to_mu_pc(epsi): """Maps the attraction strength of the lattice gas ``epsi`` to the corresponding polymer cluster chemical potential. Parameters ---------- epsi : `float` The attraction strength (multiplied with the inverse temperature to make the quantity dimensionless). Returns ------- mu_pc : The chemical potential (multiplied with the inverse temperature to make the quantity dimensionless). (`float`) """ mu_pc=np.log(np.exp(epsi)-1) return mu_pc @staticmethod def translate_mu_pc_to_epsi(mu_pc): """Maps the polymer cluster chemical potential to the attraction strength of the lattice gas ``epsi``. Parameters ---------- mu_pc : `float` The polymer chemical potential (multiplied with the inverse temperature to make the quantity dimensionless). Returns ------- epsi : The attraction strength (multiplied with the inverse temperature to make the quantity dimensionless). (`float`) """ epsi=np.log(np.exp(mu_pc)+1) return epsi #################################################################### #The inhomogeneous functional: #In this section all the functions concerning the model specific #free energy functional are defined. #################################################################### def _cal_n(self): """Calculates the weighted densities necessary for the calculation of the free energy and the excess chemical potential. Returns ------- Result : `tuple` of `numpy.ndaray` """ n1 = self._r_c + self._r_pc1 n2 = self._boundary_roll(self._r_c, -1, axis=1) + self._r_pc1 n3 = self._r_c + self._r_pc2 n4 = self._boundary_roll(self._r_c, -1, axis=0) + self._r_pc2 n5 = self._r_pc1 n6 = self._r_pc2 n7 = self._r_c return n1, n2, n3, n4, n5, n6, n7 def _cal_Phi_ex_AO(self): """Calculates the excess free energy of the AO-model. Returns ------- Result : `np.array` Free energy density of the AO-model. """ n=self._cal_n() n1=n[0] n2=n[1] n3=n[2] n4=n[3] n5=n[4] n6=n[5] n7=n[6] Phi0=self._cal_Phi_0 Phi_ex = Phi0(n1)+Phi0(n2)+Phi0(n3)+Phi0(n4)-Phi0(n5)-Phi0(n6)\ -3*Phi0(n7) return Phi_ex def cal_F(self): """Calculates the free energy of the three component system. It differs from the free energy functional of the AO-model just by a correction term accounting for the zero- and one-body interaction of the polymers (see description of the class). For getting the free energy of the lattice gas use ``cal_F_lg``, which is the semi-grand potential, where the polymer clusters are treated grand canonically and the colloids canonically. Returns ------- Result : `float` Free energy of the three component system (times the inverse temperature to make the results dimension 1). 
""" z_pc1 = np.exp(self._mu_pc1) z_pc2 = np.exp(self._mu_pc2) r_c = self._r_c r_pc1 = self._r_pc1 r_pc2 = self._r_pc2 Phi_id = self._cal_Phi_id() Phi_ex = self._cal_Phi_ex_AO() F_id = np.sum(Phi_id) F_ex_AO = np.sum(Phi_ex) F = (F_id + F_ex_AO - np.log(z_pc1+1) *np.sum(-1+r_c+self._boundary_roll(r_c, -1, axis=1)) - np.log(z_pc2+1) *np.sum(-1+r_c+self._boundary_roll(r_c, -1, axis=0))) return F def cal_F_lg(self): """Calculates the free energy of the lattice gas. If ``self.mu_fix==False`` this should give the same result as the ``cal_semi_Om``-function. Returns ------- Result : `float` Free energy of the lattice gas. """ F_lg = self.cal_F() mu_pc1 = self._mu_pc1 mu_pc2 = self._mu_pc2 r_pc1 = self._r_pc1 r_pc2 = self._r_pc2 F_lg -= (mu_pc1*np.sum(r_pc1)+mu_pc2*np.sum(r_pc2)) return F_lg @LdftModel._RespectBoundaryCondition() def cal_mu_ex(self): n = self._cal_n() n1=n[0] n2=n[1] n3=n[2] n4=n[3] n5=n[4] n6=n[5] n7=n[6] z_pc = np.exp(self._mu_pc1) mu_c_ex = np.log((1-n1)*(1-self._boundary_roll(n2, 1, axis=1))\ *(1-n3)*(1-self._boundary_roll(n4, 1, axis=0))\ /(1-n7)**3) + 4*np.log(z_pc+1) mu_pc1_ex = np.log((1-n1)*(1-n2)/(1-n5)) mu_pc2_ex = np.log((1-n3)*(1-n4)/(1-n6)) return mu_c_ex, mu_pc1_ex, mu_pc2_ex #################################################################### #The homogeneous methods: #The following section contains all the methods concerning the bulk #properties of the system. #################################################################### @classmethod def _cal_bulk_r_pc(cls, r_c, epsi): """Calculates the bulk polymer cluster density in dependence of the colloid density and the chosen attraction strength Parameters ---------- r_c : `float` or `np.ndarray` The colloid density. epsi : `float` Attraction strength (times inverse temperature). Returns ------- r_pc : `float` or `np.ndarray` The polymer cluster density. """ mu_pc = cls.translate_epsi_to_mu_pc(epsi) z_pc =
np.exp(mu_pc)
numpy.exp
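# --- The epsi <-> mu_pc mapping used by the class above is a simple pair of
# inverse functions: mu_pc = log(exp(epsi) - 1) and epsi = log(exp(mu_pc) + 1).
# A quick standalone round-trip check (test values are arbitrary, epsi > 0):
import numpy as np

def translate_epsi_to_mu_pc(epsi):
    return np.log(np.exp(epsi) - 1)

def translate_mu_pc_to_epsi(mu_pc):
    return np.log(np.exp(mu_pc) + 1)

for epsi in (0.5, 1.0, 3.0):
    mu_pc = translate_epsi_to_mu_pc(epsi)
    assert np.isclose(translate_mu_pc_to_epsi(mu_pc), epsi)  # round trip
# the polymer-cluster fugacity is then z_pc = exp(mu_pc) = exp(epsi) - 1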
import matplotlib.pyplot as plt import scipy.io import numpy as np img = plt.imread("floor3_med.jpg") fig, ax = plt.subplots() ax.imshow(img) original = scipy.io.loadmat('Office_seq_01.mat') # reconstructed = scipy.io.loadmat('reconstructed.mat') original2 = scipy.io.loadmat('Kitchen1_seq_01.mat') original3 = scipy.io.loadmat('Conference_seq_01.mat') original4 = scipy.io.loadmat('Meeting_seq_01.mat') original5 = scipy.io.loadmat('Kitchen2_seq_01.mat') original6 = scipy.io.loadmat('reconstructed.mat') original7 = scipy.io.loadmat('Office_seq_03.mat') o_array = original['iPhonePose'] o_array2 = original2['iPhonePose'] o_array3 = original3['iPhonePose'] o_array4 = original4['iPhonePose'] o_array5 = original5['iPhonePose'] o_array6 = original6['reconstructed'] o_array7 = original7['iPhonePose'] num_points = o_array.size num_points2 = o_array2.size num_points3 = o_array3.size num_points4 = o_array4.size num_points5 = o_array5.size (num_points6,temp) = o_array6.shape num_points7 = o_array7.size coord = np.zeros((num_points,2)) coord2 = np.zeros((num_points2,2)) coord3 = np.zeros((num_points3,2)) coord4 = np.zeros((num_points4,2)) coord5 = np.zeros((num_points5,2)) coord6 = np.zeros((num_points6,2)) coord7 = np.zeros((num_points7,2)) for i in range(0,num_points): coord[i] = o_array[i][0][0:2,3] for i in range(0,num_points2): coord2[i] = o_array2[i][0][0:2,3] for i in range(0,num_points3): coord3[i] = o_array3[i][0][0:2,3] for i in range(0,num_points4): coord4[i] = o_array4[i][0][0:2,3] for i in range(0,num_points5): coord5[i] = o_array5[i][0][0:2,3] coord6 = o_array6 for i in range(0,num_points7): coord7[i] = o_array7[i][0][0:2,3] plt_coord = coord / 0.033917 plt_coord2 = coord2 / 0.033917 plt_coord3 = coord3 / 0.033917 plt_coord4 = coord4 / 0.033917 plt_coord5 = coord5 / 0.033917 plt_coord6 = coord6 / 0.033917 plt_coord7 = coord7 / 0.033917 switch_axes = np.array([[ -1, 1] ]*num_points) switch_axes2 = np.array([[ -1, 1] ]*num_points2) switch_axes3 = np.array([[ -1, 1] ]*num_points3) switch_axes4 = np.array([[ -1, 1] ]*num_points4) switch_axes5 = np.array([[ -1, 1] ]*num_points5) switch_axes6 = np.array([[ -1, 1] ]*num_points6) switch_axes7 = np.array([[ -1, 1] ]*num_points7) plt_coord = plt_coord * switch_axes plt_coord2 = plt_coord2 * switch_axes2 plt_coord3 = plt_coord3 * switch_axes3 plt_coord4 = plt_coord4 * switch_axes4 plt_coord5 = plt_coord5 * switch_axes5 plt_coord6 = plt_coord6 * switch_axes6 plt_coord7 = plt_coord7 * switch_axes7 origin = np.array([[ 2324, 747] ]*num_points) origin2 = np.array([[ 2324, 747] ]*num_points2) origin3 = np.array([[ 2324, 747] ]*num_points3) origin4 = np.array([[ 2324, 747] ]*num_points4) origin5 =
np.array([[ 2324, 747] ]*num_points5)
numpy.array
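# --- The seven near-identical blocks above differ only in the input file, so
# they could be collapsed into one helper; a sketch under that assumption
# (the 0.033917 scale, the [-1, 1] axis flip and the (2324, 747) origin are
# taken from the script; 'reconstructed.mat' stores a plain (N, 2) array and
# would bypass the pose extraction):
import numpy as np
import scipy.io

SCALE = 0.033917                 # metres per image pixel
FLIP = np.array([-1, 1])         # mirror the x axis
ORIGIN = np.array([2324, 747])   # pixel position of the world origin

def load_trajectory(mat_file, key='iPhonePose'):
    """Pixel coordinates of one sequence, following the per-block recipe."""
    poses = scipy.io.loadmat(mat_file)[key]
    # each entry holds a 4x4 pose matrix; the x, y translation sits in [0:2, 3]
    coord = np.array([poses[i][0][0:2, 3] for i in range(poses.size)])
    return coord / SCALE * FLIP + ORIGIN

trajectories = {f: load_trajectory(f) for f in
                ('Office_seq_01.mat', 'Kitchen1_seq_01.mat')}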
#!/usr/bin/env python """ Does some simple calculations to test trace gas PACE calculations Adapted from benchmark4Amir.py <NAME>, April 2020 """ import os import sys from netCDF4 import Dataset from netCDF4 import Dataset as ncread import numpy as np from MAPL.constants import * from py_leo_vlidort import VLIDORT_POLAR_ from scipy.interpolate import interp1d import scipy.integrate as integrate from pyhdf.SD import SD, SDC from multiprocessing import Pool from hyperTest import get_channels,get_geom,get_ROT,get_TOA_unpack,get_TOA from hyperTest import get_PTWV_profile, writenc format = 'NETCDF4_CLASSIC' MISSING = -1.e+20 def get_kg(inFile): """ Read c-k distribution coefficients from Amir's calculations """ nc = Dataset(inFile) # wavelength [nm] wav_abs = np.array(nc.variables['wavelengths'][:]) # c-k coefficients [not sure about units] kg_o2 = nc.variables['kg_o2'][:].T kg_h2o = nc.variables['kg_h2o'][:].T kg_co = nc.variables['kg_co'][:].T kg_co2 = nc.variables['kg_co2'][:].T kg_ch4 = nc.variables['kg_ch4'][:].T kg_n2o = nc.variables['kg_n2o'][:].T g_bins = nc.variables['g_bins'][:] nc.close() return kg_o2, kg_h2o, kg_co, kg_co2, kg_ch4, kg_n2o, g_bins, wav_abs def get_alpha(A,VMR,rhoe,ze): """ Calculate Absorption optical depth profile A - absorption coefficient [m2/molecule] VMR - trace gas mixing ratio [vol/vol, dimensionless] rhoe - air number density [molecules/m3] ze - profile altitude [m] """ # convert vmr to molecules/m3 nxe = VMR*rhoe # integrate to get the optical depth subcolumns km, nbin, nch = A.shape alpha = np.zeros([km,nbin,nch]) for i in range(km): for b in range(nbin): c1 = A[i,b,:]*nxe[i] c2 = A[i,b,:]*nxe[i+1] c1.shape = (1, nch) c2.shape = (1, nch) c = np.append(c1,c2,axis=0) alpha[i,b,:] =
np.trapz(c,ze[i:i+2],axis=0)
numpy.trapz
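# --- get_alpha above integrates (absorption coefficient x number density)
# over each two-level layer with the trapezoid rule, which for two points is
# just the mean value times the layer thickness. A toy check (made-up values):
import numpy as np

c = np.array([[1.0e-6, 2.0e-6],   # extinction at the layer bottom, per channel [1/m]
              [3.0e-6, 4.0e-6]])  # extinction at the layer top, per channel [1/m]
ze = np.array([0.0, 500.0])       # level altitudes [m]

alpha = np.trapz(c, ze, axis=0)   # optical depth of the layer, per channel
assert np.allclose(alpha, 0.5 * (c[0] + c[1]) * (ze[1] - ze[0]))
print(alpha)                      # [0.001  0.0015]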
#!/usr/bin/env python # coding: utf-8 # # Vatsal's Code # This notebook shows you how to build a model for predicting degradation at various locations along RNA sequence. # * We will first pre-process and tokenize the sequence, secondary structure and loop type. # * Then, we will use all the information to train a model on degradations recorded by the researchers from OpenVaccine. # * Finally, we run our model on the public test set (shorter sequences) and the private test set (longer sequences), and submit the predictions. # # In[1]: # %%capture # !pip install forgi # !yes Y |conda install -c bioconda viennarna # In[2]: import json,os, math import subprocess # from forgi.graph import bulge_graph # import forgi.visual.mplotlib as fvm import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import tensorflow.keras.backend as K import plotly.express as px import tensorflow.keras.layers as L import tensorflow as tf import warnings warnings.filterwarnings('ignore') import tensorflow_addons as tfa from itertools import combinations_with_replacement from sklearn.model_selection import train_test_split, KFold, StratifiedKFold,GroupKFold from keras.utils import plot_model from colorama import Fore, Back, Style # ### Configuration # In[3]: ###### USE DIFFERENT SEED FOR DIFFERENT STRATIFIED KFOLD SEED = 53 ###### NUMBER OF FOLDS. USE 3, 5, 7,... n_folds=5 ###### TRAIN DEBUG debug=True ###### APPLY WINDOW FEATURES Window_features = True ###### Number of Feature Given to Model # cat_feature = 3 ## ( Categorical Features Only) # num_features = 1 ## ( Numerical Features Only) ###### Model Configuration ###### model_name="GG" ## MODEL NAME (Files will save according to this ) epochs=100 ## NUMBER OF EPOCHS MODEL TRAIN IN EACH FOLD. USE 3, 5, 7,... BATCH_SIZE = 32 ## NUMBER OF BATCH_SIZE USE 16, 32, 64, 128,... n_layers = 2 ## Number of Layers Present in model # ex. 3 Layer of GRU Model layers = ["GRU","GRU"] ## Stacking sequence of GRU and LSTM (list of length == n_layers) hidden_dim = [128, 128] ## Hidden Dimension in Model (Default : [128,128]) (list of length == n_layers) dropout = [0.5, 0.5] ## 1.0 means no dropout, and 0.0 means no outputs from the layer. 
sp_dropout = 0.2 ## SpatialDropout1D (Fraction of the input units to drop) [https://stackoverflow.com/a/55244985] embed_dim = 250 ## Output Dimention of Embedding Layer (Default : 75) num_hidden_units = 8 ## Number of GRU units after num_input layer ###### LR Schedular ###### Cosine_Schedule = True ## cosine_schedule Rate Rampup_decy_lr = False ## Rampup decy lr Schedule # ### Set Seed # In[4]: def seed_everything(seed=1234): np.random.seed(seed) tf.random.set_seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) os.environ['TF_DETERMINISTIC_OPS'] = '1' seed_everything(SEED) # ### Used Columns # # In[5]: target_cols = ['reactivity', 'deg_Mg_pH10', 'deg_Mg_50C', 'deg_pH10', 'deg_50C'] window_columns = ['sequence','structure','predicted_loop_type'] categorical_features = ['sequence', 'structure', 'predicted_loop_type',] # 'predicted_loop_index'] cat_feature = len(categorical_features) if Window_features: cat_feature += len(window_columns) numerical_features = ['BPPS_Max','BPPS_nb', 'BPPS_sum', 'positional_entropy', 'stems', 'interior_loops', 'multiloops',#'hairpin loops', 'fiveprimes', 'threeprimes', 'A_percent', 'G_percent','C_percent', 'U_percent', 'U-G', 'C-G', 'U-A', 'G-C', 'A-U', 'G-U', # 'E', 'S', 'H', 'B', 'X', 'I', 'M', 'pair_map', 'pair_distance', ] num_features = len(numerical_features) ## ( Numerical Features Only) feature_cols = categorical_features + numerical_features pred_col_names = ["pred_"+c_name for c_name in target_cols] target_eval_col = ['reactivity','deg_Mg_pH10','deg_Mg_50C'] pred_eval_col = ["pred_"+c_name for c_name in target_eval_col] # ### Load and preprocess data # In[6]: data_dir = '/kaggle/input/stanford-covid-vaccine/' fearure_data_path = '../input/openvaccine/' # train = pd.read_csv(fearure_data_path+'train.csv') # test = pd.read_csv(fearure_data_path+'test.csv') train = pd.read_json(fearure_data_path+'train.json') test = pd.read_json(fearure_data_path+'test.json') # train_j = pd.read_json(data_dir + 'train.json', lines=True) # test_j = pd.read_json(data_dir + 'test.json', lines=True) sample_sub = pd.read_csv(data_dir + 'sample_submission.csv') # In[7]: train[target_cols] = train[target_cols].applymap(lambda x: x[1:-1].split(", ")) # In[8]: # train = train[train['SN_filter'] == 1] train = train[train['signal_to_noise'] >= 0.5] # In[9]: def pair_feature(row): arr = list(row) its = [iter(['_']+arr[:]) ,iter(arr[1:]+['_'])] list_touple = list(zip(*its)) return list(map("".join,list_touple)) # In[10]: def preprocess_categorical_inputs(df, cols=categorical_features,Window_features=Window_features): if Window_features: for c in window_columns: df["pair_"+c] = df[c].apply(pair_feature) cols.append("pair_"+c) cols = list(set(cols)) return np.transpose( np.array( df[cols] .applymap(lambda seq: [token2int[x] for x in seq]) .values .tolist() ), (0, 2, 1) ) # In[11]: def preprocess_numerical_inputs(df, cols=numerical_features): return np.transpose( np.array( df[cols].values.tolist() ), (0, 2, 1) ) # In[12]: # We will use this dictionary to map each character to an integer # so that it can be used as an input in keras # ().ACGUBEHIMSXshftim0123456789[]{}'_, token_list = list("().<KEY>") if Window_features: comb = combinations_with_replacement(list('_().<KEY>'*2), 2) token_list += list(set(list(map("".join,comb)))) token2int = {x:i for i, x in enumerate(list(set(token_list)))} print("token_list Size :",len(token_list)) train_inputs_all_cat = preprocess_categorical_inputs(train,cols=categorical_features) train_inputs_all_num = 
preprocess_numerical_inputs(train,cols=numerical_features) train_labels_all = np.array(train[target_cols].values.tolist(),dtype =np.float32).transpose((0, 2, 1)) print("Train categorical Features Shape : ",train_inputs_all_cat.shape) print("Train numerical Features Shape : ",train_inputs_all_num.shape) print("Train labels Shape : ",train_labels_all.shape) # ### Reduce Train Data # In[13]: # train_inputs_all_cat = train_inputs_all_cat[:,:68,:] # train_inputs_all_num = train_inputs_all_num[:,:68,:] # train_labels_all = train_labels_all[:,:68,:] # print("Train categorical Features Shape : ",train_inputs_all_cat.shape) # print("Train numerical Features Shape : ",train_inputs_all_num.shape) # print("Train labels Shape : ",train_labels_all.shape) # #### Public and private sets have different sequence lengths, so we will preprocess them separately and load models of different tensor shapes. # In[14]: public_df = test.query("seq_length == 107") private_df = test.query("seq_length == 130") print("public_df : ",public_df.shape) print("private_df : ",private_df.shape) public_inputs_cat = preprocess_categorical_inputs(public_df) private_inputs_cat = preprocess_categorical_inputs(private_df) public_inputs_num = preprocess_numerical_inputs(public_df,cols=numerical_features) private_inputs_num = preprocess_numerical_inputs(private_df,cols=numerical_features) print("Public categorical Features Shape : ",public_inputs_cat.shape) print("Public numerical Features Shape : ",public_inputs_num.shape) print("Private categorical Features Shape : ",private_inputs_cat.shape) print("Private numerical Features Shape : ",private_inputs_num.shape) # ### loss Function # In[15]: ### Custom Loss Function for ['reactivity','deg_Mg_pH10','deg_Mg_50C'] target Columns # def rmse(y_actual, y_pred): # mse = tf.keras.losses.mean_squared_error(y_actual, y_pred) # return K.sqrt(mse) # def MCRMSE(y_actual, y_pred, num_scored=3): # score = 0 # for i in range(num_scored): # score += rmse(y_actual[:,:, i], y_pred[:,:, i]) / num_scored # return score def MCRMSE(y_true, y_pred): colwise_mse = tf.reduce_mean(tf.square(y_true[:,:,:3] - y_pred[:,:,:3]), axis=1) return tf.reduce_mean(tf.sqrt(colwise_mse), axis=1) # ### Learning Rate Schedulars # ### Rampup decy lr Schedule # In[16]: def get_lr_callback(batch_size=8): lr_start = 0.00001 lr_max = 0.004 lr_min = 0.00005 lr_ramp_ep = 45 lr_sus_ep = 2 lr_decay = 0.8 def lrfn(epoch): if epoch < lr_ramp_ep: lr = (lr_max - lr_start) / lr_ramp_ep * epoch + lr_start elif epoch < lr_ramp_ep + lr_sus_ep: lr = lr_max else: lr = (lr_max - lr_min) * lr_decay**(epoch - lr_ramp_ep - lr_sus_ep) + lr_min return lr lr_callback = tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=False) return lr_callback # ### Cosine schedule with warmup # In[17]: def get_cosine_schedule_with_warmup(lr,num_warmup_steps, num_training_steps, num_cycles=3.5): """ Modified version of the get_cosine_schedule_with_warmup from huggingface. (https://huggingface.co/transformers/_modules/transformers/optimization.html#get_cosine_schedule_with_warmup) Create a schedule with a learning rate that decreases following the values of the cosine function between 0 and `pi * cycles` after a warmup period during which it increases linearly between 0 and 1. 
""" def lrfn(epoch): if epoch < num_warmup_steps: return (float(epoch) / float(max(1, num_warmup_steps))) * lr progress = float(epoch - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))) * lr return tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=False) # ### Different Layers # In[18]: def lstm_layer(hidden_dim, dropout): return tf.keras.layers.Bidirectional( tf.keras.layers.LSTM(hidden_dim, dropout=dropout, return_sequences=True, kernel_initializer = 'orthogonal')) # In[19]: def gru_layer(hidden_dim, dropout): return L.Bidirectional( L.GRU(hidden_dim, dropout=dropout, return_sequences=True, kernel_initializer='orthogonal') ) # ### Model Building # In[20]: # def build_model(embed_size, # seq_len = 107, # pred_len = 68, # dropout = dropout, # sp_dropout = sp_dropout, # num_features = num_features, # num_hidden_units = num_hidden_units, # embed_dim = embed_dim, # layers = layers, # hidden_dim = hidden_dim, # n_layers = n_layers, # cat_feature = cat_feature): # inputs = L.Input(shape=(seq_len, cat_feature),name='category_input') # embed = L.Embedding(input_dim=embed_size, output_dim=embed_dim)(inputs) # reshaped = tf.reshape(embed, shape=(-1, embed.shape[1], embed.shape[2] * embed.shape[3])) # reshaped_conv = tf.keras.layers.Conv1D(filters=512, kernel_size=3,strides=1, padding='same', activation='elu')(reshaped) # numerical_input = L.Input(shape=(seq_len, num_features), name='numeric_input') # n_Dense_1 = L.Dense(64)(numerical_input) # n_Dense_2 = L.Dense(128)(n_Dense_1) # numerical_conv = tf.keras.layers.Conv1D(filters=256, kernel_size=4,strides=1, padding='same', activation='elu')(n_Dense_2) # hidden = L.concatenate([reshaped_conv, numerical_conv]) # hidden = L.SpatialDropout1D(sp_dropout)(hidden) # for x in range(n_layers): # if layers[x] == "GRU": # hidden = gru_layer(hidden_dim[x], dropout[x])(hidden) # else: # hidden = lstm_layer(hidden_dim[x], dropout[x])(hidden) # # Since we are only making predictions on the first part of each sequence, # # we have to truncate it # truncated = hidden[:, :pred_len] # out = L.Dense(5)(truncated) # model = tf.keras.Model(inputs=[inputs] + [numerical_input], outputs=out) # adam = tf.optimizers.Adam() # radam = tfa.optimizers.RectifiedAdam() # lookahead = tfa.optimizers.Lookahead(adam, sync_period=6) # ranger = tfa.optimizers.Lookahead(radam, sync_period=6) # model.compile(optimizer=radam, loss=MCRMSE) # return model # In[21]: def build_model(embed_size, seq_len = 107, pred_len = 68, dropout = dropout, sp_dropout = sp_dropout, num_features = num_features, num_hidden_units = num_hidden_units, embed_dim = embed_dim, layers = layers, hidden_dim = hidden_dim, n_layers = n_layers, cat_feature = cat_feature): inputs = L.Input(shape=(seq_len, cat_feature),name='category_input') embed = L.Embedding(input_dim=embed_size, output_dim=embed_dim)(inputs) reshaped = tf.reshape(embed, shape=(-1, embed.shape[1], embed.shape[2] * embed.shape[3])) reshaped = L.SpatialDropout1D(sp_dropout)(reshaped) reshaped_conv = tf.keras.layers.Conv1D(filters=512, kernel_size=3,strides=1, padding='same', activation='elu')(reshaped) numerical_input = L.Input(shape=(seq_len, num_features), name='numeric_input') # n_Dense_1 = L.Dense(64)(numerical_input) # n_Dense_2 = L.Dense(128)(n_Dense_1) # numerical_conv = tf.keras.layers.Conv1D(filters=256, kernel_size=4,strides=1, padding='same', activation='elu')(n_Dense_2) hidden = L.concatenate([reshaped_conv, numerical_input]) 
hidden_1 = tf.keras.layers.Conv1D(filters=256, kernel_size=4,strides=1, padding='same', activation='elu')(hidden) hidden = gru_layer(128, 0.5)(hidden_1) hidden = L.concatenate([hidden, hidden_1]) # hidden = L.SpatialDropout1D(sp_dropout)(hidden) for x in range(n_layers): if layers[x] == "GRU": hidden = gru_layer(hidden_dim[x], dropout[x])(hidden) else: hidden = lstm_layer(hidden_dim[x], dropout[x])(hidden) hidden = L.concatenate([hidden, hidden_1]) # Since we are only making predictions on the first part of each sequence, # we have to truncate it truncated = hidden[:, :pred_len] out = L.Dense(5)(truncated) model = tf.keras.Model(inputs=[inputs] + [numerical_input], outputs=out) adam = tf.optimizers.Adam() radam = tfa.optimizers.RectifiedAdam() lookahead = tfa.optimizers.Lookahead(adam, sync_period=6) ranger = tfa.optimizers.Lookahead(radam, sync_period=6) model.compile(optimizer=radam, loss=MCRMSE) return model # ### Build and train model # # We will train a bi-directional GRU model. It has three layer and has dropout. To learn more about RNNs, LSTM and GRU, please see [this blog post](https://colah.github.io/posts/2015-08-Understanding-LSTMs/). # In[22]: model = build_model(embed_size=len(token_list)) plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True) # ### Add Augmentation Data # ### stratify_group Based on structure and SN_Filter # In[23]: def get_stratify_group(row): snf = row['SN_filter'] snr = row['signal_to_noise'] cnt = row['cnt'] id_ = row['id'] structure = row['structure'] if snf == 0: if snr<0: snr_c = 0 elif 0<= snr < 2: snr_c = 1 elif 2<= snr < 4: snr_c = 2 elif 4<= snr < 5.5: snr_c = 3 elif 5.5<= snr < 10: snr_c = 4 elif snr >= 10: snr_c = 5 else: # snf == 1 if snr<0: snr_c = 6 elif 0<= snr < 1: snr_c = 7 elif 1<= snr < 2: snr_c = 8 elif 2<= snr < 3: snr_c = 9 elif 3<= snr < 4: snr_c = 10 elif 4<= snr < 5: snr_c = 11 elif 5<= snr < 6: snr_c = 12 elif 6<= snr < 7: snr_c = 13 elif 7<= snr < 8: snr_c = 14 elif 8<= snr < 9: snr_c = 15 elif 9<= snr < 10: snr_c = 15 elif snr >= 10: snr_c = 16 return '{}_{}'.format(id_,snr_c) train['stratify_group'] = train.apply(get_stratify_group, axis=1) train['stratify_group'] = train['stratify_group'].astype('category').cat.codes skf = StratifiedKFold(n_folds, shuffle=True, random_state=SEED) gkf = GroupKFold(n_splits=n_folds) fig, ax = plt.subplots(n_folds,3,figsize=(20,5*n_folds)) for Fold, (train_index, val_index) in enumerate(gkf.split(train_inputs_all_cat, groups=train['stratify_group'])): print(Fore.YELLOW);print('#'*45);print("### Fold : ", str(Fold+1));print('#'*45);print(Style.RESET_ALL) train_data = train.iloc[train_index] val_data = train.iloc[val_index] print("Augmented data Present in Val Data : ",len(val_data[val_data['cnt'] != 1])) print("Augmented data Present in Train Data : ",len(train_data[train_data['cnt'] != 1])) val_data = val_data[val_data['cnt'] == 1] print("Data Lekage : ",len(val_data[val_data['id'].isin(train_data['id'])])) # print(train_data['stratify_group'].unique(),val_data['stratify_group'].unique()) print("number of Train Data points : ",len(train_data)) print("number of val_data Data points : ",len(val_data)) print("number of unique Structure in Train data : ", len(train_data.structure.unique())) print("number of unique Structure in val data : ",len(val_data.structure.unique()), val_data.structure.value_counts()[:5].values) print("Train SN_Filter == 1 : ", len(train_data[train_data['SN_filter']==1])) print("val_data SN_Filter == 1 : ", len(val_data[val_data['SN_filter']==1])) 
print("Train SN_Filter == 0 : ", len(train_data[train_data['SN_filter']==0])) print("val_data SN_Filter == 0 : ", len(val_data[val_data['SN_filter']==0])) print("Unique ID :",len(train_data.id.unique())) sns.kdeplot(train[train['SN_filter']==0]['signal_to_noise'],ax=ax[Fold][0],color="Red",label='Train All') sns.kdeplot(train_data[train_data['SN_filter']==0]['signal_to_noise'],ax=ax[Fold][0],color="Blue",label='Train') sns.kdeplot(val_data[val_data['SN_filter']==0]['signal_to_noise'],ax=ax[Fold][0],color="Green",label='Validation') ax[Fold][0].set_title(f'Fold : {Fold+1} Signal/Noise & SN_filter == 0') sns.kdeplot(train[train['SN_filter']==1]['signal_to_noise'],ax=ax[Fold][1],color="Red",label='Train All') sns.kdeplot(train_data[train_data['SN_filter']==1]['signal_to_noise'],ax=ax[Fold][1],color="Blue",label='Train') sns.kdeplot(val_data[val_data['SN_filter']==1]['signal_to_noise'],ax=ax[Fold][1],color="Green",label='Validation') ax[Fold][1].set_title(f'Fold : {Fold+1} Signal/Noise & SN_filter == 1') sns.kdeplot(train['signal_to_noise'],ax=ax[Fold][2],color="Red",label='Train All') sns.kdeplot(train_data['signal_to_noise'],ax=ax[Fold][2],color="Blue",label='Train') sns.kdeplot(val_data['signal_to_noise'],ax=ax[Fold][2],color="Green",label='Validation') ax[Fold][2].set_title(f'Fold : {Fold+1} Signal/Noise') plt.show() # In[24]: submission = pd.DataFrame(index=sample_sub.index, columns=target_cols).fillna(0) # test dataframe with 0 values val_losses = [] historys = [] oof_preds_all = [] stacking_pred_all = [] kf = KFold(n_folds, shuffle=True, random_state=SEED) skf = StratifiedKFold(n_folds, shuffle=True, random_state=SEED) gkf = GroupKFold(n_splits=n_folds) for Fold, (train_index, val_index) in enumerate(gkf.split(train_inputs_all_cat, groups=train['stratify_group'])): print(Fore.YELLOW);print('#'*45);print("### Fold : ", str(Fold+1));print('#'*45);print(Style.RESET_ALL) print(f"|| Batch_size: {BATCH_SIZE} \n|| n_layers: {n_layers} \n|| embed_dim: {embed_dim}") print(f"|| cat_feature: {cat_feature} \n|| num_features: {num_features}") print(f"|| layers : {layers} \n|| hidden_dim: {hidden_dim} \n|| dropout: {dropout} \n|| sp_dropout: {sp_dropout}") train_data = train.iloc[train_index] val_data = train.iloc[val_index] print("|| number Augmented data Present in Val Data : ",len(val_data[val_data['cnt'] != 1])) print("|| number Augmented data Present in Train Data : ",len(train_data[train_data['cnt'] != 1])) print("|| Data Lekage : ",len(val_data[val_data['id'].isin(train_data['id'])])) val_data = val_data[val_data['cnt'] == 1] model_train = build_model(embed_size=len(token_list)) model_short = build_model(embed_size=len(token_list),seq_len=107, pred_len=107) model_long = build_model(embed_size=len(token_list),seq_len=130, pred_len=130) train_inputs_cat = preprocess_categorical_inputs(train_data,cols=categorical_features) train_inputs_num = preprocess_numerical_inputs(train_data,cols=numerical_features) train_labels = np.array(train_data[target_cols].values.tolist(),dtype =np.float32).transpose((0, 2, 1)) val_inputs_cat = preprocess_categorical_inputs(val_data,cols=categorical_features) val_inputs_num = preprocess_numerical_inputs(val_data,cols=numerical_features) val_labels = np.array(val_data[target_cols].values.tolist(),dtype =np.float32).transpose((0, 2, 1)) # train_inputs_cat, train_labels = train_inputs_all_cat[train_index], train_labels_all[train_index] # val_inputs_cat, val_labels = train_inputs_all_cat[val_index], train_labels_all[val_index] # train_inputs_num, val_inputs_num = 
train_inputs_all_num[train_index],train_inputs_all_num[val_index] # csv_logger csv_logger = tf.keras.callbacks.CSVLogger(f'Fold_{Fold}_log.csv', separator=',', append=False) # SAVE BEST MODEL EACH FOLD checkpoint = tf.keras.callbacks.ModelCheckpoint(f'{model_name}_Fold_{Fold}.h5', monitor='val_loss', verbose=0, mode='min', save_freq='epoch') if Cosine_Schedule: #cosine Callback lr_schedule= get_cosine_schedule_with_warmup(lr=0.001, num_warmup_steps=20, num_training_steps=epochs) elif Rampup_decy_lr : # Rampup decy lr lr_schedule = get_lr_callback(BATCH_SIZE) else: lr_schedule = tf.keras.callbacks.ReduceLROnPlateau() history = model_train.fit( {'numeric_input': train_inputs_num, 'category_input': train_inputs_cat} , train_labels, validation_data=({'numeric_input': val_inputs_num, 'category_input': val_inputs_cat} ,val_labels), batch_size=BATCH_SIZE, epochs=epochs, callbacks=[lr_schedule, checkpoint, csv_logger,lr_schedule], verbose=1 if debug else 0 ) print("Min Validation Loss : ", min(history.history['val_loss'])) print("Min Validation Epoch : ",np.argmin( history.history['val_loss'] )+1) val_losses.append(min(history.history['val_loss'])) historys.append(history) model_short.load_weights(f'{model_name}_Fold_{Fold}.h5') model_long.load_weights(f'{model_name}_Fold_{Fold}.h5') public_preds = model_short.predict({'numeric_input': public_inputs_num, 'category_input': public_inputs_cat}) private_preds = model_long.predict({'numeric_input': private_inputs_num, 'category_input': private_inputs_cat}) oof_preds = model_train.predict({'numeric_input': val_inputs_num, 'category_input': val_inputs_cat}) stacking_pred = model_short.predict({'numeric_input': val_inputs_num, 'category_input': val_inputs_cat}) preds_model = [] for df, preds in [(public_df, public_preds), (private_df, private_preds)]: for i, uid in enumerate(df.id): single_pred = preds[i] single_df = pd.DataFrame(single_pred, columns=target_cols) single_df['id_seqpos'] = [f'{uid}_{x}' for x in range(single_df.shape[0])] preds_model.append(single_df) preds_model_df = pd.concat(preds_model) preds_model_df = preds_model_df.groupby(['id_seqpos'],as_index=True).mean() submission[target_cols] += preds_model_df[target_cols].values / n_folds for df, preds in [(val_data, oof_preds)]: for i, uid in enumerate(df.id): single_pred = preds[i] single_label = val_labels[i] single_label_df = pd.DataFrame(single_label, columns=target_cols) single_label_df['id_seqpos'] = [f'{uid}_{x}' for x in range(single_label_df.shape[0])] single_label_df['id'] = [f'{uid}' for x in range(single_label_df.shape[0])] single_label_df['s_id'] = [x for x in range(single_label_df.shape[0])] single_df = pd.DataFrame(single_pred, columns=pred_col_names) single_df['id_seqpos'] = [f'{uid}_{x}' for x in range(single_df.shape[0])] single_df = pd.merge(single_label_df,single_df, on="id_seqpos", how="left") oof_preds_all.append(single_df) for df, preds in [(val_data, stacking_pred)]: for i, uid in enumerate(df.id): single_pred = preds[i] # single_label = val_labels[i] # single_label_df = pd.DataFrame(single_label, columns=target_cols) # single_label_df['id_seqpos'] = [f'{uid}_{x}' for x in range(single_label_df.shape[0])] # single_label_df['id'] = [f'{uid}' for x in range(single_label_df.shape[0])] # single_label_df['s_id'] = [x for x in range(single_label_df.shape[0])] single_df = pd.DataFrame(single_pred, columns=pred_col_names) single_df['id_seqpos'] = [f'{uid}_{x}' for x in range(single_df.shape[0])] single_df['id'] = [uid for x in range(single_df.shape[0])] 
stacking_pred_all.append(single_df) # PLOT TRAINING history_data = pd.read_csv(f'Fold_{Fold}_log.csv') EPOCHS = len(history_data['epoch']) history = pd.DataFrame({'history':history_data.to_dict('list')}) fig = plt.figure(figsize=(15,5)) plt.plot(np.arange(EPOCHS),history.history['lr'],'-',label='Learning Rate',color='#ff7f0e') x = np.argmax( history.history['lr'] ); y = np.max( history.history['lr'] ) xdist = plt.xlim()[1] - plt.xlim()[0]; ydist = plt.ylim()[1] - plt.ylim()[0] plt.scatter(x,y,s=200,color='#1f77b4'); plt.text(x-0.03*xdist,y-0.13*ydist,f'Max Learning Rate : {y}' ,size=12) plt.ylabel('Learning Rate',size=14); plt.xlabel('Epoch',size=14) plt.legend(loc=1) plt2 = plt.gca().twinx() plt2.plot(np.arange(EPOCHS),history.history['loss'],'-o',label='Train Loss',color='#2ca02c') plt2.plot(np.arange(EPOCHS),history.history['val_loss'],'-o',label='Val Loss',color='#d62728') x = np.argmin( history.history['val_loss'] ); y = np.min( history.history['val_loss'] ) ydist = plt.ylim()[1] - plt.ylim()[0] plt.scatter(x,y,s=200,color='#d62728'); plt.text(x-0.03*xdist,y+0.05*ydist,'min loss',size=14) plt.ylabel('Loss',size=14) fig.text(s=f"Model Name : {model_name}" , x=0.5, y=1.08, fontsize=18, ha='center', va='center',color="green") fig.text(s=f"|| Fold : {Fold+1} | Batch_size: {BATCH_SIZE} | num_features: {num_features} | cat_feature: {cat_feature} |n_layers: {n_layers} | embed_dim: {embed_dim} ||", x=0.5, y=1.0, fontsize=15, ha='center', va='center',color="red") fig.text(s=f"|| layers : {layers} | hidden_dim: {hidden_dim} | dropout: {dropout} | sp_dropout: {sp_dropout} ||", x=0.5, y=0.92, fontsize=15, ha='center', va='center',color="blue") plt.legend(loc=3) plt.savefig(f'Fold_{Fold+1}.png', bbox_inches='tight') plt.show() submission["id_seqpos"] = preds_model_df.index submission = pd.merge(sample_sub["id_seqpos"], submission, on="id_seqpos", how="left") OOF = pd.concat(oof_preds_all) stacking_df = pd.concat(stacking_pred_all) # ### Calculate OOF Score : # The OOF (out of fold) predictions are saved to disk. If you wish to ensemble multiple models, use the OOF to determine what are the best weights to blend your models with. Choose weights that maximize OOF CV score when used to blend OOF. Then use those same weights to blend your test predictions. # ### Overall OOF Score # In[25]: OOF = OOF.groupby(['id_seqpos','id','s_id'],as_index=False).mean() OOF = OOF.sort_values(['id','s_id'],ascending=[True, True]) OOF_score = MCRMSE(np.expand_dims(OOF[target_eval_col].values, axis=0),
np.expand_dims(OOF[pred_eval_col].values, axis=0)
numpy.expand_dims
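# --- MCRMSE above is the mean of the column-wise RMSEs over the first three
# (scored) targets, computed per batch element before the trainer averages
# again. A small numpy cross-check of the TF definition, on toy shapes
# (batch, positions, targets):
import numpy as np

def mcrmse_np(y_true, y_pred, num_scored=3):
    colwise_mse = np.mean(
        (y_true[:, :, :num_scored] - y_pred[:, :, :num_scored]) ** 2, axis=1)
    return np.mean(np.sqrt(colwise_mse), axis=1)  # one score per sample

rng = np.random.default_rng(0)
y_true = rng.normal(size=(4, 68, 5))
y_pred = rng.normal(size=(4, 68, 5))
print(mcrmse_np(y_true, y_pred))  # shape (4,)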
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
import subprocess
import csv
import scipy
import scipy.stats
import aifc
import read_aif

data_folder = sys.argv[1]
N_subjects = 21
result_folder = sys.argv[2]
if not os.path.exists(result_folder):
    os.mkdir(result_folder)

# calculate and read behavioural results into behaviouraldict:
# {'S01': {'snareCue_times': [46.28689342,...], ...}, 'S02': {...} }
behaviouraldict = {}
for i in range(1, N_subjects + 1):
    # load the results into a dictionary
    try:
        with np.load(os.path.join(result_folder, 'S%02d' % i,
                                  'behavioural_results.npz'),
                     allow_pickle=True) as behave_file:
            behaviouraldict['S%02d' % i] = dict(behave_file)
    except:
        print('Please run read_aif.py for every subject first.')

### 1. plot performance vs musical background:
# read subject background (LQ and music qualification)
# background is a dict {"N_subjects": [LQ, Quali, Level, years]}
background = {}
with open(os.path.join(data_folder, 'additionalSubjectInfo.csv'), 'r') as infile:
    reader = csv.DictReader(infile, fieldnames=None, delimiter=';')
    for row in reader:
        key = "S%02d" % int(row['Subjectnr'])  # same format as behaviouraldict
        value = [int(row['LQ']), int(row['MusicQualification']),
                 int(row['MusicianshipLevel']), int(row['TrainingYears'])]
        background[key] = value

raw_musicscores = np.array([v for k, v in sorted(background.items())])
z_musicscores = (raw_musicscores - np.mean(raw_musicscores, 0)) / raw_musicscores.std(0)
musicscore = z_musicscores[:, 1:].mean(1)  # do not include the LQ

snare_abs_performance = np.zeros(N_subjects)
snare_mean_performance = np.zeros(N_subjects)
snare_se_performance = np.zeros(N_subjects)
wb_abs_performance = np.zeros(N_subjects)
wb_mean_performance = np.zeros(N_subjects)
wb_se_performance = np.zeros(N_subjects)
snare_rel_performance = np.zeros(N_subjects)
wb_rel_performance = np.zeros(N_subjects)

for k, v in sorted(behaviouraldict.items()):
    i = int(k[1:]) - 1  # 'S01' => entry 0
    snaredev = v['snare_deviation']
    snaredev = snaredev[np.isfinite(snaredev)]
    wbdev = v['wdBlk_deviation']
    wbdev = wbdev[np.isfinite(wbdev)]
    snare_abs_performance[i] = np.abs(snaredev).mean()
    snare_mean_performance[i] = snaredev.mean()
    # standard error over this subject's trial deviations
    snare_se_performance[i] = snaredev.std() / np.sqrt(len(snaredev))
    wb_abs_performance[i] = np.abs(wbdev).mean()
    wb_mean_performance[i] = wbdev.mean()
    wb_se_performance[i] = wbdev.std() / np.sqrt(len(wbdev))
    snare_rel_performance[i] = np.std(snaredev)
    wb_rel_performance[i] = np.std(wbdev)

snare_abs_expregress = scipy.stats.linregress(
    musicscore[0:N_subjects], np.log(snare_abs_performance))
snare_abs_rss = np.sum((np.log(snare_abs_performance) -
    (snare_abs_expregress[1] + musicscore[0:N_subjects] * snare_abs_expregress[0]))**2)
snare_abs_tss = np.sum((np.log(snare_abs_performance) -
    np.mean(np.log(snare_abs_performance)))**2)
snare_abs_r2 = 1 - snare_abs_rss / snare_abs_tss

snare_rel_expregress = scipy.stats.linregress(
    musicscore[0:N_subjects], np.log(snare_rel_performance))
snare_rel_rss = np.sum((np.log(snare_rel_performance) -
    (snare_rel_expregress[1] + musicscore[0:N_subjects] * snare_rel_expregress[0]))**2)
snare_rel_tss = np.sum((np.log(snare_rel_performance) -
    np.mean(np.log(snare_rel_performance)))**2)
snare_rel_r2 = 1 - snare_rel_rss / snare_rel_tss

wb_abs_expregress = scipy.stats.linregress(
    musicscore[0:N_subjects], np.log(wb_abs_performance))
wb_abs_rss = np.sum((np.log(wb_abs_performance) -
    (wb_abs_expregress[1] + musicscore[0:N_subjects] * wb_abs_expregress[0]))**2)
wb_abs_tss = np.sum((np.log(wb_abs_performance) -
    np.mean(np.log(wb_abs_performance)))**2)
wb_abs_r2 = 1 - wb_abs_rss / wb_abs_tss

wb_rel_expregress = scipy.stats.linregress(
    musicscore[0:N_subjects], np.log(wb_rel_performance))
wb_rel_rss = np.sum((np.log(wb_rel_performance) -
    (wb_rel_expregress[1] + musicscore[0:N_subjects] * wb_rel_expregress[0]))**2)
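# The four fits above repeat one exponential-regression pattern; a small helper
# (hypothetical, not in the original script) captures it and finishes the
# wdBlk relative-performance fit:
def exp_regression_r2(x, y):
    """Regress log(y) on x; return slope, intercept and R^2."""
    res = scipy.stats.linregress(x, np.log(y))
    rss = np.sum((np.log(y) - (res[1] + x * res[0]))**2)
    tss = np.sum((np.log(y) - np.mean(np.log(y)))**2)
    return res[0], res[1], 1 - rss / tss

wb_rel_slope, wb_rel_intercept, wb_rel_r2 = exp_regression_r2(
    musicscore[0:N_subjects], wb_rel_performance)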
from learning_to_adapt.dynamics.core.layers import MLP
from collections import OrderedDict
import tensorflow as tf
import numpy as np
from learning_to_adapt.utils.serializable import Serializable
from learning_to_adapt.utils import tensor_utils
from learning_to_adapt.logger import logger
import time


class MetaMLPDynamicsModel(Serializable):
    """
    Class for MLP continuous dynamics model
    """

    _activations = {
        None: None,
        "relu": tf.nn.relu,
        "tanh": tf.tanh,
        "sigmoid": tf.sigmoid,
        "softmax": tf.nn.softmax,
        "swish": lambda x: x * tf.sigmoid(x)
    }

    def __init__(self,
                 name,
                 env,
                 hidden_sizes=(512, 512),
                 meta_batch_size=10,
                 hidden_nonlinearity=tf.nn.relu,
                 output_nonlinearity=None,
                 batch_size=500,
                 learning_rate=0.001,
                 inner_learning_rate=0.1,
                 normalize_input=True,
                 optimizer=tf.train.AdamOptimizer,
                 valid_split_ratio=0.2,
                 rolling_average_persitency=0.99,
                 ):

        Serializable.quick_init(self, locals())

        self.normalization = None
        self.normalize_input = normalize_input
        self.next_batch = None

        self.meta_batch_size = meta_batch_size
        self.valid_split_ratio = valid_split_ratio
        self.rolling_average_persitency = rolling_average_persitency

        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.inner_learning_rate = inner_learning_rate
        self.name = name
        self._dataset_train = None
        self._dataset_test = None
        self._prev_params = None
        self._adapted_param_values = None

        # determine dimensionality of state and action space
        self.obs_space_dims = obs_space_dims = env.observation_space.shape[0]
        self.action_space_dims = action_space_dims = env.action_space.shape[0]

        hidden_nonlinearity = self._activations[hidden_nonlinearity]
        output_nonlinearity = self._activations[output_nonlinearity]

        """ ------------------ Pre-Update Graph + Adaptation ----------------------- """
        with tf.variable_scope(name):
            # Placeholders
            self.obs_ph = tf.placeholder(tf.float32, shape=(None, obs_space_dims))
            self.act_ph = tf.placeholder(tf.float32, shape=(None, action_space_dims))
            self.delta_ph = tf.placeholder(tf.float32, shape=(None, obs_space_dims))

            # Concatenate action and observation --> NN input
            self.nn_input = tf.concat([self.obs_ph, self.act_ph], axis=1)

            # Create MLP
            mlp = MLP(name,
                      output_dim=obs_space_dims,
                      hidden_sizes=hidden_sizes,
                      hidden_nonlinearity=hidden_nonlinearity,
                      output_nonlinearity=output_nonlinearity,
                      input_var=self.nn_input,
                      input_dim=obs_space_dims + action_space_dims)

            self.delta_pred = mlp.output_var  # shape: (batch_size, ndim_obs, n_models)

            self.loss = tf.reduce_mean(tf.square(self.delta_ph - self.delta_pred))
            self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
            self.adaptation_sym = tf.train.GradientDescentOptimizer(self.inner_learning_rate).minimize(self.loss)

            # Tensor_utils
            self.f_delta_pred = tensor_utils.compile_function([self.obs_ph, self.act_ph], self.delta_pred)

        """ --------------------------- Meta-training Graph ---------------------------------- """
        nn_input_per_task = tf.split(self.nn_input, self.meta_batch_size, axis=0)
        delta_per_task = tf.split(self.delta_ph, self.meta_batch_size, axis=0)

        pre_input_per_task, post_input_per_task = zip(*[tf.split(nn_input, 2, axis=0) for nn_input in nn_input_per_task])
        pre_delta_per_task, post_delta_per_task = zip(*[tf.split(delta, 2, axis=0) for delta in delta_per_task])

        pre_losses = []
        post_losses = []
        self._adapted_params = []

        for idx in range(self.meta_batch_size):
            with tf.variable_scope(name + '/pre_model_%d' % idx, reuse=tf.AUTO_REUSE):
                pre_mlp = MLP(name,
                              output_dim=obs_space_dims,
                              hidden_sizes=hidden_sizes,
                              hidden_nonlinearity=hidden_nonlinearity,
                              output_nonlinearity=output_nonlinearity,
                              input_var=pre_input_per_task[idx],
                              input_dim=obs_space_dims + action_space_dims,
                              params=mlp.get_params())

                pre_delta_pred = pre_mlp.output_var
                pre_loss = tf.reduce_mean(tf.square(pre_delta_per_task[idx] - pre_delta_pred))

                adapted_params = self._adapt_sym(pre_loss, pre_mlp.get_params())
                self._adapted_params.append(adapted_params)

            with tf.variable_scope(name + '/post_model_%d' % idx, reuse=tf.AUTO_REUSE):
                post_mlp = MLP(name,
                               output_dim=obs_space_dims,
                               hidden_sizes=hidden_sizes,
                               hidden_nonlinearity=hidden_nonlinearity,
                               output_nonlinearity=output_nonlinearity,
                               input_var=post_input_per_task[idx],
                               params=adapted_params,
                               input_dim=obs_space_dims + action_space_dims)
                post_delta_pred = post_mlp.output_var

                post_loss = tf.reduce_mean(tf.square(post_delta_per_task[idx] - post_delta_pred))

            pre_losses.append(pre_loss)
            post_losses.append(post_loss)

        self.pre_loss = tf.reduce_mean(pre_losses)
        self.post_loss = tf.reduce_mean(post_losses)
        self.train_op = optimizer(self.learning_rate).minimize(self.post_loss)

        """ --------------------------- Post-update Inference Graph --------------------------- """
        with tf.variable_scope(name + '_ph_graph'):
            self.post_update_delta = []
            self.network_phs_meta_batch = []

            nn_input_per_task = tf.split(self.nn_input, self.meta_batch_size, axis=0)
            for idx in range(meta_batch_size):
                with tf.variable_scope('task_%i' % idx):
                    network_phs = self._create_placeholders_for_vars(mlp.get_params())
                    self.network_phs_meta_batch.append(network_phs)

                    mlp_meta_batch = MLP(name,
                                         output_dim=obs_space_dims,
                                         hidden_sizes=hidden_sizes,
                                         hidden_nonlinearity=hidden_nonlinearity,
                                         output_nonlinearity=output_nonlinearity,
                                         params=network_phs,
                                         input_var=nn_input_per_task[idx],
                                         input_dim=obs_space_dims + action_space_dims,
                                         )
                    self.post_update_delta.append(mlp_meta_batch.output_var)

        self._networks = [mlp]

    def fit(self, obs, act, obs_next, epochs=1000, compute_normalization=True,
            valid_split_ratio=None, rolling_average_persitency=None, verbose=False, log_tabular=False):
        assert obs.ndim == 3 and obs.shape[2] == self.obs_space_dims
        assert obs_next.ndim == 3 and obs_next.shape[2] == self.obs_space_dims
        assert act.ndim == 3 and act.shape[2] == self.action_space_dims

        if valid_split_ratio is None:
            valid_split_ratio = self.valid_split_ratio
        if rolling_average_persitency is None:
            rolling_average_persitency = self.rolling_average_persitency

        assert 1 > valid_split_ratio >= 0

        sess = tf.get_default_session()

        if (self.normalization is None or compute_normalization) and self.normalize_input:
            self.compute_normalization(obs, act, obs_next)

        if self.normalize_input:
            # Normalize data
            obs, act, delta = self._normalize_data(obs, act, obs_next)
            assert obs.ndim == act.ndim == obs_next.ndim == 3
        else:
            delta = obs_next - obs

        # Split into valid and test set
        obs_train, act_train, delta_train, obs_test, act_test, delta_test = train_test_split(
            obs, act, delta, test_split_ratio=valid_split_ratio)

        if self._dataset_test is None:
            self._dataset_test = dict(obs=obs_test, act=act_test, delta=delta_test)
            self._dataset_train = dict(obs=obs_train, act=act_train, delta=delta_train)
        else:
            self._dataset_test['obs'] = np.concatenate([self._dataset_test['obs'], obs_test])
            self._dataset_test['act'] = np.concatenate([self._dataset_test['act'], act_test])
            self._dataset_test['delta'] = np.concatenate([self._dataset_test['delta'], delta_test])

            self._dataset_train['obs'] = np.concatenate([self._dataset_train['obs'], obs_train])
            self._dataset_train['act'] = np.concatenate([self._dataset_train['act'], act_train])
            self._dataset_train['delta'] = np.concatenate([self._dataset_train['delta'], delta_train])

        valid_loss_rolling_average = None
        epoch_times = []

        """ ------- Looping over training epochs ------- """
        num_steps_per_epoch = max(int(np.prod(self._dataset_train['obs'].shape[:2])
                                      / (self.meta_batch_size * self.batch_size * 2)), 1)
        num_steps_test = max(int(np.prod(self._dataset_test['obs'].shape[:2])
                                 / (self.meta_batch_size * self.batch_size * 2)), 1)

        for epoch in range(epochs):

            # preparations for recording training stats
            pre_batch_losses = []
            post_batch_losses = []
            t0 = time.time()

            """ ------- Looping through the shuffled and batched dataset for one epoch -------"""
            for _ in range(num_steps_per_epoch):
                obs_batch, act_batch, delta_batch = self._get_batch(train=True)

                pre_batch_loss, post_batch_loss, _ = sess.run(
                    [self.pre_loss, self.post_loss, self.train_op],
                    feed_dict={self.obs_ph: obs_batch,
                               self.act_ph: act_batch,
                               self.delta_ph: delta_batch})

                pre_batch_losses.append(pre_batch_loss)
                post_batch_losses.append(post_batch_loss)

            valid_losses = []
            for _ in range(num_steps_test):
                obs_test, act_test, delta_test = self._get_batch(train=False)

                # compute validation loss
                feed_dict = {self.obs_ph: obs_test,
                             self.act_ph: act_test,
                             self.delta_ph: delta_test}
                valid_loss = sess.run(self.loss, feed_dict=feed_dict)
                valid_losses.append(valid_loss)

            valid_loss = np.mean(valid_losses)
            if valid_loss_rolling_average is None:
                valid_loss_rolling_average = 1.5 * valid_loss  # set initial rolling average high to avoid too early stopping
                valid_loss_rolling_average_prev = 2 * valid_loss
                if valid_loss < 0:
                    valid_loss_rolling_average = valid_loss / 1.5  # set initial rolling average high to avoid too early stopping
                    valid_loss_rolling_average_prev = valid_loss / 2

            valid_loss_rolling_average = rolling_average_persitency * valid_loss_rolling_average \
                                         + (1.0 - rolling_average_persitency) * valid_loss

            epoch_times.append(time.time() - t0)

            if verbose:
                logger.log("Training DynamicsModel - finished epoch %i - "
                           "train loss: %.4f  valid loss: %.4f  valid_loss_mov_avg: %.4f  epoch time: %.2f"
                           % (epoch, np.mean(post_batch_losses), valid_loss,
                              valid_loss_rolling_average, time.time() - t0))

            if valid_loss_rolling_average_prev < valid_loss_rolling_average or epoch == epochs - 1:
                logger.log('Stopping Training of Model since its valid_loss_rolling_average decreased')
                break
            valid_loss_rolling_average_prev = valid_loss_rolling_average

        """ ------- Tabular Logging ------- """
        if log_tabular:
            logger.logkv('AvgModelEpochTime', np.mean(epoch_times))
            logger.logkv('Post-Loss', np.mean(post_batch_losses))
            logger.logkv('Pre-Loss', np.mean(pre_batch_losses))
            logger.logkv('Epochs', epoch)

    def predict(self, obs, act):
        assert obs.shape[0] == act.shape[0]
        assert obs.ndim == 2 and obs.shape[1] == self.obs_space_dims
        assert act.ndim == 2 and act.shape[1] == self.action_space_dims

        obs_original = obs

        if self.normalize_input:
            obs, act = self._normalize_data(obs, act)
            delta = np.array(self._predict(obs, act))
            delta = denormalize(delta, self.normalization['delta'][0], self.normalization['delta'][1])
        else:
            delta = np.array(self._predict(obs, act))

        assert delta.ndim == 2
        pred_obs = obs_original + delta

        return pred_obs

    def _predict(self, obs, act):
        if self._adapted_param_values is not None:
            sess = tf.get_default_session()
            obs, act = self._pad_inputs(obs, act)

            feed_dict = {self.obs_ph: obs, self.act_ph: act}
            feed_dict.update(self.network_params_feed_dict)

            delta = sess.run(self.post_update_delta[:self._num_adapted_models], feed_dict=feed_dict)
            delta = np.concatenate(delta, axis=0)
        else:
            delta = self.f_delta_pred(obs, act)
        return delta

    def _pad_inputs(self, obs, act, obs_next=None):
        if self._num_adapted_models < self.meta_batch_size:
            pad = int(obs.shape[0] / self._num_adapted_models * (self.meta_batch_size - self._num_adapted_models))
            obs = np.concatenate([obs, np.zeros((pad,) + obs.shape[1:])], axis=0)
            act = np.concatenate([act, np.zeros((pad,) + act.shape[1:])], axis=0)
            if obs_next is not None:
                obs_next = np.concatenate([obs_next, np.zeros((pad,) + obs_next.shape[1:])], axis=0)

        if obs_next is not None:
            return obs, act, obs_next
        else:
            return obs, act

    def adapt(self, obs, act, obs_next):
        self._num_adapted_models = len(obs)
        assert len(obs) == len(act) == len(obs_next)

        obs = np.concatenate([np.concatenate([ob, np.zeros_like(ob)], axis=0) for ob in obs], axis=0)
        act = np.concatenate([np.concatenate([a, np.zeros_like(a)], axis=0) for a in act], axis=0)
        obs_next = np.concatenate([np.concatenate([ob, np.zeros_like(ob)], axis=0) for ob in obs_next], axis=0)

        obs, act, obs_next = self._pad_inputs(obs, act, obs_next)

        assert obs.shape[0] == act.shape[0] == obs_next.shape[0]
        assert obs.ndim == 2 and obs.shape[1] == self.obs_space_dims
        assert act.ndim == 2 and act.shape[1] == self.action_space_dims
        assert obs_next.ndim == 2 and obs_next.shape[1] == self.obs_space_dims

        if self.normalize_input:
            # Normalize data
            obs, act, delta = self._normalize_data(obs, act, obs_next)
            assert obs.ndim == act.ndim == obs_next.ndim == 2
        else:
            delta = obs_next - obs

        self._prev_params = [nn.get_param_values() for nn in self._networks]

        sess = tf.get_default_session()
        self._adapted_param_values = sess.run(
            self._adapted_params[:self._num_adapted_models],
            feed_dict={self.obs_ph: obs, self.act_ph: act, self.delta_ph: delta})

    def switch_to_pre_adapt(self):
        if self._prev_params is not None:
            [nn.set_params(params) for nn, params in zip(self._networks, self._prev_params)]
            self._prev_params = None
            self._adapted_param_values = None

    def _get_batch(self, train=True):
        if train:
            num_paths, len_path = self._dataset_train['obs'].shape[:2]
            idx_path = np.random.randint(0, num_paths, size=self.meta_batch_size)
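# Hedged usage sketch (not from the source): assumes a Gym-like `env`, an
# active TF session, and the package's own `train_test_split`/`denormalize`
# helpers that `fit`/`predict` rely on.
#
#   model = MetaMLPDynamicsModel('dyn_model', env, meta_batch_size=10)
#   model.fit(obs, act, obs_next, epochs=50, verbose=True)      # meta-training
#   model.adapt([recent_obs], [recent_act], [recent_obs_next])  # inner-loop step
#   pred_next_obs = model.predict(obs_batch, act_batch)         # adapted model
#   model.switch_to_pre_adapt()                                 # undo adaptation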
from typing import Callable

import numpy


def calc_iint(
        beam_polarization: float, flipper_efficiency: float, f_nucl, f_m_perp,
        matrix_u, func_extinction: Callable = None,
        flag_beam_polarization: bool = False, flag_flipper_efficiency: bool = False,
        flag_f_nucl: bool = False, flag_f_m_perp: bool = False,
        dict_in_out: dict = None, flag_use_precalculated_data: bool = False):
    """Calculate integrated intensities.

    It is supposed that the crystal is not rotated during the calculations
    (the orientation matrix is the same for all reflections).
    """
    if dict_in_out is None:
        flag_dict = False
        dict_in_out_keys = []
    else:
        flag_dict = True
        dict_in_out_keys = dict_in_out.keys()

    p_u = beam_polarization
    p_d = beam_polarization * (2 * flipper_efficiency - 1)

    flag_f_plus_sq = flag_f_nucl or flag_f_m_perp
    flag_f_minus_sq = flag_f_nucl or flag_f_m_perp
    flag_f_m_perp_xy_sq = flag_f_m_perp

    f_n = numpy.atleast_1d(f_nucl)
    f_m_perp = numpy.atleast_1d(f_m_perp)

    f_n_sq = numpy.square(numpy.abs(f_n))
    f_m_perp_x = f_m_perp[0]*matrix_u[0] + f_m_perp[1]*matrix_u[1] + f_m_perp[2]*matrix_u[2]
    f_m_perp_y = f_m_perp[0]*matrix_u[3] + f_m_perp[1]*matrix_u[4] + f_m_perp[2]*matrix_u[5]
    f_m_perp_z = f_m_perp[0]*matrix_u[6] + f_m_perp[1]*matrix_u[7] + f_m_perp[2]*matrix_u[8]
    f_m_perp_x_sq = numpy.square(numpy.abs(f_m_perp_x))
    f_m_perp_y_sq = numpy.square(numpy.abs(f_m_perp_y))
    f_m_perp_z_sq = numpy.square(numpy.abs(f_m_perp_z))
    f_n_f_m_perp_z = 2. * (f_n.real * f_m_perp_z.real + f_n.imag * f_m_perp_z.imag)

    f_plus_sq = f_n_sq + f_m_perp_z_sq + f_n_f_m_perp_z
    f_minus_sq = f_n_sq + f_m_perp_z_sq - f_n_f_m_perp_z
    f_m_perp_xy_sq = f_m_perp_x_sq + f_m_perp_y_sq

    if func_extinction is None:
        dder_y_plus, dder_y_minus, dder_y_m_perp_xy = {}, {}, {}
        y_plus = numpy.ones_like(f_plus_sq)
        y_minus = numpy.ones_like(f_minus_sq)
        y_m_perp_xy = numpy.ones_like(f_m_perp_xy_sq)
    else:
        y_plus, dder_y_plus = func_extinction(f_plus_sq, flag_f_sq=flag_f_plus_sq)
        y_minus, dder_y_minus = func_extinction(f_minus_sq, flag_f_sq=flag_f_minus_sq)
        y_m_perp_xy, dder_y_m_perp_xy = func_extinction(f_m_perp_xy_sq, flag_f_sq=flag_f_m_perp_xy_sq)

    chiral_term = 2. * (f_m_perp_x.imag * f_m_perp_y.real - f_m_perp_x.real * f_m_perp_y.imag)
    if flag_dict:
        dict_in_out["chiral_term"] = chiral_term

    iint_plus = 0.5 * ((1. + p_u) * y_plus * f_plus_sq +
                       (1. - p_u) * y_minus * f_minus_sq) + \
        y_m_perp_xy * f_m_perp_xy_sq + \
        p_u * chiral_term

    iint_minus = 0.5 * ((1. - p_d) * y_plus * f_plus_sq +
                        (1. + p_d) * y_minus * f_minus_sq) + \
        y_m_perp_xy * f_m_perp_xy_sq - \
        p_d * chiral_term

    if flag_dict:
        dict_in_out["iint_plus"] = iint_plus
        dict_in_out["iint_minus"] = iint_minus
        dict_in_out["y_plus"] = y_plus
        dict_in_out["f_plus_sq"] = f_plus_sq
        dict_in_out["y_minus"] = y_minus
        dict_in_out["f_minus_sq"] = f_minus_sq
        dict_in_out["y_m_perp_xy"] = y_m_perp_xy
        dict_in_out["f_m_perp_xy_sq"] = f_m_perp_xy_sq

    dder_plus = {}
    dder_minus = {}

    if flag_beam_polarization:
        dder_plus["beam_polarization"] = 0.5 * (
            y_plus * f_plus_sq - y_minus * f_minus_sq +
            2. * chiral_term) * numpy.ones_like(beam_polarization)
        dder_minus["beam_polarization"] = 0.5 * (
            -y_plus * f_plus_sq + y_minus * f_minus_sq -
            2. * chiral_term) * numpy.ones_like(beam_polarization) * (2. * flipper_efficiency - 1.)

    if flag_flipper_efficiency:
        dder_minus["flipper_efficiency"] = beam_polarization * (
            -y_plus * f_plus_sq + y_minus * f_minus_sq -
            2. * chiral_term) * numpy.ones_like(flipper_efficiency)

    if flag_f_nucl:
        f_plus_sq_f_n_real = 2. * (f_n.real + f_m_perp_z.real) * numpy.ones_like(f_n.real)
        f_plus_sq_f_n_imag = 2. * (f_n.imag + f_m_perp_z.imag) * numpy.ones_like(f_n.imag)
        f_minus_sq_f_n_real = 2. * (f_n.real - f_m_perp_z.real) * numpy.ones_like(f_n.real)
        f_minus_sq_f_n_imag = 2. * (f_n.imag - f_m_perp_z.imag) * numpy.ones_like(f_n.imag)
        y_plus_f_n_real, y_minus_f_n_real = 0, 0
        y_plus_f_n_imag, y_minus_f_n_imag = 0, 0
        if "f_sq" in dder_y_plus.keys():
            y_plus_f_n_real = dder_y_plus["f_sq"] * f_plus_sq_f_n_real
            y_plus_f_n_imag = dder_y_plus["f_sq"] * f_plus_sq_f_n_imag
        if "f_sq" in dder_y_minus.keys():
            y_minus_f_n_real = dder_y_minus["f_sq"] * f_minus_sq_f_n_real
            y_minus_f_n_imag = dder_y_minus["f_sq"] * f_minus_sq_f_n_imag
        dder_plus["f_nucl_real"] = 0.5 * (
            (1. + p_u) * (y_plus * f_plus_sq_f_n_real + y_plus_f_n_real * f_plus_sq) +
            (1. - p_u) * (y_minus * f_minus_sq_f_n_real + y_minus_f_n_real * f_minus_sq))
        dder_plus["f_nucl_imag"] = 0.5 * (
            (1. + p_u) * (y_plus * f_plus_sq_f_n_imag + y_plus_f_n_imag * f_plus_sq) +
            (1. - p_u) * (y_minus * f_minus_sq_f_n_imag + y_minus_f_n_imag * f_minus_sq))
        dder_minus["f_nucl_real"] = 0.5 * (
            (1. - p_d) * (y_plus * f_plus_sq_f_n_real + y_plus_f_n_real * f_plus_sq) +
            (1. + p_d) * (y_minus * f_minus_sq_f_n_real + y_minus_f_n_real * f_minus_sq))
        dder_minus["f_nucl_imag"] = 0.5 * (
            (1. - p_d) * (y_plus * f_plus_sq_f_n_imag + y_plus_f_n_imag * f_plus_sq) +
            (1. + p_d) * (y_minus * f_minus_sq_f_n_imag + y_minus_f_n_imag * f_minus_sq))

    if flag_f_m_perp:
        f_plus_sq_f_m_perp_z_real = 2. * (f_n.real + f_m_perp_z.real) * numpy.ones_like(f_m_perp_z.real)
        f_plus_sq_f_m_perp_z_imag = 2. * (f_n.imag + f_m_perp_z.imag) * numpy.ones_like(f_m_perp_z.imag)
        f_minus_sq_f_m_perp_z_real = -2. * (f_n.real - f_m_perp_z.real) * numpy.ones_like(f_m_perp_z.real)
        f_minus_sq_f_m_perp_z_imag = -2. * (f_n.imag - f_m_perp_z.imag) * numpy.ones_like(f_m_perp_z.imag)
        f_m_perp_xy_sq_f_m_perp_x_real = 2 * f_m_perp_x.real * numpy.ones_like(f_m_perp_x.real)
        f_m_perp_xy_sq_f_m_perp_x_imag = 2 * f_m_perp_x.imag * numpy.ones_like(f_m_perp_x.imag)
        f_m_perp_xy_sq_f_m_perp_y_real = 2 * f_m_perp_y.real * numpy.ones_like(f_m_perp_y.real)
        f_m_perp_xy_sq_f_m_perp_y_imag = 2 * f_m_perp_y.imag * numpy.ones_like(f_m_perp_y.imag)
        chiral_term_f_m_perp_x_real = -2 * f_m_perp_y.imag * numpy.ones_like(f_m_perp_x.real)
        chiral_term_f_m_perp_x_imag = 2 * f_m_perp_y.real * numpy.ones_like(f_m_perp_x.imag)
        chiral_term_f_m_perp_y_real = 2 * f_m_perp_x.imag * numpy.ones_like(f_m_perp_y.real)
        chiral_term_f_m_perp_y_imag = -2 * f_m_perp_x.real * numpy.ones_like(f_m_perp_y.imag)
        y_plus_f_m_perp_z_real, y_plus_f_m_perp_z_imag = 0, 0
        y_minus_f_m_perp_z_real, y_minus_f_m_perp_z_imag = 0, 0
        y_m_perp_xy_f_m_perp_x_real, y_m_perp_xy_f_m_perp_x_imag = 0, 0
        y_m_perp_xy_f_m_perp_y_real, y_m_perp_xy_f_m_perp_y_imag = 0, 0
        if "f_sq" in dder_y_plus.keys():
            y_plus_f_m_perp_z_real = dder_y_plus["f_sq"] * f_plus_sq_f_m_perp_z_real
            y_plus_f_m_perp_z_imag = dder_y_plus["f_sq"] * f_plus_sq_f_m_perp_z_imag
        if "f_sq" in dder_y_minus.keys():
            y_minus_f_m_perp_z_real = dder_y_minus["f_sq"] * f_minus_sq_f_m_perp_z_real
            y_minus_f_m_perp_z_imag = dder_y_minus["f_sq"] * f_minus_sq_f_m_perp_z_imag
        if "f_sq" in dder_y_m_perp_xy.keys():
            y_m_perp_xy_f_m_perp_x_real = dder_y_m_perp_xy["f_sq"] * f_m_perp_xy_sq_f_m_perp_x_real
            y_m_perp_xy_f_m_perp_x_imag = dder_y_m_perp_xy["f_sq"] * f_m_perp_xy_sq_f_m_perp_x_imag
            y_m_perp_xy_f_m_perp_y_real = dder_y_m_perp_xy["f_sq"] * f_m_perp_xy_sq_f_m_perp_y_real
            y_m_perp_xy_f_m_perp_y_imag = dder_y_m_perp_xy["f_sq"] * f_m_perp_xy_sq_f_m_perp_y_imag
        dder_plus_f_m_perp_x_real = \
            y_m_perp_xy_f_m_perp_x_real * f_m_perp_xy_sq + \
            y_m_perp_xy * f_m_perp_xy_sq_f_m_perp_x_real + \
            p_u * chiral_term_f_m_perp_x_real
        dder_plus_f_m_perp_x_imag = \
            y_m_perp_xy_f_m_perp_x_imag * f_m_perp_xy_sq + \
            y_m_perp_xy * f_m_perp_xy_sq_f_m_perp_x_imag + \
            p_u * chiral_term_f_m_perp_x_imag
        dder_plus_f_m_perp_y_real = \
            y_m_perp_xy_f_m_perp_y_real * f_m_perp_xy_sq + \
            y_m_perp_xy * f_m_perp_xy_sq_f_m_perp_y_real + \
            p_u * chiral_term_f_m_perp_y_real
        dder_plus_f_m_perp_y_imag = \
            y_m_perp_xy_f_m_perp_y_imag * f_m_perp_xy_sq + \
            y_m_perp_xy * f_m_perp_xy_sq_f_m_perp_y_imag + \
            p_u * chiral_term_f_m_perp_y_imag
        dder_plus_f_m_perp_z_real = 0.5 * (
            (1. + p_u) * (y_plus * f_plus_sq_f_m_perp_z_real + y_plus_f_m_perp_z_real * f_plus_sq) +
            (1. - p_u) * (y_minus * f_minus_sq_f_m_perp_z_real + y_minus_f_m_perp_z_real * f_minus_sq))
        dder_plus_f_m_perp_z_imag = 0.5 * (
            (1. + p_u) * (y_plus * f_plus_sq_f_m_perp_z_imag + y_plus_f_m_perp_z_imag * f_plus_sq) +
            (1. - p_u) * (y_minus * f_minus_sq_f_m_perp_z_imag + y_minus_f_m_perp_z_imag * f_minus_sq))
        dder_plus_f_m_perp_1_real = \
            dder_plus_f_m_perp_x_real * matrix_u[0] + \
            dder_plus_f_m_perp_y_real * matrix_u[3] + \
            dder_plus_f_m_perp_z_real * matrix_u[6]
        dder_plus_f_m_perp_1_imag = \
            dder_plus_f_m_perp_x_imag * matrix_u[0] + \
            dder_plus_f_m_perp_y_imag * matrix_u[3] + \
            dder_plus_f_m_perp_z_imag * matrix_u[6]
        dder_plus_f_m_perp_2_real = \
            dder_plus_f_m_perp_x_real * matrix_u[1] + \
            dder_plus_f_m_perp_y_real * matrix_u[4] + \
            dder_plus_f_m_perp_z_real * matrix_u[7]
        dder_plus_f_m_perp_2_imag = \
            dder_plus_f_m_perp_x_imag * matrix_u[1] + \
            dder_plus_f_m_perp_y_imag * matrix_u[4] + \
            dder_plus_f_m_perp_z_imag * matrix_u[7]
        dder_plus_f_m_perp_3_real = \
            dder_plus_f_m_perp_x_real * matrix_u[2] + \
            dder_plus_f_m_perp_y_real * matrix_u[5] + \
            dder_plus_f_m_perp_z_real * matrix_u[8]
        dder_plus_f_m_perp_3_imag = \
            dder_plus_f_m_perp_x_imag * matrix_u[2] + \
            dder_plus_f_m_perp_y_imag * matrix_u[5] + \
            dder_plus_f_m_perp_z_imag * matrix_u[8]
        dder_plus["f_m_perp_real"] = numpy.stack([
            dder_plus_f_m_perp_1_real, dder_plus_f_m_perp_2_real,
            dder_plus_f_m_perp_3_real], axis=0)
        dder_plus["f_m_perp_imag"] = numpy.stack([
            dder_plus_f_m_perp_1_imag, dder_plus_f_m_perp_2_imag,
            dder_plus_f_m_perp_3_imag], axis=0)
        dder_minus_f_m_perp_x_real = \
            y_m_perp_xy_f_m_perp_x_real * f_m_perp_xy_sq + \
            y_m_perp_xy * f_m_perp_xy_sq_f_m_perp_x_real - \
            p_d * chiral_term_f_m_perp_x_real
        dder_minus_f_m_perp_x_imag = \
            y_m_perp_xy_f_m_perp_x_imag * f_m_perp_xy_sq + \
            y_m_perp_xy * f_m_perp_xy_sq_f_m_perp_x_imag - \
            p_d * chiral_term_f_m_perp_x_imag
        dder_minus_f_m_perp_y_real = \
            y_m_perp_xy_f_m_perp_y_real * f_m_perp_xy_sq + \
            y_m_perp_xy * f_m_perp_xy_sq_f_m_perp_y_real - \
            p_d * chiral_term_f_m_perp_y_real
        dder_minus_f_m_perp_y_imag = \
            y_m_perp_xy_f_m_perp_y_imag * f_m_perp_xy_sq + \
            y_m_perp_xy * f_m_perp_xy_sq_f_m_perp_y_imag - \
            p_d * chiral_term_f_m_perp_y_imag
        dder_minus_f_m_perp_z_real = 0.5 * (
            (1. - p_d) * (y_plus * f_plus_sq_f_m_perp_z_real + y_plus_f_m_perp_z_real * f_plus_sq) +
            (1. + p_d) * (y_minus * f_minus_sq_f_m_perp_z_real + y_minus_f_m_perp_z_real * f_minus_sq))
        dder_minus_f_m_perp_z_imag = 0.5 * (
            (1. - p_d) * (y_plus * f_plus_sq_f_m_perp_z_imag + y_plus_f_m_perp_z_imag * f_plus_sq) +
            (1. + p_d) * (y_minus * f_minus_sq_f_m_perp_z_imag + y_minus_f_m_perp_z_imag * f_minus_sq))
        dder_minus_f_m_perp_1_real = \
            dder_minus_f_m_perp_x_real * matrix_u[0] + \
            dder_minus_f_m_perp_y_real * matrix_u[3] + \
            dder_minus_f_m_perp_z_real * matrix_u[6]
        dder_minus_f_m_perp_1_imag = \
            dder_minus_f_m_perp_x_imag * matrix_u[0] + \
            dder_minus_f_m_perp_y_imag * matrix_u[3] + \
            dder_minus_f_m_perp_z_imag * matrix_u[6]
        dder_minus_f_m_perp_2_real = \
            dder_minus_f_m_perp_x_real * matrix_u[1] + \
            dder_minus_f_m_perp_y_real * matrix_u[4] + \
            dder_minus_f_m_perp_z_real * matrix_u[7]
        dder_minus_f_m_perp_2_imag = \
            dder_minus_f_m_perp_x_imag * matrix_u[1] + \
            dder_minus_f_m_perp_y_imag * matrix_u[4] + \
            dder_minus_f_m_perp_z_imag * matrix_u[7]
        dder_minus_f_m_perp_3_real = \
            dder_minus_f_m_perp_x_real * matrix_u[2] + \
            dder_minus_f_m_perp_y_real * matrix_u[5] + \
            dder_minus_f_m_perp_z_real * matrix_u[8]
        dder_minus_f_m_perp_3_imag = \
            dder_minus_f_m_perp_x_imag * matrix_u[2] + \
            dder_minus_f_m_perp_y_imag * matrix_u[5] + \
            dder_minus_f_m_perp_z_imag * matrix_u[8]
        dder_minus["f_m_perp_real"] = numpy.stack([
            dder_minus_f_m_perp_1_real, dder_minus_f_m_perp_2_real,
            dder_minus_f_m_perp_3_real], axis=0)
        dder_minus["f_m_perp_imag"] = numpy.stack([
            dder_minus_f_m_perp_1_imag, dder_minus_f_m_perp_2_imag,
            dder_minus_f_m_perp_3_imag], axis=0)

    extinction_keys = dder_y_plus.keys()
    if len(extinction_keys) != 0:
        for key in extinction_keys:
            if key == "f_sq":
                pass
            else:
                dder_plus[key] = \
                    0.5 * ((1. + p_u) * f_plus_sq * dder_y_plus[key] +
                           (1. - p_u) * f_minus_sq * dder_y_minus[key]) + \
                    f_m_perp_xy_sq * dder_y_m_perp_xy[key]
                dder_minus[key] = \
                    0.5 * ((1. - p_d) * f_plus_sq * dder_y_plus[key] +
                           (1. + p_d) * f_minus_sq * dder_y_minus[key]) + \
                    f_m_perp_xy_sq * dder_y_m_perp_xy[key]

    return iint_plus, iint_minus, dder_plus, dder_minus


def calc_flip_ratio_by_iint(
        iint_plus, iint_minus, c_lambda2: float = None, iint_2hkl=None,
        flag_iint_plus: bool = False, flag_iint_minus: bool = False,
        flag_c_lambda2: bool = False, flag_iint_2hkl: bool = False):
    """Calculate flip ratio by given integrated intensities."""
    dder = {}
    cond = (c_lambda2 is None) or (iint_2hkl is None)
    if cond:
        signal_plus = iint_plus
        signal_minus = iint_minus
    else:
        signal_plus = iint_plus + c_lambda2 * iint_2hkl
        signal_minus = iint_minus + c_lambda2 * iint_2hkl

    flip_ratio = signal_plus / signal_minus

    if flag_iint_plus:
        dder["iint_plus"] = numpy.ones_like(iint_plus) / signal_minus
    if flag_iint_minus:
        dder["iint_minus"] = -1. * flip_ratio * numpy.ones_like(iint_minus) / signal_minus
    if flag_c_lambda2:
        dder["c_lambda2"] = numpy.ones_like(c_lambda2) * iint_2hkl / signal_minus - \
            flip_ratio * numpy.ones_like(c_lambda2) * iint_2hkl / signal_minus
    if flag_iint_2hkl:
        dder["iint_2hkl"] = numpy.ones_like(iint_2hkl) * c_lambda2 / signal_minus - \
            flip_ratio * numpy.ones_like(iint_2hkl) * c_lambda2 / signal_minus
    return flip_ratio, dder
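# Minimal smoke test (hypothetical values, not from the source): one reflection
# with a nuclear structure factor, a small magnetic component, an identity
# orientation matrix and no extinction correction.
if __name__ == "__main__":
    f_nucl_demo = numpy.array([1.0 + 0.0j])
    f_m_perp_demo = numpy.array([[0.1j], [0.2j], [0.3 + 0.0j]])
    matrix_u_demo = numpy.eye(3).ravel()
    iint_p, iint_m, _, _ = calc_iint(0.9, 0.95, f_nucl_demo, f_m_perp_demo, matrix_u_demo)
    fr, _ = calc_flip_ratio_by_iint(iint_p, iint_m)
    print(iint_p, iint_m, fr)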
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages

pp = PdfPages('random_walk.pdf')
print("Setup Complete")

# number of walkers
n_stories = 1000
# time during which we follow the walker
t_max = 200

t = np.arange(t_max)
# +1 because the high value is exclusive
steps = 2 * np.random.randint(0, 1 + 1, (n_stories, t_max)) - 1

# Verification: all steps are 1 or -1
np.unique(steps)

# axis = 1: dimension of time
positions = np.cumsum(steps, axis=1)
sq_distance = positions**2
mean_sq_distance = np.mean(sq_distance, axis=0)

print(np.isclose(np.sqrt(mean_sq_distance), np.sqrt(t), atol=1.0).all())
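# Why the sqrt(t) comparison holds: each step is +/-1 with zero mean and unit
# variance, so after t independent steps the position has variance t. The RMS
# distance of a 1-D random walk therefore grows as sqrt(t) (diffusive scaling),
# which is exactly what the isclose() check above verifies.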
import numpy as np
import xarray as xr
from numba import vectorize, guvectorize, float64, boolean, complex128

from xsar.utils import timing
from xsarsea import gmfs, gmfs_methods
from xsarsea.gmfs import gmf_ufunc_co_inc
from config import *
from gmfs_methods import cmod5

lut_cr_spd = inversion_parameters["lut_cr_dict"]["lut_cr_spd"]
lut_cr_nrcs = inversion_parameters["lut_cr_dict"]["lut_cr_nrcs"]
lut_cr_inc = inversion_parameters["lut_cr_dict"]["lut_cr_inc"]

if inversion_parameters["inversion_method"] == 'point_by_point':
    lut_co_zon = inversion_parameters["lut_co_dict"]["lut_co_zon"]
    lut_co_mer = inversion_parameters["lut_co_dict"]["lut_co_mer"]
    lut_co_spd = inversion_parameters["lut_co_dict"]["lut_co_spd"]
    lut_co_nrcs = inversion_parameters["lut_co_dict"]["lut_co_nrcs"]
    lut_co_inc = inversion_parameters["lut_co_dict"]["lut_co_inc"]

if inversion_parameters["inversion_method"] == 'third':
    wspd_1d = dims["wspd_1d"]
    inc_1d = dims["inc_1d"]
    phi_1d = dims["phi_1d"]
    lut_co_spd, lut_co_phi = np.meshgrid(wspd_1d, phi_1d)
    lut_co_zon = lut_co_spd * np.cos(np.radians(lut_co_phi))
    lut_co_mer = lut_co_spd * np.sin(np.radians(lut_co_phi))


class WindInversion:
    """
    WindInversion class
    """

    def __init__(self, params, ds_xsar):
        """
        Parameters
        ----------
        ds_xsar: xarray.Dataset
            Dataset with dims `('pol','atrack','xtrack')`.
        params: dict of parameters
        """
        self.params = params
        self.is_rs2 = self.params["is_rs2"]
        self.inversion_method = self.params["inversion_method"]
        self.product_type = self.params["product_type"]
        self.ds_xsar = ds_xsar
        if self.ds_xsar:
            self.get_ancillary_wind()

    def get_ancillary_wind(self):
        self.ds_xsar['ancillary_wind_azi'] = np.sqrt(
            self.ds_xsar["ecmwf_0100_1h_U10"] ** 2 +
            self.ds_xsar["ecmwf_0100_1h_V10"] ** 2
        ) * np.exp(1j * (np.arctan2(self.ds_xsar["ecmwf_0100_1h_U10"],
                                    self.ds_xsar["ecmwf_0100_1h_V10"]) -
                         np.deg2rad(self.ds_xsar['ground_heading'])))
        self.ds_xsar['ancillary_wind_azi'].attrs['comment'] = """
        Ancillary wind, as a complex number.
        complex angle is the wind direction relative to azimuth (atrack)
        module is windspeed
        real part is atrack wind component
        imag part is xtrack wind component
        """
        # rotate by 90 degrees to swap the components
        self.ds_xsar['ancillary_wind_antenna'] = (
            np.imag(self.ds_xsar['ancillary_wind_azi']) +
            1j * np.real(self.ds_xsar['ancillary_wind_azi']))

    def perform_noise_flattening_1row(self, nesz_row, incid_row, display=False):
        """
        Parameters
        ----------
        nesz_row: xarray.DataArray
            DataArray with dims `('xtrack')`.
        incid_row: xarray.DataArray
            DataArray with dims `('xtrack')`.
        display: boolean

        Returns
        -------
        xarray.DataArray
            DataArray with dims `('xtrack')`.
        """
        nesz_flat = nesz_row.copy()
        # replacing nan values by nesz mean value for concerned incidence
        nesz_flat[np.isnan(nesz_flat)] = self.neszcr_mean[np.isnan(nesz_flat)]

        noise = 10. * np.log10(nesz_flat)

        try:
            _coef = np.polyfit(incid_row[np.isfinite(noise)],
                               noise[np.isfinite(noise)], 1)
        except Exception:
            return np.full(nesz_row.shape, np.nan)

        nesz_flat = 10. ** ((incid_row * _coef[0] + _coef[1] - 1.0) / 10.)
        return nesz_flat

    def perform_noise_flattening(self, nesz_cr, incid):
        """
        Parameters
        ----------
        nesz_cr: xarray.DataArray
            DataArray with dims `('atrack', 'xtrack')`.
        incid: xarray.DataArray
            DataArray with dims `('atrack', 'xtrack')`.

        Returns
        -------
        xarray.DataArray
            DataArray with dims `('atrack', 'xtrack')`.
        """
        # TODO numba-ize
        return xr.apply_ufunc(self.perform_noise_flattening_1row,
                              nesz_cr,
                              incid,
                              input_core_dims=[["xtrack"], ["xtrack"]],
                              output_core_dims=[["xtrack"]],
                              dask='parallelized',
                              vectorize=True)

    @timing
    def perform_copol_inversion(self, args={}):
        if self.inversion_method == "point_by_point":
            fct = perform_copol_inversion_1pt_guvect
        elif self.inversion_method == "iterative":
            fct = self.perform_copol_inversion_1pt_iterative
        elif self.inversion_method == "third":
            fct = self.perform_copol_inversion_met3

        if self.ds_xsar:
            mask_co = ((self.ds_xsar.land_mask.values == 1) |
                       (np.isnan(self.ds_xsar.sigma0.isel(pol=0))) |
                       (self.ds_xsar.sigma0.isel(pol=0) == 0))
            mask_cr = ((self.ds_xsar.land_mask.values == 1) |
                       (np.isnan(self.ds_xsar.sigma0.isel(pol=1))) |
                       (np.isnan(self.ds_xsar.nesz.isel(pol=1))))
            mask_dual = mask_co | mask_cr

            return xr.apply_ufunc(fct,
                                  10 * np.log10(self.ds_xsar.sigma0.isel(pol=0)).compute(),
                                  self.ds_xsar.incidence.compute(),
                                  self.ds_xsar.ancillary_wind_antenna.compute(),
                                  mask_dual.compute(),
                                  vectorize=False)
        else:
            return xr.apply_ufunc(fct,
                                  args["nrcs_co"],
                                  args["inc"],
                                  args["ancillary_wind_antenna"],
                                  args["mask"],
                                  vectorize=False)

    @timing
    def perform_dual_inversion(self, args={}):
        if inversion_parameters["inversion_method"] == "point_by_point":
            fct = perform_dualpol_inversion_1pt_guvect
        elif self.inversion_method == "iterative":
            fct = self.perform_dualpol_iterative_inversion_1pt
        elif self.inversion_method == "third":
            fct = self.perform_dualpol_inversion_met3

        if self.ds_xsar:
            # Perform noise flattening
            if self.is_rs2 == False:
                # Noise flattening for s1a, s1b
                self.neszcr_mean = self.ds_xsar.nesz.isel(pol=1).mean(axis=0, skipna=True)
                noise_flatened = self.perform_noise_flattening(
                    self.ds_xsar.nesz.isel(pol=1).compute(),
                    self.ds_xsar.incidence.compute())
            else:
                # No noise flattening for rs2
                noise_flatened = self.ds_xsar.nesz.isel(pol=1)

            # mask_co = ((L2_ds.owiLandFlag.values == 1) | (np.isnan(L2_ds.owiNrcs)) | (L2_ds.owiNrcs == 0))
            # mask_cr = ((L2_ds.owiLandFlag.values == 1) | (np.isnan(L2_ds.owiNrcs_cross)) | (np.isnan(_nesz_cr_sarwing)))
            mask_co = ((self.ds_xsar.land_mask.values == 1) |
                       (np.isnan(self.ds_xsar.sigma0.isel(pol=0))) |
                       (self.ds_xsar.sigma0.isel(pol=0) == 0))
            mask_cr = ((self.ds_xsar.land_mask.values == 1) |
                       (np.isnan(self.ds_xsar.sigma0.isel(pol=1))) |
                       (np.isnan(self.ds_xsar.nesz.isel(pol=1)) |
                        (self.ds_xsar.sigma0.isel(pol=1) == 0)))
            mask_dual = mask_co | mask_cr

            return xr.apply_ufunc(fct,
                                  10 * np.log10(self.ds_xsar.sigma0.isel(pol=0)).compute(),
                                  10 * np.log10(self.ds_xsar.sigma0.isel(pol=1)).compute(),
                                  noise_flatened.compute(),
                                  self.ds_xsar.incidence.compute(),
                                  self.ds_xsar.ancillary_wind_antenna.compute(),
                                  mask_dual.compute(),
                                  vectorize=False)
        else:
            return xr.apply_ufunc(fct,
                                  args["nrcs_co"],
                                  args["nrcs_cr"],
                                  args["noise_flattened"],
                                  args["inc"],
                                  args["ancillary_wind_antenna"],
                                  args["mask"],
                                  vectorize=False)

    def perform_dualpol_iterative_inversion_1pt(self, sigco, sigcr, nesz_cr, incid, ancillary_wind, mask):
        """
        Parameters
        ----------
        sigco: float64
        sigcr: float64
        nesz_cr: float64
        incid: float64
        ancillary_wind: complex128

        Returns
        -------
        float64
        """
        if mask:
            return np.nan

        index_cp_inc = np.argmin(abs(self.lut_cr_inc - incid))
        spd_co = self.perform_copol_inversion_1pt_iterative(sigco, incid, ancillary_wind, False)

        J_wind_cr = ((self.lut_cr_spd - spd_co) / du10_fg) ** 2.

        nrcslin = 10. ** (sigcr / 10.)
        dsig_cr_local = 1. / (1.25 / (nrcslin / nesz_cr)) ** 4.
        lut_nrcs_inc_cr = lut_cr_nrcs[index_cp_inc, :]
        Jsig_cr = ((lut_nrcs_inc_cr - sigcr) * dsig_cr_local) ** 2
        J_cr = Jsig_cr + J_wind_cr
        min__ = (np.argmin(J_cr) % J_cr.shape[-1])
        spd_dual = lut_cr_spd[min__]
        if (spd_dual < 5 or spd_co < 5 or np.isnan(spd_dual)):
            return spd_co
        return spd_dual

    def perform_copol_inversion_1pt_iterative(self, sigco, theta, ancillary_wind, mask):
        return perform_copol_inversion_1pt_iterative(sigco, theta, ancillary_wind, mask)

    @guvectorize([(float64[:, :], float64[:, :], complex128[:, :], float64[:, :], boolean[:, :])],
                 '(n,m),(n,m),(n,m),(n,m)->(n,m)', forceobj=False, nopython=True, fastmath=False)
    def perform_copol_inversion_met3(nrcs_co_2d, inc_2d, ancillary_wind_2d, mask_2d, wspd_co):
        # build sigma0 values of cmod5n for the (constant) incidence of each column
        for j in range(nrcs_co_2d.shape[1]):
            gmf_cmod5n_2d = np.empty(shape=lut_co_spd.shape)
            mean_incidence = np.nanmean(inc_2d[:, j])
            for i_spd, one_wspd in enumerate(wspd_1d):
                gmf_cmod5n_2d[:, i_spd] = 10 * np.log10(cmod5(
                    one_wspd, phi_1d, np.array([mean_incidence]), neutral=True))

            for i in range(nrcs_co_2d.shape[0]):
                if mask_2d[i, j]:
                    wspd_co[i, j] = np.nan
                else:
                    mu = np.real(ancillary_wind_2d[i, j])
                    mv = -np.imag(ancillary_wind_2d[i, j])
                    Jwind_co = ((lut_co_zon - mu) / du) ** 2 + \
                               ((lut_co_mer - mv) / dv) ** 2
                    Jsig_co = ((gmf_cmod5n_2d - nrcs_co_2d[i, j]) / dsig_co) ** 2
                    J_co = Jwind_co + Jsig_co
                    wspd_co[i, j] = lut_co_spd[(
                        np.argmin(J_co) // J_co.shape[-1],
                        np.argmin(J_co) % J_co.shape[-1])]

    @guvectorize([(float64[:, :], float64[:, :], float64[:, :], float64[:, :], complex128[:, :], float64[:, :], boolean[:, :])],
                 '(n,m),(n,m),(n,m),(n,m),(n,m),(n,m)->(n,m)', forceobj=True)
    def perform_dualpol_inversion_met3(nrcs_co_2d, nrcs_cr_2d, nesz_cr_2d, inc_2d, ancillary_wind_2d, mask_2d, wspd_dual):
        for j in range(nrcs_co_2d.shape[1]):
            # constant incidence along a column
            gmf_cmod5n_2d = np.empty(shape=lut_co_spd.shape)
            mean_incidence = np.nanmean(inc_2d[:, j])
            for i_spd, one_wspd in enumerate(wspd_1d):
                gmf_cmod5n_2d[:, i_spd] = 10 * np.log10(cmod5(
                    one_wspd, phi_1d, np.array([mean_incidence]), neutral=True))
            idx_inc_cr = np.argmin(np.abs(lut_cr_inc - mean_incidence))
            lut_nrcs_inc_cr = lut_cr_nrcs[idx_inc_cr, :]

            for i in range(nrcs_co_2d.shape[0]):
                if mask_2d[i, j]:
                    wspd_dual[i, j] = np.nan
                else:
                    mu = np.real(ancillary_wind_2d[i, j])
                    mv = -np.imag(ancillary_wind_2d[i, j])
                    Jwind = ((lut_co_zon - mu) / du) ** 2 + \
                            ((lut_co_mer - mv) / dv) ** 2
                    Jsig = ((gmf_cmod5n_2d - nrcs_co_2d[i, j]) / dsig_co) ** 2
                    J = Jwind + Jsig
                    spd_co = lut_co_spd[(
                        np.argmin(J) // J.shape[-1],
                        np.argmin(J) % J.shape[-1])]

                    Jwind_cr = ((lut_cr_spd - spd_co) / du10_fg) ** 2.
                    nrcslin = 10. ** (nrcs_cr_2d[i, j] / 10.)
                    dsig_cr = 1. / (1.25 / (nrcslin / nesz_cr_2d[i, j])) ** 4.
                    Jsig_cr = ((lut_nrcs_inc_cr - nrcs_cr_2d[i, j]) * dsig_cr) ** 2
                    J_cr = Jsig_cr + Jwind_cr
                    spd_dual = lut_cr_spd[(np.argmin(J_cr) % J_cr.shape[-1])]
                    if (spd_dual < 5 or spd_co < 5):
                        wspd_dual[i, j] = spd_co
                    else:
                        wspd_dual[i, j] = spd_dual


### iterative on copol 1pt ###
@vectorize([float64(float64, float64, complex128, boolean)], forceobj=True)
def perform_copol_inversion_1pt_iterative(sigco, theta, ancillary_wind, mask):
    wspd_min = 0.2
    wspd_max = 50
    phi_min = 0
    phi_max = 360
    step_LR_phi = 1

    steps = np.geomspace(12.5, 0.1, num=4)

    wspd_1d = np.arange(wspd_min, wspd_max + steps[0], steps[0])
    phi_1d = np.arange(phi_min, phi_max + step_LR_phi, step_LR_phi)
    spd, phi = perform_copol_inversion_1pt_once(sigco, theta, ancillary_wind, phi_1d, wspd_1d)

    for idx, val in enumerate(steps[1:]):
        if np.isnan(spd):
            return np.nan
        wspd_1d = np.arange(spd - steps[idx], spd + steps[idx] + val, val)
        spd, phi = perform_copol_inversion_1pt_once(sigco, theta, ancillary_wind, phi_1d, wspd_1d, mask)

    return spd


def perform_copol_inversion_1pt_once(sigco, theta, ancillary_wind, phi_1d, wspd_1d, mask=False):
    """
    Parameters
    ----------
    sigco: float64
    theta: float64
    ancillary_wind: complex128
    mask: boolean

    Returns
    -------
    float64
    """
    if mask:
        return np.nan, np.nan

    lut_co_spd, lut_co_phi = np.meshgrid(wspd_1d, phi_1d)
    lut_co_zon = lut_co_spd * np.cos(np.radians(lut_co_phi))
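# How the iterative co-pol inversion refines: np.geomspace(12.5, 0.1, num=4)
# yields wind-speed grid steps of 12.5, 2.5, 0.5 and 0.1 m/s. The first pass
# scans the full 0.2-50 m/s range at 12.5 m/s resolution; each later pass
# re-centres a finer grid on the previous minimum, so the cost function is
# evaluated on a few small grids instead of one dense 0.1 m/s grid.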
""" Author: Dr. <NAME> <<EMAIL>> This package is distributed under New BSD license. """ import numpy as np from packaging import version from sklearn.cross_decomposition import PLSRegression as pls from pyDOE2 import bbdesign from sklearn.metrics.pairwise import check_pairwise_arrays # TODO: Create hyperclass Kernels and a class for each kernel GOWER = "gower" HOMO_GAUSSIAN = "homoscedastic_gaussian_matrix_kernel" FULL_GAUSSIAN = "full_gaussian_matrix_kernel" def standardization(X, y, scale_X_to_unit=False): """ We substract the mean from each variable. Then, we divide the values of each variable by its standard deviation. If scale_X_to_unit, we scale the input space X to the unit hypercube [0,1]^dim with dim the input dimension. Parameters ---------- X: np.ndarray [n_obs, dim] - The input variables. y: np.ndarray [n_obs, 1] - The output variable. scale_X_to_unit: bool - We substract the mean from each variable and then divide the values of each variable by its standard deviation (scale_X_to_unit=False). - We scale X to the unit hypercube [0,1]^dim (scale_X_to_unit=True). Returns ------- X: np.ndarray [n_obs, dim] The standardized input matrix. y: np.ndarray [n_obs, 1] The standardized output vector. X_offset: list(dim) The mean (or the min if scale_X_to_unit=True) of each input variable. y_mean: list(1) The mean of the output variable. X_scale: list(dim) The standard deviation (or the difference between the max and the min if scale_X_to_unit=True) of each input variable. y_std: list(1) The standard deviation of the output variable. """ if scale_X_to_unit: X_offset = np.min(X, axis=0) X_max = np.max(X, axis=0) X_scale = X_max - X_offset else: X_offset = np.mean(X, axis=0) X_scale = X.std(axis=0, ddof=1) X_scale[X_scale == 0.0] = 1.0 y_mean = np.mean(y, axis=0) y_std = y.std(axis=0, ddof=1) y_std[y_std == 0.0] = 1.0 # scale X and y X = (X - X_offset) / X_scale y = (y - y_mean) / y_std return X, y, X_offset, y_mean, X_scale, y_std def cross_distances(X, y=None): """ Computes the nonzero componentwise cross-distances between the vectors in X or between the vectors in X and the vectors in y. Parameters ---------- X: np.ndarray [n_obs, dim] - The input variables. y: np.ndarray [n_y, dim] - The training data. Returns ------- D: np.ndarray [n_obs * (n_obs - 1) / 2, dim] - The cross-distances between the vectors in X. ij: np.ndarray [n_obs * (n_obs - 1) / 2, 2] - The indices i and j of the vectors in X associated to the cross- distances in D. """ n_samples, n_features = X.shape if y is None: n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2 ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int32) D = np.zeros((n_nonzero_cross_dist, n_features)) ll_1 = 0 for k in range(n_samples - 1): ll_0 = ll_1 ll_1 = ll_0 + n_samples - k - 1 ij[ll_0:ll_1, 0] = k ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples) D[ll_0:ll_1] = X[k] - X[(k + 1) : n_samples] else: n_y, n_features = y.shape X, y = check_pairwise_arrays(X, y) n_nonzero_cross_dist = n_samples * n_y ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int32) D = np.zeros((n_nonzero_cross_dist, n_features)) for k in range(n_nonzero_cross_dist): xk = k // n_y yk = k % n_y D[k] = X[xk] - y[yk] ij[k, 0] = xk ij[k, 1] = yk return D, ij.astype(np.int32) def cross_levels(X, ij, xtypes, y=None): """ Returns the levels corresponding to the indices i and j of the vectors in X and the number of levels. Parameters ---------- X: np.ndarray [n_obs, dim] - The input variables. y: np.ndarray [n_y, dim] - The training data. 
ij: np.ndarray [n_obs * (n_obs - 1) / 2, 2] - The indices i and j of the vectors in X associated to the cross- distances in D. xtypes: np.ndarray [dim] -the types (FLOAT,ORD,ENUM) of the input variables Returns ------- Lij: np.ndarray [n_obs * (n_obs - 1) / 2, 2] - The levels corresponding to the indices i and j of the vectors in X. n_levels: np.ndarray - The number of levels for every categorical variable. """ n_levels = [] for i, xtyp in enumerate(xtypes): if isinstance(xtyp, tuple): n_levels.append(xtyp[1]) n_levels = np.array(n_levels) n_var = n_levels.shape[0] n, _ = ij.shape X_cont, cat_features = compute_X_cont(X, xtypes) X_cat = X[:, cat_features] Lij = np.zeros((n_var, n, 2)) for k in range(n_var): for l in range(n): i, j = ij[l] if y is None: Lij[k][l][0] = X_cat[i, k] Lij[k][l][1] = X_cat[j, k] else: y_cat = y[:, cat_features] Lij[k][l][0] = X_cat[i, k] Lij[k][l][1] = y_cat[j, k] return Lij, n_levels def compute_n_param(xtypes, cat_kernel): """ Returns the he number of parameters needed for an homoscedastic or full group kernel. Parameters ---------- xtypes: np.ndarray [dim] -the types (FLOAT,ORD,ENUM) of the input variables cat_kernel : string -The kernel to use for categorical inputs. Only for non continuous Kriging", Returns ------- n_param: int - The number of parameters. """ n_param = 0 for i, xtyp in enumerate(xtypes): if isinstance(xtyp, tuple): if cat_kernel == FULL_GAUSSIAN: n_param += int(xtyp[1] * (xtyp[1] + 1) / 2) if cat_kernel == HOMO_GAUSSIAN: n_param += int(xtyp[1] * (xtyp[1] - 1) / 2) else: n_param += 1 return n_param def compute_X_cont(x, xtypes): """ Some parts were extracted from gower 0.0.5 library Computes the X_cont part of a vector x for mixed integer Parameters ---------- x: np.ndarray [n_obs, dim] - The input variables. xtypes: np.ndarray [dim] -the types (FLOAT,ORD,ENUM) of the input variables Returns ------- X_cont: np.ndarray [n_obs, dim_cont] - The non categorical values of the input variables. cat_features: np.ndarray [dim] - Indices of the categorical input dimensions. """ if xtypes is None: return x, None cat_features = [ not (xtype == "float_type" or xtype == "ord_type") for i, xtype in enumerate(xtypes) ] return x[:, np.logical_not(cat_features)], cat_features def gower_componentwise_distances(X, y=None, xtypes=None): """ Computes the nonzero Gower-distances componentwise between the vectors in X. Parameters ---------- X: np.ndarray [n_obs, dim] - The input variables. y: np.ndarray [n_y, dim] - The training data. xtypes: np.ndarray [dim] -the types (FLOAT,ORD,ENUM) of the input variables Returns ------- D: np.ndarray [n_obs * (n_obs - 1) / 2, dim] - The gower distances between the vectors in X. ij: np.ndarray [n_obs * (n_obs - 1) / 2, 2] - The indices i and j of the vectors in X associated to the cross- distances in D. X_cont: np.ndarray [n_obs, dim_cont] - The non categorical values of the input variables. 
""" X = X.astype(np.float) Xt = X X_cont, cat_features = compute_X_cont(Xt, xtypes) # function checks if y is None: Y = X else: Y = y if not isinstance(X, np.ndarray): if not np.array_equal(X.columns, Y.columns): raise TypeError("X and Y must have same columns!") else: if not X.shape[1] == Y.shape[1]: raise TypeError("X and Y must have same y-dim!") x_n_rows, x_n_cols = X.shape y_n_rows, y_n_cols = Y.shape if not isinstance(X, np.ndarray): X = np.asarray(X) if not isinstance(Y, np.ndarray): Y = np.asarray(Y) Z = np.concatenate((X, Y)) x_index = range(0, x_n_rows) y_index = range(x_n_rows, x_n_rows + y_n_rows) Z_num = Z[:, np.logical_not(cat_features)] Y_num = Y[:, np.logical_not(cat_features)] num_cols = Z_num.shape[1] num_ranges = np.zeros(num_cols) num_max = np.zeros(num_cols) for col in range(num_cols): col_array = Y_num[:, col].astype(np.float32) max = np.nanmax(col_array) min = np.nanmin(col_array) if np.isnan(max): max = 0.0 if np.isnan(min): min = 0.0 num_max[col] = max num_ranges[col] = (1 - min / max) if (max != 0) else 0.0 # This is to normalize the numeric values between 0 and 1. Z_num = np.divide(Z_num, num_max, out=np.zeros_like(Z_num), where=num_max != 0) Z_cat = Z[:, cat_features] X_cat = Z_cat[ x_index, ] X_num = Z_num[ x_index, ] Y_cat = Z_cat[ y_index, ] Y_num = Z_num[ y_index, ] X_norma = X Y_norma = Y X_norma[:, np.logical_not(cat_features)] = X_num Y_norma[:, np.logical_not(cat_features)] = Y_num n_samples, n_features = X_num.shape n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2 ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int32) D_num = np.zeros((n_nonzero_cross_dist, n_features)) ll_1 = 0 if y is None: for k in range(n_samples - 1): ll_0 = ll_1 ll_1 = ll_0 + n_samples - k - 1 ij[ll_0:ll_1, 0] = k ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples) abs_delta = np.abs(X_num[k] - Y_num[(k + 1) : n_samples]) try: D_num[ll_0:ll_1] = np.divide( abs_delta, num_ranges, out=np.zeros_like(abs_delta), where=num_ranges != 0, ) except: pass n_samples, n_features = X_cat.shape n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2 D_cat = np.zeros((n_nonzero_cross_dist, n_features)) ll_1 = 0 for k in range(n_samples - 1): ll_0 = ll_1 ll_1 = ll_0 + n_samples - k - 1 D_cat[ll_0:ll_1] = np.where( X_cat[k] == Y_cat[(k + 1) : n_samples], np.zeros_like(X_cat[k]), np.ones_like(X_cat[k]), ) D = np.concatenate((D_cat, D_num), axis=1) * 0 D[:, np.logical_not(cat_features)] = D_num D[:, cat_features] = D_cat return D, ij.astype(np.int32), X_cont else: D = X_norma[:, np.newaxis, :] - Y_norma[np.newaxis, :, :] D = D.reshape((-1, X.shape[1])) D = np.abs(D) D[:, cat_features] = D[:, cat_features] > 0.5 D[:, np.logical_not(cat_features)] = np.divide( D[:, np.logical_not(cat_features)], num_ranges, out=np.zeros_like(D[:, np.logical_not(cat_features)]), where=num_ranges != 0, ) return D def differences(X, Y): "compute the componentwise difference between X and Y" X, Y = check_pairwise_arrays(X, Y) D = X[:, np.newaxis, :] - Y[np.newaxis, :, :] return D.reshape((-1, X.shape[1])) def matrix_data_corr( corr, theta, theta_bounds, d, Lij, nlevels, cat_features, cat_kernel ): """ matrix kernel correlation model. Parameters ---------- corr: correlation_types - The autocorrelation model theta : list[small_d * n_comp] Hyperparameters of the correlation model d: np.ndarray[n_obs * (n_obs - 1) / 2, n_comp] d_i Lij: np.ndarray [n_obs * (n_obs - 1) / 2, 2] - The levels corresponding to the indices i and j of the vectors in X. n_levels: np.ndarray - The number of levels for every categorical variable. 
cat_features: np.ndarray [dim] - Indices of the categorical input dimensions. cat_kernel : string - The kernel to use for categorical inputs. Only for non continuous Kriging", Returns ------- r: np.ndarray[n_obs * (n_obs - 1) / 2,1] An array containing the values of the autocorrelation model. """ _correlation_types = { "abs_exp": abs_exp, "squar_exp": squar_exp, "act_exp": act_exp, "matern52": matern52, "matern32": matern32, } r = np.zeros((d.shape[0], 1)) n_components = d.shape[1] theta_cont_features = np.zeros((len(theta), 1), dtype=bool) theta_cat_features = np.zeros((len(theta), len(nlevels)), dtype=bool) i = 0 j = 0 for feat in cat_features: if feat: if cat_kernel == FULL_GAUSSIAN: theta_cont_features[ j : j + int(nlevels[i] * (nlevels[i] + 1) / 2) ] = False theta_cat_features[ j : j + int(nlevels[i] * (nlevels[i] + 1) / 2), i ] = [True] * int(nlevels[i] * (nlevels[i] + 1) / 2) j += int(nlevels[i] * (nlevels[i] + 1) / 2) if cat_kernel == HOMO_GAUSSIAN: theta_cont_features[ j : j + int(nlevels[i] * (nlevels[i] - 1) / 2) ] = False theta_cat_features[ j : j + int(nlevels[i] * (nlevels[i] - 1) / 2), i ] = [True] * int(nlevels[i] * (nlevels[i] - 1) / 2) j += int(nlevels[i] * (nlevels[i] - 1) / 2) i += 1 else: theta_cont_features[j] = True j += 1 theta_cont = theta[theta_cont_features[:, 0]] d_cont = d[:, np.logical_not(cat_features)] r_cont = _correlation_types[corr](theta_cont, d_cont) r_cat = np.copy(r_cont) * 0 r = np.copy(r_cont) ##Theta_cat_i loop for i in range(len(nlevels)): theta_cat = theta[theta_cat_features[:, i]] if cat_kernel == FULL_GAUSSIAN: theta_cat[: -nlevels[i]] = theta_cat[: -nlevels[i]] * ( 0.5 * np.pi / theta_bounds[1] ) if cat_kernel == HOMO_GAUSSIAN: theta_cat = theta_cat * (0.5 * np.pi / theta_bounds[1]) d_cat = d[:, cat_features] Theta_mat = np.zeros((nlevels[i], nlevels[i])) L = np.zeros((nlevels[i], nlevels[i])) v = 0 for j in range(nlevels[i]): for k in range(nlevels[i] - j): if j == k + j: Theta_mat[j, k + j] = 1 else: Theta_mat[j, k + j] = theta_cat[v] Theta_mat[k + j, j] = theta_cat[v] v = v + 1 for j in range(nlevels[i]): for k in range(nlevels[i] - j): if j == k + j: if j == 0: L[j, k + j] = 1 else: L[j, k + j] = 1 for l in range(j): L[j, k + j] = L[j, k + j] * np.sin(Theta_mat[j, l]) else: if j == 0: L[k + j, j] = np.cos(Theta_mat[k, 0]) else: L[k + j, j] =
np.cos(Theta_mat[k + j, j])
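# Quick self-check of the distance helpers above (not part of the original
# module; toy data only):
if __name__ == "__main__":
    X_demo = np.array([[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]])
    y_demo = np.array([[1.0], [2.0], [3.0]])
    X_std, y_std_demo, X_off, y_mu, X_sc, y_sd = standardization(X_demo, y_demo)
    D_demo, ij_demo = cross_distances(X_std)
    print(D_demo.shape, ij_demo.shape)  # 3 sample pairs -> (3, 2) (3, 2)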
""" Copyright (c) 2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. This file is based in fatchord_version.py from https://github.com/fatchord/WaveRNN, commit <PASSWORD> on Nov 27, 2019 """ import math import numpy as np import numpy.random as rnd # for WaveRNN approach (https://github.com/fatchord/WaveRNN), first step before upsample def pad_tensor(x, pad, side='both'): # NB - this is just a quick method i need right now # i.e., it won't generalise to other shapes/dims b, t, c = x.shape total = t + 2 * pad if side == 'both' else t + pad padded =
np.zeros((b, total, c), dtype=np.float)
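# Toy check of pad_tensor (not in the original file): batch of 1, three
# timesteps, one channel.
if __name__ == "__main__":
    x_demo = np.arange(3, dtype=float).reshape(1, 3, 1)
    print(pad_tensor(x_demo, pad=2).shape)                # -> (1, 7, 1)
    print(pad_tensor(x_demo, pad=2, side='after').shape)  # -> (1, 5, 1)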
# This script is for simulating and trying to recover velocities.
import numpy as np
import pandas as pd
import astropy.stats as aps
import aviary as av
from multiprocessing import Pool
import emcee
import corner


def infer_velocity(df):
    # Format parameter and data arrays.
    pos = [df["ra"], df["dec"], df["parallax"]]
    pos_err = [df["ra_error"], df["dec_error"], df["parallax_error"]]
    pm = [df["pmra"], df["pmdec"]]
    pm_err = [df["pmra_error"], df["pmdec_error"]]

    # Run MCMC.
    ndim, nwalkers = 4, 16
    inits = [df["vx"], df["vy"], df["vz"], np.log(1. / df["parallax"])]
    p0 = np.random.randn(nwalkers, ndim) * 1e-2 + inits
    sampler = emcee.EnsembleSampler(nwalkers, ndim, av.lnprob,
                                    args=(pm, pm_err, pos, pos_err))
    nsteps = 1000
    sampler.run_mcmc(p0, nsteps, progress=True)

    flat_samples = sampler.get_chain(discard=int(nsteps / 2), flat=True)
    fig = corner.corner(flat_samples)
    fig.savefig("corner_degeneracy_test")

    params_inferred = np.median(flat_samples, axis=0)
    upper = np.percentile(flat_samples, 84, axis=0)
    lower = np.percentile(flat_samples, 16, axis=0)
    errp = upper - params_inferred
    errm = params_inferred - lower
    std = np.std(flat_samples, axis=0)

    df["ID"] = df["ID"]
    df["vx_inferred"] = params_inferred[0]
    df["vx_inferred_errp"] = errp[0]
    df["vx_inferred_errm"] = errm[0]
    df["vx_inferred_err"] = std[0]
    df["vy_inferred"] = params_inferred[1]
    df["vy_inferred_errp"] = errp[1]
    df["vy_inferred_errm"] = errm[1]
    df["vy_inferred_err"] = std[1]
    df["vz_inferred"] = params_inferred[2]
    df["vz_inferred_errp"] = errp[2]
    df["vz_inferred_errm"] = errm[2]
    df["vz_inferred_err"] = std[2]
    df["lndistance_inferred"] = params_inferred[3]
    df["lndistance_inferred_errp"] = errp[3]
    df["lndistance_inferred_errm"] = errm[3]
    df["lndistance_inferred_err"] = std[3]

    df = pd.DataFrame(df)
    df.to_csv("{}.csv".format(int(df["ID"])))


if __name__ == "__main__":
    np.random.seed(42)

    # Calculate prior parameters from vx, vy, vz distributions
    df = pd.read_csv("../data/gaia_mc5_velocities.csv")
    m = df.radial_velocity.values != 0
    m &= np.isfinite(df.basic_vx.values)
    m &= np.isfinite(df.basic_vy.values)
    m &= np.isfinite(df.basic_vz.values)
    df = df.iloc[m]

    # Calculate covariance between velocities
    VX = np.stack((df.basic_vx.values, df.basic_vy.values,
                   df.basic_vz.values, np.log(1. / df.parallax.values)), axis=0)
    mean = np.mean(VX, axis=1)
    cov = np.cov(VX)

    # Draw parameters from the prior.
    Nstars = 1
    vxs, vys, vzs, lnds = np.random.multivariate_normal(mean, cov, Nstars).T
    ra = np.random.uniform(280, 300, Nstars)
    dec = np.random.uniform(36, 52, Nstars)
    # add Gaussian measurement noise to the parallax (unit scale assumed)
    parallax = 1. / np.exp(lnds) + (np.random.randn(Nstars))
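# A parallel run over stars (the likely reason `Pool` is imported above) might
# look like the following hypothetical driver:
#
#   rows = [df.iloc[i] for i in range(len(df))]
#   with Pool(4) as p:
#       p.map(infer_velocity, rows)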
import math

import numpy as np
from scipy import stats
from pprint import pprint
from collections import OrderedDict

import statsmodels.api as sm
from statsmodels.distributions.mixture_rvs import mixture_rvs

from timeit import default_timer as timer

from .cpu_ref import (approx_bandwidth, calc_rms, uni_kde_seq,
                      build_support_nd, multi_kde_seq)

from numba import roc
from numba_roc_examples.reduction.reduction import group_reduce_sum_float64

WAVESIZE = 64


@roc.jit(device=True)
def roc_gaussian(x, mu, sigma):
    xmu = (x - mu)
    xmu2 = xmu * xmu
    div = 2 * sigma * sigma
    exp = math.exp(-(xmu2 / div))
    return exp / (sigma * math.sqrt(2 * math.pi))


@roc.jit(device=True)
def roc_gaussian_kernel(x):
    return roc_gaussian(x, 0, 1)


def approx_bandwidth(xs):
    """
    Scott's rule of thumb as in SciPy
    """
    n = xs.size
    d = xs.ndim
    return n ** (-1 / (d + 4))


def roc_uni_kde_factory(kernel):
    @roc.jit
    def roc_uni_kde(support, samples, bandwidth, pdf):
        i = roc.get_global_id(0)
        if i < support.size:
            supp = support[i]
            total = 0
            for j in range(samples.size):
                total += kernel((samples[j] - supp) / bandwidth) / bandwidth
            pdf[i] = total / samples.size

    def launcher(support, samples, bandwidth, pdf):
        assert pdf.ndim == 1
        assert support.ndim == 1
        assert samples.ndim == 1
        assert support.size == pdf.size
        with roc.register(support, samples, pdf):
            threads = WAVESIZE * 4
            blocks = (pdf.size + threads - 1) // threads
            roc_uni_kde[blocks, threads](support, samples, bandwidth, pdf)

    return launcher


roc_uni_kde = roc_uni_kde_factory(roc_gaussian_kernel)


def roc_uni_kde_ver2_factory(kernel):
    @roc.jit
    def roc_uni_kde(support, samples, bandwidth, pdf):
        gid = roc.get_group_id(0)
        tid = roc.get_local_id(0)
        tsz = roc.get_local_size(0)

        supp = support[gid]

        # all local threads cooperatively compute the energy for a support point
        energy = 0
        for base in range(0, samples.size, tsz):
            idx = tid + base
            if idx < samples.size:
                energy += kernel((samples[idx] - supp) / bandwidth) / bandwidth

        # reduce energy
        total = group_reduce_sum_float64(energy)
        if tid == 0:
            pdf[gid] = total / samples.size

    def launcher(support, samples, bandwidth, pdf):
        assert pdf.ndim == 1
        assert support.ndim == 1
        assert samples.ndim == 1
        assert support.size == pdf.size
        with roc.register(support, samples, pdf):
            threads = WAVESIZE * 8
            blocks = support.size
            roc_uni_kde[blocks, threads](support, samples, bandwidth, pdf)

    return launcher


roc_uni_kde_ver2 = roc_uni_kde_ver2_factory(roc_gaussian_kernel)


def test_roc_uni_kde():
    np.random.seed(12345)

    samples = mixture_rvs([.25, .75], size=10000,
                          dist=[stats.norm, stats.norm],
                          kwargs=(dict(loc=-1, scale=.5),
                                  dict(loc=1, scale=.5)))
    bandwidth = approx_bandwidth(samples)

    # Run statsmodel for reference
    kde = sm.nonparametric.KDEUnivariate(samples)
    kde.fit(kernel="gau", fft=False)

    # Reuse statsmodel support for our test
    support = kde.support

    # Run custom KDE
    pdf = np.zeros_like(support)
    roc_uni_kde(support, samples, bandwidth, pdf)

    # Check value
    expect = kde.density
    got = pdf
    rms = calc_rms(expect, got, norm=True)
    print("RMS", rms)
    assert rms < 1e-2, "RMS error too high: {0}".format(rms)


def test_roc_uni_kde_ver2():
    np.random.seed(12345)

    samples = mixture_rvs([.25, .75], size=10000,
                          dist=[stats.norm, stats.norm],
                          kwargs=(dict(loc=-1, scale=.5),
                                  dict(loc=1, scale=.5)))
    bandwidth = approx_bandwidth(samples)

    # Run statsmodel for reference
    kde = sm.nonparametric.KDEUnivariate(samples)
    kde.fit(kernel="gau", fft=False)

    # Reuse statsmodel support for our test
    support = kde.support

    # Run custom KDE
    pdf = np.zeros_like(support)
numpy.zeros_like
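A minimal sketch of numpy.zeros_like as used in the completion above: it allocates an output buffer matching the support grid; the grid values here are made up.

import numpy as np

support = np.linspace(-3, 3, 7)   # hypothetical KDE support grid
pdf = np.zeros_like(support)      # zero-filled, same shape and dtype as support
print(pdf.shape, pdf.dtype)       # (7,) float64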
import os import os.path as osp import sys import torch import torch.utils.data as data import torchvision.transforms as transforms import cv2 import numpy as np from .config import cfg from pycocotools import mask as maskUtils import contextlib import io import logging import time def get_label_map(): if cfg.dataset.label_map is None: return {x + 1: x + 1 for x in range(len(cfg.dataset.class_names))} else: return cfg.dataset.label_map def collate_fn_youtube_vis_eval(batch): return batch[0] def collate_fn_youtube_vis(batch): # 0 imgs , 1 targets , 2 masks , 3 num_crowds frames = [([], [], [], []) for _ in batch[0][0]] # TODO: is it better to use range here? for sample, extra in batch: for idx, (img, (gt, masks, num_crowds)) in enumerate(sample): frames[idx][0].append(img) frames[idx][1].append(torch.FloatTensor(gt) if gt is not None else gt) frames[idx][2].append(torch.FloatTensor(masks) if masks is not None else masks) frames[idx][3].append(num_crowds) for idx, (imgs, targets, masks, num_crowds) in enumerate(frames): frames[idx] = (torch.stack(imgs, 0), (targets, masks, num_crowds), ) return frames class YoutubeVISAnnotationTransform(object): """Transforms a YoutubeVIS annotation into a Tensor of bbox coords and label index Initilized with a dictionary lookup of classnames to indexes """ def __init__(self): self.dataset_name = cfg.dataset.name self.label_map = get_label_map() def __call__(self, target, frame_id, width, height): """ Args: target (dict): YoutubeVIS target json annotation as a python dict frame_id (int): frame ID height (int): height width (int): width Returns: a list containing lists of bounding boxes [bbox coords, class idx] """ # TODO: is this wasteful to check this? the dataset has been changed here. if self.dataset_name != cfg.dataset.name: self.label_map = get_label_map() self.dataset_name = cfg.dataset.name scale = np.array([width, height, width, height]) res = [] for obj in target: if 'bboxes' in obj and obj['bboxes'][frame_id] is not None: bbox = obj['bboxes'][frame_id] label_idx = obj['category_id'] if label_idx >= 0: label_idx = self.label_map[label_idx] - 1 final_box = list(np.array([bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]]) / scale) final_box.append(label_idx) res += [final_box] # [xmin, ymin, xmax, ymax, label_idx] # else: # TODO: it shall be okay for videos to have some frames without bbox annotation, right? # print("No bbox found for object ", obj) return res # [[xmin, ymin, xmax, ymax, label_idx], ... ] class YoutubeVIS(data.Dataset): """`YoutubeVIS <https://youtube-vos.org/dataset/vis/>`_ Dataset. Args: root (string): Root directory where images are downloaded to. set_name (string): Name of the specific set of COCO images. transform (callable, optional): A function/transform that augments the raw images` target_transform (callable, optional): A function/transform that takes in the target (bbox) and transforms it. prep_crowds (bool): Whether or not to prepare crowds for the evaluation step. 
""" def __init__(self, image_path, info_file, configs, transform=None, target_transform=YoutubeVISAnnotationTransform(), dataset_name='YouTube VIS', has_gt=True): # Do this here because we have too many things named COCO from pycocotools.ytvos import YTVOS self.root = image_path self.configs = configs logger = logging.getLogger("yolact.dataset") logger.info('Loading annotations into memory...') tic = time.time() with contextlib.redirect_stdout(io.StringIO()): self.coco = YTVOS(info_file) self.ids = list(self.coco.vidToAnns.keys()) if len(self.ids) == 0 or not has_gt: self.ids = list(self.coco.vids.keys()) logger.info('{} videos loaded in {:0.2f}s.'.format(len(self.ids), time.time() - tic)) self.transform = transform self.target_transform = target_transform self.name = dataset_name self.has_gt = has_gt def __getitem__(self, index): """ Args: index (int): Index Returns: tuple: Tuple (image, (target, masks, num_crowds)). target is the object returned by ``coco.loadAnns``. """ video_frames, extra_data = self.pull_video(index) video_frames = [(im, (gt, masks, num_crowds), ) for im, gt, masks, h, w, num_crowds in video_frames] return video_frames, extra_data def pull_video(self, index, return_on_failure=False, full_video=False, max_images=-1): """ Args: index (int): Index Returns: tuple: Tuple (image, target, masks, height, width, crowd). target is the object returned by ``coco.loadAnns``. Note that if no crowd annotations exist, crowd will be None """ vid_id = self.ids[index] seq_len = self.configs.images_per_video # sample vid_id with enough length while True: vid = self.coco.loadVids(vid_id)[0] annot_length = len(vid['file_names']) if not full_video and annot_length < seq_len: continue # FIXME: need to set new vid_id right? vid_name = vid['file_names'][0].split('/')[0] # Generate target starts. if self.has_gt: target = self.coco.vidToAnns[vid_id] ann_ids = self.coco.getAnnIds(vidIds=vid_id) # Target has {'segmentation', 'area', iscrowd', 'image_id', 'bboxes', 'category_id'} target = self.coco.loadAnns(ann_ids) else: target = [] # Separate out crowd annotations. These are annotations that signify a large crowd of # objects of said class, where there is no annotation for each individual object. Both # during testing and training, consider these crowds as neutral. crowd = [x for x in target if ('iscrowd' in x and x['iscrowd'])] target = [x for x in target if not ('iscrowd' in x and x['iscrowd'])] num_crowds = len(crowd) for x in crowd: x['category_id'] = -1 # This is so we ensure that all crowd annotations are at the end of the array target += crowd # Generate target ends. 
# shuffling and sample a small range of video here if full_video: annot_idx = np.arange(0, annot_length, 1) frame_idx = np.asarray([int(vid['file_names'][idx][-9:-4]) for idx in range(annot_length)]) if self.configs.use_all_frames: key_frame_idx = frame_idx frame_idx = np.arange(frame_idx[0], frame_idx[-1] + 1, 1) have_annot = np.asarray([int(idx in key_frame_idx) for idx in frame_idx]) annot_idx = np.add.accumulate(have_annot) * have_annot - 1 if max_images != -1: eval_frames = min(max_images, len(frame_idx)) # start_idx = np.random.randint(0, len(frame_idx) - eval_frames + 1) start_idx = 0 frame_idx = frame_idx[start_idx: start_idx + eval_frames] annot_idx = annot_idx[start_idx: start_idx + eval_frames] elif self.configs.use_all_frames: rand_idx = np.arange(0, annot_length - seq_len) np.random.shuffle(rand_idx) direction = 1 if self.configs.all_frame_direction == 'allway': if np.random.rand() > 0.5: direction *= -1 elif self.configs.all_frame_direction == 'forward': # Note: forward warping needs to sample a 'previous frame' direction *= -1 elif self.configs.all_frame_direction == 'backward': pass else: raise ValueError("Unexpected frame direction: %s" % self.configs.all_frame_direction) start_idx = rand_idx[0] if direction < 0: start_idx += self.configs.images_per_video start_frame_idx = int(vid['file_names'][start_idx][-9:-4]) annot_idx = [start_idx] frame_idx = [start_frame_idx] # if self.configs.images_per_video > 1: # num_extra_frames = self.configs.images_per_video - 1 # extra_annot_idx = [start_idx + direction * offset_idx # for offset_idx in range(1, num_extra_frames + 1)] # extra_frame_idx = [int(vid['file_names'][extra_idx][-9:-4]) # for extra_idx in extra_annot_idx] # # annot_idx += extra_annot_idx # frame_idx += extra_frame_idx extra_frame_idx = [] extra_annot_idx = [] if self.configs.images_per_video > 0: offset_lb, offset_ub = self.configs.frame_offset_lb, self.configs.frame_offset_ub lb, ub = int(vid['file_names'][0][-9:-4]), int(vid['file_names'][-1][-9:-4]) fidx = frame_idx[-1] lb, ub = lb - fidx, ub - fidx if direction == -1: ub = -offset_lb lb = max(lb, -offset_ub) else: lb = offset_lb ub = min(ub, offset_ub) assert lb <= ub + 1, "{}, {}".format(lb, ub) assert self.configs.frame_offset_multiplier == 1, "frame_offset_multiplier deprecated." 
for _ in range(self.configs.images_per_video): frame_diff = np.random.randint(lb, ub + 1) ref_idx = fidx + frame_diff assert int(vid['file_names'][0][-9:-4]) <= ref_idx <= int(vid['file_names'][-1][-9:-4]), "{} <= {} <= {}".format(int(vid['file_names'][0][-9:-4]), ref_idx, int(vid['file_names'][-1][-9:-4])) # frame_diff = self.configs.frame_offset_multiplier * np.random.randint(self.configs.frame_offset_lb, self.configs.frame_offset_ub + 1) # ref_idx = np.clip(frame_idx[-1] + frame_diff * direction, # int(vid['file_names'][0][-9:-4]), int(vid['file_names'][-1][-9:-4])) extra_frame_idx += [ref_idx] extra_annot_idx += [-1] extra_frame_idx = list(sorted(extra_frame_idx, reverse=True)) annot_idx += extra_annot_idx frame_idx += extra_frame_idx annot_idx = np.asarray(annot_idx) frame_idx = np.asarray(frame_idx) else: rand_idx = np.arange(0, annot_length - seq_len + 1) np.random.shuffle(rand_idx) start_idx = rand_idx[0] annot_idx = np.arange(start_idx, start_idx + seq_len, 1) frame_idx = np.asarray([int(vid['file_names'][idx][-9:-4]) for idx in annot_idx]) has_targets = all([self.target_in_frame(target, annot_id, true_on_reference=True) for annot_id in annot_idx]) if has_targets: break if return_on_failure: return None # print("Not all frame of video %s[%d-%d] has targets, re-selecting video." % # (vid['file_names'][0].split('/')[0], start_idx, start_idx + frm_len)) index = np.random.randint(len(self)) vid_id = self.ids[index] frame_results = [] extra_data = [] while True: try: for idx, (frame_id, annot_id) in enumerate(zip(frame_idx.tolist(), annot_idx.tolist())): extra = {} # FIXME: little bit hacky for full frames, maybe fix this using annotation files frame_id_str = "%05d" % frame_id file_name = vid['file_names'][0] file_name = file_name[:-9] + frame_id_str + file_name[-4:] prev_frame_id = frame_idx[idx - 1] if idx > 0 else -1 prev_annot_id = annot_idx[idx - 1] if idx > 0 else -1 if idx == 0: seeds, (im, gt, masks, h, w, num_crowds) = self.pull_frame(vid_name, (frame_id, annot_id), (prev_frame_id, prev_annot_id), file_name, target, num_crowds, require_seeds=True) else: im, gt, masks, h, w, num_crowds = self.pull_frame(vid_name, (frame_id, annot_id), (prev_frame_id, prev_annot_id), file_name, target, num_crowds, seeds=seeds) extra['idx'] = (frame_id, annot_id, ) frame_results.append((im, gt, masks, h, w, num_crowds, )) extra_data.append(extra) except ValueError as e: logger = logging.getLogger("yolact.dataset") logger.warning('Resampling with reseed signal...') frame_results.clear() extra_data.clear() continue break return frame_results, extra_data def __len__(self): return len(self.ids) @staticmethod def target_in_frame(target, frame_id, true_on_reference=False): if frame_id < 0: return true_on_reference if len(target) > 0: for obj in target: if obj['segmentations'][frame_id] is not None: return True return False def pull_frame(self, vid_name, frame_annot_id, prev_frame_annot_id, file_name, target, num_crowds, require_seeds=False, seeds=None): frame_id, annot_id = frame_annot_id prev_frame_id, prev_annot_id = prev_frame_annot_id path = osp.join(self.root, file_name) assert osp.exists(path), 'Image path does not exist: {}'.format(path) img = cv2.imread(path) height, width, _ = img.shape target_is_in_frame = self.target_in_frame(target, annot_id) if target_is_in_frame: # Pool all the masks for this image into one [num_objects,height,width] matrix # masks = [np.zeros(height * width, dtype=np.uint8).reshape(-1) if obj['segmentations'][frame_id] is None # all-zero mask on None # else 
self.coco.annToMask(obj, frame_id).reshape(-1) for obj in target] masks = [self.coco.annToMask(obj, annot_id).reshape(-1) for obj in target if obj['segmentations'][annot_id] is not None] masks = np.vstack(masks) masks = masks.reshape(-1, height, width) if self.target_transform is not None and target_is_in_frame: target = self.target_transform(target, annot_id, width, height) if self.transform is not None: if "Video" in type(self.transform).__name__: if target_is_in_frame: target = np.array(target) return_transform = self.transform(img, masks, target[:, :4], {'num_crowds': num_crowds, 'labels': target[:, 4]}, require_seeds=require_seeds, seeds=seeds) if require_seeds: seeds, (img, masks, boxes, labels) = return_transform else: img, masks, boxes, labels = return_transform # I stored num_crowds in labels so I didn't have to modify the entirety of augmentations num_crowds = labels['num_crowds'] labels = labels['labels'] target = np.hstack((boxes,
np.expand_dims(labels, axis=1)
numpy.expand_dims
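A minimal sketch of the numpy.expand_dims pattern the completion relies on, with toy boxes and labels: the label vector gains a trailing axis so it can be stacked as an extra column.

import numpy as np

boxes = np.random.rand(3, 4)           # toy (xmin, ymin, xmax, ymax) rows
labels = np.array([1, 0, 2])           # toy class indices, shape (3,)
col = np.expand_dims(labels, axis=1)   # shape (3,) -> (3, 1)
target = np.hstack((boxes, col))       # labels appended as a fifth column
print(target.shape)                    # (3, 5)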
import numpy as np from math import sin, cos, atan2, tan, sqrt, pi import matplotlib.pyplot as plt import time from bdsim.components import TransferBlock, FunctionBlock from bdsim.graphics import GraphicsBlock class MultiRotor(TransferBlock): """ :blockname:`MULTIROTOR` .. table:: :align: left +------------+---------+---------+ | inputs | outputs | states | +------------+---------+---------+ | 1 | 1 | 16 | +------------+---------+---------+ | ndarray(4) | dict | | +------------+---------+---------+ """ nin = 1 nout = 1 # Flyer2dynamics lovingly coded by <NAME>, first coded 12/4/04 # A simulation of idealised X-4 Flyer II flight dynamics. # version 2.0 2005 modified to be compatible with latest version of Matlab # version 3.0 2006 fixed rotation matrix problem # version 4.0 4/2/10, fixed rotor flapping rotation matrix bug, mirroring # version 5.0 8/8/11, simplified and restructured # version 6.0 25/10/13, fixed rotation matrix/inverse wronskian definitions, flapping cross-product bug # # New in version 2: # - Generalised rotor thrust model # - Rotor flapping model # - Frame aerodynamic drag model # - Frame aerodynamic surfaces model # - Internal motor model # - Much coolage # # Version 1.3 # - Rigid body dynamic model # - Rotor gyroscopic model # - External motor model # # ARGUMENTS # u Reference inputs 1x4 # tele Enable telemetry (1 or 0) 1x1 # crash Enable crash detection (1 or 0) 1x1 # init Initial conditions 1x12 # # INPUTS # u = [N S E W] # NSEW motor commands 1x4 # # CONTINUOUS STATES # z Position 3x1 (x,y,z) # v Velocity 3x1 (xd,yd,zd) # n Attitude 3x1 (Y,P,R) # o Angular velocity 3x1 (wx,wy,wz) # w Rotor angular velocity 4x1 # # Notes: z-axis downward so altitude is -z(3) # # CONTINUOUS STATE MATRIX MAPPING # x = [z1 z2 z3 n1 n2 n3 z1 z2 z3 o1 o2 o3 w1 w2 w3 w4] # # # CONTINUOUS STATE EQUATIONS # z` = v # v` = g*e3 - (1/m)*T*R*e3 # I*o` = -o X I*o + G + torq # R = f(n) # n` = inv(W)*o # def __init__(self, model, groundcheck=True, speedcheck=True, x0=None, **blockargs): r""" Create a multi-rotor dynamic model block. :param model: A dictionary of vehicle geometric and inertial properties :type model: dict :param groundcheck: Prevent vehicle moving below ground :math:`z>0`, defaults to True :type groundcheck: bool :param speedcheck: Check for non-positive rotor speed, defaults to True :type speedcheck: bool :param x0: Initial state, defaults to None :type x0: array_like(6) or array_like(12), optional :param blockargs: |BlockOptions| :type blockargs: dict :return: a MULTIROTOR block :rtype: MultiRotor instance Dynamic model of a multi-rotor flying robot, includes rotor flapping. **Block ports** :input ω: a vector of input rotor speeds in (radians/sec). These are, looking down, clockwise from the front rotor which lies on the x-axis. :output x: a dictionary signal with the following items: - ``x`` pose in the world frame as :math:`[x, y, z, \theta_Y, \theta_P, \theta_R]` - ``vb`` translational velocity in the world frame (metres/sec) - ``w`` angular rates in the world frame as yaw-pitch-roll rates (radians/second) - ``a1s`` longitudinal flapping angles (radians) - ``b1s`` lateral flapping angles (radians) **Model parameters** The dynamic model is a dict with the following key/value pairs. 
=========== ========================================== key description =========== ========================================== ``nrotors`` Number of rotors (even integer) ``J`` Flyer rotational inertia matrix (3x3) ``h`` Height of rotors above CoG ``d`` Length of flyer arms ``nb`` Number of blades per rotor ``r`` Rotor radius ``c`` Blade chord ``e`` Flapping hinge offset ``Mb`` Rotor blade mass ``Mc`` Estimated hub clamp mass ``ec`` Blade root clamp displacement ``Ib`` Rotor blade rotational inertia ``Ic`` Estimated root clamp inertia ``mb`` Static blade moment ``Ir`` Total rotor inertia ``Ct`` Non-dim. thrust coefficient ``Cq`` Non-dim. torque coefficient ``sigma`` Rotor solidity ratio ``thetat`` Blade tip angle ``theta0`` Blade root angle ``theta1`` Blade twist angle ``theta75`` 3/4 blade angle ``thetai`` Blade ideal root approximation ``a`` Lift slope gradient ``A`` Rotor disc area ``gamma`` Lock number =========== ========================================== .. note:: - SI units are used. - Based on MATLAB code developed by <NAME> 2004. :References: - Design, Construction and Control of a Large Quadrotor micro air vehicle. P.Pounds, `PhD thesis <https://openresearch-repository.anu.edu.au/handle/1885/146543>`_ Australian National University, 2007. :seealso: :class:`MultiRotorMixer` :class:`MultiRotorPlot` """ if model is None: raise ValueError('no model provided') super().__init__(nin=1, nout=1, **blockargs) self.type = 'quadrotor' try: nrotors = model['nrotors'] except KeyError: raise RuntimeError('vehicle model does not contain nrotors') assert nrotors % 2 == 0, 'Must have an even number of rotors' self.nstates = 12 if x0 is None: x0 = np.zeros((self.nstates,)) else: x0 = np.r_[x0] if len(x0) == 6: # assume all derivative are zero x0 = np.r_[x0, np.zeros((6,))] elif len(x0) == 4: # assume x,y,z,yaw x0 = np.r_[x0[:3], 0, 0, x0[3], np.zeros((6,))] elif len(x0) == 3: # assume x,y,z x0 = np.r_[x0[:3], np.zeros((9,))] elif len(x0) != self.nstates: raise ValueError("x0 is the wrong length") self._x0 = x0 self.nrotors = nrotors self.model = model self.groundcheck = groundcheck self.speedcheck = speedcheck self.D = np.zeros((3,self.nrotors)) self.theta = np.zeros((self.nrotors,)) for i in range(0, self.nrotors): theta = i / self.nrotors * 2 * pi # Di Rotor hub displacements (1x3) # first rotor is on the x-axis, clockwise order looking down from above self.D[:,i] = np.r_[ model['d'] * cos(theta), model['d'] * sin(theta), model['h']] self.theta[i] = theta self.a1s = np.zeros((self.nrotors,)) self.b1s = np.zeros((self.nrotors,)) def output(self, t=None): model = self.model # compute output vector as a function of state vector # z Position 3x1 (x,y,z) # v Velocity 3x1 (xd,yd,zd) # n Attitude 3x1 (Y,P,R) # o Angular velocity 3x1 (Yd,Pd,Rd) n = self._x[3:6] # RPY angles phi = n[0] # yaw the = n[1] # pitch psi = n[2] # roll # rotz(phi)*roty(the)*rotx(psi) # BBF > Inertial rotation matrix R = np.array([ [cos(the) * cos(phi), sin(psi) * sin(the) * cos(phi) - cos(psi) * sin(phi), cos(psi) * sin(the) * cos(phi) + sin(psi) * sin(phi)], [cos(the) * sin(phi), sin(psi) * sin(the) * sin(phi) + cos(psi) * cos(phi), cos(psi) * sin(the) * sin(phi) - sin(psi) * cos(phi)], [-sin(the), sin(psi) * cos(the), cos(psi) * cos(the)] ]) #inverted Wronskian iW = np.array([ [0, sin(psi), cos(psi)], [0, cos(psi) * cos(the), -sin(psi) * cos(the)], [cos(the), sin(psi) * sin(the), cos(psi) * sin(the)] ]) / cos(the) # return velocity in the body frame vd = np.linalg.inv(R) @ self._x[6:9] # translational velocity mapped to body 
frame rpyd = iW @ self._x[9:12] # RPY rates mapped to body frame out = {} out['x'] = self._x[0:6] out['trans'] = np.r_[self._x[:3], vd] out['rot'] = np.r_[self._x[3:6], rpyd] out['a1s'] = self.a1s out['b1s'] = self.b1s out['X'] = np.r_[self._x[:6], vd, rpyd] # sys = [ x(1:6); # inv(R)*x(7:9); % translational velocity mapped to body frame # iW*x(10:12)]; return [out] def deriv(self): model = self.model # Body-fixed frame references # ei Body fixed frame references 3x1 e3 = np.r_[0, 0, 1] # process inputs w = self.inputs[0] if len(w) != self.nrotors: raise RuntimeError('input vector wrong size') if self.speedcheck and np.any(w == 0): # might need to fix this, preculudes aerobatics :( # mu becomes NaN due to 0/0 raise RuntimeError('quadrotor_dynamics: not defined for zero rotor speed'); # EXTRACT STATES FROM X z = self._x[0:3] # position in {W} n = self._x[3:6] # RPY angles {W} v = self._x[6:9] # velocity in {W} o = self._x[9:12] # angular velocity in {W} # PREPROCESS ROTATION AND WRONSKIAN MATRICIES phi = n[0] # yaw the = n[1] # pitch psi = n[2] # roll # phi = n(1); % yaw # the = n(2); % pitch # psi = n(3); % roll # rotz(phi)*roty(the)*rotx(psi) # BBF > Inertial rotation matrix R = np.array([ [cos(the)*cos(phi), sin(psi)*sin(the)*cos(phi)-cos(psi)*sin(phi), cos(psi)*sin(the)*cos(phi)+sin(psi)*sin(phi)], [cos(the)*sin(phi), sin(psi)*sin(the)*sin(phi)+cos(psi)*cos(phi), cos(psi)*sin(the)*sin(phi)-sin(psi)*cos(phi)], [-sin(the), sin(psi)*cos(the), cos(psi)*cos(the)] ]) # Manual Construction # Q3 = [cos(phi) -sin(phi) 0;sin(phi) cos(phi) 0;0 0 1]; % RZ %Rotation mappings # Q2 = [cos(the) 0 sin(the);0 1 0;-sin(the) 0 cos(the)]; % RY # Q1 = [1 0 0;0 cos(psi) -sin(psi);0 sin(psi) cos(psi)]; % RX # R = Q3*Q2*Q1 %Rotation matrix # # RZ * RY * RX # inverted Wronskian iW = np.array([ [0, sin(psi), cos(psi)], [0, cos(psi)*cos(the), -sin(psi)*cos(the)], [cos(the), sin(psi)*sin(the), cos(psi)*sin(the)] ]) / cos(the) # % rotz(phi)*roty(the)*rotx(psi) # R = [cos(the)*cos(phi) sin(psi)*sin(the)*cos(phi)-cos(psi)*sin(phi) cos(psi)*sin(the)*cos(phi)+sin(psi)*sin(phi); %BBF > Inertial rotation matrix # cos(the)*sin(phi) sin(psi)*sin(the)*sin(phi)+cos(psi)*cos(phi) cos(psi)*sin(the)*sin(phi)-sin(psi)*cos(phi); # -sin(the) sin(psi)*cos(the) cos(psi)*cos(the)]; # iW = [0 sin(psi) cos(psi); %inverted Wronskian # 0 cos(psi)*cos(the) -sin(psi)*cos(the); # cos(the) sin(psi)*sin(the) cos(psi)*sin(the)] / cos(the); # ROTOR MODEL T = np.zeros((3,4)) Q = np.zeros((3,4)) tau = np.zeros((3,4)) a1s = self.a1s b1s = self.b1s for i in range(0, self.nrotors): # for each rotor # Relative motion Vr =
np.cross(o, self.D[:,i])
numpy.cross
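A minimal sketch of the numpy.cross call completed above, using made-up body rates and a hub displacement: omega x d gives the hub velocity due to body rotation.

import numpy as np

o = np.array([0.0, 0.0, 1.0])   # toy angular velocity (rad/s)
d = np.array([0.5, 0.0, 0.0])   # toy rotor hub displacement (m)
Vr = np.cross(o, d)             # relative velocity of the hub, omega x d
print(Vr)                       # [0.  0.5 0. ]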
import torch from collections import OrderedDict from torch.nn import utils, functional as F from torch.optim import Adam from torch.autograd import Variable from torch.backends import cudnn from networks.joint_poolnet import build_model, weights_init import scipy.misc as sm import numpy as np import os import torchvision.utils as vutils import cv2 import math import time import csv class Solver(object): def __init__(self, train_loader, test_loader, config): self.train_loader = train_loader self.test_loader = test_loader self.config = config self.iter_size = config.iter_size self.show_every = config.show_every self.lr_decay_epoch = [8,] self.build_model() if config.mode == 'test': print('Loading pre-trained model from %s...' % self.config.model) if self.config.cuda: self.net.load_state_dict(torch.load(self.config.model)) else: self.net.load_state_dict(torch.load(self.config.model, map_location='cpu')) self.net.eval() # print the network information and parameter numbers def print_network(self, model, name): num_params = 0 for p in model.parameters(): num_params += p.numel() print(name) print(model) print("The number of parameters: {}".format(num_params)) # build the network def build_model(self): self.net = build_model(self.config.arch) if self.config.cuda: self.net = self.net.cuda() # self.net.train() self.net.eval() # use_global_stats = True self.net.apply(weights_init) if self.config.load == '': self.net.base.load_pretrained_model(torch.load(self.config.pretrained_model)) else: self.net.load_state_dict(torch.load(self.config.load)) self.lr = self.config.lr self.wd = self.config.wd self.optimizer = Adam(filter(lambda p: p.requires_grad, self.net.parameters()), lr=self.lr, weight_decay=self.wd) self.print_network(self.net, 'PoolNet Structure') def get_sub_imgs_from_bboxes(self, img, img_name, annotation_path): csv_file = open(annotation_path) csv_reader = csv.reader(csv_file, delimiter=',') imgs = [] positions = [] # TODO: Print bboxes to check if are well read img_np = (img.squeeze(0).permute(1, 2, 0).cpu().numpy() + np.array((104.00699, 116.66877, 122.67892))).astype(np.uint8).copy() h, w, c = img_np.shape for row in csv_reader: if row[0] == img_name: tol_x = 0.02 tol_y = 0.04 x1 = max(int(float(row[1]) * (1.0 - tol_x)), 0) y1 = max(int(float(row[2]) * (1.0 - tol_y)), 0) x2 = min(int(float(row[3]) * (1.0 + tol_x)), w) y2 = min(int(float(row[4]) * (1.0 + tol_y)), h) imgs.append(img[:, :, y1:y2, x1:x2]) positions.append([x1, y1, x2, y2]) # Print boundinb box cv2.rectangle(img_np, (x1, y1), (x2, y2), (255, 0, 0), 2) return imgs, positions, img_np def test_grabcut(self, img, in_mask=None): if in_mask is None: mask = np.zeros(img.shape[:2], np.uint8) use_rect = True else: mask = cv2.cvtColor(in_mask, cv2.COLOR_BGR2GRAY) use_rect = False if np.max(mask) == 0: use_rect = True mask = np.where(mask == 0, 2, 1).astype('uint8') bgdModel = np.zeros((1, 65), np.float64) fgdModel = np.zeros((1, 65), np.float64) h, w, c = img.shape ITERATIONS = 5 if use_rect: rect = (1, 1, w - 1, h - 1) cv2.grabCut(img, mask, rect, bgdModel, fgdModel, ITERATIONS, cv2.GC_INIT_WITH_RECT) else: cv2.grabCut(img, mask, None, bgdModel, fgdModel, ITERATIONS, cv2.GC_INIT_WITH_MASK) print("using mask") mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8') return mask2 * 255 def infer(self, img_np, resize_size=256, test_mode=1): with torch.no_grad(): # Convert to tensor and apply normalizations img = np.array(img_np, dtype=np.float32) img -= np.array((104.00699, 116.66877, 122.67892)) img = img.transpose((2, 0, 1)) 
img = torch.Tensor(img).unsqueeze(0) img = Variable(img) if self.config.cuda: img = img.cuda() print(img.shape) im_size = (img.shape[-1], img.shape[-2]) img = torch.nn.functional.interpolate(img, size=resize_size) print(img.shape) img = img.permute(0, 1, 3, 2) img = torch.nn.functional.interpolate(img, size=resize_size) img = img.permute(0, 1, 3, 2) preds = self.net(img, mode=test_mode) pred = np.squeeze(torch.sigmoid(preds).cpu().data.numpy()) multi_fuse = 255 * pred # cv2.imwrite(os.path.join(self.config.test_fold, name[:-4] + '_' + mode_name[test_mode] + '.png'), multi_fuse) masks = torch.nn.functional.interpolate(preds.squeeze(0).sigmoid().permute(1, 2, 0), 3) masks_np = ((masks * 255).cpu().numpy().astype(np.uint8)).copy() masks_grab = masks_np.copy() cv2.normalize(masks_np, masks_np, 0, 255, cv2.NORM_MINMAX) # OpenCV processing ret, masks_np = cv2.threshold(masks_np, 2, 255, cv2.THRESH_BINARY) # Test grabcut print(np.max(masks_np / 255)) print(masks_np.shape) print(np.sum(masks_np / 255) * 100 / (resize_size * resize_size * 3)) covering_ptge = np.sum(masks_np / 255) * 100 / (resize_size * resize_size * 3) THRESHOLD = 43 if covering_ptge < THRESHOLD: mask_grab = self.test_grabcut((img.squeeze(0).permute(1, 2, 0).cpu().numpy() + np.array((104.00699, 116.66877, 122.67892))).astype(np.uint8).copy(), in_mask=None) mask_grab = (cv2.cvtColor(mask_grab, cv2.COLOR_GRAY2BGR)).astype(np.uint8) # cv2.putText(mask_grab, 'grab', # (5, 25), # cv2.FONT_HERSHEY_SIMPLEX, # 1, # (0, 0, 255), # 2) # results = np.concatenate((mask_grab, # (img.squeeze(0).permute(1, 2, 0).cpu().numpy() + np.array( # (104.00699, 116.66877, 122.67892))).astype(np.uint8).copy()), # axis=1) mask_grab = cv2.resize(mask_grab, im_size) img_out = cv2.resize((img.squeeze(0).permute(1, 2, 0).cpu().numpy() + np.array((104.00699, 116.66877, 122.67892))).astype(np.uint8).copy(), im_size) return mask_grab, img_out else: # cv2.putText(masks_np, 'pool', # (5, 25), # cv2.FONT_HERSHEY_SIMPLEX, # 1, # (0, 0, 255), # 2) # results = np.concatenate((masks_np, # (img.squeeze(0).permute(1, 2, 0).cpu().numpy() + np.array( # (104.00699, 116.66877, 122.67892))).astype(np.uint8).copy()), # axis=1).copy() masks_np = cv2.resize(masks_np, im_size) img_out = cv2.resize((img.squeeze(0).permute(1, 2, 0).cpu().numpy() + np.array((104.00699, 116.66877, 122.67892))).astype(np.uint8).copy(), im_size) return masks_np, img_out def test(self, test_mode=1): mode_name = ['edge_fuse', 'sal_fuse'] EPSILON = 1e-8 time_s = time.time() img_num = len(self.test_loader) for i, data_batch in enumerate(self.test_loader): images, name, im_size = data_batch['image'], data_batch['name'][0], np.asarray(data_batch['size']) # if name != 'test_2402.jpg': # continue # Get annotation file annotation_file = self.config.test_root.replace('images/', 'annotations/annotations_test.csv') subimages, positions, full_img = self.get_sub_imgs_from_bboxes(images, name, annotation_file) images_np = (images.squeeze(0).permute(1, 2, 0).cpu().numpy() + np.array( (104.00699, 116.66877, 122.67892))).astype(np.uint8).copy() resize_size = 256 number_per_row = 10 max_x_mosaic = number_per_row * resize_size * 2 max_y_mosaic = (len(subimages) * resize_size // number_per_row) + resize_size mosaic = np.zeros((max_y_mosaic, max_x_mosaic, 3)) x_mosaic = 0 y_mosaic = 0 if test_mode == 0: images = images.numpy()[0].transpose((1,2,0)) scale = [0.5, 1, 1.5, 2] # uncomment for multi-scale testing # scale = [1] multi_fuse = np.zeros(im_size, np.float32) for k in range(0, len(scale)): im_ = cv2.resize(images, None, 
fx=scale[k], fy=scale[k], interpolation=cv2.INTER_LINEAR) im_ = im_.transpose((2, 0, 1)) im_ = torch.Tensor(im_[np.newaxis, ...]) with torch.no_grad(): im_ = Variable(im_) if self.config.cuda: im_ = im_.cuda() preds = self.net(im_, mode=test_mode) pred_0 = np.squeeze(torch.sigmoid(preds[1][0]).cpu().data.numpy()) pred_1 = np.squeeze(torch.sigmoid(preds[1][1]).cpu().data.numpy()) pred_2 = np.squeeze(torch.sigmoid(preds[1][2]).cpu().data.numpy()) pred_fuse = np.squeeze(torch.sigmoid(preds[0]).cpu().data.numpy()) pred = (pred_0 + pred_1 + pred_2 + pred_fuse) / 4 pred = (pred -
np.min(pred)
numpy.min
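A minimal sketch of the min-max normalisation the completion starts, on toy prediction scores; numpy.min supplies the lower bound.

import numpy as np

pred = np.array([0.2, 0.8, 0.5])                              # toy saliency scores
pred = (pred - np.min(pred)) / (np.max(pred) - np.min(pred))  # rescale to [0, 1]
print(pred)                                                   # [0.  1.  0.5]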
''' License ======= copyright <NAME>, <NAME> (PTB) 2020 This software is licensed under the BSD-like license: Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. DISCLAIMER ========== This software was developed at Physikalisch-Technische Bundesanstalt (PTB). The software is made available "as is" free of cost. PTB assumes no responsibility whatsoever for its use by other parties, and makes no guarantees, expressed or implied, about its quality, reliability, safety, suitability or any other characteristic. In no event will PTB be liable for any direct, indirect or consequential damage arising in connection Using this software in publications requires citing the following paper <NAME>, <NAME> and <NAME> (2020). A simple method for Bayesian uncertainty evaluation in linear models.  Metrologia  https://doi.org/10.1088/1681-7575/aba3b8 ''' from __future__ import (division, print_function, absolute_import) import numpy as np from itertools import product # cartesian product of sets from scipy.integrate import cumtrapz # trapezoidal rule for integration from scipy.stats import t as student_t # student-t distribution from scipy.stats import gaussian_kde # kernel density estimation from scipy.stats import norm, gamma # normal and gamma distribution from matplotlib import rc # plot parameter rc('font', family='serif') rc('font', size=12) # rc('text', usetex=True) import matplotlib.pyplot as plt # plot environment def nig_prior(U_y0, sig0): """ Returns parameter of normal inverse gamma prior according to the choice in the paper. See Section 2.3. Arguments: U_y0 {float} -- uncertainty of parameter sig0 {float} -- standard deviation of measurement device Returns: tuple -- (\lambda, a, b) """ a = 1 llambda = (0.28*U_y0/sig0)**2 b = (sig0**2)/1.44 return llambda, a, b def inverse_transform_sampling(x, pd, cnt): """ Implementation of inverse transform sampling using interpolation of the inverse CDF Arguments: x {list} -- density nodes pd {list} -- density values cnt {int} -- number of interpolation nodes Returns: list -- random samples """ cdf = cumtrapz(pd, x=x) cdf = cdf/cdf[-1] # numpy unique returns unique values in sorted order # therefore consider only the indices _ ,ia = np.unique(cdf, return_index=True) # then, sort the indices to obtain the original order again cdf_unique = cdf[np.sort(ia)] x_unique=x[np.sort(ia)] return np.interp(np.random.rand(cnt), cdf_unique, x_unique) def bayes_uncertainty(X, y0, U_y0, sig0, alpha, B_S1_samples, n_samples, bootstrap=1): """ Implementation of the simple Bayesian approach according to paper. Returns a tuple with three lists - Y_samples - posterior samples of unknown - B_samples - posterior samples of type B quantity B - phi_samples - samples of variance of measurement device Arguments: X {list} -- measurement data y0 {float} -- prior mean of unknown U_y0 {float} -- prior uncertainty of unknown sig0 {float} -- std. 
deviation of measurement device alpha {float} -- influence of measurement device (alpha=1) B_S1_samples {list} -- samples of type B quantity B n_samples {int} -- number of returned samples Keyword Arguments: bootstrap {int} -- number of sub-sets to estimate model error (default: 1) Returns: tuple -- Y_samples, B_samples, phi_samples """ assert isinstance(bootstrap, int) assert bootstrap >= 1 # Evaluate Type A data n = len(X) xm = np.mean(X) s2 = np.var(X, ddof=1) # Calculate NIG prior parameters a = 1 llambda=(0.28*U_y0/sig0)**2 b=(sig0**2)/1.44 # Create prior PDF pi(B) from B_S1_samples ## # histogram returns the values of the pdf at the bin, normalised such that the integral over the range is 1 # and the edge positions of the bins (n+1) ## p_B, x_B = np.histogram(B_S1_samples, bins="fd", density=True) x_B = 0.5*(x_B[:-1]+x_B[1:]) # interpolate the pdf and extend left and right with 0 prior_B_pdf = lambda B: np.interp(B, x_B, p_B, left=0, right=0) mB_S1_samples=np.mean(B_S1_samples) # Define functions al2 = alpha**2 lmn = llambda*n Yhat = lambda B: (al2/(al2+lmn))*(y0+lmn*(alpha*xm+B)/al2) psi = lambda B: (llambda*al2/((al2+lmn)*(n+2*a)))*((n-1)*s2+2*b+(n/(al2+lmn))*(y0-(alpha*xm+B))**2) posterior_B = lambda B: (prior_B_pdf(B)*(psi(B)**(-(n+2*a)/2))) # Find suitable grid for B ngrid = 10000 B_hat= y0-alpha*xm B_scale = (al2+lmn)*((n-1)*s2+2*b)/(n*(n-1+2*a)) Bgrid_mean = 0.5*(mB_S1_samples+B_hat) Bgrid_u = np.sqrt(np.var(B_S1_samples, ddof=1)+B_scale+(mB_S1_samples-B_hat)**2) Bgrid1 = np.linspace(Bgrid_mean-5*Bgrid_u, Bgrid_mean+5*Bgrid_u, ngrid) hlp = posterior_B(Bgrid1) ind = np.argwhere(hlp>1e-10*max(hlp))[:, 0] Bgrid = np.linspace(Bgrid1[ind[0]], Bgrid1[ind[-1]], ngrid) # Monte-Carlo sampling # (i) : sample from marginal posterior of summarized Type B effect B B_samples = inverse_transform_sampling(Bgrid, posterior_B(Bgrid), n_samples) # (ii) : sample from marginal posterior of the measurand Y conditional on B Y_samples = Yhat(B_samples)+np.sqrt(psi(B_samples))*np.random.standard_t(n+2*a, n_samples) # (iii): sample from marginal posterior of the variance parameter phi conditional on B(optional) a_cond = a+n/2 b_cond = b+((n-1)*s2+(n/(al2+lmn))*(y0-(alpha*xm+B_samples))**2)/2 phi_samples = b_cond / np.random.gamma(a_cond, 1, n_samples) if bootstrap > 1: print(" Start bootstrapping with {} x {:.2e} sub-samples".format(bootstrap, len(B_S1_samples))) res = { "B": [], "Y": [], "phi": [] } for _ in range(bootstrap): # print(" run bootstrap {}/{}".format(lia+1, bootstrap)) sub_B_samples = np.random.choice(B_S1_samples, size=len(B_S1_samples), replace=True) curr_Y_samples, curr_B_samples, curr_phi_samples = bayes_uncertainty(X, y0, U_y0, sig0, alpha, sub_B_samples, n_samples, bootstrap=1) res["B"].append(curr_B_samples) res["Y"].append(curr_Y_samples) res["phi"].append(curr_phi_samples) return Y_samples, B_samples, phi_samples, res return Y_samples, B_samples, phi_samples def tlocscale(x, mu, scale2, nu): """ shifted and scaled student-t pdf Arguments: x {list} -- nodes to evaluate density at mu {float} -- shift scale2 {float} -- scale nu {int} -- degrees of freedom Returns: list -- evaluations of pdf """ scale=np.sqrt(scale2) return student_t.pdf((x-mu)/scale,nu)/scale def plot_result_phi(phi_samples, unc_0, sig0, xlim=None, n_bins=200, output="figure2.pdf", interactive=False, use_kde=False): """ Helper function to plot the posterior results of phi Arguments: phi_samples {list or array} -- posterior samples of phi unc_0 {float} -- uncertainty of measurand sig0 {float} -- uncertainty of 
measurement device Keyword Arguments: xlim {tuple} -- bounds to plot in (default: {None}) n_bins {int} -- number of bins for histogram (default: {200}) output {str} -- path and name of output file (default: {"figure2.pdf"}) interactive {bool} -- flag to hold the image (default: {False}) use_kde {bool} -- flag to use kernel density estimation (default: {False}) """ _, a, b = nig_prior(unc_0, sig0) # Note that lambda is a Python specific keyword # define the inverse gamma pdf invgampdf = lambda _x, _a, _b: (gamma.pdf(1/_x, _a, scale=1/_b)/(_x**2)) # reconstruct the pdf from samples of phi m_phi = np.mean(phi_samples) u_phi = np.std(phi_samples, ddof=1) x_grid = np.linspace(np.max([0, m_phi-6*u_phi]), m_phi+6*u_phi, n_bins) x_phi, p_phi = get_pdf_from_samples(phi_samples, method="kde" if use_kde else "hist", bins=x_grid) fig = plt.figure() plt.plot(np.sqrt(x_phi), 2*np.sqrt(x_phi)*invgampdf(x_phi, a, b), '--b', label="Prior") plt.plot(np.sqrt(x_phi), 2*np.sqrt(x_phi)*p_phi, '-b', label="Posterior") plt.xlabel("sigma=sqrt(phi)", fontsize=14) plt.ylabel("Probability density", fontsize=14) if xlim is not None: plt.xlim(xlim) plt.legend(fontsize=12) fig.tight_layout() # plt.show(block=False if not interactive else True) fig.savefig(output, dpi=300, format="pdf") def plot_result(bayes_samples, mean_0, unc_0, sig0, s1_samples=None, mean_gum=None, u_gum=None, title="Example", xlabel="Y", xlim=None, n_bins=200, output="figure.pdf", hold=False, interactive=False, use_kde=False): """ plots the resulting posterior to a file Arguments: bayes_samples {list or array} -- posterior samples mean_0 {float} -- mean of measurand unc_0 {float} -- uncertainty of measurand sig0 {float} -- uncertainty of measurement device Keyword Arguments: s1_samples {list or array} -- GUM S1 samples (default: {None}) mean_gum {float} -- mean by GUM (default: {None}) u_gum {float} -- uncertainty by GUM (default: {None}) title {str} -- title of figure (default: {"Example"}) xlabel {str} -- x label string (default: {"Y"}) xlim {tuple} -- bounds to plot in (default: {None}) n_bins {int} -- number of bins in histogram (default: {200}) output {str} -- path and name of figure (default: {"figure.pdf"}) hold {bool} -- flag to hold the image (experimental) (default: {False}) interactive {bool} -- flag to hold the image (default: {False}) use_kde {bool} -- flag to use kernel density estimation (default: {False}) """ llambda, a, b = nig_prior(unc_0, sig0) # Note that lambda is a Python specific keyword fig = plt.figure() # determine plotting range mean = np.mean(bayes_samples) unc = np.std(bayes_samples, ddof=1) x_grid = np.linspace(mean-6*unc, mean+6*unc, n_bins) x_bayes, p_bayes = get_pdf_from_samples(bayes_samples, method="kde" if use_kde else "hist", bins=x_grid) if s1_samples is not None: p_s1, _ = np.histogram(s1_samples, np.linspace(mean-6*unc, mean+6*unc, n_bins), density=True) plt.plot(x_bayes, p_s1, '-g', label="GUM-S1") # prior of Y is a scaled and shifted student-t distribution plt.plot(x_bayes, tlocscale(x_bayes, mean_0, llambda*b/a, 2*a), '--b', label="Prior") plt.plot(x_bayes, p_bayes, '-b', label="Posterior") if mean_gum is not None and u_gum is not None: plt.plot(x_bayes, norm.pdf(x_bayes, loc=mean_gum, scale=u_gum), '-r', label="GUM") plt.legend(fontsize=12) if xlim is not None: plt.xlim(xlim) plt.title(title, fontsize=14) plt.xlabel(xlabel, fontsize=14) plt.ylabel("Probability density", fontsize=14) fig.tight_layout() # if hold: # plt.show(block=False if not interactive else True) fig.savefig(output, dpi=300, format="pdf") 
def plot_sensitivity(bayes_samples, x, mean_0, unc_0, sig0, alpha, B_S1_samples, n_samples, xlim=None, xlabel="", output="figure3.pdf", interactive=False, use_kde=False): """ Helper function to plot the results of the sensitivity analysis. Arguments: bayes_samples {list or array} -- posterior samples x {list or array} -- measurements mean_0 {float} -- mean of measurand unc_0 {float} -- uncertainty of measurand sig0 {float} -- measurement device uncertainty alpha {float} -- model parameter Y = alpha X + B B_S1_samples {list or array} -- samples of B n_samples {int} -- number of samples to create for every bootstrap Keyword Arguments: xlim {tuple} -- bounds to plot in (default: {None}) xlabel {str} -- x label string (default: {""}) output {str} -- path and name of output file (default: {"figure3.pdf"}) interactive {bool} -- flag to hold image (default: {False}) use_kde {bool} -- flag to use kernel density estimation (default: {False}) """ # Sensitivity analysis dlt = 0.1 delta_U_y0 = np.array([1, -1])*dlt + 1 delta_sig0 = np.array([1, -1])*dlt + 1 mean = np.mean(bayes_samples) unc = np.std(bayes_samples, ddof=1) x_grid = np.linspace(mean-6*unc, mean+6*unc, 200) x_bayes, p_bayes = get_pdf_from_samples(bayes_samples, method="kde" if use_kde else "hist", bins=x_grid) fig = plt.figure() plt.plot(x_bayes, p_bayes, '-b', linewidth=1.5, label="orig. Posterior") for d_Uy0, d_sig0 in product(delta_U_y0, delta_sig0): Y_samples_sens, _, _ = bayes_uncertainty(x, mean_0, d_Uy0*unc_0, d_sig0*sig0, alpha, B_S1_samples, n_samples) _, p_Y_sens = get_pdf_from_samples(Y_samples_sens, method="kde" if use_kde else "hist", bins=x_grid) plt.plot(x_bayes, p_Y_sens, alpha=0.5, label="Uy0*{}, sig0*{}".format(d_Uy0, d_sig0)) if xlim is not None: plt.xlim(xlim) plt.xlabel(xlabel, fontsize=14) plt.ylabel("Probability density", fontsize=14) plt.legend(fontsize=12) fig.tight_layout() # plt.show(block=False if not interactive else True) fig.savefig(output, dpi=300, format="pdf") def import_file(file_path): """ Utility function to import samples from file. Expected format: newline separated floats. Example: 12.3342 11.3123 1.34e+1 Arguments: file_path {str} -- name and path to file Returns: list -- samples TODO: appropriate error handling """ import os assert os.path.exists(file_path) retval = [] with open(file_path, 'r') as f: lines = f.readlines() for line in lines: retval.append(float(line)) retval = np.array(retval) return retval def export_samples(samples, file_path): """ Utility function to export samples to file. Arguments: samples {list} -- samples to export file_path {str} -- name and path to file Returns: None TODO: appropriate error handling """ with open(file_path, 'w') as f: for sample in samples: f.write(str(sample) + "\n") def get_pdf_from_samples(samples, method="kde", *args, **kwargs): """ Method to construct a pdf from given samples. The employed method can be chosen, default kernel density estimation using Gaussian kernels with Scott's bandwith selection. TODO: Consider Silverman bandwith selection. See https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.gaussian_kde.html for details Alternatively, histograms can be chosen. Return type depends on input values. 
Arguments: samples {list} -- list of samples Keyword Arguments: method {str} -- methods string {"kde", "hist"} (default: {"kde"}) Returns: callable or (list, list) -- PDF as function or (x, y) values """ used_method = "kde" bins = kwargs.pop("bins", None) if method == "hist": assert bins is not None used_method = "hist" if used_method == "kde": kde = gaussian_kde(samples, **kwargs) if bins is not None and not isinstance(bins, str): return bins, kde.evaluate(bins) retval = lambda _x: kde.evaluate(_x) return retval elif used_method == "hist": p, x = np.histogram(samples, bins=bins, density=True) x = 0.5*(x[:-1] + x[1:]) return x, p else: raise ValueError("unknown density estimation method: {}".format(method)) def analyse_bootstrap_res(res): """ Processes the result of the bootstrap algorithm by estimating the uncertainty for the given quantity bootstraps. Arguments: res {dict} -- dictionary containing bootstrap results Returns: dict -- estimated uncertainty over bootstrap ensembles """ assert len(res["Y"]) == len(res["B"]) assert len(res["Y"]) == len(res["phi"]) lb = 2.5 ub = 97.5 mean_y = [] std_y = [] mean_b = [] std_b = [] mean_phi = [] std_phi = [] lb_y = [] lb_b = [] lb_phi = [] ub_y = [] ub_b = [] ub_phi = [] for lia in range(len(res["Y"])): mean_y.append(np.mean(res["Y"][lia])) mean_b.append(np.mean(res["B"][lia])) mean_phi.append(np.mean(res["phi"][lia])) std_y.append(np.std(res["Y"][lia], ddof=1)) std_b.append(np.std(res["B"][lia], ddof=1)) std_phi.append(np.std(res["phi"][lia], ddof=1)) lb_y.append(np.percentile(res["Y"][lia], lb)) lb_b.append(np.percentile(res["B"][lia], lb)) lb_phi.append(np.percentile(res["phi"][lia], lb)) ub_y.append(np.percentile(res["Y"][lia], ub)) ub_b.append(np.percentile(res["B"][lia], ub)) ub_phi.append(np.percentile(res["phi"][lia], ub)) retval = { "u_m_y":
np.std(mean_y, ddof=1)
numpy.std
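A minimal sketch of the numpy.std call completed above, on made-up bootstrap means; ddof=1 gives the sample (N-1) standard deviation.

import numpy as np

mean_y = [1.0, 1.2, 0.9, 1.1]    # toy per-bootstrap means
u_m_y = np.std(mean_y, ddof=1)   # sample standard deviation across bootstraps
print(round(u_m_y, 3))           # 0.129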
__author__ = 'zhengwang'

import numpy as np
import cv2
import serial
import pygame
from pygame.locals import *
import socket
import time
import os

from drive_api2 import Motor


class CollectTrainingData(object):

    def __init__(self, host, port, serial_port, input_size):

        self.server_socket = socket.socket()
        self.server_socket.bind((host, port))
        self.server_socket.listen(0)

        # accept a single connection
        self.connection = self.server_socket.accept()[0].makefile('rb')

        # connect to a serial port
        self.car = Motor()
        self.send_inst = True

        self.input_size = input_size

        # create labels
        self.k = np.zeros((4, 4), 'float')
        for i in range(4):
            self.k[i, i] = 1

        pygame.init()
        pygame.display.set_mode((250, 250))

    def collect(self):
        saved_frame = 0
        total_frame = 0

        # collect images for training
        print("Start collecting images...")
        print("Press 'q' or 'x' to finish...")
        start = cv2.getTickCount()

        X = np.empty((0, self.input_size))
        y = np.empty((0, 4))

        # stream video frames one by one
        try:
            stream_bytes = b' '
            frame = 1
            while self.send_inst:
                stream_bytes += self.connection.read(1024)
                first = stream_bytes.find(b'\xff\xd8')
                last = stream_bytes.find(b'\xff\xd9')
                if first != -1 and last != -1:
                    jpg = stream_bytes[first:last + 2]
                    stream_bytes = stream_bytes[last + 2:]
                    image = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8),
                                         cv2.IMREAD_GRAYSCALE)

                    # select lower half of the image
                    if image.any():
                        height, width = image.shape
                    else:
                        continue
                    roi = image[int(height/2):height, :]

                    cv2.imshow('image', image)

                    # reshape the roi image into a vector
                    temp_array = roi.reshape(1, int(height/2) * width).astype(np.float32)

                    frame += 1
                    total_frame += 1

                    # get input from human driver
                    for event in pygame.event.get():
                        if event.type == KEYDOWN:
                            key_input = pygame.key.get_pressed()

                            # complex orders
                            if key_input[pygame.K_UP] and key_input[pygame.K_RIGHT]:
                                print("Forward Right")
                                X = np.vstack((X, temp_array))
                                y = np.vstack((y, self.k[1]))
                                saved_frame += 1
                                self.car.forward_right()

                            elif key_input[pygame.K_UP] and key_input[pygame.K_LEFT]:
                                print("Forward Left")
                                X = np.vstack((X, temp_array))
                                y = np.vstack((y, self.k[0]))
                                saved_frame += 1
                                self.car.forward_left()

                            elif key_input[pygame.K_DOWN] and key_input[pygame.K_RIGHT]:
                                print("Reverse Right")

                            elif key_input[pygame.K_DOWN] and key_input[pygame.K_LEFT]:
                                print("Reverse Left")

                            # simple orders
                            elif key_input[pygame.K_UP]:
                                print("Forward")
                                saved_frame += 1
                                X = np.vstack((X, temp_array))
                                y =
np.vstack((y, self.k[2]))
numpy.vstack
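A minimal sketch of the row-appending idiom the completion uses: numpy.vstack grows a (0, n) buffer one row at a time; the shapes here are toy values.

import numpy as np

X = np.empty((0, 3))      # empty buffer with 3 feature columns
row = np.ones((1, 3))     # toy feature row
X = np.vstack((X, row))   # (0, 3) + (1, 3) -> (1, 3)
X = np.vstack((X, row))   # -> (2, 3)
print(X.shape)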
""" Test Surrogates Overview ======================== """ # Author: <NAME> <<EMAIL>> # License: new BSD from PIL import Image import numpy as np import scripts.surrogates_overview as exo import scripts.image_classifier as imgclf import sklearn.datasets import sklearn.linear_model SAMPLES = 10 BATCH = 50 SAMPLE_IRIS = False IRIS_SAMPLES = 50000 def test_bilmey_image(): """Tests surrogate image bLIMEy.""" # Load the image doggo_img = Image.open('surrogates_overview/img/doggo.jpg') doggo_array = np.array(doggo_img) # Load the classifier clf = imgclf.ImageClassifier() explain_classes = [('tennis ball', 852), ('golden retriever', 207), ('Labrador retriever', 208)] # Configure widgets to select occlusion colour, segmentation granularity # and explained class colour_selection = { i: i for i in ['mean', 'black', 'white', 'randomise-patch', 'green'] } granularity_selection = {'low': 13, 'medium': 30, 'high': 50} # Generate explanations blimey_image_collection = {} for gran_name, gran_number in granularity_selection.items(): blimey_image_collection[gran_name] = {} for col_name in colour_selection: blimey_image_collection[gran_name][col_name] = \ exo.build_image_blimey( doggo_array, clf.predict_proba, explain_classes, explanation_size=5, segments_number=gran_number, occlusion_colour=col_name, samples_number=SAMPLES, batch_size=BATCH, random_seed=42) exp = [] for gran_ in blimey_image_collection: for col_ in blimey_image_collection[gran_]: exp.append(blimey_image_collection[gran_][col_]['surrogates']) assert len(exp) == len(EXP_IMG) for e, E in zip(exp, EXP_IMG): assert sorted(list(e.keys())) == sorted(list(E.keys())) for key in e.keys(): assert e[key]['name'] == E[key]['name'] assert len(e[key]['explanation']) == len(E[key]['explanation']) for e_, E_ in zip(e[key]['explanation'], E[key]['explanation']): assert e_[0] == E_[0] assert np.allclose(e_[1], E_[1], atol=.001, equal_nan=True) def test_bilmey_tabular(): """Tests surrogate tabular bLIMEy.""" # Load the iris data set iris = sklearn.datasets.load_iris() iris_X = iris.data # [:, :2] # take the first two features only iris_y = iris.target iris_labels = iris.target_names iris_feature_names = iris.feature_names label2class = {lab: i for i, lab in enumerate(iris_labels)} # Fit the classifier logreg = sklearn.linear_model.LogisticRegression(C=1e5) logreg.fit(iris_X, iris_y) # explained class _dtype = iris_X.dtype explained_instances = { 'setosa': np.array([5, 3.5, 1.5, 0.25]).astype(_dtype), 'versicolor': np.array([5.5, 2.75, 4.5, 1.25]).astype(_dtype), 'virginica': np.array([7, 3, 5.5, 2.25]).astype(_dtype) } petal_length_idx = iris_feature_names.index('petal length (cm)') petal_length_bins = [1, 2, 3, 4, 5, 6, 7] petal_width_idx = iris_feature_names.index('petal width (cm)') petal_width_bins = [0, .5, 1, 1.5, 2, 2.5] discs_ = [] for i, ix in enumerate(petal_length_bins): # X-axis for iix in petal_length_bins[i + 1:]: for j, jy in enumerate(petal_width_bins): # Y-axis for jjy in petal_width_bins[j + 1:]: discs_.append({ petal_length_idx: [ix, iix], petal_width_idx: [jy, jjy] }) for inst_i in explained_instances: for cls_i in iris_labels: for disc_i, disc in enumerate(discs_): inst = explained_instances[inst_i] cls = label2class[cls_i] exp = exo.build_tabular_blimey( inst, cls, iris_X, iris_y, logreg.predict_proba, disc, IRIS_SAMPLES, SAMPLE_IRIS, 42) key = '{}&{}&{}'.format(inst_i, cls, disc_i) exp_ = EXP_TAB[key] assert exp['explanation'].shape[0] == exp_.shape[0] assert np.allclose( exp['explanation'], exp_, atol=.001, equal_nan=True) EXP_IMG = [ 
{207: {'explanation': [(13, -0.24406872165780585), (11, -0.20456180387430317), (9, -0.1866779131424261), (4, 0.15001224157793785), (3, 0.11589480417160983)], 'name': 'golden retriever'}, 208: {'explanation': [(13, -0.08395966359346249), (0, -0.0644986107387837), (9, 0.05845584633658977), (1, 0.04369763085720947), (11, -0.035958188394941866)], 'name': '<NAME>'}, 852: {'explanation': [(13, 0.3463529698715463), (11, 0.2678050131923326), (4, -0.10639863421417416), (6, 0.08345792378117327), (9, 0.07366945242386444)], 'name': '<NAME>'}}, {207: {'explanation': [(13, -0.0624167912596456), (7, 0.06083359545295548), (3, 0.0495953943686462), (11, -0.04819787147412231), (2, -0.03858823761391199)], 'name': '<NAME>'}, 208: {'explanation': [(13, -0.08408428146916162), (7, 0.07704235920590158), (3, 0.06646468388122273), (11, -0.0638326572126609), (2, -0.052621478002380796)], 'name': '<NAME>'}, 852: {'explanation': [(11, 0.35248212611685886), (13, 0.2516925608037859), (2, 0.13682853028454384), (9, 0.12930134856644754), (6, 0.1257747954095489)], 'name': '<NAME>'}}, {207: {'explanation': [(3, 0.21351937934930917), (10, 0.16933456312772083), (11, -0.13447244552856766), (8, 0.11058919217055371), (2, -0.06269239798368743)], 'name': '<NAME>'}, 208: {'explanation': [(8, 0.05995551486884414), (9, -0.05375302972380482), (11, -0.051997353324246445), (6, 0.04213181405953071), (2, -0.039169895361928275)], 'name': '<NAME>'}, 852: {'explanation': [(7, 0.31382219776986503), (11, 0.24126214884275987), (13, 0.21075924370226598), (2, 0.11937652039885377), (8, -0.11911265319329697)], 'name': '<NAME>'}}, {207: {'explanation': [(3, 0.39254403293049134), (9, 0.19357165018747347), (6, 0.16592079671652987), (0, 0.14042059731407297), (1, 0.09793027079765507)], 'name': '<NAME>'}, 208: {'explanation': [(9, -0.19351859273276703), (1, -0.15262967987262344), (3, 0.12205127112235375), (2, 0.11352141032313934), (6, -0.11164209893429898)], 'name': '<NAME>'}, 852: {'explanation': [(7, 0.17213007100844877), (0, -0.1583030948868859), (3, -0.13748574615069775), (5, 0.13273283867075436), (11, 0.12309551170070354)], 'name': '<NAME>'}}, {207: {'explanation': [(3, 0.4073533182995105), (10, 0.20711667988142463), (8, 0.15360813290032324), (6, 0.1405424759832785), (1, 0.1332920685413575)], 'name': '<NAME>'}, 208: {'explanation': [(9, -0.14747910525112617), (1, -0.13977061235228924), (2, 0.10526833898161611), (6, -0.10416022118399552), (3, 0.09555992655161764)], 'name': '<NAME>'}, 852: {'explanation': [(11, 0.2232260929107954), (7, 0.21638443149433054), (5, 0.21100464215582274), (13, 0.145614853795006), (1, -0.11416523431311262)], 'name': '<NAME>'}}, {207: {'explanation': [(1, 0.14700178977744183), (0, 0.10346667279328238), (2, 0.10346667279328238), (7, 0.10346667279328238), (8, 0.10162900633690726)], 'name': '<NAME>'}, 208: {'explanation': [(10, -0.10845134816658476), (8, -0.1026920429226184), (6, -0.10238154733842847), (18, 0.10094164937411244), (16, 0.08646888450232793)], 'name': '<NAME>'}, 852: {'explanation': [(18, -0.20542297091894474), (13, 0.2012751176130666), (8, -0.19194747162742365), (20, 0.14686930696710473), (15, 0.11796990086271067)], 'name': '<NAME>'}}, {207: {'explanation': [(13, 0.12446259821701779), (17, 0.11859084421095789), (15, 0.09690553833007137), (12, -0.08869743701731962), (4, 0.08124900427893789)], 'name': '<NAME>'}, 208: {'explanation': [(10, -0.09478194981909983), (20, -0.09173392507039077), (9, 0.08768898801254493), (17, -0.07553994244536394), (4, 0.07422905503397653)], 'name': '<NAME>'}, 852: {'explanation': [(21, 
0.1327882942965061), (1, 0.1238236573086363), (18, -0.10911712271717902), (19, 0.09707191051320978), (6, 0.08593672504338913)], 'name': '<NAME>'}}, {207: {'explanation': [(6, 0.14931728779865114), (14, 0.14092073957103526), (1, 0.11071480021464616), (4, 0.10655287976934531), (8, 0.08705404649152573)], 'name': '<NAME>'}, 208: {'explanation': [(8, -0.12242580400886727), (9, 0.12142729544158742), (14, -0.1148252787068248), (16, -0.09562322208795092), (4, 0.09350160975513132)], 'name': '<NAME>'}, 852: {'explanation': [(6, 0.04227675072263027), (9, -0.03107924340879173), (14, 0.028007115650713045), (13, 0.02771190348545554), (19, 0.02640441416071482)], 'name': '<NAME>'}}, {207: {'explanation': [(19, 0.14313680656283245), (18, 0.12866508562342843), (8, 0.11809779264185447), (0, 0.11286255403442104), (2, 0.11286255403442104)], 'name': '<NAME>'}, 208: {'explanation': [(9, 0.2397917428082761), (14, -0.19435572812170654), (6, -0.1760894833446507), (18, -0.12243333818399058), (15, 0.10986343675377105)], 'name': '<NAME>'}, 852: {'explanation': [(14, 0.15378038774613365), (9, -0.14245940635481966), (6, 0.10213601012183973), (20, 0.1009180838986786), (3, 0.09780065767815548)], 'name': '<NAME>'}}, {207: {'explanation': [(15, 0.06525850448807077), (9, 0.06286791243851698), (19, 0.055189970374185854), (8, 0.05499197604401475), (13, 0.04748220842936177)], 'name': '<NAME>'}, 208: {'explanation': [(6, -0.31549091899770765), (5, 0.1862302670824446), (8, -0.17381478451341995), (10, -0.17353516098662508), (14, -0.13591542421754205)], 'name': '<NAME>'}, 852: {'explanation': [(14, 0.2163853942943355), (6, 0.17565046338282214), (1, 0.12446193028474549), (9, -0.11365789839746396), (10, 0.09239073691962967)], 'name': '<NAME>'}}, {207: {'explanation': [(19, 0.1141207265647932), (36, -0.08861425922625768), (30, 0.07219209872026074), (9, -0.07150939547859836), (38, -0.06988288637544438)], 'name': '<NAME>'}, 208: {'explanation': [(29, 0.10531073909547647), (13, 0.08279642208039652), (34, -0.0817952443980797), (33, -0.08086848205765082), (12, 0.08086848205765082)], 'name': '<NAME>'}, 852: {'explanation': [(13, -0.1330452414595897), (4, 0.09942366413042845), (12, -0.09881995683190645), (33, 0.09881995683190645), (19, -0.09596925317560831)], 'name': '<NAME>'}}, {207: {'explanation': [(37, 0.08193926967758253), (35, 0.06804043021426347), (15, 0.06396269230810163), (11, 0.062255657227065296), (8, 0.05529200233091672)], 'name': '<NAME>'}, 208: {'explanation': [(19, 0.05711957286614678), (27, -0.050230108135410824), (16, -0.04743034616549999), (5, -0.046717346734255705), (9, -0.04419100026638039)], 'name': '<NAME>'}, 852: {'explanation': [(3, -0.08390967998497496), (30, -0.07037680222442452), (22, 0.07029819368543713), (8, -0.06861396187180349), (37, -0.06662511956402824)], 'name': '<NAME>'}}, {207: {'explanation': [(19, 0.048418845359024805), (9, -0.0423869575883795), (30, 0.04012650790044438), (36, -0.03787242980067195), (10, 0.036557999380695635)], 'name': '<NAME>'}, 208: {'explanation': [(10, 0.12120686823129677), (17, 0.10196564232230493), (7, 0.09495133975425854), (25, -0.0759657891182803), (2, -0.07035244568286837)], 'name': '<NAME>'}, 852: {'explanation': [(3, -0.0770578003457272), (28, 0.0769372258280398), (6, -0.06044725989272927), (22, 0.05550155775286349), (31, -0.05399028046597057)], 'name': '<NAME>'}}, {207: {'explanation': [(14, 0.05371383110181226), (0, -0.04442539316084218), (18, 0.042589475382826494), (19, 0.04227647855354252), (17, 0.041685661662754295)], 'name': '<NAME>'}, 208: {'explanation': [(29, 
0.14419601354489464), (17, 0.11785174500536676), (36, 0.1000501679652906), (10, 0.09679790134851017), (35, 0.08710376081189208)], 'name': '<NAME>'}, 852: {'explanation': [(8, -0.02486237985832769), (3, -0.022559886154747102), (11, -0.021878686669239856), (36, 0.021847953817988534), (19, -0.018317598300716522)], 'name': '<NAME>'}}, {207: {'explanation': [(37, 0.08098729255605368), (35, 0.06639102704982619), (15, 0.06033721190370432), (34, 0.05826267856117829), (28, 0.05549505160798173)], 'name': '<NAME>'}, 208: {'explanation': [(17, 0.13839012042250542), (10, 0.11312187488346881), (7, 0.10729071207480922), (25, -0.09529127965797404), (11, -0.09279834572979286)], 'name': '<NAME>'}, 852: {'explanation': [(3, -0.028385651836694076), (22, 0.023364702783498722), (8, -0.023097812578270233), (30, -0.022931236620034406), (37, -0.022040170736525342)], 'name': '<NAME>'}}
]

EXP_TAB = {
'setosa&0&0': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&1': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&2': np.array([0.9550700362273441, 0.025428672111930138]), 'setosa&0&3': np.array([0.9672121512728677, 0.012993005706020341]), 'setosa&0&4': np.array([0.9706534384443797, 0.007448195602953232]), 'setosa&0&5': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&6': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&7': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&8': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&9': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&10': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&11': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&12': np.array([0.9550700362273441, 0.025428672111930138]), 'setosa&0&13': np.array([0.9550700362273441, 0.025428672111930138]), 'setosa&0&14': np.array([0.9672121512728677, 0.012993005706020341]), 'setosa&0&15': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&16': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&17': np.array([0.9550700362273441, 0.025428672111930138]), 'setosa&0&18': np.array([0.9672121512728677, 0.012993005706020341]), 'setosa&0&19': np.array([0.9706534384443797, 0.007448195602953232]), 'setosa&0&20': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&21': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&22': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&23': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&24': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&25': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&26': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&27': np.array([0.9550700362273441, 0.025428672111930138]), 'setosa&0&28': np.array([0.9550700362273441, 0.025428672111930138]), 'setosa&0&29': np.array([0.9672121512728677, 0.012993005706020341]), 'setosa&0&30': np.array([0.19685199412911678, 0.7845879230594391]), 'setosa&0&31': np.array([0.07476043598366156, 0.9062715528547001]), 'setosa&0&32': np.array([0.7770298852793471, 0.0294434304771479]), 'setosa&0&33': np.array([0.7936433456054741, 0.01258375207649658]), 'setosa&0&34': np.array([0.7974072911132786, 0.006894018772033576]), 'setosa&0&35': np.array([0.19685199412911678, 0.7845879230594391]), 'setosa&0&36': np.array([0.19685199412911678, 0.7845879230594391]), 'setosa&0&37': np.array([0.19685199412911678, 0.7845879230594391]), 'setosa&0&38': np.array([0.19685199412911678, 0.7845879230594391]),
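#note on the key scheme (my inference from the data layout itself, not from any
#documentation): the EXP_TAB keys appear to follow a
#'<class name>&<output index>&<case index>' pattern (e.g. 'setosa&0&38'),
#and each value is a pair of precomputed explanation weights for that case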
'setosa&0&39': np.array([0.07476043598366156, 0.9062715528547001]), 'setosa&0&40': np.array([0.07476043598366156, 0.9062715528547001]), 'setosa&0&41': np.array([0.07476043598366156, 0.9062715528547001]), 'setosa&0&42': np.array([0.7770298852793471, 0.0294434304771479]), 'setosa&0&43': np.array([0.7770298852793471, 0.0294434304771479]), 'setosa&0&44': np.array([0.7936433456054741, 0.01258375207649658]), 'setosa&0&45': np.array([0.050316962184345455, 0.9292276112117481]), 'setosa&0&46': np.array([0.0171486447659196, 0.9632117581295891]), 'setosa&0&47': np.array([0.06151571389390039, 0.524561199322281]), 'setosa&0&48': np.array([0.4329463382004908, 0.057167210150691136]), 'setosa&0&49': np.array([0.4656481363306145, 0.007982539480288167]), 'setosa&0&50': np.array([0.050316962184345455, 0.9292276112117481]), 'setosa&0&51': np.array([0.050316962184345455, 0.9292276112117481]), 'setosa&0&52': np.array([0.050316962184345455, 0.9292276112117481]), 'setosa&0&53': np.array([0.050316962184345455, 0.9292276112117481]), 'setosa&0&54': np.array([0.0171486447659196, 0.9632117581295891]), 'setosa&0&55': np.array([0.0171486447659196, 0.9632117581295891]), 'setosa&0&56': np.array([0.0171486447659196, 0.9632117581295891]), 'setosa&0&57': np.array([0.06151571389390039, 0.524561199322281]), 'setosa&0&58': np.array([0.06151571389390039, 0.524561199322281]), 'setosa&0&59': np.array([0.4329463382004908, 0.057167210150691136]), 'setosa&0&60': np.array([0.029402442458921055, 0.9481684282717416]), 'setosa&0&61': np.array([0.00988785935411159, 0.9698143912008228]), 'setosa&0&62': np.array([0.009595083643662688, 0.5643652067423869]), 'setosa&0&63': np.array([0.13694026920485936, 0.36331091829858003]), 'setosa&0&64': np.array([0.3094460464703627, 0.11400643817329122]), 'setosa&0&65': np.array([0.029402442458921055, 0.9481684282717416]), 'setosa&0&66': np.array([0.029402442458921055, 0.9481684282717416]), 'setosa&0&67': np.array([0.029402442458921055, 0.9481684282717416]), 'setosa&0&68': np.array([0.029402442458921055, 0.9481684282717416]), 'setosa&0&69': np.array([0.00988785935411159, 0.9698143912008228]), 'setosa&0&70': np.array([0.00988785935411159, 0.9698143912008228]), 'setosa&0&71': np.array([0.00988785935411159, 0.9698143912008228]), 'setosa&0&72': np.array([0.009595083643662688, 0.5643652067423869]), 'setosa&0&73': np.array([0.009595083643662688, 0.5643652067423869]), 'setosa&0&74': np.array([0.13694026920485936, 0.36331091829858003]), 'setosa&0&75': np.array([0.0, 0.95124502153736]), 'setosa&0&76': np.array([0.0, 0.9708703761803881]), 'setosa&0&77': np.array([0.0, 0.5659706098422994]), 'setosa&0&78': np.array([0.0, 0.3962828716108186]), 'setosa&0&79': np.array([0.0, 0.2538069363248767]), 'setosa&0&80': np.array([0.0, 0.95124502153736]), 'setosa&0&81': np.array([0.0, 0.95124502153736]), 'setosa&0&82': np.array([0.0, 0.95124502153736]), 'setosa&0&83': np.array([0.0, 0.95124502153736]), 'setosa&0&84': np.array([0.0, 0.9708703761803881]), 'setosa&0&85': np.array([0.0, 0.9708703761803881]), 'setosa&0&86': np.array([0.0, 0.9708703761803881]), 'setosa&0&87': np.array([0.0, 0.5659706098422994]), 'setosa&0&88': np.array([0.0, 0.5659706098422994]), 'setosa&0&89': np.array([0.0, 0.3962828716108186]), 'setosa&0&90': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&91': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&92': np.array([0.9550700362273441, 0.025428672111930138]), 'setosa&0&93': np.array([0.9672121512728677, 0.012993005706020341]), 'setosa&0&94': np.array([0.9706534384443797, 
0.007448195602953232]), 'setosa&0&95': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&96': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&97': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&98': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&99': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&100': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&101': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&102': np.array([0.9550700362273441, 0.025428672111930138]), 'setosa&0&103': np.array([0.9550700362273441, 0.025428672111930138]), 'setosa&0&104': np.array([0.9672121512728677, 0.012993005706020341]), 'setosa&0&105': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&106': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&107': np.array([0.9550700362273441, 0.025428672111930138]), 'setosa&0&108': np.array([0.9672121512728677, 0.012993005706020341]), 'setosa&0&109': np.array([0.9706534384443797, 0.007448195602953232]), 'setosa&0&110': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&111': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&112': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&113': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&114': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&115': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&116': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&117': np.array([0.9550700362273441, 0.025428672111930138]), 'setosa&0&118': np.array([0.9550700362273441, 0.025428672111930138]), 'setosa&0&119': np.array([0.9672121512728677, 0.012993005706020341]), 'setosa&0&120': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&121': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&122': np.array([0.9550700362273441, 0.025428672111930138]), 'setosa&0&123': np.array([0.9672121512728677, 0.012993005706020341]), 'setosa&0&124': np.array([0.9706534384443797, 0.007448195602953232]), 'setosa&0&125': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&126': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&127': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&128': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&129': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&130': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&131': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&132': np.array([0.9550700362273441, 0.025428672111930138]), 'setosa&0&133': np.array([0.9550700362273441, 0.025428672111930138]), 'setosa&0&134': np.array([0.9672121512728677, 0.012993005706020341]), 'setosa&0&135': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&136': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&137': np.array([0.9550700362273441, 0.025428672111930138]), 'setosa&0&138': np.array([0.9672121512728677, 0.012993005706020341]), 'setosa&0&139': np.array([0.9706534384443797, 0.007448195602953232]), 'setosa&0&140': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&141': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&142': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&143': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&144': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&145': np.array([0.4926091071260067, 
0.49260910712601286]), 'setosa&0&146': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&147': np.array([0.9550700362273441, 0.025428672111930138]), 'setosa&0&148': np.array([0.9550700362273441, 0.025428672111930138]), 'setosa&0&149': np.array([0.9672121512728677, 0.012993005706020341]), 'setosa&0&150': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&151': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&152': np.array([0.9550700362273441, 0.025428672111930138]), 'setosa&0&153': np.array([0.9672121512728677, 0.012993005706020341]), 'setosa&0&154': np.array([0.9706534384443797, 0.007448195602953232]), 'setosa&0&155': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&156': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&157': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&158': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&159': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&160': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&161': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&162': np.array([0.9550700362273441, 0.025428672111930138]), 'setosa&0&163': np.array([0.9550700362273441, 0.025428672111930138]), 'setosa&0&164': np.array([0.9672121512728677, 0.012993005706020341]), 'setosa&0&165': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&166': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&167': np.array([0.9550700362273441, 0.025428672111930138]), 'setosa&0&168': np.array([0.9672121512728677, 0.012993005706020341]), 'setosa&0&169': np.array([0.9706534384443797, 0.007448195602953232]), 'setosa&0&170': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&171': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&172': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&173': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&174': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&175': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&176': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&177': np.array([0.9550700362273441, 0.025428672111930138]), 'setosa&0&178': np.array([0.9550700362273441, 0.025428672111930138]), 'setosa&0&179': np.array([0.9672121512728677, 0.012993005706020341]), 'setosa&0&180': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&181': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&182': np.array([0.9550700362273441, 0.025428672111930138]), 'setosa&0&183': np.array([0.9672121512728677, 0.012993005706020341]), 'setosa&0&184': np.array([0.9706534384443797, 0.007448195602953232]), 'setosa&0&185': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&186': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&187': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&188': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&189': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&190': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&191': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&192': np.array([0.9550700362273441, 0.025428672111930138]), 'setosa&0&193': np.array([0.9550700362273441, 0.025428672111930138]), 'setosa&0&194': np.array([0.9672121512728677, 0.012993005706020341]), 'setosa&0&195': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&196': np.array([0.4926091071260067, 
0.49260910712601286]), 'setosa&0&197': np.array([0.9550700362273441, 0.025428672111930138]), 'setosa&0&198': np.array([0.9672121512728677, 0.012993005706020341]), 'setosa&0&199': np.array([0.9706534384443797, 0.007448195602953232]), 'setosa&0&200': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&201': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&202': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&203': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&204': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&205': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&206': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&207': np.array([0.9550700362273441, 0.025428672111930138]), 'setosa&0&208': np.array([0.9550700362273441, 0.025428672111930138]), 'setosa&0&209': np.array([0.9672121512728677, 0.012993005706020341]), 'setosa&0&210': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&211': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&212': np.array([0.9550700362273441, 0.025428672111930138]), 'setosa&0&213': np.array([0.9672121512728677, 0.012993005706020341]), 'setosa&0&214': np.array([0.9706534384443797, 0.007448195602953232]), 'setosa&0&215': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&216': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&217': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&218': np.array([0.7431524521056113, 0.24432235603856345]), 'setosa&0&219': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&220': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&221': np.array([0.4926091071260067, 0.49260910712601286]), 'setosa&0&222': np.array([0.9550700362273441, 0.025428672111930138]), 'setosa&0&223': np.array([0.9550700362273441, 0.025428672111930138]), 'setosa&0&224': np.array([0.9672121512728677, 0.012993005706020341]), 'setosa&0&225': np.array([0.19685199412911678, 0.7845879230594391]), 'setosa&0&226': np.array([0.07476043598366156, 0.9062715528547001]), 'setosa&0&227': np.array([0.7770298852793471, 0.0294434304771479]), 'setosa&0&228': np.array([0.7936433456054741, 0.01258375207649658]), 'setosa&0&229': np.array([0.7974072911132786, 0.006894018772033576]), 'setosa&0&230': np.array([0.19685199412911678, 0.7845879230594391]), 'setosa&0&231': np.array([0.19685199412911678, 0.7845879230594391]), 'setosa&0&232': np.array([0.19685199412911678, 0.7845879230594391]), 'setosa&0&233': np.array([0.19685199412911678, 0.7845879230594391]), 'setosa&0&234': np.array([0.07476043598366156, 0.9062715528547001]), 'setosa&0&235': np.array([0.07476043598366156, 0.9062715528547001]), 'setosa&0&236': np.array([0.07476043598366156, 0.9062715528547001]), 'setosa&0&237': np.array([0.7770298852793471, 0.0294434304771479]), 'setosa&0&238': np.array([0.7770298852793471, 0.0294434304771479]), 'setosa&0&239': np.array([0.7936433456054741, 0.01258375207649658]), 'setosa&0&240': np.array([0.19685199412911678, 0.7845879230594391]), 'setosa&0&241': np.array([0.07476043598366156, 0.9062715528547001]), 'setosa&0&242': np.array([0.7770298852793471, 0.0294434304771479]), 'setosa&0&243': np.array([0.7936433456054741, 0.01258375207649658]), 'setosa&0&244': np.array([0.7974072911132786, 0.006894018772033576]), 'setosa&0&245': np.array([0.19685199412911678, 0.7845879230594391]), 'setosa&0&246': np.array([0.19685199412911678, 0.7845879230594391]), 'setosa&0&247': np.array([0.19685199412911678, 0.7845879230594391]), 
'setosa&0&248': np.array([0.19685199412911678, 0.7845879230594391]), 'setosa&0&249': np.array([0.07476043598366156, 0.9062715528547001]), 'setosa&0&250': np.array([0.07476043598366156, 0.9062715528547001]), 'setosa&0&251': np.array([0.07476043598366156, 0.9062715528547001]), 'setosa&0&252': np.array([0.7770298852793471, 0.0294434304771479]), 'setosa&0&253': np.array([0.7770298852793471, 0.0294434304771479]), 'setosa&0&254': np.array([0.7936433456054741, 0.01258375207649658]), 'setosa&0&255': np.array([0.19685199412911678, 0.7845879230594391]), 'setosa&0&256': np.array([0.07476043598366156, 0.9062715528547001]), 'setosa&0&257': np.array([0.7770298852793471, 0.0294434304771479]), 'setosa&0&258': np.array([0.7936433456054741, 0.01258375207649658]), 'setosa&0&259': np.array([0.7974072911132786, 0.006894018772033576]), 'setosa&0&260': np.array([0.19685199412911678, 0.7845879230594391]), 'setosa&0&261': np.array([0.19685199412911678, 0.7845879230594391]), 'setosa&0&262': np.array([0.19685199412911678, 0.7845879230594391]), 'setosa&0&263': np.array([0.19685199412911678, 0.7845879230594391]), 'setosa&0&264': np.array([0.07476043598366156, 0.9062715528547001]), 'setosa&0&265': np.array([0.07476043598366156, 0.9062715528547001]), 'setosa&0&266': np.array([0.07476043598366156, 0.9062715528547001]), 'setosa&0&267': np.array([0.7770298852793471, 0.0294434304771479]), 'setosa&0&268': np.array([0.7770298852793471, 0.0294434304771479]), 'setosa&0&269': np.array([0.7936433456054741, 0.01258375207649658]), 'setosa&0&270': np.array([0.050316962184345455, 0.9292276112117481]), 'setosa&0&271': np.array([0.0171486447659196, 0.9632117581295891]), 'setosa&0&272': np.array([0.06151571389390039, 0.524561199322281]), 'setosa&0&273': np.array([0.4329463382004908, 0.057167210150691136]), 'setosa&0&274': np.array([0.4656481363306145, 0.007982539480288167]), 'setosa&0&275': np.array([0.050316962184345455, 0.9292276112117481]), 'setosa&0&276': np.array([0.050316962184345455, 0.9292276112117481]), 'setosa&0&277': np.array([0.050316962184345455, 0.9292276112117481]), 'setosa&0&278': np.array([0.050316962184345455, 0.9292276112117481]), 'setosa&0&279': np.array([0.0171486447659196, 0.9632117581295891]), 'setosa&0&280': np.array([0.0171486447659196, 0.9632117581295891]), 'setosa&0&281': np.array([0.0171486447659196, 0.9632117581295891]), 'setosa&0&282': np.array([0.06151571389390039, 0.524561199322281]), 'setosa&0&283': np.array([0.06151571389390039, 0.524561199322281]), 'setosa&0&284': np.array([0.4329463382004908, 0.057167210150691136]), 'setosa&0&285': np.array([0.050316962184345455, 0.9292276112117481]), 'setosa&0&286': np.array([0.0171486447659196, 0.9632117581295891]), 'setosa&0&287': np.array([0.06151571389390039, 0.524561199322281]), 'setosa&0&288': np.array([0.4329463382004908, 0.057167210150691136]), 'setosa&0&289': np.array([0.4656481363306145, 0.007982539480288167]), 'setosa&0&290': np.array([0.050316962184345455, 0.9292276112117481]), 'setosa&0&291': np.array([0.050316962184345455, 0.9292276112117481]), 'setosa&0&292': np.array([0.050316962184345455, 0.9292276112117481]), 'setosa&0&293': np.array([0.050316962184345455, 0.9292276112117481]), 'setosa&0&294': np.array([0.0171486447659196, 0.9632117581295891]), 'setosa&0&295': np.array([0.0171486447659196, 0.9632117581295891]), 'setosa&0&296': np.array([0.0171486447659196, 0.9632117581295891]), 'setosa&0&297': np.array([0.06151571389390039, 0.524561199322281]), 'setosa&0&298': np.array([0.06151571389390039, 0.524561199322281]), 'setosa&0&299': 
np.array([0.4329463382004908, 0.057167210150691136]), 'setosa&0&300': np.array([0.029402442458921055, 0.9481684282717416]), 'setosa&0&301': np.array([0.00988785935411159, 0.9698143912008228]), 'setosa&0&302': np.array([0.009595083643662688, 0.5643652067423869]), 'setosa&0&303': np.array([0.13694026920485936, 0.36331091829858003]), 'setosa&0&304': np.array([0.3094460464703627, 0.11400643817329122]), 'setosa&0&305': np.array([0.029402442458921055, 0.9481684282717416]), 'setosa&0&306': np.array([0.029402442458921055, 0.9481684282717416]), 'setosa&0&307': np.array([0.029402442458921055, 0.9481684282717416]), 'setosa&0&308': np.array([0.029402442458921055, 0.9481684282717416]), 'setosa&0&309': np.array([0.00988785935411159, 0.9698143912008228]), 'setosa&0&310': np.array([0.00988785935411159, 0.9698143912008228]), 'setosa&0&311': np.array([0.00988785935411159, 0.9698143912008228]), 'setosa&0&312': np.array([0.009595083643662688, 0.5643652067423869]), 'setosa&0&313': np.array([0.009595083643662688, 0.5643652067423869]), 'setosa&0&314': np.array([0.13694026920485936, 0.36331091829858003]), 'setosa&1&0': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&1': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&2': np.array([-0.9105775730167809, 0.6842162738602727]), 'setosa&1&3': np.array([-0.6718337295341267, 0.6620422637360075]), 'setosa&1&4': np.array([-0.4964962439921071, 0.3798215458387346]), 'setosa&1&5': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&6': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&7': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&8': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&9': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&10': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&11': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&12': np.array([-0.9105775730167809, 0.6842162738602727]), 'setosa&1&13': np.array([-0.9105775730167809, 0.6842162738602727]), 'setosa&1&14': np.array([-0.6718337295341267, 0.6620422637360075]), 'setosa&1&15': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&16': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&17': np.array([-0.9105775730167809, 0.6842162738602727]), 'setosa&1&18': np.array([-0.6718337295341267, 0.6620422637360075]), 'setosa&1&19': np.array([-0.4964962439921071, 0.3798215458387346]), 'setosa&1&20': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&21': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&22': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&23': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&24': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&25': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&26': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&27': np.array([-0.9105775730167809, 0.6842162738602727]), 'setosa&1&28': np.array([-0.9105775730167809, 0.6842162738602727]), 'setosa&1&29': np.array([-0.6718337295341267, 0.6620422637360075]), 'setosa&1&30': np.array([0.32199975656257585, -0.748229355246375]), 'setosa&1&31': np.array([0.43843349141088417, -0.8642740701867918]), 'setosa&1&32': np.array([-0.7141739659554724, 0.6619819140152877]), 'setosa&1&33': np.array([-0.4446001433508151, 0.6107546840046902]), 'setosa&1&34': np.array([-0.26192650167775977, 0.33491141590339474]), 'setosa&1&35': np.array([0.32199975656257585, 
-0.748229355246375]), 'setosa&1&36': np.array([0.32199975656257585, -0.748229355246375]), 'setosa&1&37': np.array([0.32199975656257585, -0.748229355246375]), 'setosa&1&38': np.array([0.32199975656257585, -0.748229355246375]), 'setosa&1&39': np.array([0.43843349141088417, -0.8642740701867918]), 'setosa&1&40': np.array([0.43843349141088417, -0.8642740701867918]), 'setosa&1&41': np.array([0.43843349141088417, -0.8642740701867918]), 'setosa&1&42': np.array([-0.7141739659554724, 0.6619819140152877]), 'setosa&1&43': np.array([-0.7141739659554724, 0.6619819140152877]), 'setosa&1&44': np.array([-0.4446001433508151, 0.6107546840046902]), 'setosa&1&45': np.array([0.7749499208750121, -0.814718944080443]), 'setosa&1&46': np.array([0.80403091954169, -0.844515250413482]), 'setosa&1&47': np.array([0.5826506963750848, -0.22335655671229107]), 'setosa&1&48': np.array([0.33108168891715983, 0.13647816746351163]), 'setosa&1&49': np.array([0.4079256832347186, 0.038455640985860955]), 'setosa&1&50': np.array([0.7749499208750121, -0.814718944080443]), 'setosa&1&51': np.array([0.7749499208750121, -0.814718944080443]), 'setosa&1&52': np.array([0.7749499208750121, -0.814718944080443]), 'setosa&1&53': np.array([0.7749499208750121, -0.814718944080443]), 'setosa&1&54': np.array([0.80403091954169, -0.844515250413482]), 'setosa&1&55': np.array([0.80403091954169, -0.844515250413482]), 'setosa&1&56': np.array([0.80403091954169, -0.844515250413482]), 'setosa&1&57': np.array([0.5826506963750848, -0.22335655671229107]), 'setosa&1&58': np.array([0.5826506963750848, -0.22335655671229107]), 'setosa&1&59': np.array([0.33108168891715983, 0.13647816746351163]), 'setosa&1&60': np.array([0.4933316375690333, -0.5272416708629277]), 'setosa&1&61': np.array([0.5041830043657418, -0.5392782673950876]), 'setosa&1&62': np.array([0.25657760110071476, 0.12592645350389123]), 'setosa&1&63': np.array([0.13717260713320106, 0.3627779907901665]), 'setosa&1&64': np.array([0.3093950298647913, 0.1140298206733954]), 'setosa&1&65': np.array([0.4933316375690333, -0.5272416708629277]), 'setosa&1&66': np.array([0.4933316375690333, -0.5272416708629277]), 'setosa&1&67': np.array([0.4933316375690333, -0.5272416708629277]), 'setosa&1&68': np.array([0.4933316375690333, -0.5272416708629277]), 'setosa&1&69': np.array([0.5041830043657418, -0.5392782673950876]), 'setosa&1&70': np.array([0.5041830043657418, -0.5392782673950876]), 'setosa&1&71': np.array([0.5041830043657418, -0.5392782673950876]), 'setosa&1&72': np.array([0.25657760110071476, 0.12592645350389123]), 'setosa&1&73': np.array([0.25657760110071476, 0.12592645350389123]), 'setosa&1&74': np.array([0.13717260713320106, 0.3627779907901665]), 'setosa&1&75': np.array([0.0, -0.4756207622944677]), 'setosa&1&76': np.array([0.0, -0.4854334805210761]), 'setosa&1&77': np.array([0.0, 0.16885577975809635]), 'setosa&1&78': np.array([0.0, 0.395805885538554]), 'setosa&1&79': np.array([0.0, 0.2538072707138344]), 'setosa&1&80': np.array([0.0, -0.4756207622944677]), 'setosa&1&81': np.array([0.0, -0.4756207622944677]), 'setosa&1&82': np.array([0.0, -0.4756207622944677]), 'setosa&1&83': np.array([0.0, -0.4756207622944677]), 'setosa&1&84': np.array([0.0, -0.4854334805210761]), 'setosa&1&85': np.array([0.0, -0.4854334805210761]), 'setosa&1&86': np.array([0.0, -0.4854334805210761]), 'setosa&1&87': np.array([0.0, 0.16885577975809635]), 'setosa&1&88': np.array([0.0, 0.16885577975809635]), 'setosa&1&89': np.array([0.0, 0.395805885538554]), 'setosa&1&90': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&91': 
np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&92': np.array([-0.9105775730167809, 0.6842162738602727]), 'setosa&1&93': np.array([-0.6718337295341267, 0.6620422637360075]), 'setosa&1&94': np.array([-0.4964962439921071, 0.3798215458387346]), 'setosa&1&95': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&96': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&97': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&98': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&99': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&100': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&101': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&102': np.array([-0.9105775730167809, 0.6842162738602727]), 'setosa&1&103': np.array([-0.9105775730167809, 0.6842162738602727]), 'setosa&1&104': np.array([-0.6718337295341267, 0.6620422637360075]), 'setosa&1&105': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&106': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&107': np.array([-0.9105775730167809, 0.6842162738602727]), 'setosa&1&108': np.array([-0.6718337295341267, 0.6620422637360075]), 'setosa&1&109': np.array([-0.4964962439921071, 0.3798215458387346]), 'setosa&1&110': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&111': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&112': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&113': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&114': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&115': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&116': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&117': np.array([-0.9105775730167809, 0.6842162738602727]), 'setosa&1&118': np.array([-0.9105775730167809, 0.6842162738602727]), 'setosa&1&119': np.array([-0.6718337295341267, 0.6620422637360075]), 'setosa&1&120': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&121': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&122': np.array([-0.9105775730167809, 0.6842162738602727]), 'setosa&1&123': np.array([-0.6718337295341267, 0.6620422637360075]), 'setosa&1&124': np.array([-0.4964962439921071, 0.3798215458387346]), 'setosa&1&125': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&126': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&127': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&128': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&129': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&130': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&131': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&132': np.array([-0.9105775730167809, 0.6842162738602727]), 'setosa&1&133': np.array([-0.9105775730167809, 0.6842162738602727]), 'setosa&1&134': np.array([-0.6718337295341267, 0.6620422637360075]), 'setosa&1&135': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&136': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&137': np.array([-0.9105775730167809, 0.6842162738602727]), 'setosa&1&138': np.array([-0.6718337295341267, 0.6620422637360075]), 'setosa&1&139': np.array([-0.4964962439921071, 0.3798215458387346]), 'setosa&1&140': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&141': np.array([-0.37157553889555184, -0.1221600832023858]), 
'setosa&1&142': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&143': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&144': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&145': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&146': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&147': np.array([-0.9105775730167809, 0.6842162738602727]), 'setosa&1&148': np.array([-0.9105775730167809, 0.6842162738602727]), 'setosa&1&149': np.array([-0.6718337295341267, 0.6620422637360075]), 'setosa&1&150': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&151': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&152': np.array([-0.9105775730167809, 0.6842162738602727]), 'setosa&1&153': np.array([-0.6718337295341267, 0.6620422637360075]), 'setosa&1&154': np.array([-0.4964962439921071, 0.3798215458387346]), 'setosa&1&155': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&156': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&157': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&158': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&159': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&160': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&161': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&162': np.array([-0.9105775730167809, 0.6842162738602727]), 'setosa&1&163': np.array([-0.9105775730167809, 0.6842162738602727]), 'setosa&1&164': np.array([-0.6718337295341267, 0.6620422637360075]), 'setosa&1&165': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&166': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&167': np.array([-0.9105775730167809, 0.6842162738602727]), 'setosa&1&168': np.array([-0.6718337295341267, 0.6620422637360075]), 'setosa&1&169': np.array([-0.4964962439921071, 0.3798215458387346]), 'setosa&1&170': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&171': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&172': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&173': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&174': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&175': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&176': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&177': np.array([-0.9105775730167809, 0.6842162738602727]), 'setosa&1&178': np.array([-0.9105775730167809, 0.6842162738602727]), 'setosa&1&179': np.array([-0.6718337295341267, 0.6620422637360075]), 'setosa&1&180': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&181': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&182': np.array([-0.9105775730167809, 0.6842162738602727]), 'setosa&1&183': np.array([-0.6718337295341267, 0.6620422637360075]), 'setosa&1&184': np.array([-0.4964962439921071, 0.3798215458387346]), 'setosa&1&185': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&186': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&187': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&188': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&189': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&190': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&191': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&192': 
np.array([-0.9105775730167809, 0.6842162738602727]), 'setosa&1&193': np.array([-0.9105775730167809, 0.6842162738602727]), 'setosa&1&194': np.array([-0.6718337295341267, 0.6620422637360075]), 'setosa&1&195': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&196': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&197': np.array([-0.9105775730167809, 0.6842162738602727]), 'setosa&1&198': np.array([-0.6718337295341267, 0.6620422637360075]), 'setosa&1&199': np.array([-0.4964962439921071, 0.3798215458387346]), 'setosa&1&200': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&201': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&202': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&203': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&204': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&205': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&206': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&207': np.array([-0.9105775730167809, 0.6842162738602727]), 'setosa&1&208': np.array([-0.9105775730167809, 0.6842162738602727]), 'setosa&1&209': np.array([-0.6718337295341267, 0.6620422637360075]), 'setosa&1&210': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&211': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&212': np.array([-0.9105775730167809, 0.6842162738602727]), 'setosa&1&213': np.array([-0.6718337295341267, 0.6620422637360075]), 'setosa&1&214': np.array([-0.4964962439921071, 0.3798215458387346]), 'setosa&1&215': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&216': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&217': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&218': np.array([-0.37157553889555184, -0.1221600832023858]), 'setosa&1&219': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&220': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&221': np.array([-0.2463036871609408, -0.24630368716093934]), 'setosa&1&222': np.array([-0.9105775730167809, 0.6842162738602727]), 'setosa&1&223': np.array([-0.9105775730167809, 0.6842162738602727]), 'setosa&1&224': np.array([-0.6718337295341267, 0.6620422637360075]), 'setosa&1&225': np.array([0.32199975656257585, -0.748229355246375]), 'setosa&1&226': np.array([0.43843349141088417, -0.8642740701867918]), 'setosa&1&227': np.array([-0.7141739659554724, 0.6619819140152877]), 'setosa&1&228': np.array([-0.4446001433508151, 0.6107546840046902]), 'setosa&1&229': np.array([-0.26192650167775977, 0.33491141590339474]), 'setosa&1&230': np.array([0.32199975656257585, -0.748229355246375]), 'setosa&1&231': np.array([0.32199975656257585, -0.748229355246375]), 'setosa&1&232': np.array([0.32199975656257585, -0.748229355246375]), 'setosa&1&233': np.array([0.32199975656257585, -0.748229355246375]), 'setosa&1&234': np.array([0.43843349141088417, -0.8642740701867918]), 'setosa&1&235': np.array([0.43843349141088417, -0.8642740701867918]), 'setosa&1&236': np.array([0.43843349141088417, -0.8642740701867918]), 'setosa&1&237': np.array([-0.7141739659554724, 0.6619819140152877]), 'setosa&1&238': np.array([-0.7141739659554724, 0.6619819140152877]), 'setosa&1&239': np.array([-0.4446001433508151, 0.6107546840046902]), 'setosa&1&240': np.array([0.32199975656257585, -0.748229355246375]), 'setosa&1&241': np.array([0.43843349141088417, -0.8642740701867918]), 'setosa&1&242': np.array([-0.7141739659554724, 0.6619819140152877]), 
'setosa&1&243': np.array([-0.4446001433508151, 0.6107546840046902]), 'setosa&1&244': np.array([-0.26192650167775977, 0.33491141590339474]), 'setosa&1&245': np.array([0.32199975656257585, -0.748229355246375]), 'setosa&1&246': np.array([0.32199975656257585, -0.748229355246375]), 'setosa&1&247': np.array([0.32199975656257585, -0.748229355246375]), 'setosa&1&248': np.array([0.32199975656257585, -0.748229355246375]), 'setosa&1&249': np.array([0.43843349141088417, -0.8642740701867918]), 'setosa&1&250': np.array([0.43843349141088417, -0.8642740701867918]), 'setosa&1&251': np.array([0.43843349141088417, -0.8642740701867918]), 'setosa&1&252': np.array([-0.7141739659554724, 0.6619819140152877]), 'setosa&1&253': np.array([-0.7141739659554724, 0.6619819140152877]), 'setosa&1&254': np.array([-0.4446001433508151, 0.6107546840046902]), 'setosa&1&255': np.array([0.32199975656257585, -0.748229355246375]), 'setosa&1&256': np.array([0.43843349141088417, -0.8642740701867918]), 'setosa&1&257': np.array([-0.7141739659554724, 0.6619819140152877]), 'setosa&1&258': np.array([-0.4446001433508151, 0.6107546840046902]), 'setosa&1&259': np.array([-0.26192650167775977, 0.33491141590339474]), 'setosa&1&260': np.array([0.32199975656257585, -0.748229355246375]), 'setosa&1&261': np.array([0.32199975656257585, -0.748229355246375]), 'setosa&1&262': np.array([0.32199975656257585, -0.748229355246375]), 'setosa&1&263': np.array([0.32199975656257585, -0.748229355246375]), 'setosa&1&264': np.array([0.43843349141088417, -0.8642740701867918]), 'setosa&1&265': np.array([0.43843349141088417, -0.8642740701867918]), 'setosa&1&266': np.array([0.43843349141088417, -0.8642740701867918]), 'setosa&1&267': np.array([-0.7141739659554724, 0.6619819140152877]), 'setosa&1&268': np.array([-0.7141739659554724, 0.6619819140152877]), 'setosa&1&269': np.array([-0.4446001433508151, 0.6107546840046902]), 'setosa&1&270': np.array([0.7749499208750121, -0.814718944080443]), 'setosa&1&271': np.array([0.80403091954169, -0.844515250413482]), 'setosa&1&272': np.array([0.5826506963750848, -0.22335655671229107]), 'setosa&1&273': np.array([0.33108168891715983, 0.13647816746351163]), 'setosa&1&274': np.array([0.4079256832347186, 0.038455640985860955]), 'setosa&1&275': np.array([0.7749499208750121, -0.814718944080443]), 'setosa&1&276': np.array([0.7749499208750121, -0.814718944080443]), 'setosa&1&277': np.array([0.7749499208750121, -0.814718944080443]), 'setosa&1&278': np.array([0.7749499208750121, -0.814718944080443]), 'setosa&1&279': np.array([0.80403091954169, -0.844515250413482]), 'setosa&1&280': np.array([0.80403091954169, -0.844515250413482]), 'setosa&1&281': np.array([0.80403091954169, -0.844515250413482]), 'setosa&1&282': np.array([0.5826506963750848, -0.22335655671229107]), 'setosa&1&283': np.array([0.5826506963750848, -0.22335655671229107]), 'setosa&1&284': np.array([0.33108168891715983, 0.13647816746351163]), 'setosa&1&285': np.array([0.7749499208750121, -0.814718944080443]), 'setosa&1&286': np.array([0.80403091954169, -0.844515250413482]), 'setosa&1&287': np.array([0.5826506963750848, -0.22335655671229107]), 'setosa&1&288': np.array([0.33108168891715983, 0.13647816746351163]), 'setosa&1&289': np.array([0.4079256832347186, 0.038455640985860955]), 'setosa&1&290': np.array([0.7749499208750121, -0.814718944080443]), 'setosa&1&291': np.array([0.7749499208750121, -0.814718944080443]), 'setosa&1&292': np.array([0.7749499208750121, -0.814718944080443]), 'setosa&1&293': np.array([0.7749499208750121, -0.814718944080443]), 'setosa&1&294': 
np.array([0.80403091954169, -0.844515250413482]), 'setosa&1&295': np.array([0.80403091954169, -0.844515250413482]), 'setosa&1&296': np.array([0.80403091954169, -0.844515250413482]), 'setosa&1&297': np.array([0.5826506963750848, -0.22335655671229107]), 'setosa&1&298': np.array([0.5826506963750848, -0.22335655671229107]), 'setosa&1&299': np.array([0.33108168891715983, 0.13647816746351163]), 'setosa&1&300': np.array([0.4933316375690333, -0.5272416708629277]), 'setosa&1&301': np.array([0.5041830043657418, -0.5392782673950876]), 'setosa&1&302': np.array([0.25657760110071476, 0.12592645350389123]), 'setosa&1&303': np.array([0.13717260713320106, 0.3627779907901665]), 'setosa&1&304': np.array([0.3093950298647913, 0.1140298206733954]), 'setosa&1&305': np.array([0.4933316375690333, -0.5272416708629277]), 'setosa&1&306': np.array([0.4933316375690333, -0.5272416708629277]), 'setosa&1&307': np.array([0.4933316375690333, -0.5272416708629277]), 'setosa&1&308': np.array([0.4933316375690333, -0.5272416708629277]), 'setosa&1&309': np.array([0.5041830043657418, -0.5392782673950876]), 'setosa&1&310': np.array([0.5041830043657418, -0.5392782673950876]), 'setosa&1&311': np.array([0.5041830043657418, -0.5392782673950876]), 'setosa&1&312': np.array([0.25657760110071476, 0.12592645350389123]), 'setosa&1&313': np.array([0.25657760110071476, 0.12592645350389123]), 'setosa&1&314': np.array([0.13717260713320106, 0.3627779907901665]), 'setosa&2&0': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&1': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&2': np.array([-0.044492463210563125, -0.7096449459722027]), 'setosa&2&3': np.array([-0.29537842173874096, -0.6750352694420283]), 'setosa&2&4': np.array([-0.47415719445227245, -0.38726974144168774]), 'setosa&2&5': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&6': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&7': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&8': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&9': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&10': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&11': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&12': np.array([-0.044492463210563125, -0.7096449459722027]), 'setosa&2&13': np.array([-0.044492463210563125, -0.7096449459722027]), 'setosa&2&14': np.array([-0.29537842173874096, -0.6750352694420283]), 'setosa&2&15': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&16': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&17': np.array([-0.044492463210563125, -0.7096449459722027]), 'setosa&2&18': np.array([-0.29537842173874096, -0.6750352694420283]), 'setosa&2&19': np.array([-0.47415719445227245, -0.38726974144168774]), 'setosa&2&20': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&21': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&22': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&23': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&24': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&25': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&26': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&27': np.array([-0.044492463210563125, -0.7096449459722027]), 'setosa&2&28': np.array([-0.044492463210563125, -0.7096449459722027]), 'setosa&2&29': np.array([-0.29537842173874096, -0.6750352694420283]), 'setosa&2&30': 
np.array([-0.5188517506916893, -0.036358567813067795]), 'setosa&2&31': np.array([-0.513193927394545, -0.041997482667908786]), 'setosa&2&32': np.array([-0.06285591932387405, -0.6914253444924359]), 'setosa&2&33': np.array([-0.34904320225465857, -0.6233384360811872]), 'setosa&2&34': np.array([-0.5354807894355184, -0.3418054346754283]), 'setosa&2&35': np.array([-0.5188517506916893, -0.036358567813067795]), 'setosa&2&36': np.array([-0.5188517506916893, -0.036358567813067795]), 'setosa&2&37': np.array([-0.5188517506916893, -0.036358567813067795]), 'setosa&2&38': np.array([-0.5188517506916893, -0.036358567813067795]), 'setosa&2&39': np.array([-0.513193927394545, -0.041997482667908786]), 'setosa&2&40': np.array([-0.513193927394545, -0.041997482667908786]), 'setosa&2&41': np.array([-0.513193927394545, -0.041997482667908786]), 'setosa&2&42': np.array([-0.06285591932387405, -0.6914253444924359]), 'setosa&2&43': np.array([-0.06285591932387405, -0.6914253444924359]), 'setosa&2&44': np.array([-0.34904320225465857, -0.6233384360811872]), 'setosa&2&45': np.array([-0.8252668830593567, -0.11450866713130638]), 'setosa&2&46': np.array([-0.8211795643076093, -0.1186965077161071]), 'setosa&2&47': np.array([-0.6441664102689847, -0.3012046426099901]), 'setosa&2&48': np.array([-0.7640280271176497, -0.19364537761420375]), 'setosa&2&49': np.array([-0.8735738195653328, -0.046438180466149094]), 'setosa&2&50': np.array([-0.8252668830593567, -0.11450866713130638]), 'setosa&2&51': np.array([-0.8252668830593567, -0.11450866713130638]), 'setosa&2&52': np.array([-0.8252668830593567, -0.11450866713130638]), 'setosa&2&53': np.array([-0.8252668830593567, -0.11450866713130638]), 'setosa&2&54': np.array([-0.8211795643076093, -0.1186965077161071]), 'setosa&2&55': np.array([-0.8211795643076093, -0.1186965077161071]), 'setosa&2&56': np.array([-0.8211795643076093, -0.1186965077161071]), 'setosa&2&57': np.array([-0.6441664102689847, -0.3012046426099901]), 'setosa&2&58': np.array([-0.6441664102689847, -0.3012046426099901]), 'setosa&2&59': np.array([-0.7640280271176497, -0.19364537761420375]), 'setosa&2&60': np.array([-0.5227340800279542, -0.42092675740881474]), 'setosa&2&61': np.array([-0.5140708637198534, -0.43053612380573514]), 'setosa&2&62': np.array([-0.2661726847443776, -0.6902916602462779]), 'setosa&2&63': np.array([-0.2741128763380603, -0.7260889090887469]), 'setosa&2&64': np.array([-0.6188410763351541, -0.22803625884668638]), 'setosa&2&65': np.array([-0.5227340800279542, -0.42092675740881474]), 'setosa&2&66': np.array([-0.5227340800279542, -0.42092675740881474]), 'setosa&2&67': np.array([-0.5227340800279542, -0.42092675740881474]), 'setosa&2&68': np.array([-0.5227340800279542, -0.42092675740881474]), 'setosa&2&69': np.array([-0.5140708637198534, -0.43053612380573514]), 'setosa&2&70': np.array([-0.5140708637198534, -0.43053612380573514]), 'setosa&2&71': np.array([-0.5140708637198534, -0.43053612380573514]), 'setosa&2&72': np.array([-0.2661726847443776, -0.6902916602462779]), 'setosa&2&73': np.array([-0.2661726847443776, -0.6902916602462779]), 'setosa&2&74': np.array([-0.2741128763380603, -0.7260889090887469]), 'setosa&2&75': np.array([0.0, -0.47562425924289314]), 'setosa&2&76': np.array([0.0, -0.48543689565931186]), 'setosa&2&77': np.array([0.0, -0.7348263896003956]), 'setosa&2&78': np.array([0.0, -0.7920887571493729]), 'setosa&2&79': np.array([0.0, -0.507614207038711]), 'setosa&2&80': np.array([0.0, -0.47562425924289314]), 'setosa&2&81': np.array([0.0, -0.47562425924289314]), 'setosa&2&82': np.array([0.0, 
-0.47562425924289314]), 'setosa&2&83': np.array([0.0, -0.47562425924289314]), 'setosa&2&84': np.array([0.0, -0.48543689565931186]), 'setosa&2&85': np.array([0.0, -0.48543689565931186]), 'setosa&2&86': np.array([0.0, -0.48543689565931186]), 'setosa&2&87': np.array([0.0, -0.7348263896003956]), 'setosa&2&88': np.array([0.0, -0.7348263896003956]), 'setosa&2&89': np.array([0.0, -0.7920887571493729]), 'setosa&2&90': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&91': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&92': np.array([-0.044492463210563125, -0.7096449459722027]), 'setosa&2&93': np.array([-0.29537842173874096, -0.6750352694420283]), 'setosa&2&94': np.array([-0.47415719445227245, -0.38726974144168774]), 'setosa&2&95': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&96': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&97': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&98': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&99': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&100': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&101': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&102': np.array([-0.044492463210563125, -0.7096449459722027]), 'setosa&2&103': np.array([-0.044492463210563125, -0.7096449459722027]), 'setosa&2&104': np.array([-0.29537842173874096, -0.6750352694420283]), 'setosa&2&105': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&106': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&107': np.array([-0.044492463210563125, -0.7096449459722027]), 'setosa&2&108': np.array([-0.29537842173874096, -0.6750352694420283]), 'setosa&2&109': np.array([-0.47415719445227245, -0.38726974144168774]), 'setosa&2&110': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&111': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&112': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&113': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&114': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&115': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&116': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&117': np.array([-0.044492463210563125, -0.7096449459722027]), 'setosa&2&118': np.array([-0.044492463210563125, -0.7096449459722027]), 'setosa&2&119': np.array([-0.29537842173874096, -0.6750352694420283]), 'setosa&2&120': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&121': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&122': np.array([-0.044492463210563125, -0.7096449459722027]), 'setosa&2&123': np.array([-0.29537842173874096, -0.6750352694420283]), 'setosa&2&124': np.array([-0.47415719445227245, -0.38726974144168774]), 'setosa&2&125': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&126': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&127': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&128': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&129': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&130': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&131': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&132': np.array([-0.044492463210563125, -0.7096449459722027]), 'setosa&2&133': np.array([-0.044492463210563125, -0.7096449459722027]), 'setosa&2&134': 
np.array([-0.29537842173874096, -0.6750352694420283]), 'setosa&2&135': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&136': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&137': np.array([-0.044492463210563125, -0.7096449459722027]), 'setosa&2&138': np.array([-0.29537842173874096, -0.6750352694420283]), 'setosa&2&139': np.array([-0.47415719445227245, -0.38726974144168774]), 'setosa&2&140': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&141': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&142': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&143': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&144': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&145': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&146': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&147': np.array([-0.044492463210563125, -0.7096449459722027]), 'setosa&2&148': np.array([-0.044492463210563125, -0.7096449459722027]), 'setosa&2&149': np.array([-0.29537842173874096, -0.6750352694420283]), 'setosa&2&150': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&151': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&152': np.array([-0.044492463210563125, -0.7096449459722027]), 'setosa&2&153': np.array([-0.29537842173874096, -0.6750352694420283]), 'setosa&2&154': np.array([-0.47415719445227245, -0.38726974144168774]), 'setosa&2&155': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&156': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&157': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&158': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&159': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&160': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&161': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&162': np.array([-0.044492463210563125, -0.7096449459722027]), 'setosa&2&163': np.array([-0.044492463210563125, -0.7096449459722027]), 'setosa&2&164': np.array([-0.29537842173874096, -0.6750352694420283]), 'setosa&2&165': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&166': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&167': np.array([-0.044492463210563125, -0.7096449459722027]), 'setosa&2&168': np.array([-0.29537842173874096, -0.6750352694420283]), 'setosa&2&169': np.array([-0.47415719445227245, -0.38726974144168774]), 'setosa&2&170': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&171': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&172': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&173': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&174': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&175': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&176': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&177': np.array([-0.044492463210563125, -0.7096449459722027]), 'setosa&2&178': np.array([-0.044492463210563125, -0.7096449459722027]), 'setosa&2&179': np.array([-0.29537842173874096, -0.6750352694420283]), 'setosa&2&180': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&181': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&182': np.array([-0.044492463210563125, -0.7096449459722027]), 'setosa&2&183': np.array([-0.29537842173874096, 
-0.6750352694420283]), 'setosa&2&184': np.array([-0.47415719445227245, -0.38726974144168774]), 'setosa&2&185': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&186': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&187': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&188': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&189': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&190': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&191': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&192': np.array([-0.044492463210563125, -0.7096449459722027]), 'setosa&2&193': np.array([-0.044492463210563125, -0.7096449459722027]), 'setosa&2&194': np.array([-0.29537842173874096, -0.6750352694420283]), 'setosa&2&195': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&196': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&197': np.array([-0.044492463210563125, -0.7096449459722027]), 'setosa&2&198': np.array([-0.29537842173874096, -0.6750352694420283]), 'setosa&2&199': np.array([-0.47415719445227245, -0.38726974144168774]), 'setosa&2&200': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&201': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&202': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&203': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&204': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&205': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&206': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&207': np.array([-0.044492463210563125, -0.7096449459722027]), 'setosa&2&208': np.array([-0.044492463210563125, -0.7096449459722027]), 'setosa&2&209': np.array([-0.29537842173874096, -0.6750352694420283]), 'setosa&2&210': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&211': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&212': np.array([-0.044492463210563125, -0.7096449459722027]), 'setosa&2&213': np.array([-0.29537842173874096, -0.6750352694420283]), 'setosa&2&214': np.array([-0.47415719445227245, -0.38726974144168774]), 'setosa&2&215': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&216': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&217': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&218': np.array([-0.3715769132100501, -0.12216227283618744]), 'setosa&2&219': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&220': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&221': np.array([-0.24630541996506924, -0.24630541996506994]), 'setosa&2&222': np.array([-0.044492463210563125, -0.7096449459722027]), 'setosa&2&223': np.array([-0.044492463210563125, -0.7096449459722027]), 'setosa&2&224': np.array([-0.29537842173874096, -0.6750352694420283]), 'setosa&2&225': np.array([-0.5188517506916893, -0.036358567813067795]), 'setosa&2&226': np.array([-0.513193927394545, -0.041997482667908786]), 'setosa&2&227': np.array([-0.06285591932387405, -0.6914253444924359]), 'setosa&2&228': np.array([-0.34904320225465857, -0.6233384360811872]), 'setosa&2&229': np.array([-0.5354807894355184, -0.3418054346754283]), 'setosa&2&230': np.array([-0.5188517506916893, -0.036358567813067795]), 'setosa&2&231': np.array([-0.5188517506916893, -0.036358567813067795]), 'setosa&2&232': np.array([-0.5188517506916893, -0.036358567813067795]), 'setosa&2&233': 
np.array([-0.5188517506916893, -0.036358567813067795]), 'setosa&2&234': np.array([-0.513193927394545, -0.041997482667908786]), 'setosa&2&235': np.array([-0.513193927394545, -0.041997482667908786]), 'setosa&2&236': np.array([-0.513193927394545, -0.041997482667908786]), 'setosa&2&237': np.array([-0.06285591932387405, -0.6914253444924359]), 'setosa&2&238': np.array([-0.06285591932387405, -0.6914253444924359]), 'setosa&2&239': np.array([-0.34904320225465857, -0.6233384360811872]), 'setosa&2&240': np.array([-0.5188517506916893, -0.036358567813067795]), 'setosa&2&241': np.array([-0.513193927394545, -0.041997482667908786]), 'setosa&2&242': np.array([-0.06285591932387405, -0.6914253444924359]), 'setosa&2&243': np.array([-0.34904320225465857, -0.6233384360811872]), 'setosa&2&244': np.array([-0.5354807894355184, -0.3418054346754283]), 'setosa&2&245': np.array([-0.5188517506916893, -0.036358567813067795]), 'setosa&2&246': np.array([-0.5188517506916893, -0.036358567813067795]), 'setosa&2&247': np.array([-0.5188517506916893, -0.036358567813067795]), 'setosa&2&248': np.array([-0.5188517506916893, -0.036358567813067795]), 'setosa&2&249': np.array([-0.513193927394545, -0.041997482667908786]), 'setosa&2&250': np.array([-0.513193927394545, -0.041997482667908786]), 'setosa&2&251': np.array([-0.513193927394545, -0.041997482667908786]), 'setosa&2&252': np.array([-0.06285591932387405, -0.6914253444924359]), 'setosa&2&253': np.array([-0.06285591932387405, -0.6914253444924359]), 'setosa&2&254': np.array([-0.34904320225465857, -0.6233384360811872]), 'setosa&2&255': np.array([-0.5188517506916893, -0.036358567813067795]), 'setosa&2&256': np.array([-0.513193927394545, -0.041997482667908786]), 'setosa&2&257': np.array([-0.06285591932387405, -0.6914253444924359]), 'setosa&2&258': np.array([-0.34904320225465857, -0.6233384360811872]), 'setosa&2&259': np.array([-0.5354807894355184, -0.3418054346754283]), 'setosa&2&260': np.array([-0.5188517506916893, -0.036358567813067795]), 'setosa&2&261': np.array([-0.5188517506916893, -0.036358567813067795]), 'setosa&2&262': np.array([-0.5188517506916893, -0.036358567813067795]), 'setosa&2&263': np.array([-0.5188517506916893, -0.036358567813067795]), 'setosa&2&264': np.array([-0.513193927394545, -0.041997482667908786]), 'setosa&2&265': np.array([-0.513193927394545, -0.041997482667908786]), 'setosa&2&266': np.array([-0.513193927394545, -0.041997482667908786]), 'setosa&2&267': np.array([-0.06285591932387405, -0.6914253444924359]), 'setosa&2&268': np.array([-0.06285591932387405, -0.6914253444924359]), 'setosa&2&269': np.array([-0.34904320225465857, -0.6233384360811872]), 'setosa&2&270': np.array([-0.8252668830593567, -0.11450866713130638]), 'setosa&2&271': np.array([-0.8211795643076093, -0.1186965077161071]), 'setosa&2&272': np.array([-0.6441664102689847, -0.3012046426099901]), 'setosa&2&273': np.array([-0.7640280271176497, -0.19364537761420375]), 'setosa&2&274': np.array([-0.8735738195653328, -0.046438180466149094]), 'setosa&2&275': np.array([-0.8252668830593567, -0.11450866713130638]), 'setosa&2&276': np.array([-0.8252668830593567, -0.11450866713130638]), 'setosa&2&277': np.array([-0.8252668830593567, -0.11450866713130638]), 'setosa&2&278': np.array([-0.8252668830593567, -0.11450866713130638]), 'setosa&2&279': np.array([-0.8211795643076093, -0.1186965077161071]), 'setosa&2&280': np.array([-0.8211795643076093, -0.1186965077161071]), 'setosa&2&281': np.array([-0.8211795643076093, -0.1186965077161071]), 'setosa&2&282': np.array([-0.6441664102689847, -0.3012046426099901]), 'setosa&2&283': 
np.array([-0.6441664102689847, -0.3012046426099901]), 'setosa&2&284': np.array([-0.7640280271176497, -0.19364537761420375]), 'setosa&2&285': np.array([-0.8252668830593567, -0.11450866713130638]), 'setosa&2&286': np.array([-0.8211795643076093, -0.1186965077161071]), 'setosa&2&287': np.array([-0.6441664102689847, -0.3012046426099901]), 'setosa&2&288': np.array([-0.7640280271176497, -0.19364537761420375]), 'setosa&2&289': np.array([-0.8735738195653328, -0.046438180466149094]), 'setosa&2&290': np.array([-0.8252668830593567, -0.11450866713130638]), 'setosa&2&291': np.array([-0.8252668830593567, -0.11450866713130638]), 'setosa&2&292': np.array([-0.8252668830593567, -0.11450866713130638]), 'setosa&2&293': np.array([-0.8252668830593567, -0.11450866713130638]), 'setosa&2&294': np.array([-0.8211795643076093, -0.1186965077161071]), 'setosa&2&295': np.array([-0.8211795643076093, -0.1186965077161071]), 'setosa&2&296': np.array([-0.8211795643076093, -0.1186965077161071]), 'setosa&2&297': np.array([-0.6441664102689847, -0.3012046426099901]), 'setosa&2&298': np.array([-0.6441664102689847, -0.3012046426099901]), 'setosa&2&299': np.array([-0.7640280271176497, -0.19364537761420375]), 'setosa&2&300': np.array([-0.5227340800279542, -0.42092675740881474]), 'setosa&2&301': np.array([-0.5140708637198534, -0.43053612380573514]), 'setosa&2&302': np.array([-0.2661726847443776, -0.6902916602462779]), 'setosa&2&303': np.array([-0.2741128763380603, -0.7260889090887469]), 'setosa&2&304': np.array([-0.6188410763351541, -0.22803625884668638]), 'setosa&2&305': np.array([-0.5227340800279542, -0.42092675740881474]), 'setosa&2&306': np.array([-0.5227340800279542, -0.42092675740881474]), 'setosa&2&307': np.array([-0.5227340800279542, -0.42092675740881474]), 'setosa&2&308': np.array([-0.5227340800279542, -0.42092675740881474]), 'setosa&2&309': np.array([-0.5140708637198534, -0.43053612380573514]), 'setosa&2&310': np.array([-0.5140708637198534, -0.43053612380573514]), 'setosa&2&311': np.array([-0.5140708637198534, -0.43053612380573514]), 'setosa&2&312': np.array([-0.2661726847443776, -0.6902916602462779]), 'setosa&2&313': np.array([-0.2661726847443776, -0.6902916602462779]), 'setosa&2&314': np.array([-0.2741128763380603, -0.7260889090887469]), 'versicolor&0&0': np.array([-0.7431524521056113, -0.24432235603856345]), 'versicolor&0&1': np.array([-0.4926091071260067, -0.49260910712601286]), 'versicolor&0&2': np.array([-0.9550700362273441, 0.025428672111930138]), 'versicolor&0&3': np.array([-0.9672121512728677, 0.012993005706020341]), 'versicolor&0&4': np.array([-0.9706534384443797, 0.007448195602953232]), 'versicolor&0&5': np.array([-0.4926091071260067, -0.49260910712601286]), 'versicolor&0&6': np.array([-0.967167257194905, -0.011919414234523772]), 'versicolor&0&7': np.array([-0.953200964337313, -0.027163424176667752]), 'versicolor&0&8': np.array([-0.8486399726113752, -0.13537345771621853]), 'versicolor&0&9': np.array([-0.9658161779555727, -0.01446062269877741]), 'versicolor&0&10': np.array([-0.9493506964095418, -0.0312186903717912]), 'versicolor&0&11': np.array([-0.7870031444780577, -0.1952404625292782]), 'versicolor&0&12': np.array([-0.9550700362273441, 0.025428672111930138]), 'versicolor&0&13': np.array([-0.9550700362273441, 0.025428672111930138]), 'versicolor&0&14': np.array([-0.9672121512728677, 0.012993005706020341]), 'versicolor&0&15': np.array([-0.7431524521056113, -0.24432235603856345]), 'versicolor&0&16': np.array([-0.4926091071260067, -0.49260910712601286]), 'versicolor&0&17': np.array([-0.9550700362273441, 
0.025428672111930138]), 'versicolor&0&18': np.array([-0.9672121512728677, 0.012993005706020341]), 'versicolor&0&19': np.array([-0.9706534384443797, 0.007448195602953232]), 'versicolor&0&20': np.array([-0.4926091071260067, -0.49260910712601286]), 'versicolor&0&21': np.array([-0.967167257194905, -0.011919414234523772]), 'versicolor&0&22': np.array([-0.953200964337313, -0.027163424176667752]), 'versicolor&0&23': np.array([-0.8486399726113752, -0.13537345771621853]), 'versicolor&0&24': np.array([-0.9658161779555727, -0.01446062269877741]), 'versicolor&0&25': np.array([-0.9493506964095418, -0.0312186903717912]), 'versicolor&0&26': np.array([-0.7870031444780577, -0.1952404625292782]), 'versicolor&0&27': np.array([-0.9550700362273441, 0.025428672111930138]), 'versicolor&0&28': np.array([-0.9550700362273441, 0.025428672111930138]), 'versicolor&0&29': np.array([-0.9672121512728677, 0.012993005706020341]), 'versicolor&0&30': np.array([-0.19685199412911655, -0.7845879230594393]), 'versicolor&0&31': np.array([-0.07476043598366228, -0.9062715528546994]), 'versicolor&0&32': np.array([-0.7770298852793476, 0.029443430477147536]), 'versicolor&0&33': np.array([-0.7936433456054744, 0.012583752076496493]), 'versicolor&0&34': np.array([-0.7974072911132788, 0.006894018772033604]), 'versicolor&0&35': np.array([-0.07476043598366228, -0.9062715528546994]), 'versicolor&0&36': np.array([-0.7779663027946229, -0.2981599980028888]), 'versicolor&0&37': np.array([-0.6669876551417979, -0.2911996622134135]), 'versicolor&0&38': np.array([-0.3355030348883163, -0.6305271339971502]), 'versicolor&0&39': np.array([-0.7658431164447598, -0.3248317507526541]), 'versicolor&0&40': np.array([-0.6459073168288453, -0.31573292128613833]), 'versicolor&0&41': np.array([-0.2519677855687844, -0.7134447168661863]), 'versicolor&0&42': np.array([-0.7770298852793476, 0.029443430477147536]), 'versicolor&0&43': np.array([-0.7770298852793476, 0.029443430477147536]), 'versicolor&0&44': np.array([-0.7936433456054744, 0.012583752076496493]), 'versicolor&0&45': np.array([0.05031696218434577, -0.929227611211748]), 'versicolor&0&46': np.array([0.017148644765919676, -0.9632117581295891]), 'versicolor&0&47': np.array([0.06151571389390039, 0.524561199322281]), 'versicolor&0&48': np.array([0.4329463382004908, 0.057167210150691136]), 'versicolor&0&49': np.array([0.4656481363306145, 0.007982539480288167]), 'versicolor&0&50': np.array([0.017148644765919676, -0.9632117581295891]), 'versicolor&0&51': np.array([0.6614632074748169, -0.6030419328583525]), 'versicolor&0&52': np.array([0.5519595359123358, -0.6434192906054143]), 'versicolor&0&53': np.array([0.14241819268815753, -0.8424615476000691]), 'versicolor&0&54': np.array([0.667423576348749, -0.6594086777766442]), 'versicolor&0&55': np.array([0.5429872243487625, -0.6697888833280774]), 'versicolor&0&56': np.array([0.1140907502997574, -0.8737800276630269]), 'versicolor&0&57': np.array([0.06151571389390039, 0.524561199322281]), 'versicolor&0&58': np.array([0.06151571389390039, 0.524561199322281]), 'versicolor&0&59': np.array([0.4329463382004908, 0.057167210150691136]), 'versicolor&0&60': np.array([0.029402442458921384, -0.9481684282717414]), 'versicolor&0&61': np.array([0.009887859354111524, -0.9698143912008228]), 'versicolor&0&62': np.array([0.009595083643662688, 0.5643652067423869]), 'versicolor&0&63': np.array([0.13694026920485936, 0.36331091829858003]), 'versicolor&0&64': np.array([0.3094460464703627, 0.11400643817329122]), 'versicolor&0&65': np.array([0.009887859354111524, -0.9698143912008228]), 
'versicolor&0&66': np.array([0.42809266524335826, -0.40375108595117376]), 'versicolor&0&67': np.array([0.45547700380103057, -0.6083463409799501]), 'versicolor&0&68': np.array([0.19002455311770447, -0.8848597943731074]), 'versicolor&0&69': np.array([0.436966114193701, -0.4638042290788281]), 'versicolor&0&70': np.array([0.45424510803217066, -0.6425314361631614]), 'versicolor&0&71': np.array([0.1746467870122951, -0.9073062742839755]), 'versicolor&0&72': np.array([0.009595083643662688, 0.5643652067423869]), 'versicolor&0&73': np.array([0.009595083643662688, 0.5643652067423869]), 'versicolor&0&74': np.array([0.13694026920485936, 0.36331091829858003]), 'versicolor&0&75': np.array([0.0, -0.95124502153736]), 'versicolor&0&76': np.array([0.0, -0.9708703761803881]), 'versicolor&0&77': np.array([0.0, 0.5659706098422994]), 'versicolor&0&78': np.array([0.0, 0.3962828716108186]), 'versicolor&0&79': np.array([0.0, 0.2538069363248767]), 'versicolor&0&80': np.array([0.0, -0.9708703761803881]), 'versicolor&0&81': np.array([0.0, -0.3631376646911367]), 'versicolor&0&82': np.array([0.0, -0.5804857652839247]), 'versicolor&0&83': np.array([0.0, -0.8943993997517804]), 'versicolor&0&84': np.array([0.0, -0.4231275527222919]), 'versicolor&0&85': np.array([0.0, -0.6164235822373675]), 'versicolor&0&86': np.array([0.0, -0.9166476163222441]), 'versicolor&0&87': np.array([0.0, 0.5659706098422994]), 'versicolor&0&88': np.array([0.0, 0.5659706098422994]), 'versicolor&0&89': np.array([0.0, 0.3962828716108186]), 'versicolor&0&90': np.array([-0.7431524521056113, -0.24432235603856345]), 'versicolor&0&91': np.array([-0.4926091071260067, -0.49260910712601286]), 'versicolor&0&92': np.array([-0.9550700362273441, 0.025428672111930138]), 'versicolor&0&93': np.array([-0.9672121512728677, 0.012993005706020341]), 'versicolor&0&94': np.array([-0.9706534384443797, 0.007448195602953232]), 'versicolor&0&95': np.array([-0.4926091071260067, -0.49260910712601286]), 'versicolor&0&96': np.array([-0.967167257194905, -0.011919414234523772]), 'versicolor&0&97': np.array([-0.953200964337313, -0.027163424176667752]), 'versicolor&0&98': np.array([-0.8486399726113752, -0.13537345771621853]), 'versicolor&0&99': np.array([-0.9658161779555727, -0.01446062269877741]), 'versicolor&0&100': np.array([-0.9493506964095418, -0.0312186903717912]), 'versicolor&0&101': np.array([-0.7870031444780577, -0.1952404625292782]), 'versicolor&0&102': np.array([-0.9550700362273441, 0.025428672111930138]), 'versicolor&0&103': np.array([-0.9550700362273441, 0.025428672111930138]), 'versicolor&0&104': np.array([-0.9672121512728677, 0.012993005706020341]), 'versicolor&0&105': np.array([-0.19685199412911655, -0.7845879230594393]), 'versicolor&0&106': np.array([-0.07476043598366228, -0.9062715528546994]), 'versicolor&0&107': np.array([-0.7770298852793476, 0.029443430477147536]), 'versicolor&0&108': np.array([-0.7936433456054744, 0.012583752076496493]), 'versicolor&0&109': np.array([-0.7974072911132788, 0.006894018772033604]), 'versicolor&0&110': np.array([-0.07476043598366228, -0.9062715528546994]), 'versicolor&0&111': np.array([-0.7779663027946229, -0.2981599980028888]), 'versicolor&0&112': np.array([-0.6669876551417979, -0.2911996622134135]), 'versicolor&0&113': np.array([-0.3355030348883163, -0.6305271339971502]), 'versicolor&0&114': np.array([-0.7658431164447598, -0.3248317507526541]), 'versicolor&0&115': np.array([-0.6459073168288453, -0.31573292128613833]), 'versicolor&0&116': np.array([-0.2519677855687844, -0.7134447168661863]), 'versicolor&0&117': 
np.array([-0.7770298852793476, 0.029443430477147536]), 'versicolor&0&118': np.array([-0.7770298852793476, 0.029443430477147536]), 'versicolor&0&119': np.array([-0.7936433456054744, 0.012583752076496493]), 'versicolor&0&120': np.array([-0.05855179950109871, -0.9211684729232403]), 'versicolor&0&121': np.array([-0.020067537725011863, -0.960349531159508]), 'versicolor&0&122': np.array([-0.5775164514598086, 0.6278692602817483]), 'versicolor&0&123': np.array([-0.6813845327458135, 0.6599725404733693]), 'versicolor&0&124': np.array([-0.5182062652425321, 0.3958533237517639]), 'versicolor&0&125': np.array([-0.020067537725011863, -0.960349531159508]), 'versicolor&0&126': np.array([-0.5107107533700952, 0.0075507123577884866]), 'versicolor&0&127': np.array([-0.1464063320531759, -0.4788055402156298]), 'versicolor&0&128': np.array([-0.061109248092233844, -0.8620287767000373]), 'versicolor&0&129': np.array([-0.4706137753079746, -0.057389625790424635]), 'versicolor&0&130': np.array([-0.06804620923037683, -0.5677904519730453]), 'versicolor&0&131': np.array([-0.020216773196675246, -0.9057119888626176]), 'versicolor&0&132': np.array([-0.5775164514598086, 0.6278692602817483]), 'versicolor&0&133': np.array([-0.5775164514598086, 0.6278692602817483]), 'versicolor&0&134': np.array([-0.6813845327458135, 0.6599725404733693]), 'versicolor&0&135': np.array([-0.19684482070614498, -0.7845939961595055]), 'versicolor&0&136': np.array([-0.07475231751447156, -0.9062785678426409]), 'versicolor&0&137': np.array([-0.6782037543706109, 0.2956007367698983]), 'versicolor&0&138': np.array([-0.7694171988675237, 0.276633135028249]), 'versicolor&0&139': np.array([-0.8063011502229427, 0.4134300066735808]), 'versicolor&0&140': np.array([-0.07475231751447156, -0.9062785678426409]), 'versicolor&0&141': np.array([-0.7985789197998611, 0.0026209054759345337]), 'versicolor&0&142': np.array([-0.7182275903095532, -0.11963032135457498]), 'versicolor&0&143': np.array([-0.2798927835773098, -0.6581136857450849]), 'versicolor&0&144': np.array([-0.7920119433269182, -0.0142751249964083]), 'versicolor&0&145': np.array([-0.6943081428778407, -0.14852813120265815]), 'versicolor&0&146': np.array([-0.16106555563262584, -0.777621649099753]), 'versicolor&0&147': np.array([-0.6782037543706109, 0.2956007367698983]), 'versicolor&0&148': np.array([-0.6782037543706109, 0.2956007367698983]), 'versicolor&0&149': np.array([-0.7694171988675237, 0.276633135028249]), 'versicolor&0&150': np.array([-0.7431524521056113, -0.24432235603856345]), 'versicolor&0&151': np.array([-0.4926091071260067, -0.49260910712601286]), 'versicolor&0&152': np.array([-0.9550700362273441, 0.025428672111930138]), 'versicolor&0&153': np.array([-0.9672121512728677, 0.012993005706020341]), 'versicolor&0&154': np.array([-0.9706534384443797, 0.007448195602953232]), 'versicolor&0&155': np.array([-0.4926091071260067, -0.49260910712601286]), 'versicolor&0&156': np.array([-0.967167257194905, -0.011919414234523772]), 'versicolor&0&157': np.array([-0.953200964337313, -0.027163424176667752]), 'versicolor&0&158': np.array([-0.8486399726113752, -0.13537345771621853]), 'versicolor&0&159': np.array([-0.9658161779555727, -0.01446062269877741]), 'versicolor&0&160': np.array([-0.9493506964095418, -0.0312186903717912]), 'versicolor&0&161': np.array([-0.7870031444780577, -0.1952404625292782]), 'versicolor&0&162': np.array([-0.9550700362273441, 0.025428672111930138]), 'versicolor&0&163': np.array([-0.9550700362273441, 0.025428672111930138]), 'versicolor&0&164': np.array([-0.9672121512728677, 0.012993005706020341]), 
'versicolor&0&165': np.array([-0.19685199412911655, -0.7845879230594393]), 'versicolor&0&166': np.array([-0.07476043598366228, -0.9062715528546994]), 'versicolor&0&167': np.array([-0.7770298852793476, 0.029443430477147536]), 'versicolor&0&168': np.array([-0.7936433456054744, 0.012583752076496493]), 'versicolor&0&169': np.array([-0.7974072911132788, 0.006894018772033604]), 'versicolor&0&170': np.array([-0.07476043598366228, -0.9062715528546994]), 'versicolor&0&171': np.array([-0.7779663027946229, -0.2981599980028888]), 'versicolor&0&172': np.array([-0.6669876551417979, -0.2911996622134135]), 'versicolor&0&173': np.array([-0.3355030348883163, -0.6305271339971502]), 'versicolor&0&174': np.array([-0.7658431164447598, -0.3248317507526541]), 'versicolor&0&175': np.array([-0.6459073168288453, -0.31573292128613833]), 'versicolor&0&176': np.array([-0.2519677855687844, -0.7134447168661863]), 'versicolor&0&177': np.array([-0.7770298852793476, 0.029443430477147536]), 'versicolor&0&178': np.array([-0.7770298852793476, 0.029443430477147536]), 'versicolor&0&179': np.array([-0.7936433456054744, 0.012583752076496493]), 'versicolor&0&180': np.array([-0.05855179950109871, -0.9211684729232403]), 'versicolor&0&181': np.array([-0.020067537725011863, -0.960349531159508]), 'versicolor&0&182': np.array([-0.5775164514598086, 0.6278692602817483]), 'versicolor&0&183': np.array([-0.6813845327458135, 0.6599725404733693]), 'versicolor&0&184': np.array([-0.5182062652425321, 0.3958533237517639]), 'versicolor&0&185': np.array([-0.020067537725011863, -0.960349531159508]), 'versicolor&0&186': np.array([-0.5107107533700952, 0.0075507123577884866]), 'versicolor&0&187': np.array([-0.1464063320531759, -0.4788055402156298]), 'versicolor&0&188': np.array([-0.061109248092233844, -0.8620287767000373]), 'versicolor&0&189': np.array([-0.4706137753079746, -0.057389625790424635]), 'versicolor&0&190': np.array([-0.06804620923037683, -0.5677904519730453]), 'versicolor&0&191': np.array([-0.020216773196675246, -0.9057119888626176]), 'versicolor&0&192': np.array([-0.5775164514598086, 0.6278692602817483]), 'versicolor&0&193': np.array([-0.5775164514598086, 0.6278692602817483]), 'versicolor&0&194': np.array([-0.6813845327458135, 0.6599725404733693]), 'versicolor&0&195': np.array([-0.19684482070614498, -0.7845939961595055]), 'versicolor&0&196': np.array([-0.07475231751447156, -0.9062785678426409]), 'versicolor&0&197': np.array([-0.6782037543706109, 0.2956007367698983]), 'versicolor&0&198': np.array([-0.7694171988675237, 0.276633135028249]), 'versicolor&0&199': np.array([-0.8063011502229427, 0.4134300066735808]), 'versicolor&0&200': np.array([-0.07475231751447156, -0.9062785678426409]), 'versicolor&0&201': np.array([-0.7985789197998611, 0.0026209054759345337]), 'versicolor&0&202': np.array([-0.7182275903095532, -0.11963032135457498]), 'versicolor&0&203': np.array([-0.2798927835773098, -0.6581136857450849]), 'versicolor&0&204': np.array([-0.7920119433269182, -0.0142751249964083]), 'versicolor&0&205': np.array([-0.6943081428778407, -0.14852813120265815]), 'versicolor&0&206': np.array([-0.16106555563262584, -0.777621649099753]), 'versicolor&0&207': np.array([-0.6782037543706109, 0.2956007367698983]), 'versicolor&0&208': np.array([-0.6782037543706109, 0.2956007367698983]), 'versicolor&0&209': np.array([-0.7694171988675237, 0.276633135028249]), 'versicolor&0&210': np.array([-0.7431524521056113, -0.24432235603856345]), 'versicolor&0&211': np.array([-0.4926091071260067, -0.49260910712601286]), 'versicolor&0&212': np.array([-0.9550700362273441, 
0.025428672111930138]), 'versicolor&0&213': np.array([-0.9672121512728677, 0.012993005706020341]), 'versicolor&0&214': np.array([-0.9706534384443797, 0.007448195602953232]), 'versicolor&0&215': np.array([-0.4926091071260067, -0.49260910712601286]), 'versicolor&0&216': np.array([-0.967167257194905, -0.011919414234523772]), 'versicolor&0&217': np.array([-0.953200964337313, -0.027163424176667752]), 'versicolor&0&218': np.array([-0.8486399726113752, -0.13537345771621853]), 'versicolor&0&219': np.array([-0.9658161779555727, -0.01446062269877741]), 'versicolor&0&220': np.array([-0.9493506964095418, -0.0312186903717912]), 'versicolor&0&221': np.array([-0.7870031444780577, -0.1952404625292782]), 'versicolor&0&222': np.array([-0.9550700362273441, 0.025428672111930138]), 'versicolor&0&223': np.array([-0.9550700362273441, 0.025428672111930138]), 'versicolor&0&224': np.array([-0.9672121512728677, 0.012993005706020341]), 'versicolor&0&225': np.array([-0.04777085826693217, -0.931704979630315]), 'versicolor&0&226': np.array([-0.016252316132452975, -0.9640854286687816]), 'versicolor&0&227': np.array([-0.44101924439572626, 0.5583264842761904]), 'versicolor&0&228': np.array([-0.5844994389588399, 0.5715208832363579]), 'versicolor&0&229': np.array([-0.46216647196120714, 0.35468591243823655]), 'versicolor&0&230': np.array([-0.016252316132452975, -0.9640854286687816]), 'versicolor&0&231': np.array([-0.3707180757031537, -0.1977196581472426]), 'versicolor&0&232': np.array([-0.1043459833293615, -0.5233314327065356]), 'versicolor&0&233': np.array([-0.049289647556763364, -0.8736084405111605]), 'versicolor&0&234': np.array([-0.34078174031874375, -0.25874482325965437]), 'versicolor&0&235': np.array([-0.050841051273783675, -0.5877587283589205]), 'versicolor&0&236': np.array([-0.0161720977425142, -0.9096817855236822]), 'versicolor&0&237': np.array([-0.44101924439572626, 0.5583264842761904]), 'versicolor&0&238': np.array([-0.44101924439572626, 0.5583264842761904]), 'versicolor&0&239': np.array([-0.5844994389588399, 0.5715208832363579]), 'versicolor&0&240': np.array([-0.11329659732608087, -0.8671819100849522]), 'versicolor&0&241': np.array([-0.040390637135858574, -0.9402832917474078]), 'versicolor&0&242': np.array([-0.5276460255602035, 0.28992233541586077]), 'versicolor&0&243': np.array([-0.6392402874163683, 0.24114611970435948]), 'versicolor&0&244': np.array([-0.6814868825686854, 0.35066801608083215]), 'versicolor&0&245': np.array([-0.040390637135858574, -0.9402832917474078]), 'versicolor&0&246': np.array([-0.6425009695928476, -0.24851992476830956]), 'versicolor&0&247': np.array([-0.5151243662384031, -0.3255567772442641]), 'versicolor&0&248': np.array([-0.16157511199607094, -0.7754323813403634]), 'versicolor&0&249': np.array([-0.6300442788906601, -0.28361140069713875]), 'versicolor&0&250': np.array([-0.4875864856121089, -0.3614122096616301]), 'versicolor&0&251': np.array([-0.08968204532514226, -0.8491191210330045]), 'versicolor&0&252': np.array([-0.5276460255602035, 0.28992233541586077]), 'versicolor&0&253': np.array([-0.5276460255602035, 0.28992233541586077]), 'versicolor&0&254': np.array([-0.6392402874163683, 0.24114611970435948]), 'versicolor&0&255': np.array([-0.19685199412911655, -0.7845879230594393]), 'versicolor&0&256': np.array([-0.07476043598366228, -0.9062715528546994]), 'versicolor&0&257': np.array([-0.7770298852793476, 0.029443430477147536]), 'versicolor&0&258': np.array([-0.7936433456054744, 0.012583752076496493]), 'versicolor&0&259': np.array([-0.7974072911132788, 0.006894018772033604]), 'versicolor&0&260': 
np.array([-0.07476043598366228, -0.9062715528546994]), 'versicolor&0&261': np.array([-0.7779663027946229, -0.2981599980028888]), 'versicolor&0&262': np.array([-0.6669876551417979, -0.2911996622134135]), 'versicolor&0&263': np.array([-0.3355030348883163, -0.6305271339971502]), 'versicolor&0&264': np.array([-0.7658431164447598, -0.3248317507526541]), 'versicolor&0&265': np.array([-0.6459073168288453, -0.31573292128613833]), 'versicolor&0&266': np.array([-0.2519677855687844, -0.7134447168661863]), 'versicolor&0&267': np.array([-0.7770298852793476, 0.029443430477147536]), 'versicolor&0&268': np.array([-0.7770298852793476, 0.029443430477147536]), 'versicolor&0&269': np.array([-0.7936433456054744, 0.012583752076496493]), 'versicolor&0&270': np.array([0.05031696218434577, -0.929227611211748]), 'versicolor&0&271': np.array([0.017148644765919676, -0.9632117581295891]), 'versicolor&0&272': np.array([0.06151571389390039, 0.524561199322281]), 'versicolor&0&273': np.array([0.4329463382004908, 0.057167210150691136]), 'versicolor&0&274': np.array([0.4656481363306145, 0.007982539480288167]), 'versicolor&0&275': np.array([0.017148644765919676, -0.9632117581295891]), 'versicolor&0&276': np.array([0.6614632074748169, -0.6030419328583525]), 'versicolor&0&277': np.array([0.5519595359123358, -0.6434192906054143]), 'versicolor&0&278': np.array([0.14241819268815753, -0.8424615476000691]), 'versicolor&0&279': np.array([0.667423576348749, -0.6594086777766442]), 'versicolor&0&280': np.array([0.5429872243487625, -0.6697888833280774]), 'versicolor&0&281': np.array([0.1140907502997574, -0.8737800276630269]), 'versicolor&0&282': np.array([0.06151571389390039, 0.524561199322281]), 'versicolor&0&283': np.array([0.06151571389390039, 0.524561199322281]), 'versicolor&0&284': np.array([0.4329463382004908, 0.057167210150691136]), 'versicolor&0&285': np.array([0.05031696218434577, -0.929227611211748]), 'versicolor&0&286': np.array([0.017148644765919676, -0.9632117581295891]), 'versicolor&0&287': np.array([0.06151571389390039, 0.524561199322281]), 'versicolor&0&288': np.array([0.4329463382004908, 0.057167210150691136]), 'versicolor&0&289': np.array([0.4656481363306145, 0.007982539480288167]), 'versicolor&0&290': np.array([0.017148644765919676, -0.9632117581295891]), 'versicolor&0&291': np.array([0.6614632074748169, -0.6030419328583525]), 'versicolor&0&292': np.array([0.5519595359123358, -0.6434192906054143]), 'versicolor&0&293': np.array([0.14241819268815753, -0.8424615476000691]), 'versicolor&0&294': np.array([0.667423576348749, -0.6594086777766442]), 'versicolor&0&295': np.array([0.5429872243487625, -0.6697888833280774]), 'versicolor&0&296': np.array([0.1140907502997574, -0.8737800276630269]), 'versicolor&0&297': np.array([0.06151571389390039, 0.524561199322281]), 'versicolor&0&298': np.array([0.06151571389390039, 0.524561199322281]), 'versicolor&0&299': np.array([0.4329463382004908, 0.057167210150691136]), 'versicolor&0&300': np.array([0.029402442458921384, -0.9481684282717414]), 'versicolor&0&301': np.array([0.009887859354111524, -0.9698143912008228]), 'versicolor&0&302': np.array([0.009595083643662688, 0.5643652067423869]), 'versicolor&0&303': np.array([0.13694026920485936, 0.36331091829858003]), 'versicolor&0&304': np.array([0.3094460464703627, 0.11400643817329122]), 'versicolor&0&305': np.array([0.009887859354111524, -0.9698143912008228]), 'versicolor&0&306': np.array([0.42809266524335826, -0.40375108595117376]), 'versicolor&0&307': np.array([0.45547700380103057, -0.6083463409799501]), 'versicolor&0&308': 
np.array([0.19002455311770447, -0.8848597943731074]), 'versicolor&0&309': np.array([0.436966114193701, -0.4638042290788281]), 'versicolor&0&310': np.array([0.45424510803217066, -0.6425314361631614]), 'versicolor&0&311': np.array([0.1746467870122951, -0.9073062742839755]), 'versicolor&0&312': np.array([0.009595083643662688, 0.5643652067423869]), 'versicolor&0&313': np.array([0.009595083643662688, 0.5643652067423869]), 'versicolor&0&314': np.array([0.13694026920485936, 0.36331091829858003]), 'versicolor&1&0': np.array([0.37157553889555184, 0.1221600832023858]), 'versicolor&1&1': np.array([0.2463036871609408, 0.24630368716093934]), 'versicolor&1&2': np.array([0.9105775730167809, 0.6842162738602727]), 'versicolor&1&3': np.array([0.6718337295341267, 0.6620422637360075]), 'versicolor&1&4': np.array([0.4964962439921071, 0.3798215458387346]), 'versicolor&1&5': np.array([0.2463036871609408, 0.24630368716093934]), 'versicolor&1&6': np.array([0.2805345936193346, 0.6595182922149835]), 'versicolor&1&7': np.array([0.08302493125394889, 0.6186280682763334]), 'versicolor&1&8': np.array([0.22125635302655813, 0.2925832702358638]), 'versicolor&1&9': np.array([0.2365788606456636, 0.7120007179768731]), 'versicolor&1&10': np.array([0.022347126801293967, 0.6718013300441928]), 'versicolor&1&11': np.array([0.10063786451829529, 0.4085974066833644]), 'versicolor&1&12': np.array([0.9105775730167809, 0.6842162738602727]), 'versicolor&1&13': np.array([0.9105775730167809, 0.6842162738602727]), 'versicolor&1&14': np.array([0.6718337295341267, 0.6620422637360075]), 'versicolor&1&15': np.array([0.37157553889555184, 0.1221600832023858]), 'versicolor&1&16': np.array([0.2463036871609408, 0.24630368716093934]), 'versicolor&1&17': np.array([0.9105775730167809, 0.6842162738602727]), 'versicolor&1&18': np.array([0.6718337295341267, 0.6620422637360075]), 'versicolor&1&19': np.array([0.4964962439921071, 0.3798215458387346]), 'versicolor&1&20': np.array([0.2463036871609408, 0.24630368716093934]), 'versicolor&1&21': np.array([0.2805345936193346, 0.6595182922149835]), 'versicolor&1&22': np.array([0.08302493125394889, 0.6186280682763334]), 'versicolor&1&23': np.array([0.22125635302655813, 0.2925832702358638]), 'versicolor&1&24': np.array([0.2365788606456636, 0.7120007179768731]), 'versicolor&1&25': np.array([0.022347126801293967, 0.6718013300441928]), 'versicolor&1&26': np.array([0.10063786451829529, 0.4085974066833644]), 'versicolor&1&27': np.array([0.9105775730167809, 0.6842162738602727]), 'versicolor&1&28': np.array([0.9105775730167809, 0.6842162738602727]), 'versicolor&1&29': np.array([0.6718337295341267, 0.6620422637360075]), 'versicolor&1&30': np.array([-0.32199975656257646, 0.7482293552463756]), 'versicolor&1&31': np.array([-0.43843349141088417, 0.8642740701867917]), 'versicolor&1&32': np.array([0.7141739659554727, 0.6619819140152878]), 'versicolor&1&33': np.array([0.44460014335081516, 0.6107546840046902]), 'versicolor&1&34': np.array([0.2619265016777598, 0.33491141590339474]), 'versicolor&1&35': np.array([-0.43843349141088417, 0.8642740701867917]), 'versicolor&1&36': np.array([0.20183015430619713, 0.7445346002055082]), 'versicolor&1&37': np.array([-0.05987874887638573, 0.6927937290176818]), 'versicolor&1&38': np.array([-0.2562642052727569, 0.6920266972283227]), 'versicolor&1&39': np.array([0.1736438124560164, 0.7898174616442941]), 'versicolor&1&40': np.array([-0.10114089899940126, 0.7326610366533243]), 'versicolor&1&41': np.array([-0.34479806250338163, 0.7789143553916729]), 'versicolor&1&42': np.array([0.7141739659554727, 
0.6619819140152878]), 'versicolor&1&43': np.array([0.7141739659554727, 0.6619819140152878]), 'versicolor&1&44': np.array([0.44460014335081516, 0.6107546840046902]), 'versicolor&1&45': np.array([0.7749499208750119, 0.8147189440804429]), 'versicolor&1&46': np.array([0.8040309195416899, 0.8445152504134819]), 'versicolor&1&47': np.array([0.5826506963750848, -0.22335655671229107]), 'versicolor&1&48': np.array([0.33108168891715983, 0.13647816746351163]), 'versicolor&1&49': np.array([0.4079256832347186, 0.038455640985860955]), 'versicolor&1&50': np.array([0.8040309195416899, 0.8445152504134819]), 'versicolor&1&51': np.array([0.18555813792691386, 0.6940923833143309]), 'versicolor&1&52': np.array([0.32639262064172164, 0.6296083447134281]), 'versicolor&1&53': np.array([0.6964303997553315, 0.7444536452136676]), 'versicolor&1&54': np.array([0.18216358701833335, 0.747615101407194]), 'versicolor&1&55': np.array([0.33549445287370383, 0.6526039763053625]), 'versicolor&1&56': np.array([0.7213651642695392, 0.7718874443854203]), 'versicolor&1&57': np.array([0.5826506963750848, -0.22335655671229107]), 'versicolor&1&58': np.array([0.5826506963750848, -0.22335655671229107]), 'versicolor&1&59': np.array([0.33108168891715983, 0.13647816746351163]), 'versicolor&1&60': np.array([0.4933316375690332, 0.5272416708629276]), 'versicolor&1&61': np.array([0.5041830043657418, 0.5392782673950876]), 'versicolor&1&62': np.array([0.25657760110071476, 0.12592645350389123]), 'versicolor&1&63': np.array([0.13717260713320106, 0.3627779907901665]), 'versicolor&1&64': np.array([0.3093950298647913, 0.1140298206733954]), 'versicolor&1&65': np.array([0.5041830043657418, 0.5392782673950876]), 'versicolor&1&66': np.array([0.1413116283690917, 0.7479856297394165]), 'versicolor&1&67': np.array([0.189773257421942, 0.6552150653012478]), 'versicolor&1&68': np.array([0.40694846236352233, 0.5109051764198169]), 'versicolor&1&69': np.array([0.1390424906594644, 0.7991613016301518]), 'versicolor&1&70': np.array([0.1945777487290197, 0.6743932844312892]), 'versicolor&1&71': np.array([0.415695226122737, 0.5230815102377903]), 'versicolor&1&72': np.array([0.25657760110071476, 0.12592645350389123]), 'versicolor&1&73': np.array([0.25657760110071476, 0.12592645350389123]), 'versicolor&1&74': np.array([0.13717260713320106, 0.3627779907901665]), 'versicolor&1&75': np.array([0.0, 0.4756207622944677]), 'versicolor&1&76': np.array([0.0, 0.4854334805210761]), 'versicolor&1&77': np.array([0.0, 0.16885577975809635]), 'versicolor&1&78': np.array([0.0, 0.395805885538554]), 'versicolor&1&79': np.array([0.0, 0.2538072707138344]), 'versicolor&1&80': np.array([0.0, 0.4854334805210761]), 'versicolor&1&81': np.array([0.0, 0.7613919530844643]), 'versicolor&1&82': np.array([0.0, 0.6668230985485095]), 'versicolor&1&83': np.array([0.0, 0.4904755652105692]), 'versicolor&1&84': np.array([0.0, 0.8121046082359693]), 'versicolor&1&85': np.array([0.0, 0.6855766903749089]), 'versicolor&1&86': np.array([0.0, 0.5008471974438506]), 'versicolor&1&87': np.array([0.0, 0.16885577975809635]), 'versicolor&1&88': np.array([0.0, 0.16885577975809635]), 'versicolor&1&89': np.array([0.0, 0.395805885538554]), 'versicolor&1&90': np.array([0.37157553889555184, 0.1221600832023858]), 'versicolor&1&91': np.array([0.2463036871609408, 0.24630368716093934]), 'versicolor&1&92': np.array([0.9105775730167809, 0.6842162738602727]), 'versicolor&1&93': np.array([0.6718337295341267, 0.6620422637360075]), 'versicolor&1&94': np.array([0.4964962439921071, 0.3798215458387346]), 'versicolor&1&95': 
np.array([0.2463036871609408, 0.24630368716093934]), 'versicolor&1&96': np.array([0.2805345936193346, 0.6595182922149835]), 'versicolor&1&97': np.array([0.08302493125394889, 0.6186280682763334]), 'versicolor&1&98': np.array([0.22125635302655813, 0.2925832702358638]), 'versicolor&1&99': np.array([0.2365788606456636, 0.7120007179768731]), 'versicolor&1&100': np.array([0.022347126801293967, 0.6718013300441928]), 'versicolor&1&101': np.array([0.10063786451829529, 0.4085974066833644]), 'versicolor&1&102': np.array([0.9105775730167809, 0.6842162738602727]), 'versicolor&1&103': np.array([0.9105775730167809, 0.6842162738602727]), 'versicolor&1&104': np.array([0.6718337295341267, 0.6620422637360075]), 'versicolor&1&105': np.array([-0.32199975656257646, 0.7482293552463756]), 'versicolor&1&106': np.array([-0.43843349141088417, 0.8642740701867917]), 'versicolor&1&107': np.array([0.7141739659554727, 0.6619819140152878]), 'versicolor&1&108': np.array([0.44460014335081516, 0.6107546840046902]), 'versicolor&1&109': np.array([0.2619265016777598, 0.33491141590339474]), 'versicolor&1&110': np.array([-0.43843349141088417, 0.8642740701867917]), 'versicolor&1&111': np.array([0.20183015430619713, 0.7445346002055082]), 'versicolor&1&112': np.array([-0.05987874887638573, 0.6927937290176818]), 'versicolor&1&113': np.array([-0.2562642052727569, 0.6920266972283227]), 'versicolor&1&114': np.array([0.1736438124560164, 0.7898174616442941]), 'versicolor&1&115': np.array([-0.10114089899940126, 0.7326610366533243]), 'versicolor&1&116': np.array([-0.34479806250338163, 0.7789143553916729]), 'versicolor&1&117': np.array([0.7141739659554727, 0.6619819140152878]), 'versicolor&1&118': np.array([0.7141739659554727, 0.6619819140152878]), 'versicolor&1&119': np.array([0.44460014335081516, 0.6107546840046902]), 'versicolor&1&120': np.array([0.8224435822504677, 0.05315271528828394]), 'versicolor&1&121': np.array([0.820222886307464, 0.055413714884152906]), 'versicolor&1&122': np.array([0.8393089066702096, 0.0788980157959197]), 'versicolor&1&123': np.array([0.8282924295054531, 0.0752641855714259]), 'versicolor&1&124': np.array([0.8476206690613984, 0.02146454924522743]), 'versicolor&1&125': np.array([0.820222886307464, 0.055413714884152906]), 'versicolor&1&126': np.array([0.69362517791403, 0.2579390890424607]), 'versicolor&1&127': np.array([0.7261791877801502, 0.16248655642013624]), 'versicolor&1&128': np.array([0.8190416077589757, 0.05661509439536992]), 'versicolor&1&129': np.array([0.6654762076749751, 0.2949291633432878]), 'versicolor&1&130': np.array([0.7118161070185614, 0.17683644094125878]), 'versicolor&1&131': np.array([0.8165214253946836, 0.059175619390630096]), 'versicolor&1&132': np.array([0.8393089066702096, 0.0788980157959197]), 'versicolor&1&133': np.array([0.8393089066702096, 0.0788980157959197]), 'versicolor&1&134': np.array([0.8282924295054531, 0.0752641855714259]), 'versicolor&1&135': np.array([0.5188109114552927, 0.03638964581864269]), 'versicolor&1&136': np.array([0.5131478569192371, 0.04203387599862816]), 'versicolor&1&137': np.array([0.73294627367007, 0.4610490766898855]), 'versicolor&1&138': np.array([0.5965042032375719, 0.48856644624972617]), 'versicolor&1&139': np.array([0.5436097000280874, 0.1461891067488832]), 'versicolor&1&140': np.array([0.5131478569192371, 0.04203387599862816]), 'versicolor&1&141': np.array([0.32513442685780247, 0.6124765483184536]), 'versicolor&1&142': np.array([0.1812883360919208, 0.5504982486874137]), 'versicolor&1&143': np.array([0.4788153032824012, 0.08625929936974323]), 
'versicolor&1&144': np.array([0.28490718210609345, 0.6650298146522879]), 'versicolor&1&145': np.array([0.1313204067730033, 0.597079642504441]), 'versicolor&1&146': np.array([0.46583127837967303, 0.09875847161509169]), 'versicolor&1&147': np.array([0.73294627367007, 0.4610490766898855]), 'versicolor&1&148': np.array([0.73294627367007, 0.4610490766898855]), 'versicolor&1&149': np.array([0.5965042032375719, 0.48856644624972617]), 'versicolor&1&150': np.array([0.37157553889555184, 0.1221600832023858]), 'versicolor&1&151': np.array([0.2463036871609408, 0.24630368716093934]), 'versicolor&1&152': np.array([0.9105775730167809, 0.6842162738602727]), 'versicolor&1&153': np.array([0.6718337295341267, 0.6620422637360075]), 'versicolor&1&154': np.array([0.4964962439921071, 0.3798215458387346]), 'versicolor&1&155': np.array([0.2463036871609408, 0.24630368716093934]), 'versicolor&1&156': np.array([0.2805345936193346, 0.6595182922149835]), 'versicolor&1&157': np.array([0.08302493125394889, 0.6186280682763334]), 'versicolor&1&158': np.array([0.22125635302655813, 0.2925832702358638]), 'versicolor&1&159': np.array([0.2365788606456636, 0.7120007179768731]), 'versicolor&1&160': np.array([0.022347126801293967, 0.6718013300441928]), 'versicolor&1&161': np.array([0.10063786451829529, 0.4085974066833644]), 'versicolor&1&162': np.array([0.9105775730167809, 0.6842162738602727]), 'versicolor&1&163': np.array([0.9105775730167809, 0.6842162738602727]), 'versicolor&1&164': np.array([0.6718337295341267, 0.6620422637360075]), 'versicolor&1&165': np.array([-0.32199975656257646, 0.7482293552463756]), 'versicolor&1&166': np.array([-0.43843349141088417, 0.8642740701867917]), 'versicolor&1&167': np.array([0.7141739659554727, 0.6619819140152878]), 'versicolor&1&168': np.array([0.44460014335081516, 0.6107546840046902]), 'versicolor&1&169': np.array([0.2619265016777598, 0.33491141590339474]), 'versicolor&1&170': np.array([-0.43843349141088417, 0.8642740701867917]), 'versicolor&1&171': np.array([0.20183015430619713, 0.7445346002055082]), 'versicolor&1&172': np.array([-0.05987874887638573, 0.6927937290176818]), 'versicolor&1&173': np.array([-0.2562642052727569, 0.6920266972283227]), 'versicolor&1&174': np.array([0.1736438124560164, 0.7898174616442941]), 'versicolor&1&175': np.array([-0.10114089899940126, 0.7326610366533243]), 'versicolor&1&176': np.array([-0.34479806250338163, 0.7789143553916729]), 'versicolor&1&177': np.array([0.7141739659554727, 0.6619819140152878]), 'versicolor&1&178': np.array([0.7141739659554727, 0.6619819140152878]), 'versicolor&1&179': np.array([0.44460014335081516, 0.6107546840046902]), 'versicolor&1&180': np.array([0.8224435822504677, 0.05315271528828394]), 'versicolor&1&181': np.array([0.820222886307464, 0.055413714884152906]), 'versicolor&1&182': np.array([0.8393089066702096, 0.0788980157959197]), 'versicolor&1&183': np.array([0.8282924295054531, 0.0752641855714259]), 'versicolor&1&184': np.array([0.8476206690613984, 0.02146454924522743]), 'versicolor&1&185': np.array([0.820222886307464, 0.055413714884152906]), 'versicolor&1&186': np.array([0.69362517791403, 0.2579390890424607]), 'versicolor&1&187': np.array([0.7261791877801502, 0.16248655642013624]), 'versicolor&1&188': np.array([0.8190416077589757, 0.05661509439536992]), 'versicolor&1&189': np.array([0.6654762076749751, 0.2949291633432878]), 'versicolor&1&190': np.array([0.7118161070185614, 0.17683644094125878]), 'versicolor&1&191': np.array([0.8165214253946836, 0.059175619390630096]), 'versicolor&1&192': np.array([0.8393089066702096, 
0.0788980157959197]), 'versicolor&1&193': np.array([0.8393089066702096, 0.0788980157959197]), 'versicolor&1&194': np.array([0.8282924295054531, 0.0752641855714259]), 'versicolor&1&195': np.array([0.5188109114552927, 0.03638964581864269]), 'versicolor&1&196': np.array([0.5131478569192371, 0.04203387599862816]), 'versicolor&1&197': np.array([0.73294627367007, 0.4610490766898855]), 'versicolor&1&198': np.array([0.5965042032375719, 0.48856644624972617]), 'versicolor&1&199': np.array([0.5436097000280874, 0.1461891067488832]), 'versicolor&1&200': np.array([0.5131478569192371, 0.04203387599862816]), 'versicolor&1&201': np.array([0.32513442685780247, 0.6124765483184536]), 'versicolor&1&202': np.array([0.1812883360919208, 0.5504982486874137]), 'versicolor&1&203': np.array([0.4788153032824012, 0.08625929936974323]), 'versicolor&1&204': np.array([0.28490718210609345, 0.6650298146522879]), 'versicolor&1&205': np.array([0.1313204067730033, 0.597079642504441]), 'versicolor&1&206': np.array([0.46583127837967303, 0.09875847161509169]), 'versicolor&1&207': np.array([0.73294627367007, 0.4610490766898855]), 'versicolor&1&208': np.array([0.73294627367007, 0.4610490766898855]), 'versicolor&1&209': np.array([0.5965042032375719, 0.48856644624972617]), 'versicolor&1&210': np.array([0.37157553889555184, 0.1221600832023858]), 'versicolor&1&211': np.array([0.2463036871609408, 0.24630368716093934]), 'versicolor&1&212': np.array([0.9105775730167809, 0.6842162738602727]), 'versicolor&1&213': np.array([0.6718337295341267, 0.6620422637360075]), 'versicolor&1&214': np.array([0.4964962439921071, 0.3798215458387346]), 'versicolor&1&215': np.array([0.2463036871609408, 0.24630368716093934]), 'versicolor&1&216': np.array([0.2805345936193346, 0.6595182922149835]), 'versicolor&1&217': np.array([0.08302493125394889, 0.6186280682763334]), 'versicolor&1&218': np.array([0.22125635302655813, 0.2925832702358638]), 'versicolor&1&219': np.array([0.2365788606456636, 0.7120007179768731]), 'versicolor&1&220': np.array([0.022347126801293967, 0.6718013300441928]), 'versicolor&1&221': np.array([0.10063786451829529, 0.4085974066833644]), 'versicolor&1&222': np.array([0.9105775730167809, 0.6842162738602727]), 'versicolor&1&223': np.array([0.9105775730167809, 0.6842162738602727]), 'versicolor&1&224': np.array([0.6718337295341267, 0.6620422637360075]), 'versicolor&1&225': np.array([0.6253337666017573, 0.21983620140147825]), 'versicolor&1&226': np.array([0.6178968870349187, 0.22747652768125623]), 'versicolor&1&227': np.array([0.7245803616608639, 0.18141483095066183]), 'versicolor&1&228': np.array([0.6762617119303499, 0.19305674697949574]), 'versicolor&1&229': np.array([0.7182033715159247, 0.0970420677941148]), 'versicolor&1&230': np.array([0.6178968870349187, 0.22747652768125623]), 'versicolor&1&231': np.array([0.4976586558055923, 0.5393318265947251]), 'versicolor&1&232': np.array([0.4361093214026388, 0.4279491486345008]), 'versicolor&1&233': np.array([0.613985959011319, 0.23148898930908424]), 'versicolor&1&234': np.array([0.46747697713468217, 0.586607956360002]), 'versicolor&1&235': np.array([0.41044950174869577, 0.45415985894965977]), 'versicolor&1&236': np.array([0.6057447478066579, 0.23993389556303918]), 'versicolor&1&237': np.array([0.7245803616608639, 0.18141483095066183]), 'versicolor&1&238': np.array([0.7245803616608639, 0.18141483095066183]), 'versicolor&1&239': np.array([0.6762617119303499, 0.19305674697949574]), 'versicolor&1&240': np.array([0.056623968925773045, 0.43360725859686644]), 'versicolor&1&241': np.array([0.020169511418752378, 
0.47015948158260334]), 'versicolor&1&242': np.array([0.5806365328450954, 0.47262706807712623]), 'versicolor&1&243': np.array([0.4146290154471569, 0.4964318942067898]), 'versicolor&1&244': np.array([0.3351719071445682, 0.20616862401308342]), 'versicolor&1&245': np.array([0.020169511418752378, 0.47015948158260334]), 'versicolor&1&246': np.array([0.24022705822940116, 0.7185371033867092]), 'versicolor&1&247': np.array([0.010447231513465048, 0.6616528865917504]), 'versicolor&1&248': np.array([0.024556360933646205, 0.4723948285969902]), 'versicolor&1&249': np.array([0.21321406009810842, 0.7648907754638917]), 'versicolor&1&250': np.array([-0.027450681014480036, 0.6999336015080245]), 'versicolor&1&251': np.array([-0.0164329511444131, 0.5132208276383963]), 'versicolor&1&252': np.array([0.5806365328450954, 0.47262706807712623]), 'versicolor&1&253': np.array([0.5806365328450954, 0.47262706807712623]), 'versicolor&1&254': np.array([0.4146290154471569, 0.4964318942067898]), 'versicolor&1&255': np.array([-0.32199975656257646, 0.7482293552463756]), 'versicolor&1&256': np.array([-0.43843349141088417, 0.8642740701867917]), 'versicolor&1&257': np.array([0.7141739659554727, 0.6619819140152878]), 'versicolor&1&258': np.array([0.44460014335081516, 0.6107546840046902]), 'versicolor&1&259': np.array([0.2619265016777598, 0.33491141590339474]), 'versicolor&1&260': np.array([-0.43843349141088417, 0.8642740701867917]), 'versicolor&1&261': np.array([0.20183015430619713, 0.7445346002055082]), 'versicolor&1&262': np.array([-0.05987874887638573, 0.6927937290176818]), 'versicolor&1&263': np.array([-0.2562642052727569, 0.6920266972283227]), 'versicolor&1&264': np.array([0.1736438124560164, 0.7898174616442941]), 'versicolor&1&265': np.array([-0.10114089899940126, 0.7326610366533243]), 'versicolor&1&266': np.array([-0.34479806250338163, 0.7789143553916729]), 'versicolor&1&267': np.array([0.7141739659554727, 0.6619819140152878]), 'versicolor&1&268': np.array([0.7141739659554727, 0.6619819140152878]), 'versicolor&1&269': np.array([0.44460014335081516, 0.6107546840046902]), 'versicolor&1&270': np.array([0.7749499208750119, 0.8147189440804429]), 'versicolor&1&271': np.array([0.8040309195416899, 0.8445152504134819]), 'versicolor&1&272': np.array([0.5826506963750848, -0.22335655671229107]), 'versicolor&1&273': np.array([0.33108168891715983, 0.13647816746351163]), 'versicolor&1&274': np.array([0.4079256832347186, 0.038455640985860955]), 'versicolor&1&275': np.array([0.8040309195416899, 0.8445152504134819]), 'versicolor&1&276': np.array([0.18555813792691386, 0.6940923833143309]), 'versicolor&1&277': np.array([0.32639262064172164, 0.6296083447134281]), 'versicolor&1&278': np.array([0.6964303997553315, 0.7444536452136676]), 'versicolor&1&279': np.array([0.18216358701833335, 0.747615101407194]), 'versicolor&1&280': np.array([0.33549445287370383, 0.6526039763053625]), 'versicolor&1&281': np.array([0.7213651642695392, 0.7718874443854203]), 'versicolor&1&282':
np.array([0.5826506963750848, -0.22335655671229107])
numpy.array
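# A hedged reading of the fixture dict above (assumption: keys look like
# 'species&label&index' and values are 2-D attribution vectors). A small
# lookup sketch, with a made-up single-entry dict, showing how such a
# fixture could be stacked into an array for downstream comparison:
import numpy as np

fixture = {'setosa&2&135': np.array([-0.3715769132100501, -0.12216227283618744])}
keys = sorted(k for k in fixture if k.startswith('setosa&2&'))
attributions = np.stack([fixture[k] for k in keys])
print(attributions.shape)  # (n_entries, 2)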
import wandb
import argparse
import exmol
import torch as th
import numpy as np
import yaml
import os
import matplotlib.pyplot as plt
import glob
import json
import selfies as sf
import tqdm
import pandas as pd
from sklearn.cluster import DBSCAN
from sklearn.decomposition import PCA
from dataclasses import dataclass, asdict
import seaborn as sns

sns.set()

from train import load_data, get_loss_criteria, run_an_eval_epoch, to_device
from models import get_model
from data_loader import get_explain_dataset
from plot_utils import fig_to_data
from grover_feats import convert_smiles_to_fp
from data_loader import make_timestamp


@dataclass
class GeneticMolecule(exmol.Example):
    """Example of a molecule"""

    distance: float = 1
    #: Output of model function
    generation: int = 0
    #: Raw data prediction
    y: float = None
    #: True if base
    is_origin: bool = False
    #: Genetic score
    genetic_score: float = np.inf
    #: Label for this example
    label: str = None
    #: True if this example has been crossed over
    crossed: bool = False

    # to make it look nicer
    def __str__(self):
        return str(asdict(self))


def str2bool(v):
    return v.lower() in ('yes', 'true', 't', 'y', '1')


def load_dicts(new_run_dp, template_d, args):
    with open(os.path.join(new_run_dp, "config.yaml"), "r") as config_file:
        config_d = yaml.load(config_file, Loader=yaml.FullLoader)
    data_d_keys = template_d["data"].keys()
    model_d_keys = template_d["model"].keys()
    run_d_keys = template_d["run"].keys()
    data_d, model_d, run_d = {}, {}, {}
    # sort the flat wandb config entries into data/model/run dicts
    for k, v in config_d.items():
        if k in data_d_keys:
            data_d[k] = v["value"]
        elif k in model_d_keys:
            model_d[k] = v["value"]
        elif k in run_d_keys:
            run_d[k] = v["value"]
    # data_d["primary_dset"] = args.primary_dset
    # data_d["secondary_dset"] = args.secondary_dset
    run_d["do_test"] = False
    run_d["do_matching"] = False
    run_d["batch_size"] = 512
    return data_d, model_d, run_d


def get_samples(target_smiles, preset, num_samples):
    # set up STONED sampling around the target SMILES
    stoned_kw = {"num_samples": num_samples}
    if preset == "medium":
        stoned_kw["max_mutations"] = 2
        stoned_kw["alphabet"] = exmol.get_basic_alphabet()
    elif preset == "narrow":
        stoned_kw["max_mutations"] = 1
        stoned_kw["alphabet"] = exmol.get_basic_alphabet()
    elif preset == "wide":
        stoned_kw["max_mutations"] = 5
        stoned_kw["alphabet"] = sf.get_semantic_robust_alphabet()
    pbar = tqdm.tqdm(total=num_samples)
    samples, _ = exmol.run_stoned(target_smiles, _pbar=pbar, **stoned_kw)
    return samples


def calculate_genetic_score(distance, y_delta, delta_cut_off=0.5):
    # deltas below the cut-off score zero; the original also carried an
    # unreachable `delta_score = np.abs(y_delta)` branch behind the same
    # condition, removed here as dead code (behavior unchanged)
    if np.abs(y_delta) < delta_cut_off:
        return 0
    delta_score = delta_cut_off + (np.abs(y_delta) - delta_cut_off) * .2
    return (1 - distance) * delta_score


def get_genetic_molecules(fxn_values, smiles, selfies, distances,
                          target_molecule, flags, generation=0):
    # pack them into data structure with filtering out identical and nan
    exps = []
    for i, (sm, se, d, y) in enumerate(zip(smiles, selfies, distances, fxn_values)):
        exps.append(GeneticMolecule(
            smiles=sm,
            selfies=se,
            distance=d,
            similarity=1 - d,
            yhat=np.squeeze(y),
            is_origin=False,
            index=0,
            generation=generation,
            genetic_score=calculate_genetic_score(
                d, target_molecule.yhat - np.squeeze(y), flags.delta
            ),
            # label
            # y,
        ))
    for i, e in enumerate(exps):
        e.index = i
    return exps


def plot_scatter(molecule_bank, target_molecule, flags, carc_df, fig_kwargs):
    # Identify counterfactuals
    pass_threshold = [mol for mol in molecule_bank
                      if np.abs(mol.yhat - target_molecule.yhat) > flags.delta]
    positive_candidates = [mol for mol in pass_threshold if mol.yhat > target_molecule.yhat]
    negative_candidates = [mol for mol in pass_threshold if mol.yhat < target_molecule.yhat]
    cfs = [target_molecule]
    positive_candidates = sorted(positive_candidates, key=lambda mol: mol.distance)
    negative_candidates = sorted(negative_candidates, key=lambda mol: mol.distance)
    if negative_candidates:
        cfs.append(negative_candidates[0])
    if positive_candidates:
        cfs.append(positive_candidates[0])
    x_cfs = [mol.distance for mol in cfs]
    y_cfs = [return_percentile(e.yhat, carc_df['carc_continuous'].values) for e in cfs]
    cmap = "viridis"
    dists = np.array([mol.distance for mol in molecule_bank])
    yhats = np.array([mol.yhat for mol in molecule_bank])
    pred_yhat = molecule_bank[0].yhat
    lower_yhat = pred_yhat - flags.delta
    upper_yhat = pred_yhat + flags.delta
    true_percentile = return_percentile(target_molecule.y, carc_df['carc_continuous'].values)
    pred_percentile = return_percentile(pred_yhat, carc_df['carc_continuous'].values)
    upper_percentile = return_percentile(upper_yhat, carc_df['carc_continuous'].values)
    lower_percentile = return_percentile(lower_yhat, carc_df['carc_continuous'].values)
    # make index selection somewhat stochastic so that we
    # don't select from the same cluster
    idx =
np.argsort(dists)
numpy.argsort
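# A quick, illustrative check of calculate_genetic_score() from the row
# above (assumed in scope; the inputs are made up): deltas below the
# cut-off are rejected outright, larger deltas are damped before being
# weighted by similarity.
print(calculate_genetic_score(0.2, 0.3))  # 0 -- |y_delta| below the 0.5 cut-off
print(calculate_genetic_score(0.2, 0.9))  # ~0.464 = (1 - 0.2) * (0.5 + (0.9 - 0.5) * 0.2)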
""" ================== gprof_nn.retrieval ================== This module contains classes and functionality that drive the execution of the retrieval. """ import logging import math import subprocess from tempfile import TemporaryDirectory from pathlib import Path import numpy as np import xarray as xr import torch from torch import nn import pandas as pd from gprof_nn import sensors from gprof_nn.definitions import PROFILE_NAMES, ALL_TARGETS from gprof_nn.data import get_profile_clusters from gprof_nn.data.training_data import ( GPROF_NN_1D_Dataset, GPROF_NN_3D_Dataset, decompress_and_load, _THRESHOLDS, ) from gprof_nn.data.l1c import L1CFile from gprof_nn.data.preprocessor import PreprocessorFile, run_preprocessor from gprof_nn.data.utils import load_variable LOGGER = logging.getLogger(__name__) ############################################################################### # Helper functions. ############################################################################### def expand_tbs(tbs): """ Helper functions to expand GMI observations to the 15 channels. The GMI preprocessor as well as the simulator all produce observation data with 15 channels for GMI with two of them containing only missing values. Since the GPROF-NN networks expect 15 channel as input, data that comes directly from a L1C file must extended accordingly. Args: tbs: An array containing 13 brightness temperatures of GMI oriented along its last axis. Return: Array containing the same observations but with two empty chanels added at indices 5 and 12. """ tbs_e = np.zeros(tbs.shape[:-1] + (15,), dtype=np.float32) tbs_e[..., :5] = tbs[..., :5] tbs_e[..., 5] = np.nan tbs_e[..., 6:12] = tbs[..., 5:11] tbs_e[..., 12] = np.nan tbs_e[..., 13:] = tbs[..., 11:] return tbs_e def calculate_padding_dimensions(t): """ Calculate list of PyTorch padding values to extend the spatial dimension of input tensor to multiples of 32. Args: t: The ``torch.Tensor`` to pad. Return A tuple ``(p_l_n, p_r_n, p_l_m, p_r_m)`` containing the left and right padding for the second to last dimension (``p_l_m, p_r_m``) and for the last dimension (``p_l_n, p_r_n``). """ shape = t.shape n = shape[-1] d_n = math.ceil(n / 32) * 32 - n p_l_n = d_n // 2 p_r_n = d_n - p_l_n m = shape[-2] d_m = math.ceil(m / 32) * 32 - m p_l_m = d_m // 2 p_r_m = d_m - p_l_m return (p_l_n, p_r_n, p_l_m, p_r_m) def combine_input_data_1d(dataset, sensor): """ Combine retrieval input data into input matrix for the single-pixel retrieval. Args: dataset: ``xarray.Dataset`` containing the input variables. v_tbs: Name of the variable to load the brightness temperatures from. sensor: The sensor object representing the sensor from which the data stems. Return: Rank-2 input tensor containing the input data with features oriented along axis 1. """ n_chans = sensor.n_chans tbs = dataset["brightness_temperatures"].data.copy() # Input from L1C file has only 13 channels. 
    if sensor == sensors.GMI and tbs.shape[-1] < n_chans:
        tbs = expand_tbs(tbs)

    tbs = tbs.reshape(-1, n_chans)
    invalid = (tbs > 500.0) + (tbs < 0.0)
    tbs[invalid] = np.nan
    features = [tbs]

    if "two_meter_temperature" in dataset.variables:
        t2m = load_variable(dataset, "two_meter_temperature")
        t2m = t2m.reshape(-1, 1)
        tcwv = load_variable(dataset, "total_column_water_vapor")
        tcwv = tcwv.reshape(-1, 1)

        st = dataset["surface_type"].data.ravel()
        n_types = 18
        st_1h = np.zeros((st.shape[0], n_types), dtype=np.float32)
        for j in range(18):
            st_1h[:, j][st == j + 1] = 1.0

        at = np.maximum(dataset["airmass_type"].data, 0.0)
        at = at.ravel()
        n_types = 4
        at_1h = np.zeros((st.shape[0], n_types), dtype=np.float32)
        for j in range(4):
            at_1h[:, j][at == j] = 1.0

        features += [t2m, tcwv, st_1h, at_1h]

    if isinstance(sensor, sensors.CrossTrackScanner):
        va = dataset["earth_incidence_angle"].data
        features.insert(1, va.reshape(-1, 1))

    x = np.concatenate(features, axis=1)
    x[:, :n_chans][x[:, :n_chans] < 0] = np.nan
    return x


def combine_input_data_3d(dataset, sensor, v_tbs="brightness_temperatures"):
    """
    Combine retrieval input data into the input tensor format for the
    convolutional retrieval.

    Args:
        dataset: ``xarray.Dataset`` containing the input variables.
        sensor: The sensor object representing the sensor from which the
            data stems.
        v_tbs: Name of the variable to load the brightness temperatures
            from.

    Return:
        Rank-4 input tensor containing the input data with features
        oriented along axis 1.
    """
    n_chans = sensor.n_chans

    tbs = dataset[v_tbs][:].data
    if tbs.shape[-1] < n_chans:
        tbs = expand_tbs(tbs)

    invalid = (tbs > 500.0) + (tbs < 0.0)
    tbs[invalid] = np.nan

    features = [tbs]
    if "two_meter_temperature" in dataset:
        # 2m temperature
        t2m = load_variable(dataset, "two_meter_temperature")[..., np.newaxis]
        # Total precipitable water.
        tcwv = load_variable(dataset, "total_column_water_vapor")[..., np.newaxis]

        # Surface type
        st = dataset["surface_type"][:].data
        n_types = 18
        shape = tbs.shape[:-1]
        st_1h = np.zeros(shape + (n_types,), dtype=np.float32)
        for i in range(n_types):
            indices = st == (i + 1)
            st_1h[indices, i] = 1.0

        # Airmass type
        # Airmass type is defined slightly differently from surface type in
        # that there is a 0 type.
        am = dataset["airmass_type"][:].data
        n_types = 4
        am_1h = np.zeros(shape + (n_types,), dtype=np.float32)
        for i in range(n_types):
            indices = am == i
            am_1h[indices, i] = 1.0
        am_1h[am < 0, 0] = 1.0

        features += [t2m, tcwv, st_1h, am_1h]

    if isinstance(sensor, sensors.CrossTrackScanner):
        va = dataset["earth_incidence_angle"].data
        features.insert(1, va[..., np.newaxis])

    input_data = np.concatenate(features, axis=-1)
    input_data = input_data.astype(np.float32)
    if input_data.ndim < 4:
        input_data = np.expand_dims(input_data, 0)
    input_data = np.transpose(input_data, (0, 3, 1, 2))
    return input_data
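# Illustrative sketch of how the two helpers above compose (the shapes used
# here are made-up examples, not values from a real L1C file):
import numpy as np
import torch
from torch import nn

tbs = np.random.uniform(150.0, 300.0, (221, 208, 13)).astype(np.float32)
tbs15 = expand_tbs(tbs)                    # -> (221, 208, 15), NaN at channels 5 and 12

x = torch.zeros(1, 15, 221, 208)
padding = calculate_padding_dimensions(x)  # -> (8, 8, 1, 2)
x_padded = nn.functional.pad(x, padding)   # -> (1, 15, 224, 224), multiples of 32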
#!/usr/bin/env python3

import numpy as np
import matplotlib.pyplot as plt
import tikzplotlib

get_ipython().run_line_magic('run', "'./../split_step_fourier.ipynb'")

DEBUG = False
DEBUG_PLOT = True

# showing figures inline
get_ipython().run_line_magic('matplotlib', 'inline')

# plotting options
figure_size = (16, 9)
plt.rcParams.update({
    'font.family': 'serif',
    'text.usetex': True,
    'pgf.rcfonts': False,
})

# parameters
f_symbol = 32e9  # symbol rate (Baud) (symbols per second)
r_rc = .33
syms_per_filt = 4  # symbols per filter (in each direction)
P_in = 19  # dBm

# modulation scheme and constellation points
M = 2
modulation = {'0': -1, '1': 1}
n_symbol = 30  # number of symbols

# generate the random bit sequence
send_bits = np.random.choice([symbol for symbol in modulation.keys()], size=n_symbol)

## Transmission parameters
z_length = 70  # [km]
nz = 10  # steps
dz = z_length / nz

alpha = 0.2  # attenuation [dB/km]
D = 17  # [ps/nm/km]
beta2 = - (D * np.square(1550e-9)) / (2 * np.pi * 3e8) * 1e-3  # [s^2/km] propagation constant, lambda=1550nm is standard single-mode wavelength
gamma = 1.3  # [1/W/km]

## Simulation
n_up_max = 16  # samples per symbol (>1 => oversampling)

outputs = {}
sampletimes = {}

for n_up in range(1, n_up_max+1):
    # pulse impulse response
    t_sample, ir = get_rc_ir(syms_per_filt, r_rc, f_symbol, n_up)

    # generate the transmit signal
    send_ir = generate_signal(modulation, t_sample, 1/f_symbol, send_bits, ir, syms_per_filt, n_symbol, P_in)

    # add zeros before and after signal (use samples per symbol as factor)
    send = add_zeros(send_ir, 10 * int(1/f_symbol/t_sample))

    # transmission
    output = splitstepfourier(send, t_sample, dz, nz, alpha, beta2, gamma)

    outputs[f"{n_up}"] = output
    sampletimes[f"{n_up}"] = t_sample

fig1, ax1 = plt.subplots(1, figsize=figure_size)
for key, item in outputs.items():
    if key in ['1', '2', '3', '4', '10', '16']:
        x_vals = np.arange(item.size)*sampletimes[key]
        ax1.plot(x_vals, np.square(np.abs(item)), label=key)
ax1.set_xlim(np.amin(x_vals), np.amax(x_vals))
ax1.legend()
ax1.set_xlabel("$t[s]$")

fig2, ax2 = plt.subplots(1, figsize=figure_size)
for key, item in outputs.items():
    if key in ['1', '2', '3', '4', '10', '16']:
        x_vals = np.arange(item.size)*sampletimes[key]
        ax2.plot(x_vals, np.square(np.abs(item)), label=key)
# styling below assumed to mirror fig1 above
ax2.set_xlim(np.amin(x_vals), np.amax(x_vals))
ax2.legend()
ax2.set_xlabel("$t[s]$")
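# Quick sanity check (not part of the original notebook) of the beta2
# conversion above, using the standard relation beta2 = -D * lambda^2 / (2*pi*c):
import numpy as np

D_si = 17e-6            # 17 ps/nm/km expressed in SI units [s/m^2]
lam = 1550e-9           # wavelength [m]
c = 3e8                 # speed of light [m/s]
beta2_si = -D_si * lam**2 / (2 * np.pi * c)  # [s^2/m]
print(beta2_si * 1e3)   # ~ -2.17e-23 s^2/km, matching beta2 in the script above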
""" Unit test for selection operators. """ import random from math import nan import numpy as np import pytest from leap_ec import Individual from leap_ec import ops, statistical_helpers from leap_ec.binary_rep.problems import MaxOnes from leap_ec.data import test_population from leap_ec.real_rep.problems import SpheroidProblem ############################## # Tests for sus_selection() ############################## def test_sus_selection1(): ''' Test of a deterministic case of stochastic universal sampling ''' # Make a population where sus_selection has an obvious # reproducible choice pop = [Individual(np.array([0, 0, 0]), problem=MaxOnes()), Individual(np.array([1, 1, 1]), problem=MaxOnes())] pop = Individual.evaluate_population(pop) # This selection operator will always choose the [1, 1, 1] individual # since [0, 0, 0] has zero fitness selector = ops.sus_selection(pop) selected = next(selector) assert np.all(selected.genome == [1, 1, 1]) selected = next(selector) assert np.all(selected.genome == [1, 1, 1]) # run one more time to test shuffle selected = next(selector) assert np.all(selected.genome == [1, 1, 1]) @pytest.mark.stochastic def test_sus_selection_shuffle(): ''' Test of a stochastic case of SUS selection ''' # Make a population where sus_selection has an obvious # reproducible choice # Proportions here should be 1/4 and 3/4, respectively pop = [Individual(np.array([0, 1, 0]), problem=MaxOnes()), Individual(np.array([1, 1, 1]), problem=MaxOnes())] # Assign a unique identifier to each individual pop[0].id = 0 pop[1].id = 1 # We first need to evaluate all the individuals so that # selection has fitnesses to compare pop = Individual.evaluate_population(pop) selected = ops.sus_selection(pop) N = 1000 p_thresh = 0.1 observed_dist = statistical_helpers.collect_distribution( lambda: next(selected).id, samples=N) expected_dist = {pop[0].id: 0.25*N, pop[1].id: 0.75*N} print(f"Observed: {observed_dist}") print(f"Expected: {expected_dist}") assert(statistical_helpers.stochastic_equals(expected_dist, observed_dist, p=p_thresh)) def test_sus_selection_offset(): ''' Test of SUS selection with a non-default offset ''' pop = [Individual(np.array([0, 0, 0]), problem=MaxOnes()), Individual(np.array([1, 1, 1]), problem=MaxOnes())] # evaluate population and negate fitness of second individual pop = Individual.evaluate_population(pop) pop[1].fitness = -pop[1].fitness # now we try to evaluate normally (this should throw a ValueError) # due to the negative fitness with pytest.raises(ValueError): selector = ops.sus_selection(pop) selected = next(selector) # it should work by setting the offset to +3 # this adds 3 to each fitness value, making the second # individual's fitness 0. selector = ops.sus_selection(pop, offset=3) # we expect the first individual to always be selected # since the new zero point is now -3. selected = next(selector) assert np.all(selected.genome == [0, 0, 0]) selected = next(selector) assert np.all(selected.genome == [0, 0, 0]) def test_sus_selection_pop_min(): ''' Test of SUS selection with pop-min offset ''' # Create a population of positive fitness individuals # scaling the fitness by the population minimum makes it so the # least fit member never gets selected. 
pop = [Individual(np.array([0, 1, 0]), problem=MaxOnes()), Individual(np.array([1, 1, 1]), problem=MaxOnes())] pop = Individual.evaluate_population(pop) selector = ops.sus_selection(pop, offset='pop-min') # we expect that the second individual is always selected # since the new zero point will be at the minimum fitness # of the population selected = next(selector) assert np.all(selected.genome == [1, 1, 1]) selected = next(selector) assert np.all(selected.genome == [1, 1, 1]) def test_sus_selection_custom_key(): ''' Test of SUS selection with custom evaluation ''' pop = [Individual(np.array([0, 0, 0]), problem=MaxOnes()), Individual(np.array([1, 1, 1]), problem=MaxOnes())] def custom_key(individual): ''' Returns fitness based on MaxZeros ''' return np.count_nonzero(individual.genome == 0) pop = Individual.evaluate_population(pop) selector = ops.sus_selection(pop, key=custom_key) # we expect the first individual to always be selected # since its genome is all 0s selected = next(selector) assert np.all(selected.genome == [0, 0, 0]) selected = next(selector) assert np.all(selected.genome == [0, 0, 0]) def test_sus_selection_num_points(): ''' Test of SUS selection with varying `n` random points ''' # the second individual should always be selected pop = [Individual(np.array([0, 0, 0]), problem=MaxOnes()), Individual(np.array([1, 1, 1]), problem=MaxOnes())] pop = Individual.evaluate_population(pop) # with negative points with pytest.raises(ValueError): selector = ops.sus_selection(pop, n=-1) selected = next(selector) # with n = None (default) selector = ops.sus_selection(pop, n=None) selected = next(selector) assert np.all(selected.genome == [1, 1, 1]) # with n less than len(population) selector = ops.sus_selection(pop, n=1) selected = next(selector) assert np.all(selected.genome == [1, 1, 1]) selected = next(selector) assert np.all(selected.genome == [1, 1, 1]) # with n greater than len(population) selector = ops.sus_selection(pop, n=3) selected = next(selector) assert np.all(selected.genome == [1, 1, 1]) selected = next(selector) assert np.all(selected.genome == [1, 1, 1]) selected = next(selector) assert np.all(selected.genome == [1, 1, 1]) selected = next(selector) assert np.all(selected.genome == [1, 1, 1]) selected = next(selector) assert np.all(selected.genome == [1, 1, 1]) ############################## # Tests for proportional_selection() ############################## def test_proportional_selection1(): ''' Test of a deterministic case of proportional selection ''' # Make a population where proportional_selection has an obvious # reproducible choice pop = [Individual(np.array([0, 0, 0]), problem=MaxOnes()), Individual(np.array([1, 1, 1]), problem=MaxOnes())] parents = Individual.evaluate_population(pop) # This selection operator will always select the [1, 1, 1] individual since # [0, 0, 0] has zero fitness selector = ops.proportional_selection(parents) selected = next(selector) assert np.all(selected.genome == [1, 1, 1]) selected = next(selector) assert np.all(selected.genome == [1, 1, 1]) @pytest.mark.stochastic def test_proportional_selection2(): ''' Test of a stochastic proportional selection ''' # Make a population where fitness proportional selection has an obvious # reproducible choice # Proportions here should be 1/4 and 3/4, respectively pop = [Individual(np.array([0, 1, 0]), problem=MaxOnes()), Individual(np.array([1, 1, 1]), problem=MaxOnes())] # Assign a unique identifier to each individual pop[0].id = 0 pop[1].id = 1 # We first need to evaluate all the individuals so that 
# selection has fitnesses to compare pop = Individual.evaluate_population(pop) selected = ops.proportional_selection(pop) N = 1000 p_thresh = 0.1 observed_dist = statistical_helpers.collect_distribution( lambda: next(selected).id, samples=N) expected_dist = {pop[0].id: 0.25*N, pop[1].id: 0.75*N} print(f"Observed: {observed_dist}") print(f"Expected: {expected_dist}") assert(statistical_helpers.stochastic_equals(expected_dist, observed_dist, p=p_thresh)) def test_proportional_selection_offset(): ''' Test of proportional selection with a non-default offset ''' pop = [Individual(np.array([0, 0, 0]), problem=MaxOnes()), Individual(np.array([1, 1, 1]), problem=MaxOnes())] # evaluate population and negate fitness of second individual pop = Individual.evaluate_population(pop) pop[1].fitness = -pop[1].fitness # now we try to evaluate normally (this should throw a ValueError) # due to the negative fitness with pytest.raises(ValueError): selector = ops.proportional_selection(pop) selected = next(selector) # it should work by setting the offset to +3 # this adds 3 to each fitness value, making the second # individual's fitness 0. selector = ops.proportional_selection(pop, offset=3) # we expect the first individual to always be selected # since the new zero point is now -3. selected = next(selector) assert np.all(selected.genome == [0, 0, 0]) selected = next(selector) assert np.all(selected.genome == [0, 0, 0]) def test_proportional_selection_pop_min(): ''' Test of proportional selection with pop-min offset ''' # Create a population of positive fitness individuals # scaling the fitness by the population minimum makes it so the # least fit member never gets selected. pop = [Individual(np.array([0, 1, 0]), problem=MaxOnes()), Individual(np.array([1, 1, 1]), problem=MaxOnes())] pop = Individual.evaluate_population(pop) selector = ops.proportional_selection(pop, offset='pop-min') # we expect that the second individual is always selected # since the new zero point will be at the minimum fitness # of the population selected = next(selector) assert np.all(selected.genome == [1, 1, 1]) selected = next(selector) assert np.all(selected.genome == [1, 1, 1]) def test_proportional_selection_custom_key(): ''' Test of proportional selection with custom evaluation ''' pop = [Individual(np.array([0, 0, 0]), problem=MaxOnes()), Individual(np.array([1, 1, 1]), problem=MaxOnes())] def custom_key(individual): ''' Returns fitness based on MaxZeros ''' return np.count_nonzero(individual.genome == 0) pop = Individual.evaluate_population(pop) selector = ops.proportional_selection(pop, key=custom_key) # we expect the first individual to always be selected # since its genome is all 0s selected = next(selector) assert np.all(selected.genome == [0, 0, 0]) selected = next(selector) assert np.all(selected.genome == [0, 0, 0]) ############################## # Tests for naive_cyclic_selection() ############################## def test_naive_cyclic_selection(): """ Test of the naive deterministic cyclic selection """ pop = [Individual(np.array([0, 0]), problem=MaxOnes()), Individual(np.array([0, 1]), problem=MaxOnes())] # This selection operator will deterministically cycle through the # given population selector = ops.naive_cyclic_selection(pop) selected = next(selector) assert np.all(selected.genome == [0, 0]) selected = next(selector) assert np.all(selected.genome == [0, 1]) # And now we cycle back to the first individual selected = next(selector) assert np.all(selected.genome == [0, 0]) ############################## # Tests for 
cyclic_selection() ############################## def test_cyclic_selection(): """ Test of the deterministic cyclic selection """ # Set seed so that we get consistent test results. I.e., it is possible # by happenstance for some tests to fail even though they're actually ok. # E.g., the cyclic selection tests will test if the test_sequence # shuffles between a complete cycle, but there's a chance that the same # test_sequence may come up in the random shuffle, so the test will fail. # However, if we set a random seed ahead of time, then we can control for # those pathological scenarios. random.seed(123) # We're just going to use integers for the population as that's # sufficient for testing this selection operator; we don't want to get in # the weeds with comparing individuals for test_sequence equivalency # testing. pop = list(range(4)) # This selection operator will deterministically cycle through the # given population selector = ops.cyclic_selection(pop) # first cycle should be the same order as we started first_iteration = [next(selector) for _ in range(len(pop))] assert pop == first_iteration # the second iteration should be shuffled second_iteration = [next(selector) for _ in range(len(pop))] assert pop != second_iteration ############################## # Tests for truncation_selection() ############################## def test_truncation_selection(): """ Basic truncation selection test""" pop = [Individual(np.array([0, 0, 0]), problem=MaxOnes()), Individual(np.array([0, 0, 1]), problem=MaxOnes()), Individual(np.array([1, 1, 0]), problem=MaxOnes()), Individual(np.array([1, 1, 1]), problem=MaxOnes())] # We first need to evaluate all the individuals so that truncation # selection has fitnesses to compare pop = Individual.evaluate_population(pop) truncated = ops.truncation_selection(pop, 2) assert len(truncated) == 2 # Just to make sure, check that the two best individuals from the # original population are in the selected population assert pop[2] in truncated assert pop[3] in truncated def test_truncation_parents_selection(): """ Test (mu + lambda), i.e., parents competing with offspring Create parent and offspring populations such that each has an "best" individual that will be selected by truncation selection. """ parents = [Individual(np.array([0, 0, 0]), problem=MaxOnes()), Individual(np.array([1, 1, 0]), problem=MaxOnes())] parents = Individual.evaluate_population(parents) offspring = [Individual(np.array([0, 0, 1]), problem=MaxOnes()), Individual(np.array([1, 1, 1]), problem=MaxOnes())] offspring = Individual.evaluate_population(offspring) truncated = ops.truncation_selection(offspring, 2, parents=parents) assert len(truncated) == 2 assert parents[1] in truncated assert offspring[1] in truncated def test_truncation_selection_with_nan1(): """If truncation selection encounters a NaN and non-NaN fitness while maximizing, the non-NaN wins. """ # Make a population where binary tournament_selection has an obvious # reproducible choice problem = MaxOnes() pop = [Individual(np.array([0, 0, 0]), problem=problem), Individual(
np.array([1, 1, 1]), problem=problem)]

    # We first need to evaluate all the individuals so that truncation
    # selection has fitnesses to compare
    pop = Individual.evaluate_population(pop)

    # Completion assumed from the docstring above: give the otherwise-best
    # individual a NaN fitness; the non-NaN individual should then win.
    pop[1].fitness = nan

    truncated = ops.truncation_selection(pop, 1)
    assert len(truncated) == 1
    assert pop[0] in truncated
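# A small sketch (plain NumPy, independent of the leap_ec API) of the
# arithmetic behind the stochastic tests above: fitness-proportional
# selection -- which SUS approximates with evenly spaced pointers -- assigns
# each individual the probability fitness_i / sum(fitnesses).
import numpy as np

fitnesses = np.array([1.0, 3.0])     # MaxOnes fitness of [0, 1, 0] and [1, 1, 1]
probs = fitnesses / fitnesses.sum()  # -> [0.25, 0.75]
samples = 1000
print(probs * samples)               # -> [250., 750.], the expected_dist above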
# # Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. You may obtain a copy of the License at: # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and limitations under the License. # # # ******************************************* MEMORY FUNCTIONS ******************************************************** """ Functions that store and can retrieve a record of their current input. * `Buffer` * `ContentAddressableMemory` * `DictionaryMemory` Overview -------- Functions that store and can return a record of their input. """ import numbers import warnings from collections import deque from itertools import combinations, product # from typing import Optional, Union, Literal, Callable from typing import Optional, Union import numpy as np import typecheck as tc from psyneulink.core import llvm as pnlvm from psyneulink.core.components.functions.function import ( DEFAULT_SEED, FunctionError, _random_state_getter, _seed_setter, is_function_type, EPSILON, ) from psyneulink.core.components.functions.nonstateful.objectivefunctions import Distance from psyneulink.core.components.functions.nonstateful.selectionfunctions import OneHot from psyneulink.core.components.functions.stateful.integratorfunctions import StatefulFunction from psyneulink.core.globals.context import handle_external_context from psyneulink.core.globals.keywords import \ ADDITIVE_PARAM, BUFFER_FUNCTION, MEMORY_FUNCTION, COSINE, \ ContentAddressableMemory_FUNCTION, DictionaryMemory_FUNCTION, \ MIN_INDICATOR, MULTIPLICATIVE_PARAM, NEWEST, NOISE, OLDEST, OVERWRITE, RATE, RANDOM from psyneulink.core.globals.parameters import Parameter from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.utilities import \ all_within_range, convert_to_np_array, convert_to_list, convert_all_elements_to_np_array __all__ = ['MemoryFunction', 'Buffer', 'DictionaryMemory', 'ContentAddressableMemory', 'RETRIEVAL_PROB', 'STORAGE_PROB'] class MemoryFunction(StatefulFunction): # ----------------------------------------------------------------------------- componentType = MEMORY_FUNCTION class Buffer(MemoryFunction): # ------------------------------------------------------------------------------ """ Buffer( \ default_variable=None, \ rate=1.0, \ noise=0.0, \ history=None, \ initializer, \ params=None, \ owner=None, \ prefs=None, \ ) .. _Buffer: Append `variable <Buffer.variable>` to the end of `previous_value <Buffer.previous_value>` (i.e., right-append to deque of previous inputs). .. note:: Every appended item must have same shape as the first. If specified, `rate <Buffer.rate>` and/or `noise <Buffer.noise>` are applied to items already stored in the array, as follows: .. math:: stored\\_item * rate + noise .. note:: Because **rate** and **noise** are applied on every call, their effects accumulative exponentially over calls to `function <Buffer.function>`. If the length of the result exceeds `history <Buffer.history>`, delete the first item. Return `previous_value <Buffer.previous_value>` appended with `variable <Buffer.variable>`. 
Arguments --------- default_variable : number, list or array : default class_defaults.variable specifies a template for the value to be integrated; if it is a list or array, each element is independently integrated. rate : float, list or 1d array : default 1.0 specifies a value applied multiplicatively to each item already stored in the deque on each call to `function <Buffer.function>`; must be in interval [0,1] noise : float or Function : default 0.0 specifies a random value added to each item already in the deque on each call to `function <Buffer.function>` (see `noise <Buffer.noise>` for details). history : int : default None specifies the maxlen of the deque, and hence `value <Buffer.value>`. initializer float, list or ndarray : default [] specifies a starting value for the deque; if none is specified, the deque is initialized with an empty list. params : Dict[param keyword: param value] : default None a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the function. Values specified for parameters in the dictionary override any assigned to those parameters in arguments of the constructor. owner : Component `component <Component>` to which to assign the Function. name : str : default see `name <Function.name>` specifies the name of the Function. prefs : PreferenceSet or specification dict : default Function.classPreferences specifies the `PreferenceSet` for the Function (see `prefs <Function_Base.prefs>` for details). Attributes ---------- variable : number or array current input value appended to the end of the deque. rate : float or 1d array with all elements in interval [0,1] multiplicatively applied to each item already in the deque on call to `function <Buffer.function>`; implements exponential decay of stored items. noise : float or Function random value added to each item of the deque in each call to `function <Buffer.function>` (see `noise <Stateful_Noise>` for additional details). history : int determines maxlen of the deque and the value returned by the `function <Buffer.function>`. If appending `variable <Buffer.variable>` to `previous_value <Buffer.previous_value>` exceeds history, the first item of `previous_value <Buffer.previous_value>` is deleted, and `variable <Buffer.variable>` is appended to it, so that `value <Buffer.previous_value>` maintains a constant length. If history is not specified, the value returned continues to be extended indefinitely. initializer : float, list or ndarray value assigned as the first item of the deque when the Function is initialized, or reset if the **new_previous_value** argument is not specified in the call to `reset <StatefulFunction.reset>`. previous_value : 1d array : default class_defaults.variable state of the deque prior to appending `variable <Buffer.variable>` in the current call. owner : Component `component <Component>` to which the Function has been assigned. name : str the name of the Function; if it is not specified in the **name** argument of the constructor, a default is assigned by FunctionRegistry (see `Registry_Naming` for conventions used for default and duplicate names). prefs : PreferenceSet or specification dict : Function.classPreferences the `PreferenceSet` for function; if it is not specified in the **prefs** argument of the Function's constructor, a default is assigned using `classPreferences` defined in __init__.py (see `Preferences` for details). 
""" componentName = BUFFER_FUNCTION class Parameters(StatefulFunction.Parameters): """ Attributes ---------- history see `history <Buffer.history>` :default value: None :type: initializer see `initializer <Buffer.initializer>` :default value: numpy.array([], dtype=float64) :type: ``numpy.ndarray`` noise see `noise <Buffer.noise>` :default value: 0.0 :type: ``float`` rate see `rate <Buffer.rate>` :default value: 1.0 :type: ``float`` """ variable = Parameter([], pnl_internal=True, constructor_argument='default_variable') rate = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM]) noise = Parameter(0.0, modulable=True, aliases=[ADDITIVE_PARAM]) history = None initializer = Parameter(np.array([]), pnl_internal=True) @tc.typecheck def __init__(self, # FIX: 12/11/18 JDC - NOT SAFE TO SPECIFY A MUTABLE TYPE AS DEFAULT default_variable=None, # KAM 6/26/18 changed default param values because constructing a plain buffer function ("Buffer()) # was failing. # For now, updated default_variable, noise, and Alternatively, we can change validation on # default_variable=None, # Changed to [] because None conflicts with initializer rate=None, noise=None, # rate:Optional[Union[int, float, np.ndarray]]=None, # noise:Optional[Union[int, float, np.ndarray]]=None, # rate: parameter_spec=1.0, # noise: parameter_spec=0.0, # rate: Optional[Union(int, float]] = None, # Changed to 1.0: None fails validation # noise: Optional[Union[int, float, callable]] = None, # Changed to 0.0 - None fails validation # rate: Optional[Union[int, float, list, np.ndarray]] = 1.0, # noise: Optional[Union[int, float, list, np.ndarray, callable]] = 0.0, history:tc.optional(int)=None, # history: Optional[int] = None, initializer=None, params: tc.optional(dict) = None, # params: Optional[dict] = None, owner=None, prefs: tc.optional(is_pref_set) = None ): super().__init__( default_variable=default_variable, rate=rate, initializer=initializer, noise=noise, history=history, params=params, owner=owner, prefs=prefs, ) def _initialize_previous_value(self, initializer, context=None): previous_value = deque(initializer, maxlen=self.parameters.history.get(context)) self.parameters.previous_value.set(previous_value, context, override=True) return previous_value # TODO: Buffer variable fix: remove this or refactor to avoid skip # of direct super def _update_default_variable(self, new_default_variable, context=None): if not self.parameters.initializer._user_specified: self._initialize_previous_value([np.zeros_like(new_default_variable)], context) # bypass the additional _initialize_previous_value call used by # other stateful functions super(StatefulFunction, self)._update_default_variable(new_default_variable, context=context) def _instantiate_attributes_before_function(self, function=None, context=None): self.parameters.previous_value._set( self._initialize_previous_value( self.parameters.initializer._get(context), context ), context ) @handle_external_context(fallback_most_recent=True) def reset(self, previous_value=None, context=None): """ Clears the `previous_value <Buffer.previous_value>` deque. If an argument is passed into reset or if the `initializer <Buffer.initializer>` attribute contains a value besides [], then that value is used to start the new `previous_value <Buffer.previous_value>` deque. Otherwise, the new `previous_value <Buffer.previous_value>` deque starts out empty. `value <Buffer.value>` takes on the same value as `previous_value <Buffer.previous_value>`. 
""" # no arguments were passed in -- use current values of initializer attributes if previous_value is None: previous_value = self._get_current_parameter_value("initializer", context) if previous_value is None or previous_value == []: self.parameters.previous_value._get(context).clear() value = deque([], maxlen=self.parameters.history.get(context)) else: value = self._initialize_previous_value(previous_value, context=context) self.parameters.value.set(value, context, override=True) return value def _function(self, variable=None, context=None, params=None, ): """ Arguments --------- variable : number, list or array : default class_defaults.variable a single value or array of values to be integrated. params : Dict[param keyword: param value] : default None a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the function. Values specified for parameters in the dictionary override any assigned to those parameters in arguments of the constructor. Returns ------- updated value of deque : deque """ rate = np.array(self._get_current_parameter_value(RATE, context)).astype(float) # execute noise if it is a function noise = self._try_execute_param(self._get_current_parameter_value(NOISE, context), variable, context=context) # If this is an initialization run, leave deque empty (don't want to count it as an execution step); # Just return current input (for validation). if self.is_initializing: return variable previous_value = self.parameters.previous_value._get(context) # Apply rate and/or noise, if they are specified, to all stored items if len(previous_value): # TODO: remove this shape hack when buffer shapes made consistent noise = np.reshape(noise, np.asarray(previous_value[0]).shape) previous_value = convert_to_np_array(previous_value) * rate + noise previous_value = deque(previous_value, maxlen=self.parameters.history._get(context)) previous_value.append(variable) self.parameters.previous_value._set(previous_value, context) return self.convert_output_type(previous_value) RETRIEVAL_PROB = 'retrieval_prob' STORAGE_PROB = 'storage_prob' DISTANCE_FUNCTION = 'distance_function' SELECTION_FUNCTION = 'selection_function' DISTANCE_FIELD_WEIGHTS = 'distance_field_weights' equidistant_entries_select_keywords = [RANDOM, OLDEST, NEWEST] class ContentAddressableMemory(MemoryFunction): # --------------------------------------------------------------------- """ ContentAddressableMemory( \ default_variable=None, \ retrieval_prob=1.0, \ storage_prob=1.0, \ rate=None, \ noise=0.0, \ initializer=None, \ distance_field_weights=None, \ distance_function=Distance(metric=COSINE), \ selection_function=OneHot(mode=MIN_VAL), \ duplicate_entries_allowed=False, \ duplicate_threshold=0, \ equidistant_entries_select=RANDOM, \ max_entries=None, \ params=None, \ owner=None, \ prefs=None, \ ) .. _ContentAddressableMemory: **Sections** - `Overview <ContentAddressableMemory_Overview>` \n `Entries and Fields <ContentAddressableMemory_Entries_and_Fields>` \n `Content-based Retrieval <ContentAddressableMemory_Retrieval>` \n `Duplicate entries <ContentAddressableMemory_Duplicate_Entries>` \n - `Structure <ContentAddressableMemory_Structure>` - `Execution <ContentAddressableMemory_Execution>` \n `Retrieval <ContentAddressableMemory_Execution_Retrieval>` \n `Storage <ContentAddressableMemory_Execution_Storage>` \n - `Examples <ContentAddressableMemory_Examples>` - `Class Reference <ContentAddressableMemory_Class_Reference>` .. 
_ContentAddressableMemory_Overview:

    **Overview**

    The ContentAddressableMemory `Function` implements a configurable, content-addressable storage and retrieval of
    entries from `memory <ContentAddressableMemory.memory>`. Storage is determined by `storage_prob
    <ContentAddressableMemory.storage_prob>`, and retrieval of entries is determined by
    `distance_function <ContentAddressableMemory.distance_function>`, `selection_function
    <ContentAddressableMemory.selection_function>`, and `retrieval_prob <ContentAddressableMemory.retrieval_prob>`.

    .. _ContentAddressableMemory_Entries_and_Fields:

    **Entries and Fields**. The **default_variable** argument specifies the shape of an entry in `memory
    <ContentAddressableMemory.memory>`, each of which is a list or array of fields that are themselves lists or 1d
    arrays (see `EpisodicMemoryMechanism_Memory_Fields`). An entry can have an arbitrary number of fields, and each
    field can have an arbitrary length. However, all entries must have the same number of fields, and the
    corresponding fields must all have the same length across entries. Fields can be weighted to determine the
    influence they have on retrieval, using the `distance_field_weights
    <ContentAddressableMemory.distance_field_weights>` parameter (see `retrieval
    <ContentAddressableMemory_Retrieval>` below).

    .. hint::
       Entries in `memory <ContentAddressableMemory.memory>` can be assigned "labels" -- i.e., values that are not
       used in the calculation of distance -- by assigning them a weight of 0 or None in `distance_field_weights
       <ContentAddressableMemory.distance_field_weights>`; either can be used for labels that are numeric values;
       however, if non-numeric values are assigned to a field as labels, then None must be specified for that field
       in `distance_field_weights <ContentAddressableMemory.distance_field_weights>`.

    .. _ContentAddressableMemory_Retrieval:

    **Retrieval**. Entries are retrieved from `memory <ContentAddressableMemory.memory>` based on their distance
    from `variable <ContentAddressableMemory.variable>`, used as the cue for retrieval. The distance is computed
    using the `distance_function <ContentAddressableMemory.distance_function>`, which compares `variable
    <ContentAddressableMemory.variable>` with each entry in `memory <ContentAddressableMemory.memory>` as full
    vectors (i.e., with all fields of each concatenated into a single array), or by computing the distance of each
    field in `variable <ContentAddressableMemory.variable>` with the corresponding one of each entry in `memory
    <ContentAddressableMemory.memory>`, and then averaging those distances, possibly weighted by coefficients
    specified in `distance_field_weights <ContentAddressableMemory.distance_field_weights>`. The distances computed
    between `variable <ContentAddressableMemory.variable>` and each entry in `memory
    <ContentAddressableMemory.memory>` are used by `selection_function
    <ContentAddressableMemory.selection_function>` to determine which entry is retrieved. The distance used for the
    last retrieval (i.e., between `variable <ContentAddressableMemory.variable>` and the entry retrieved), the
    distances of each of their corresponding fields (weighted by `distance_field_weights
    <ContentAddressableMemory.distance_field_weights>`), and the distances to all other entries are stored in
    `distance <ContentAddressableMemory.distance>`, `distances_by_field
    <ContentAddressableMemory.distances_by_field>`, and `distances_to_entries
    <ContentAddressableMemory.distances_to_entries>`, respectively.

    .. _ContentAddressableMemory_Duplicate_Entries:

    **Duplicate Entries**.
    These can be allowed, disallowed, or overwritten during storage using `duplicate_entries_allowed
    <ContentAddressableMemory.duplicate_entries_allowed>`, and how selection is made among duplicate entries or
    ones indistinguishable by the `distance_function <ContentAddressableMemory.distance_function>` can be specified
    using `equidistant_entries_select <ContentAddressableMemory.equidistant_entries_select>`.

    The class also provides methods for directly retrieving (`get_memory
    <ContentAddressableMemory.get_memory>`), adding (`add_to_memory <ContentAddressableMemory.add_to_memory>`)
    and deleting (`delete_from_memory <ContentAddressableMemory.delete_from_memory>`) one or more entries from
    `memory <ContentAddressableMemory.memory>`.

    .. _ContentAddressableMemory_Structure:

    **Structure**

    An entry is stored and retrieved as an array containing a set of `fields
    <EpisodicMemoryMechanism_Memory_Fields>` each of which is a 1d array. An array containing such entries can be
    used to initialize the contents of `memory <ContentAddressableMemory.memory>` by providing it in the
    **initializer** argument of the ContentAddressableMemory's constructor, or in a call to its `reset
    <ContentAddressableMemory.reset>` method. The current contents of `memory <ContentAddressableMemory.memory>`
    can be inspected using the `memory <ContentAddressableMemory.memory>` attribute, which returns a list
    containing the current entries, each as a list containing all fields for that entry. The `memory_num_fields
    <ContentAddressableMemory.memory_num_fields>` contains the number of fields expected for each entry,
    `memory_field_shapes <ContentAddressableMemory.memory_field_shapes>` their shapes, and `memory_num_entries
    <ContentAddressableMemory.memory_num_entries>` the total number of entries in `memory
    <ContentAddressableMemory.memory>`.

    .. _ContentAddressableMemory_Shapes:

    .. technical_note::
       Both `memory <ContentAddressableMemory.memory>` and all entries are stored as np.ndarrays, the
       dimensionality of which is determined by the shape of the fields of an entry. If all fields have the same
       length (regular), then they are 2d arrays and `memory <ContentAddressableMemory.memory>` is a 3d array.
       However, if fields vary in length (`ragged <https://en.wikipedia.org/wiki/Jagged_array>`_) then, although
       each field is 1d, an entry is also 1d (with dtype='object'), and `memory <ContentAddressableMemory.memory>`
       is 2d (with dtype='object').

    .. _ContentAddressableMemory_Execution:

    **Execution**

    When the ContentAddressableMemory function is executed, it first retrieves the entry in `memory
    <ContentAddressableMemory.memory>` that most closely matches `variable <ContentAddressableMemory.variable>`
    in the call, stores the latter in `memory <ContentAddressableMemory.memory>`, and returns the retrieved
    entry. If `variable <ContentAddressableMemory.variable>` is an exact match of an entry in `memory
    <ContentAddressableMemory.memory>`, and `duplicate_entries_allowed
    <ContentAddressableMemory.duplicate_entries_allowed>` is False, then the matching item is returned, but
    `variable <ContentAddressableMemory.variable>` is not stored. These steps are described in more detail below.

    .. _ContentAddressableMemory_Execution_Retrieval:

    * **Retrieval:** first, with probability `retrieval_prob <ContentAddressableMemory.retrieval_prob>`, the
      entry closest to `variable <ContentAddressableMemory.variable>` is retrieved from `memory
      <ContentAddressableMemory.memory>`. The entry is chosen by calling, in order:

      * `distance_function <ContentAddressableMemory.distance_function>`\: generates a list of `distances
        <ContentAddressableMemory.distances>` between `variable <ContentAddressableMemory.variable>` and each
        entry in `memory <ContentAddressableMemory.memory>`, possibly weighted by `distance_field_weights
        <ContentAddressableMemory.distance_field_weights>`, as follows:

        .. _ContentAddressableMemory_Distance_Field_Weights:

        * if `distance_field_weights <ContentAddressableMemory.distance_field_weights>` is either a scalar or an
          array of scalars that are all the same, then it is used simply to scale the distance computed between
          `variable <ContentAddressableMemory.variable>` and each entry in `memory
          <ContentAddressableMemory.memory>`, each of which is computed by concatenating all items of `variable
          <ContentAddressableMemory.variable>` into a 1d array, similarly concatenating all `memory_fields
          <EpisodicMemoryMechanism_Memory_Fields>` of an entry in `memory <ContentAddressableMemory.memory>`, and
          then using `distance_function <ContentAddressableMemory.distance_function>` to compute the distance
          between them.

        * if `distance_field_weights <ContentAddressableMemory.distance_field_weights>` is an array of scalars
          with different values, then `variable <ContentAddressableMemory.variable>` is compared with each entry
          in `memory <ContentAddressableMemory.memory>` by using `distance_function
          <ContentAddressableMemory.distance_function>` to compute the distance of each item in `variable
          <ContentAddressableMemory.variable>` with the corresponding field of the entry in memory, and then
          averaging those distances weighted by the corresponding element of `distance_field_weights
          <ContentAddressableMemory.distance_field_weights>`.

          .. note::
             Fields assigned a weight of *0* or *None* are ignored in the distance calculation; that is, the
             distances between `variable <ContentAddressableMemory.variable>` and entries for those fields are
             not included in the averaging of distances by field.

      * `selection_function <ContentAddressableMemory.selection_function>`\: called with the list of distances
        to determine which entries to select for consideration. If more than one entry from `memory
        <ContentAddressableMemory.memory>` is identified, `equidistant_entries_select
        <ContentAddressableMemory.equidistant_entries_select>` is used to determine which to retrieve. If no
        retrieval occurs, an appropriately shaped zero-valued array is assigned as the retrieved memory, and
        returned by the function.

      The distance between `variable <ContentAddressableMemory.variable>` and the retrieved entry is assigned to
      `distance <ContentAddressableMemory.distance>`, the distances between each of their fields are assigned to
      `distances_by_field <ContentAddressableMemory.distances_by_field>`, and the distances of `variable
      <ContentAddressableMemory.variable>` to all entries in `memory <ContentAddressableMemory.memory>` are
      assigned to `distances_to_entries <ContentAddressableMemory.distances_to_entries>`.

    ..
    _ContentAddressableMemory_Execution_Storage:

    * **Storage:** after retrieval, an attempt is made to store `variable <ContentAddressableMemory.variable>`
      in `memory <ContentAddressableMemory.memory>` with probability `storage_prob
      <ContentAddressableMemory.storage_prob>`; if the attempt is made:

      * if `variable <ContentAddressableMemory.variable>` is identical to an entry already in `memory
        <ContentAddressableMemory.memory>`, as evaluated by `distance_function
        <ContentAddressableMemory.distance_function>` and `duplicate_threshold
        <ContentAddressableMemory.duplicate_threshold>`, then `duplicate_entries_allowed
        <ContentAddressableMemory.duplicate_entries_allowed>` determines whether or not to store the entry; if
        `duplicate_entries_allowed <ContentAddressableMemory.duplicate_entries_allowed>` is:

        * False -- storage is skipped;

        * True -- `variable <ContentAddressableMemory.variable>` is stored as another duplicate;

        * *OVERWRITE* -- the duplicate entry in `memory <ContentAddressableMemory.memory>` is replaced with
          `variable <ContentAddressableMemory.variable>` (which may be slightly different than the item it
          replaces, within the tolerance of `duplicate_threshold <ContentAddressableMemory.duplicate_threshold>`),
          and the matching entry is returned;

          .. note::
             If `duplicate_entries_allowed <ContentAddressableMemory.duplicate_entries_allowed>` is OVERWRITE but
             a duplicate entry is nevertheless identified during retrieval (e.g., **duplicate_entries_allowed**
             was previously changed from True to False), a warning is issued, and the duplicate entry is
             overwritten with `variable <ContentAddressableMemory.variable>`.

      * if storage **rate** and/or **noise** arguments are specified in the constructor, they are applied to
        `variable <ContentAddressableMemory.variable>` before storage as :math:`variable * rate + noise`;

      * finally, if the number of entries in `memory <ContentAddressableMemory.memory>` exceeds `max_entries
        <ContentAddressableMemory.max_entries>`, the first (oldest) entry is deleted. The current number of
        entries in memory is contained in the `memory_num_entries
        <ContentAddressableMemory.memory_num_entries>` attribute.

    .. _ContentAddressableMemory_Examples:

    **Examples**

    *Initialize memory with* **default_variable**

    The format for entries in `memory <ContentAddressableMemory.memory>` can be specified using either the
    **default_variable** or **initializer** arguments of the Function's constructor. **default_variable**
    specifies the shape of entries, without creating any entries::

        >>> c = ContentAddressableMemory(default_variable=[[0,0],[0,0,0]])
        >>> c([[1,2]])
        [array([0, 0])]

    Since `memory <ContentAddressableMemory.memory>` was not initialized, the first call to the Function returns
    an array of zeros, formatted as specified in **default_variable**. However, the input in the call to the
    Function (``[[1,2]]``) is stored as an entry in `memory <EpisodicMemoryMechanism.memory>`::

        >>> c.memory
        array([[[1., 2.]]])

    and is returned on the next call::

        >>> c([[2,5]])
        array([[1., 2.]])

    Note that even though **default_variable** and the inputs to the Function are specified as lists, the
    entries returned are arrays; `memory <ContentAddressableMemory.memory>` and all of its entries are always
    formatted as arrays.

    *Initialize memory with* **initializer**

    The **initializer** argument of a ContentAddressableMemory's constructor can be used to initialize its
    `memory <ContentAddressableMemory.memory>`::

        >>> c = ContentAddressableMemory(initializer=[[[1,2],[3,4,5]],
        ...                                           [[10,9],[8,7,6]]])
        >>> c([[1,2],[3,4,6]])
        array([array([1., 2.]), array([3., 4., 5.])], dtype=object)
        >>> c([[1,2],[3,4,6]])
        array([array([1., 2.]), array([3., 4., 6.])], dtype=object)

    Note that there was no need to use **default_variable**, and in fact it would be overridden if specified.

    .. _ContentAddressableMemory_Examples_Weighting_Fields:

    *Weighting fields*

    The **distance_field_weights** argument can be used to differentially weight memory fields to modify their
    influence on retrieval (see `distance_field_weights <ContentAddressableMemory_Distance_Field_Weights>`). For
    example, this can be used to configure the Function as a dictionary, using the first field for keys (on
    which retrieval is based) and the second for values (that are retrieved with a matching key), as follows:

        >>> c = ContentAddressableMemory(initializer=[[[1,2],[3,4]],
        ...                                           [[1,5],[10,11]]],
        ...                              distance_field_weights=[1,0])
        >>> c([[1,2.5],[10,11]])
        array([[1., 2.],
               [3., 4.]])

    Note that the first entry ``[[1,2],[3,4]]`` in `memory <ContentAddressableMemory.memory>` was retrieved,
    even though the cue used in the call (``[[1,2.5],[10,11]]``) was an exact match to the second field of the
    second entry (``[[1,5],[10,11]]``). However, since that field was assigned 0 in **distance_field_weights**,
    it was ignored and, using only the first field, the cue was closer to the first entry. This is confirmed by
    repeating the example without specifying **distance_field_weights**::

        >>> c = ContentAddressableMemory(initializer=[[[1,2],[3,4]],
        ...                                           [[1,5],[10,11]]])
        >>> c([[1,2.5],[10,11]])
        array([[ 1.,  5.],
               [10., 11.]])

    COMMENT:
    # FIX: ADD EXAMPLES FOR ENTRIES WITH DIFFERENT SHAPES
    COMMENT

    *Duplicate entries*

    By default, duplicate entries are precluded from a ContentAddressableMemory's `memory
    <ContentAddressableMemory.memory>`. So, for an initializer with identical entries, only one copy of the
    duplicates will be stored::

        >>> c = ContentAddressableMemory(initializer=[[[1,2],[3,4]],
        ...                                           [[1,2],[3,4]]])
        >>> c.memory
        array([[[1., 2.],
                [3., 4.]]])

    and using the same array as input to the function will retrieve that array but not store another copy::

        >>> c([[1,2],[3,4]])
        array([[1., 2.],
               [3., 4.]])
        >>> c.memory
        array([[[1., 2.],
                [3., 4.]]])

    Only fields with non-zero weights in `distance_field_weights
    <ContentAddressableMemory.distance_field_weights>` are considered when evaluating whether entries are
    duplicates. So, in the following example, where the weight for the second field is 0, the two entries are
    considered duplicates and only the first is stored::

        >>> c = ContentAddressableMemory(initializer=[[[1,2],[3,4]],
        ...                                           [[1,2],[5,6]]],
        ...                              distance_field_weights=[1,0])
        >>> c.memory
        array([[[1., 2.],
                [3., 4.]]])

    Duplicates can be allowed by setting the **duplicate_entries_allowed** argument to True or *OVERWRITE*.
    Setting it to True allows duplicate entries to accumulate in `memory <ContentAddressableMemory.memory>`, as
    shown here::

        >>> c = ContentAddressableMemory(initializer=[[[1,2],[3,4]],
        ...                                           [[1,5],[10,11]]],
        ...                              duplicate_entries_allowed=True)
        >>> c([[1,2],[3,4]])
        array([[1., 2.],
               [3., 4.]])
        >>> c.memory
        array([[[ 1.,  2.],
                [ 3.,  4.]],
        <BLANKLINE>
               [[ 1.,  5.],
                [10., 11.]],
        <BLANKLINE>
               [[ 1.,  2.],
                [ 3.,  4.]]])

    Duplicates are determined by comparing entries using the function `distance_function
    <ContentAddressableMemory.distance_function>`; if the `distance <ContentAddressableMemory.distance>` is less
    than `duplicate_threshold <ContentAddressableMemory.duplicate_threshold>`, they are considered to be
    duplicates; otherwise they are treated as distinct entries. By default, `duplicate_threshold
    <ContentAddressableMemory.duplicate_threshold>` is 0. In the following example it is increased, so that two
    very similar, but non-identical entries, are nonetheless treated as duplicates::

        >>> c = ContentAddressableMemory(initializer=[[[1, 2.0], [3, 4]],
        ...                                           [[1, 2.5], [3, 4]]],
        ...                              duplicate_entries_allowed=False,
        ...                              duplicate_threshold=0.2)
        >>> c.memory
        array([[[1., 2.],
                [3., 4.]]])

    Setting the **duplicate_entries_allowed** argument to *OVERWRITE* allows an entry to replace one that is
    considered duplicate, even if it is not identical, as in the following example::

        >>> c.duplicate_entries_allowed=OVERWRITE
        >>> c([[1, 2.1], [3, 4]])
        array([[1., 2.],
               [3., 4.]])
        >>> c.memory
        array([[[1. , 2.1],
                [3. , 4. ]]])

    Note that the entry considered to be the duplicate (``[[1, 2.1], [3, 4]]``) is returned, and then replaced
    in `memory <ContentAddressableMemory.memory>`. Finally, if `duplicate_entries_allowed
    <ContentAddressableMemory.duplicate_entries_allowed>` is True, and duplicates have accumulated, the
    `equidistant_entries_select <ContentAddressableMemory.equidistant_entries_select>` attribute can be used to
    specify how to select among them for retrieval, either by choosing randomly (*RANDOM*) or selecting either
    the first one (*OLDEST*) or last one (*NEWEST*) stored.

    .. _ContentAddressableMemory_Class_Reference:

    **Class Reference**

    Arguments
    ---------

    default_variable : list or 2d array : default class_defaults.variable
        specifies a template for an entry in the dictionary; the list or array can have any number of items,
        each of which must be a list or array of any length; however, at present entries are constrained to be
        at most 2d.

    retrieval_prob : float in interval [0,1] : default 1.0
        specifies probability of retrieving an entry from `memory <ContentAddressableMemory.memory>`.

    storage_prob : float in interval [0,1] : default 1.0
        specifies probability of adding `variable <ContentAddressableMemory.variable>` to `memory
        <ContentAddressableMemory.memory>`.

    rate : float, list, or array : default 1.0
        specifies a value used to multiply `variable <ContentAddressableMemory.variable>` before storing in
        `memory <ContentAddressableMemory.memory>` (see `rate <ContentAddressableMemory.rate>` for details).

    noise : float, list, 2d array, or Function : default 0.0
        specifies random value(s) added to `variable <ContentAddressableMemory.variable>` before storing in
        `memory <ContentAddressableMemory.memory>`\; if a list or 2d array, it must be the same shape as
        `variable <ContentAddressableMemory.variable>` (see `noise <ContentAddressableMemory.noise>` for
        details).

    initializer : 3d array or list : default None
        specifies an initial set of entries for `memory <ContentAddressableMemory.memory>` (see `initializer
        <ContentAddressableMemory.initializer>` for additional details).

    distance_field_weights : 1d array : default None
        specifies the weight to use in computing the distance between each item of `variable
        <ContentAddressableMemory.variable>` and the corresponding `memory_field
        <EpisodicMemoryMechanism_Memory_Fields>` of each item in `memory <ContentAddressableMemory.memory>` (see
        `distance_field_weights <ContentAddressableMemory.distance_field_weights>` for additional details).

    distance_function : Distance or function : default Distance(metric=COSINE)
        specifies the function used during retrieval to compare `variable <ContentAddressableMemory.variable>`
        with entries in `memory <ContentAddressableMemory.memory>`.

    selection_function : OneHot or function : default OneHot(mode=MIN_VAL)
        specifies the function used during retrieval to evaluate the distances returned by `distance_function
        <ContentAddressableMemory.distance_function>` and select the item to retrieve.

    duplicate_entries_allowed : bool : default False
        specifies whether duplicate entries are allowed in `memory <ContentAddressableMemory.memory>` (see
        `duplicate entries <ContentAddressableMemory_Duplicate_Entries>` for additional details).

    duplicate_threshold : float : default 0
        specifies how similar `variable <ContentAddressableMemory.variable>` must be to an entry in `memory
        <ContentAddressableMemory.memory>` based on `distance_function
        <ContentAddressableMemory.distance_function>` to be considered a duplicate (see `duplicate entries
        <ContentAddressableMemory_Duplicate_Entries>` for additional details).

    equidistant_entries_select : RANDOM | OLDEST | NEWEST : default RANDOM
        specifies which entry in `memory <ContentAddressableMemory.memory>` is chosen for retrieval if two or
        more have the same distance from `variable <ContentAddressableMemory.variable>`.

    max_entries : int : default None
        specifies the maximum number of entries allowed in `memory <ContentAddressableMemory.memory>` (see
        `max_entries <ContentAddressableMemory.max_entries>` for additional details).

    params : Dict[param keyword: param value] : default None
        a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the function.
        Values specified for parameters in the dictionary override any assigned to those parameters in arguments
        of the constructor.

    owner : Component
        `component <Component>` to which to assign the Function.

    name : str : default see `name <Function.name>`
        specifies the name of the Function.

    prefs : PreferenceSet or specification dict : default Function.classPreferences
        specifies the `PreferenceSet` for the Function (see `prefs <Function_Base.prefs>` for details).

    Attributes
    ----------

    variable : 2d array
        used to retrieve an entry from `memory <ContentAddressableMemory.memory>`, and then stored there.

    retrieval_prob : float in interval [0,1]
        probability of retrieving a value from `memory <ContentAddressableMemory.memory>`.

    storage_prob : float in interval [0,1]
        probability of adding `variable <ContentAddressableMemory.variable>` to `memory
        <ContentAddressableMemory.memory>`.

        .. note::
           storage_prob does not apply to `initializer <ContentAddressableMemory.initializer>`, the entries of
           which are added to `memory <ContentAddressableMemory.memory>` irrespective of storage_prob.

    rate : float or 1d array
        value applied multiplicatively to `variable <ContentAddressableMemory.variable>` before storing in
        `memory <ContentAddressableMemory.memory>` (see `rate <Stateful_Rate>` for additional details).
noise : float, 2d array or Function value added to `variable <ContentAddressableMemory.variable>`) before storing in `memory <ContentAddressableMemory.memory>` (see `noise <Stateful_Noise>` for additional details). If a 2d array (or `Function` that returns one), its shape must be the same as `variable <ContentAddressableMemory.variable>`; that is, each array in the outer dimension (Axis 0) must have the same length as the corresponding one in `variable <ContentAddressableMemory.variable>`, so that it can be added Hadamard style to `variable <ContentAddressableMemory.variable>` before storing it in `memory <ContentAddressableMemory.memory>`. initializer : ndarray initial set of entries for `memory <ContentAddressableMemory.memory>`. It should be either a 3d regular array or a 2d ragged array (if the fields of an entry have different lengths), but it can be specified in the **initializer** argument of the constructor using some simpler forms for convenience. Specifically, scalars, 1d and regular 2d arrays are allowed, which are interpreted as a single entry that is converted to a 3d array to initialize `memory <ContentAddressableMemory.memory>`. memory : list list of entries in ContentAddressableMemory, each of which is an array of fields containing stored items; the fields of an entry must be lists or arrays, each of which can be different shapes, but the corresponding fields of all entries must have the same shape; for example, the following could be a pair of entries in memory: +-------------+------------------------------+--------------------------------------------+ | entry 1 | entry 2 | +-------------+--------------+---------------+-----------+--------------+-----------------+ | field1 | field2 | field3 | field1 | field2 | field3 | +-------------+--------------+---------------+-----------+--------------+-----------------+ | [[ [a], | [b,c,d], | [[e],[f]] ], | [ [u], | [v,w,x], | [[y],[z]] ]] | +-------------+--------------+---------------+-----------+--------------+-----------------+ distance_field_weights : 1d array : default None determines the weight used in computing the distance between each item of `variable <ContentAddressableMemory.variable>` and the corresponding `memory_field <EpisodicMemoryMechanism_Memory_Fields>` of each entry in `memory <ContentAddressableMemory.memory>`; if all elements are identical, it is treated as a scalar coefficient on `distance <ContentAddressableMemory.distance>` (see `ContentAddressableMemory_Distance_Field_Weights` for additional details). distance_function : Distance or function : default Distance(metric=COSINE) function used during retrieval to compare `variable <ContentAddressableMemory.variable>` with entries in `memory <ContentAddressableMemory.memory>`. distance : float : default 0 contains distance used for retrieval last cue to last entry returned in a given `context <Context>`. distances_by_field : array : default [0] contains array of distances between each `memory field <ContentAddressableMemory_Memory_Fields>` of the last cue and the corresponding ones of the last entry returned in a given `context <Context>`. distances_to_entries : array : default [0] contains array of distances between last cue retrieved in a given `context <Context>` an all entries at that time. memory_num_entries : int contains the number of entries in `memory <ContentAddressableMemory.memory>`. memory_num_fields : int contains the number of `memory fields <EpisodicMemoryMechanism_Memory_Fields>` in each entry of `memory <ContentAddressableMemory.memory>`. 
memory_field_shapes : array contains the shapes of each `memory field <EpisodicMemoryMechanism_Memory_Fields>` in each entry of `memory <ContentAddressableMemory.memory>`. selection_function : OneHot or function function used during retrieval to evaluate the distances returned by `distance_function <ContentAddressableMemory.distance_function>` and select the item(s) to return. duplicate_entries_allowed : bool | OVERWRITE determines whether duplicate entries are allowed in `memory <ContentAddressableMemory.memory>`, as evaluated by `distance_function <ContentAddressableMemory.distance_function>` and `duplicate_threshold <ContentAddressableMemory.duplicate_threshold>`. (see `duplicate entries <ContentAddressableMemory_Duplicate_Entries>` for additional details). duplicate_threshold : float determines how similar `variable <ContentAddressableMemory.variable>` must be to an entry in `memory `<ContentAddressableMemory.memory>` based on `distance_function <ContentAddressableMemory.distance_function>` to be considered a duplicate (see `duplicate entries <ContentAddressableMemory_Duplicate_Entries>` for additional details). equidistant_entries_select: RANDOM | OLDEST | NEWEST determines which entry is retrieved when duplicate entries are identified or are indistinguishable by the `distance_function <ContentAddressableMemory.distance_function>`. max_entries : int maximum number of entries allowed in `memory <ContentAddressableMemory.memory>`; if storing a memory exceeds the number, the oldest memory is deleted. previous_value : ndarray state of the `memory <ContentAddressableMemory.memory>` prior to storing `variable <ContentAddressableMemory.variable>` in the current call. random_state : numpy.RandomState private pseudorandom number generator owner : Component `component <Component>` to which the Function has been assigned. name : str the name of the Function; if it is not specified in the **name** argument of the constructor, a default is assigned by FunctionRegistry (see `Registry_Naming` for conventions used for default and duplicate names). prefs : PreferenceSet or specification dict : Function.classPreferences the `PreferenceSet` for function; if it is not specified in the **prefs** argument of the Function's constructor, a default is assigned using `classPreferences` defined in __init__.py (see `Preferences` for details). Returns ------- entry from `memory <ContentAddressableMemory.memory>` that best matches `variable <ContentAddressableMemory.variable>` : 2d array if no retrieval occurs, an appropriately shaped zero-valued array is returned. 
""" componentName = ContentAddressableMemory_FUNCTION class Parameters(StatefulFunction.Parameters): """ Attributes ---------- variable see `variable <ContentAddressableMemory.variable>` :default value: [[0], [0]] :type: ``list`` distance see `distance <ContentAddressableMemory.distance>` :default value: 0 :type: ``float`` distance_field_weights see `distance_field_weights <ContentAddressableMemory.distance_field_weights>` :default value: [1] :type: ``numpy.ndarray`` distance_function see `distance_function <ContentAddressableMemory.distance_function>` :default value: Distance(metric=COSINE) :type: ``Function`` distances_by_field see `distances_by_field <ContentAddressableMemory.distances_by_field>` :default value: [0] :type: ``numpy.ndarray`` distances_to_entries see `distances_to_entries <ContentAddressableMemory.distances_to_entries>` :default value: [0] :type: ``numpy.ndarray`` duplicate_entries_allowed see `duplicate_entries_allowed <ContentAddressableMemory.duplicate_entries_allowed>` :default value: False :type: ``bool or OVERWRITE`` duplicate_threshold see `duplicate_threshold <ContentAddressableMemory.duplicate_threshold>` :default value: 0 :type: ``float`` equidistant_entries_select see `equidistant_entries_select <ContentAddressableMemory.equidistant_entries_select>` :default value: `RANDOM` :type: ``str`` memory_num_fields see `memory_num_fields <ContentAddressableMemory.memory_num_fields>` :default value: 1 :type: ``int`` memory_field_shapes see `memory_field_shapes <ContentAddressableMemory.memory_field_shapes>` :default value: [1] :type: ``numpy.ndarray`` initializer see `initializer <ContentAddressableMemory.initializer>` :default value: None :type: ``numpy.ndarray`` max_entries see `max_entries <ContentAddressableMemory.max_entries>` :default value: 1000 :type: ``int`` noise see `noise <ContentAddressableMemory.noise>` :default value: 0.0 :type: ``float`` previous_value see `previous_value <ContentAddressableMemory.previous_value>` :default value: None :type: ``numpy.ndarray`` random_state see `random_state <ContentAddressableMemory.random_state>` :default value: None :type: ``numpy.random.RandomState`` rate see `rate <ContentAddressableMemory.rate>` :default value: 1.0 :type: ``float`` retrieval_prob see `retrieval_prob <ContentAddressableMemory.retrieval_prob>` :default value: 1.0 :type: ``float`` selection_function see `selection_function <ContentAddressableMemory.selection_function>` :default value: `OneHot`(mode=MIN_INDICATOR) :type: `Function` storage_prob see `storage_prob <ContentAddressableMemory.storage_prob>` :default value: 1.0 :type: ``float`` val_size see `val_size <ContentAddressableMemory.val_size>` :default value: 1 :type: ``int`` """ variable = Parameter([[0],[0]], pnl_internal=True, constructor_argument='default_variable') initializer = Parameter(None, pnl_internal=True) previous_value = Parameter(None, initializer='initializer', pnl_internal=True) retrieval_prob = Parameter(1.0, modulable=True) storage_prob = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM]) # FIX: MAKE THESE ATTRIBUTES RATHER THAN PARAMETERS: memory_num_fields = Parameter(None, stateful=False, read_only=True) memory_field_shapes = Parameter(None, stateful=False, read_only=True) # FIX: -------------------- distance_field_weights = Parameter([1], stateful=True, modulable=True, dependencies='initializer') duplicate_entries_allowed = Parameter(False, stateful=True) duplicate_threshold = Parameter(EPSILON, stateful=False, modulable=True) equidistant_entries_select = 
Parameter(RANDOM)
        rate = Parameter(1.0, modulable=True)
        noise = Parameter(0.0, modulable=True, aliases=[ADDITIVE_PARAM])
        max_entries = Parameter(1000)
        random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed')
        seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter)
        distance_function = Parameter(Distance(metric=COSINE), stateful=False, loggable=False)
        selection_function = Parameter(OneHot(mode=MIN_INDICATOR), stateful=False, loggable=False)
        distance = Parameter(0, stateful=True, read_only=True)
        distances_by_field = Parameter([0], stateful=True, read_only=True)
        distances_to_entries = Parameter([0], stateful=True, read_only=True)

        def _validate_retrieval_prob(self, retrieval_prob):
            retrieval_prob = float(retrieval_prob)
            if not all_within_range(retrieval_prob, 0, 1):
                return "must be a float in the interval [0,1]."

        def _validate_storage_prob(self, storage_prob):
            storage_prob = float(storage_prob)
            if not all_within_range(storage_prob, 0, 1):
                return "must be a float in the interval [0,1]."

        def _validate_distance_field_weights(self, field_weights):
            if self.distance_field_weights._user_specified is True and self.initializer.default_value is not None:
                field_weights = np.array(field_weights)
                if not np.isscalar(field_weights) and field_weights.ndim != 1:
                    return "must be a scalar or a list or 1d array of scalars."
                fw_len = len(field_weights)
                num_fields = convert_all_elements_to_np_array(self.initializer.default_value).shape[1]
                if len(field_weights) not in {1, num_fields}:
                    return f"length ({fw_len}) must be same as number of fields " \
                           f"in entries of initializer ({num_fields})."
                if not np.any(field_weights):
                    warnings.warn(f"All weights in the 'distance_field_weights' Parameter of {self._owner.name} "
                                  f"are set to '0', so all entries of its memory will be treated as duplicates.")

        def _validate_equidistant_entries_select(self, equidistant_entries_select):
            if equidistant_entries_select not in equidistant_entries_select_keywords:
                return f"must be {' or '.join(equidistant_entries_select_keywords)}."

        def _validate_duplicate_entries_allowed(self, duplicate_entries_allowed):
            if not isinstance(duplicate_entries_allowed, bool) and duplicate_entries_allowed != OVERWRITE:
                return "must be a bool or 'OVERWRITE'."
def _validate_initializer(self, initializer): pass def _parse_initializer(self, initializer): if initializer is not None: initializer = ContentAddressableMemory._enforce_memory_shape(initializer) return initializer @tc.typecheck def __init__(self, # FIX: REINSTATE WHEN 3.6 IS RETIRED: # default_variable=None, # retrieval_prob: Optional[Union[int, float]]=None, # storage_prob: Optional[Union[int, float]]=None, # rate: Optional[Union[int, float, list, np.ndarray]]=None, # noise: Optional[Union[int, float, list, np.ndarray, callable]]=None, # initializer:Optional[Union[list, np.ndarray]]=None, # distance_field_weights:Optional[Union[list, np.ndarray]]=None, # distance_function:Optional[Union[Distance, is_function_type]]=None, # selection_function:Optional[Union[OneHot, is_function_type]]=None, # duplicate_entries_allowed:Optional[Union[(bool, Literal[OVERWRITE]]]=None, # duplicate_threshold:Optional[int]=None, # equidistant_entries_select:Optional[Literal[Union[RANDOM, OLDEST, NEWEST]]]=None, # max_entries:Optional[int]=None, # seed:Optional[int]=None, # params:Optional[Union[list, np.ndarray]]=None, # owner=None, # prefs:tc.optional(is_pref_set)=None): default_variable=None, retrieval_prob=None, storage_prob=None, rate=None, noise=None, initializer=None, distance_field_weights=None, distance_function=None, selection_function=None, duplicate_entries_allowed=None, duplicate_threshold=None, equidistant_entries_select=None, max_entries=None, seed=None, params=None, owner=None, prefs:tc.optional(is_pref_set)=None): self._memory = [] super().__init__( default_variable=default_variable, retrieval_prob=retrieval_prob, storage_prob=storage_prob, initializer=initializer, duplicate_entries_allowed=duplicate_entries_allowed, duplicate_threshold=duplicate_threshold, equidistant_entries_select=equidistant_entries_select, distance_function=distance_function, distance_field_weights=distance_field_weights, rate=rate, noise=noise, max_entries=max_entries, seed=seed, params=params, owner=owner, prefs=prefs, ) if self.previous_value is not None: self.parameters.memory_num_fields.set(self.previous_value.shape[1], override=True) self.parameters.memory_field_shapes.set([item.shape for item in self.previous_value[0]], override=True) def _parse_selection_function_variable(self, variable, context=None): # this should be replaced in the future with the variable # argument when function ordering (and so ordering of parsers) # is made explicit distance_result = self.distance_function.parameters.value._get(context) # TEST PRINT: # print(distance_result, self.distance_function.defaults.value) return np.asfarray([ distance_result if i == 0 else np.zeros_like(distance_result) for i in range(self.defaults.max_entries) ]) def _validate(self, context=None): """Validate distance_function, selection_function and memory store""" distance_function = self.distance_function if self.get_previous_value(context) is not None: test_var = self.get_previous_value(context)[0] else: test_var = self.defaults.variable if isinstance(distance_function, type): distance_function = distance_function(default_variable=test_var) fct_msg = 'Function type' else: distance_function.defaults.variable = [test_var,test_var] distance_function._instantiate_value(context) fct_msg = 'Function' if (isinstance(distance_function, Distance) and distance_function.metric == COSINE and any([len(v)==1 for v in test_var])): warnings.warn(f"{self.__class__.__name__} is using {distance_function} with metric=COSINE and has " f"at least one memory field that is a scalar 
f"(i.e., size=1), which will always produce "
                          f"a distance of 0 (the angle of scalars is not defined).")

        field_wts_homog = np.full(len(test_var), 1).tolist()
        field_wts_heterog = np.full(len(test_var), range(0, len(test_var))).tolist()

        for granularity, field_weights in product(['full_entry', 'per_field'],
                                                  [field_wts_homog, field_wts_heterog]):
            try:
                distance_result = self._get_distance(test_var, test_var, field_weights, granularity, context=context)
            except Exception:
                raise FunctionError(f"{fct_msg} specified for {repr(DISTANCE_FUNCTION)} arg of "
                                    f"{self.__class__.__name__} ({distance_function}) must accept an array "
                                    f"with two lists or 1d arrays, or a 2d array, as its argument.")
            if granularity == 'full_entry' and not np.isscalar(distance_result):
                raise FunctionError(f"Value returned by {repr(DISTANCE_FUNCTION)} "
                                    f"({distance_function.__class__.__name__}) specified for "
                                    f"{self.__class__.__name__} must return a scalar if "
                                    f"{repr(DISTANCE_FIELD_WEIGHTS)} is not specified or is homogeneous "
                                    f"(i.e., all elements are the same).")
            if granularity == 'per_field' and not len(distance_result) == len(field_weights):
                raise FunctionError(f"Value returned by {repr(DISTANCE_FUNCTION)} "
                                    f"({distance_function.__class__.__name__}) specified for "
                                    f"{self.__class__.__name__} must return an array "
                                    f"if {repr(DISTANCE_FIELD_WEIGHTS)} is a non-homogeneous list or array "
                                    f"(i.e., not all elements are the same).")

        # FIX: 4/5/21 SHOULD VALIDATE NOISE AND RATE HERE AS WELL?

        # Default to full memory
        selection_function = self.selection_function
        test_var = np.asfarray([distance_result if i == 0 else np.zeros_like(distance_result)
                                for i in range(self._get_current_parameter_value('max_entries', context))])
        if isinstance(selection_function, type):
            selection_function = selection_function(default_variable=test_var, context=context)
            fct_string = 'Function type'
        else:
            selection_function.defaults.variable = test_var
            selection_function._instantiate_value(context)
            fct_string = 'Function'
        try:
            result = np.asarray(selection_function(test_var, context=context))
        except Exception:
            raise FunctionError(f'{fct_string} specified for {repr(SELECTION_FUNCTION)} arg of {self.__class__} '
                                f'({selection_function}) must accept a 1d array as its argument')
        if result.shape != test_var.shape:
            raise FunctionError(f'Value returned by {repr(SELECTION_FUNCTION)} specified for {self.__class__} '
                                f'({result}) must return an array of the same length it receives')

    @handle_external_context()
    def _update_default_variable(self, new_default_variable, context=None):
        """Override method on parent (StatefulFunction) since it can't handle arbitrarily-shaped fields"""
        if not self.parameters.initializer._user_specified and self.parameters.variable._user_specified:
            new_default_variable = self.parameters.variable.default_value
        super(StatefulFunction, self)._update_default_variable(new_default_variable, context=context)

    def _initialize_previous_value(self, initializer, context=None):
        """Ensure that initializer is appropriate for assignment as memory attribute and assign as previous_value

        If specified and it is the first entry:
        - set memory_num_fields and memory_field_shapes based on initializer
        - use to set previous_value (and return previous_value)
          (must be done here rather than in validate_params, as it is needed to initialize previous_value)
        """
        if initializer is None or convert_all_elements_to_np_array(initializer).size == 0:
            return None

        # FIX: HOW DOES THIS RELATE TO WHAT IS DONE IN __init__()?
# Set memory fields shapes if this is the first entry self.parameters.memory_num_fields.set(initializer.shape[1], context=context, override=True) self.parameters.memory_field_shapes.set([item.shape for item in initializer[0]], context=context, override=True) self.parameters.previous_value.set(None, context, override=True) for entry in initializer: # Store each item, which also validates it by call to _validate_entry() if not self._store_memory(entry, context): warnings.warn(f"Attempt to initialize memory of {self.__class__.__name__} with an entry ({entry}) " f"that is identical to an existing one while 'duplicate_entries_allowed'==False; " f"that entry has been skipped") previous_value = self._memory self.parameters.previous_value.set(previous_value, context, override=True) return previous_value def _instantiate_attributes_before_function(self, function=None, context=None): self._initialize_previous_value(self.parameters.initializer._get(context), context) if isinstance(self.distance_function, type): self.distance_function = self.distance_function(context=context) if isinstance(self.selection_function, type): self.selection_function = self.selection_function(context=context) @handle_external_context(fallback_most_recent=True) def reset(self, new_value=None, context=None): """ reset(<new_dictionary> default={}) Clears the memory in `previous_value <ContentAddressableMemory.previous_value>`. If **new_value** is passed into reset or if the `initializer <ContentAddressableMemory.initializer>` attribute contains a value besides [], then that value is used to start the new memory in `previous_value <ContentAddressableMemory.previous_value>`. Otherwise, the new `previous_value <ContentAddressableMemory.previous_value>` memory starts out as None. `value <ContentAddressableMemory.value>` takes on the same value as `previous_value <ContentAddressableMemory.previous_value>`. """ if new_value is not None: value = self._initialize_previous_value(ContentAddressableMemory._enforce_memory_shape(new_value), context=context) else: # no arguments were passed in -- use current values of initializer attributes initializer = self._get_current_parameter_value("initializer", context) if initializer is not None: # set previous_value to initializer and get value value = self._initialize_previous_value(initializer, context=context) else: # no initializer, so clear previous_value and set value to None self.parameters.previous_value._get(context).clear() value = None self.parameters.value.set(value, context, override=True) return value def _function(self, variable:Optional[Union[list, np.array]]=None, context=None, params=None, ) -> list: """ Return entry in `memory <ContentAddressableMemory.memory>` that best matches `variable <ContentAddressableMemory.variable>`, then add `variable <ContentAddressableMemory.variable>` to `memory <ContentAddressableMemory.memory>` (see `above <ContentAddressableMemory_Execution>` for additional details). Arguments --------- variable : list or 2d array : default class_defaults.variable used to retrieve an entry from `memory <ContentAddressableMemory.memory>`, and then stored there. params : Dict[param keyword: param value] : default None a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the function. Values specified for parameters in the dictionary override any assigned to those parameters in arguments of the constructor. 
Returns ------- value of entry that best matches `variable <ContentAddressableMemory.variable>` : 1d array """ # Enforce variable to be shape of an entry (1d for ragged fields or 2d for regular ones) # - note: this allows entries with a single field to be specified as a 1d regular array # (i.e., without enclosing it in an outer list or array), which are converted to a 2d array variable = convert_all_elements_to_np_array(variable) if variable.dtype != object and variable.ndim==1: variable = np.expand_dims(variable, axis=0) retrieval_prob = np.array(self._get_current_parameter_value(RETRIEVAL_PROB, context)).astype(float) storage_prob = np.array(self._get_current_parameter_value(STORAGE_PROB, context)).astype(float) # get random state random_state = self._get_current_parameter_value('random_state', context) # get memory field weights (which are modulable) distance_field_weights = self._get_current_parameter_value('distance_field_weights', context) # If this is an initialization run, leave memory empty (don't want to count it as an execution step), # but set entry size and then return current value (variable[1]) for validation. if self.is_initializing: return variable # Set memory fields sizes and total size if this is the first entry if self.parameters.previous_value._get(context) is None: self.parameters.memory_num_fields.set(len(variable), context=context, override=True) self.parameters.memory_field_shapes.set([item.shape for item in variable], context=context, override=True) # Retrieve entry from memory that best matches variable if retrieval_prob == 1.0 or (retrieval_prob > 0.0 and retrieval_prob > random_state.uniform()): entry = self.get_memory(variable, distance_field_weights, context).copy() else: # QUESTION: SHOULD IT RETURN ZERO VECTOR OR NOT RETRIEVE AT ALL (LEAVING VALUE AND OutputPort FROM LAST TRIAL)? 
        # CURRENT PROBLEM WITH LATTER IS THAT IT CAUSES CRASH ON INIT, SINCE NOT OUTPUT_PORT
        #  SO, WOULD HAVE TO RETURN ZEROS ON INIT AND THEN SUPPRESS AFTERWARDS, AS MOCKED UP BELOW
            entry = self.uniform_entry(0, context)

        # Store variable in memory
        if storage_prob == 1.0 or (storage_prob > 0.0 and storage_prob > random_state.uniform()):
            self._store_memory(variable, context)

        return entry

    def _validate_entry(self, entry:Union[list, np.ndarray], context) -> None:

        field_shapes = self.parameters.memory_field_shapes.get(context)
        num_fields = self.parameters.memory_num_fields.get(context)

        if not entry.ndim:
            # IMPLEMENTATION NOTE: Remove this if/when >2d arrays are supported more generally in PsyNeuLink
            raise FunctionError(f"Attempt to store and/or retrieve an entry in {self.__class__.__name__} that "
                                f"has no dimensions ({entry}); must be a list or 1d or 2d array.")

        if entry.ndim > 2:
            # IMPLEMENTATION NOTE: Remove this if/when >2d arrays are supported more generally in PsyNeuLink
            raise FunctionError(f"Attempt to store and/or retrieve an entry in {self.__class__.__name__} ({entry}) "
                                f"that has more than 2 dimensions ({entry.ndim}); try flattening innermost ones.")

        if not len(entry) == num_fields:
            raise FunctionError(f"Attempt to store and/or retrieve entry in {self.__class__.__name__} ({entry}) "
                                f"that has an incorrect number of fields ({len(entry)}; should be {num_fields}).")

        owner_name = f'of {self.owner.name}' if self.owner else ''
        for i, field in enumerate(entry):
            field = np.array(field)
            # IMPLEMENTATION NOTE: Remove requirement field.ndim==1 if/when >2d arrays are supported more generally
            if field.ndim != 1 or field.shape != field_shapes[i]:
                raise FunctionError(f"Field {i} of entry ({entry}) has incorrect shape ({field.shape}) "
                                    f"for memory of '{self.name}{owner_name}'; should be: {field_shapes[i]}.")

    def uniform_entry(self, value:Union[int, float], context) -> np.ndarray:
        return [np.full(i, value) for i in self.parameters.memory_field_shapes._get(context)]

    @handle_external_context()
    def get_memory(self, cue:Union[list, np.ndarray], field_weights=None, context=None) -> np.ndarray:
        """get_memory(cue, field_weights=None, context=None)

        Retrieve an entry from `memory <ContentAddressableMemory.memory>` based on `distance_function
        <ContentAddressableMemory.distance_function>` and `selection_function
        <ContentAddressableMemory.selection_function>`.

        Arguments
        ---------
        cue : list or 2d array
            must have the same number and shapes of fields as existing entries in
            `memory <ContentAddressableMemory.memory>`.

        Returns
        -------
        entry retrieved : 2d array
            if no retrieval occurs, returns an appropriately shaped zero-valued array.

        """
        # QUESTION: SHOULD IT RETURN ZERO VECTOR OR NOT RETRIEVE AT ALL (LEAVING VALUE AND OutputPort FROM LAST TRIAL)?
        #           ALSO, SHOULD PROBABILISTIC SUPPRESSION OF RETRIEVAL BE HANDLED HERE OR function (AS IT IS NOW).
        # FIX: RETRIEVE BASED ON SIMILARITY WITHIN EACH FIELD WEIGHTED BY distance_field_weights
        _memory = self.parameters.previous_value._get(context)

        # if no entries in memory, return the zero vector
        if _memory is None:
            return self.uniform_entry(0, context)

        cue = convert_all_elements_to_np_array(cue)
        self._validate_entry(cue, context)

        # Get mean of field-wise distances between cue and each entry in memory
        distances_to_entries = []
        for entry in _memory:
            distances_to_entries.append(self._get_distance(cue, entry, field_weights, 'full_entry', context))

        # Get the best-match(es) in memory based on selection_function and return as non-zero value(s) in an array
        selection_array = self.selection_function(distances_to_entries, context=context)
        indices_of_selected_items = np.flatnonzero(selection_array)

        # Single entry identified
        if len(indices_of_selected_items) == 1:
            index_of_selected_item = int(np.flatnonzero(selection_array))

        # More than one entry identified
        else:
            # Check for any duplicate entries in matches and, if they are not allowed, return zeros
            if (not self.duplicate_entries_allowed
                    and any(self._is_duplicate(_memory[i], _memory[j], field_weights, context)
                            for i, j in combinations(indices_of_selected_items, 2))):
                warnings.warn(f"More than one entry matched cue ({cue}) in memory for {self.name}"
                              f"{'of ' + self.owner.name if self.owner else ''} even though "
                              f"{repr('duplicate_entries_allowed')} is False; zeros returned as retrieved item.")
                return self.uniform_entry(0, context)
            if self.equidistant_entries_select == RANDOM:
                random_state = self._get_current_parameter_value('random_state', context)
                index_of_selected_item = random_state.choice(indices_of_selected_items)
            elif self.equidistant_entries_select == OLDEST:
                index_of_selected_item = indices_of_selected_items[0]
            elif self.equidistant_entries_select == NEWEST:
                index_of_selected_item = indices_of_selected_items[-1]
            else:
                assert False, f"PROGRAM ERROR: bad specification ({repr(self.equidistant_entries_select)}) for " \
                              f"'equidistant_entries_select' parameter of {self.name}" \
                              f"{'for ' + self.owner.name if self.owner else ''}"

        best_match = _memory[index_of_selected_item]
        best_match_distances = self._get_distance(cue, best_match, field_weights, 'per_field', context)
        self.parameters.distance.set(distances_to_entries[index_of_selected_item], context, override=True)
        self.parameters.distances_by_field.set(best_match_distances, context, override=True)
        self.parameters.distances_to_entries.set(distances_to_entries, context, override=True)

        # Return entry
        return best_match

    def _store_memory(self, entry:Union[list, np.ndarray], context) -> bool:
        """Add an entry to `memory <ContentAddressableMemory.memory>`

        Arguments
        ---------
        entry : list or 2d array
            should be a list or 2d array containing 1d arrays (fields), each of which should be a list or
            at least a 1d array; scalars, 1d and simple 2d arrays are allowed, and are interpreted as a
            single entry with a single field, which is converted to a 3d array. If any entries already
            exist in `memory <ContentAddressableMemory.memory>`, then both the number of fields and their
            shapes must match existing entries (contained in the `memory_num_fields
            <ContentAddressableMemory.memory_num_fields>` and `memory_field_shapes
            <ContentAddressableMemory.memory_field_shapes>` attributes, respectively). All elements of
            all entries are converted to np.arrays.

        ..
           technical_note::
              this method supports adding entries with items in each field that are greater than 1d for
              potential future use (see format_for_storage() below); however, they are currently rejected
              in _validate_entry, as they may produce unexpected results (by returning entries that are
              greater than 2d).
        """

        self._validate_entry(entry, context)
        # convert all fields and entry itself to arrays
        entry = convert_all_elements_to_np_array(entry)

        num_fields = self.parameters.memory_num_fields._get(context)
        field_weights = self.parameters.distance_field_weights._get(context)

        # execute noise if it is a function
        noise = self._try_execute_param(self._get_current_parameter_value(NOISE, context), entry, context=context)
        if noise is not None:
            try:
                entry = entry + noise
            except Exception:
                raise FunctionError(f"'noise' for '{self.name}' of '{self.owner.name}' "
                                    f"not appropriate shape (single number or array of length {num_fields}).")

        existing_entries = self.parameters.previous_value._get(context)

        def format_for_storage(entry:np.ndarray) -> np.ndarray:
            """Format an entry to be added to memory

            Returns entry formatted to match the shape of `memory <EpisodicMemoryMechanism.memory>`,
            so that it can be appended (or, if it is the first, simply assigned) to memory:
            - if entry is a regular array (all fields [axis 0 items] have the same shape),
              returns object with ndim = entry.ndim + 1
              (see `technical_note <ContentAddressableMemory_Shapes>` above)
            - if the entry is a ragged array (fields [axis 0 items] have differing shapes),
              returns 2d object with dtype=object.
            """
            # Ragged array (i.e., fields of different shapes)
            if entry.ndim == 1 and entry.dtype == object:
                shape = (1, num_fields)
            # Regular array (all fields have the same shapes)
            elif entry.ndim >= 2:
                # Note: if ndim>2, the item in each field is >1d
                shape = (1, num_fields, entry.shape[1])
            else:
                raise FunctionError(f"Unrecognized format for entry to be stored in {self.name}: {entry}.")
            return np.atleast_3d(entry).reshape(shape)

        if existing_entries is not None:
            # Check for matches of entry with existing entries
            matches = [m for m in existing_entries if len(m) and self._is_duplicate(entry, m, field_weights, context)]

            # If duplicate entries are not allowed and entry matches any existing entries, don't store
            if matches and self.duplicate_entries_allowed is False:
                storage_succeeded = False

            # If duplicate_entries_allowed is True or OVERWRITE, replace value for matching entry:
            # FIX: SHOULD BE OVERWRITE or False
            elif matches and self.duplicate_entries_allowed == OVERWRITE:
                if len(matches) > 1:
                    # If there is already more than one duplicate, raise error as it is not clear what to overwrite
                    raise FunctionError(f"Attempt to store item ({entry}) in {self.name} "
                                        f"with 'duplicate_entries_allowed'='OVERWRITE' "
                                        f"when there is more than one matching entry in its memory; "
                                        f"'duplicate_entries_allowed' may have previously been set to 'True'")
                try:
                    index = existing_entries.index(entry)
                except AttributeError:
                    index = [i for i, e in enumerate(existing_entries) if np.all(e == matches[0])][0]
                except ValueError:
                    index = existing_entries.tolist().index(entry)
                existing_entries[index] = entry
                storage_succeeded = True
            else:
                # Add to existing entries
                existing_entries = np.append(existing_entries, format_for_storage(entry), axis=0)
                storage_succeeded = True
        else:
            # No entries yet, so add new one
            existing_entries = format_for_storage(entry)
            storage_succeeded = True

        if len(existing_entries) > self.max_entries:
            existing_entries = np.delete(existing_entries, 0, axis=0)
        self.parameters.previous_value._set(existing_entries, context)
        self._memory = existing_entries

        return storage_succeeded

    def _get_distance(self, cue:Union[list, np.ndarray],
                      candidate:Union[list, np.ndarray],
                      field_weights:Union[list, np.ndarray],
                      granularity:str,    # granularity:Literal[Union['full_entry', 'per_field']],
                      context) -> Union[float, np.ndarray]:
        """Get distance of cue from candidate using `distance_function <ContentAddressableMemory.distance_function>`.

        - If **granularity**=='full_entry':
            returns a *single scalar distance* computed over the full **cue** and **candidate** entries
            if all elements of **field_weights** are equal (i.e., it is a homogeneous array); otherwise
            it is used to weight the distance computed between each field of **cue** and the
            corresponding one of **candidate**, when computing their mean field-wise distances.

        - If **granularity**=='per_field':
            returns an *array of distances* computed field-wise (Hadamard) for **cue** and **candidate**,
            weighted by **field_weights**.

        .. note::
           granularity is only used for reporting field-wise distances in `distances_by_field
           <ContentAddressableMemory.distances_by_field>`, and not used to determine retrieval or storage.

        :returns: scalar if **granularity**=='full_entry'; array if **granularity**=='per_field'
        """

        # Get distance function and params
        distance_fct = self.parameters.distance_function._get(context)
        num_fields = self.parameters.memory_num_fields._get(context) or len(field_weights)
        if field_weights is None:
            # Could be from get_memory called from COMMAND LINE without field_weights
            field_weights = self._get_current_parameter_value('distance_field_weights', context)
        field_weights = np.atleast_1d(field_weights)

        if granularity == 'per_field':
            # Note: this is just used for reporting, and not for determining storage or retrieval
            # Replace None's with 0 to allow multiplication
            distances_by_field = np.array([distance_fct([cue[i], candidate[i]])
                                           for i in range(num_fields)]
                                          ) * np.array([f if f is not None else 0 for f in field_weights])
            # If field_weights is a scalar, splay out as array of length num_fields so it can be iterated over
            if len(field_weights) == 1:
                field_weights = np.full(num_fields, field_weights[0])
            # Replace 0's with None's for fields with None in field_weights
            distances_by_field = np.array([distances_by_field[i] if f is not None else None
                                           for i, f in enumerate(field_weights)])
            return distances_by_field

        elif granularity == 'full_entry':
            # Use first element as scalar if it is a homogeneous array (i.e., all elements are the same)
            field_weights = field_weights[0] if np.all(field_weights[0] == field_weights) else field_weights
            distance_by_fields = not np.isscalar(field_weights)
            if distance_by_fields:
                num_non_zero_fields = len([fw for fw in field_weights if fw])
                # Get mean of field-wise distances between cue and each entry in memory, weighted by field_weights
                distance = np.sum([distance_fct([cue[i], candidate[i]]) * field_weights[i]
                                   for i in range(num_fields) if field_weights[i]]) / num_non_zero_fields
            else:
                # Get distance between the entire cue vector and that of each entry in memory
                # Note: in this case, field_weights is just a scalar coefficient
                distance = distance_fct([np.hstack(cue), np.hstack(candidate)]) * field_weights
            return distance

        else:
            assert False, f"PROGRAM ERROR: call to 'ContentAddressableMemory.get_distance()' method " \
                          f"with invalid 'granularity' argument ({granularity}); " \
                          f"should be 'full_entry' or 'per_field'."
    def _parse_distance_function_variable(self, variable):
        return convert_to_np_array([variable[0], variable[0]])

    @classmethod
    def _enforce_memory_shape(cls, memory):
        # Enforce memory to be 2d for ragged fields or 3d for regular ones
        # - note: this also allows memory (e.g., via initializer or reset) to be specified with a single entry
        #         (i.e., without enclosing it in an outer list or array)
        memory = convert_all_elements_to_np_array(memory)
        memory = np.atleast_2d(memory)
        if memory.dtype != object and memory.ndim == 2:
            memory = np.expand_dims(memory, axis=0)
        return memory

    def _is_duplicate(self, entry1:np.ndarray, entry2:np.ndarray, field_weights:np.ndarray, context) -> bool:
        """Determine whether two entries are duplicates

        Duplicates are treated as ones with a distance within the tolerance specified by
        duplicate_threshold. Distances are computed using distance_field_weights.
        """
        if (self._get_distance(entry1, entry2, field_weights, 'full_entry', context)
                <= self.parameters.duplicate_threshold.get(context)):
            return True
        return False

    @handle_external_context()
    def add_to_memory(self, entries:Union[list, np.ndarray], context=None):
        """Add one or more entries into `memory <ContentAddressableMemory.memory>`

        Arguments
        ---------
        entries : list or array
            a single entry (list or array) or list or array of entries, each of which must be a valid
            entry; each must have the same number and shapes of corresponding fields;
            items are added to memory in the order listed.
        """
        entries = self._parse_memories(entries, 'add_to_memory', context)

        for entry in entries:
            self._store_memory(entry, context)

    @handle_external_context()
    def delete_from_memory(self,
                           entries:Union[list, np.ndarray],
                           fields:Optional[Union[int, list]]=None,
                           context=None):
        """Delete one or more entries from `memory <ContentAddressableMemory.memory>`

        Arguments
        ---------
        entries : list or array
            a single entry (list or 2d array) or list or array of entries, each of which must be a valid
            entry (i.e., same number of fields and shapes of each as entries already in
            `memory <ContentAddressableMemory.memory>`).

        fields : int or list : default None
            if None, delete all entries in `memory <ContentAddressableMemory.memory>` that are identical
            to any of the **entries** specified; if int or list, delete all entries with the same values
            as those in the field(s) specified.
        """
        memories = self._parse_memories(entries, 'delete_from_memory', context)
        # FIX: ??IS THIS NEEDED (IS IT JUST A HOLDOVER FROM KEYS OR NEEDED FOR LIST-TO-LIST COMPARISON BELOW?):
        entries = [list(m) for m in memories]
        fields = convert_to_list(fields)

        existing_memory = self.parameters.previous_value._get(context)
        pruned_memory = existing_memory.copy()
        for entry, memory in product(entries, existing_memory):
            if (
np.all(entry == memory)
numpy.all
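The completion above relies on NumPy's elementwise comparison followed by np.all to decide whether a candidate entry matches a stored one exactly. A minimal, self-contained sketch of that pattern with hypothetical toy arrays (illustrative only, not the PsyNeuLink API):

import numpy as np

# Hypothetical stored memory: three entries, each with two fields of length 2
memory = np.array([[[1., 2.], [3., 4.]],
                   [[5., 6.], [7., 8.]],
                   [[1., 2.], [3., 4.]]])
entry = np.array([[1., 2.], [3., 4.]])

# entry == m broadcasts to a boolean array; np.all reduces it to a single bool
matches = [np.all(entry == m) for m in memory]
print(matches)  # [True, False, True]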
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
test_sfg2d
----------------------------------

Tests for `sfg2d` module.
"""

import sys
import unittest
from contextlib import contextmanager
from click.testing import CliRunner

import numpy as np
from datetime import timedelta

from sfg2d import SfgRecord


class TestQuartz(unittest.TestCase):
    def setUp(self):
        self.data = SfgRecord(
            '../sfg2d/data/00_sp_quarz_w650_gcm_e20s_pr3000.dat')
        self.result_dict = {
            'shape_of_data' : (1, 1, 3, 1600),
            'metadata' : {
                'central_wl' : 650,
                'material' : 'quarz',
                'sp_type' : 'sp',
                'gain' : -1,
                'exposure_time' : timedelta(0, 20),
            },
            'some_row' : ([0, 0, 1, slice(None, None)],
                          np.load('../data/00_quarts_row_1.npy')),
            'type' : 'sp',
            'pixel' : np.arange(1600),
            'times' : [timedelta(0)],
            'frames' : 1,
            'pp_delays' : np.array([0]),
            'wavelength' : np.load('../data/00_quartz_wavelength.npy'),
            'wavenumber' : np.load('../data/00_quartz_wavenumber.npy'),
        }

    def tearDown(self):
        del self.data

    def test_pp_delays_is_numpy_array(self):
        assert isinstance(self.data.pp_delays, type(np.zeros(1)))

    def test_data_is_numpy_array(self):
        assert isinstance(self.data.data, type(np.zeros(1)))

    def test_shape_of_data(self):
        assert self.data.data.shape == self.result_dict['shape_of_data']

    def test_metadata(self):
        md = self.data.metadata
        for key in self.result_dict['metadata']:
            assert self.data.metadata[key] == self.result_dict['metadata'][key]

    def test_some_row(self):
        ind, data = self.result_dict['some_row']
        assert np.all(self.data.data[ind] == data)

    def test_data_pixel(self):
        assert all(self.data.pixel == self.result_dict['pixel'])

    def test_data_times(self):
        assert self.data.times == self.result_dict['times']

    def test_data_frames(self):
        assert self.data.frames == self.result_dict['frames']

    def test_data_ppdelays(self):
        assert self.data.pp_delays == self.result_dict['pp_delays']

    def test_data_wavelength(self):
        wl = self.result_dict['wavelength']
        # Must allow for small machine precision differences
        small_values = np.abs(wl - self.data.wavelength)
        assert np.all(small_values < 10**(-12))

    def test_data_wavenumber(self):
        wl = self.result_dict['wavenumber']
        # Must allow for small machine precision differences
        self.data.metadata["vis_wl"] = 810  # Set the right vis_wl
        small_values = np.abs(wl - self.data.wavenumber)
        assert np.all(small_values < 10**(-12))


class TestSPE(TestQuartz):
    def setUp(self):
        self.data = SfgRecord('../data/08_h2o_gcm_e10m_ssp_purged1_pr6150nm_background.spe')
        self.result_dict = {
            'shape_of_data' : (1, 60, 1, 1600),
            'metadata' : {
                'exposure_time' : timedelta(0, 599, 945984),
                'material' : 'h2o',
                'polarisation' : 'ssp',
                'sp_type' : 'spe',
                'tempSet' : -75.0,
            },
            'some_row' : ([0, 23, 0, slice(None, None)],
                          np.load('../data/08_frame23.npy')),
            'type' : 'spe',
            'pixel' : np.arange(1600),
            'times' : np.load('../data/08_times.npy').tolist(),
            'frames' : 60,
            'pp_delays' : np.array([0]),
            'wavelength' :
np.load('../data/08_wavelength.npy')
numpy.load
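The tests above load reference arrays with np.load and compare them within machine precision. A minimal sketch of that save/load round-trip and tolerance check, using a hypothetical file name and synthetic data:

import numpy as np

reference = np.linspace(400., 800., 1600)  # hypothetical wavelength axis
np.save('wavelength.npy', reference)       # illustrative file name

loaded = np.load('wavelength.npy')
# tolerance-based comparison, as in the tests above
assert np.all(np.abs(loaded - reference) < 1e-12)
# np.allclose is the idiomatic shorthand for the same check
assert np.allclose(loaded, reference, atol=1e-12)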
import os import sys import copy import json import math import torch import pickle import random import logging import logging.config import numpy as np import torch.nn as nn from collections import Counter from numba import guvectorize from scipy.sparse import csr_matrix from imblearn.over_sampling import SMOTE, ADASYN from sklearn.linear_model import LogisticRegression from sklearn import metrics from sklearn.metrics import f1_score, precision_recall_fscore_support, precision_recall_curve, roc_curve, average_precision_score def check_args(args): if args.learn_method not in ['gnn', 'bigal', 'feature', 'rand', 'rand2', 'svdgnn', 'lr']: sys.exit('ERROR: invalid learning method.') if args.tvt_split not in [0, 1, 2, 3, 4]: sys.exit('ERROR: invalid train-vali-test data split selection.') def record_process(args, epoch): record_file = f'{args.out_path}/current_batch.txt' with open(record_file, 'w') as fw: fw.write(f'Current at batch {epoch}.') def get_simi_single_iter(params): entries_batch, feats, _get_simi = params ii, jj = entries_batch.T if isinstance(feats, np.ndarray): simi = _get_simi(feats[ii], feats[jj]) else: simi = _get_simi(feats[ii].toarray(), feats[jj].toarray()) return ii, jj, simi @guvectorize(['(float64[:], float64[:], float64[:])'], '(n),(n)->()') def getPCC(u1, u2, simi_score): eps = 1e-12 nz_u1 = u1.nonzero()[0] nz_u2 = u2.nonzero()[0] nz_inter = np.array(list(set(nz_u1) & set(nz_u2))) assert len(nz_inter) > 0 mean_u1 = u1.sum() / len(nz_u1) mean_u2 = u2.sum() / len(nz_u2) nume = np.sum((u1[nz_inter] - mean_u1) * (u2[nz_inter] - mean_u2)) deno = np.sqrt(max(eps, np.sum((u1[nz_inter] - mean_u1) ** 2)) * max(eps, np.sum((u2[nz_inter] - mean_u2) ** 2))) # deno = np.sqrt(np.sum((u1[nz_u1] - mean_u1) ** 2) * np.sum((u2[nz_u2] - mean_u2) ** 2)) assert deno > 0 simi_score[0] = nume / deno simi_score[0] = max(min(simi_score[0], 1.0), -1.0) @guvectorize(['(float64[:], float64[:], float64[:])'], '(n),(n)->()') def getACOS(u1, u2, simi_score): eps = 1e-12 nz_u1 = u1.nonzero()[0] nz_u2 = u2.nonzero()[0] nz_inter = np.intersect1d(nz_u1, nz_u2) assert len(nz_inter) > 0 nume = np.sum(u1[nz_inter] * u2[nz_inter]) deno = np.sqrt(max(eps, np.sum(u1[nz_inter] ** 2)) * max(eps, np.sum(u2[nz_inter] ** 2))) # deno = np.sqrt(np.sum(u1[nz_u1] ** 2) * np.sum(u2[nz_u2] ** 2)) simi_score[0] = nume / deno simi_score[0] = max(min(simi_score[0], 1.0), 0.0) simi_score[0] = 2 * simi_score[0] - 1 @guvectorize(['(float64[:], float64[:], float64[:])'], '(n),(n)->()') def getCOS(u1, u2, simi_score): eps = 1e-12 nz_u1 = u1.nonzero()[0] nz_u2 = u2.nonzero()[0] nz_inter = np.intersect1d(nz_u1, nz_u2) assert len(nz_inter) > 0 nume = np.sum(u1[nz_inter] * u2[nz_inter]) deno = np.sqrt(max(eps, np.sum(u1[nz_inter] ** 2)) * max(eps, np.sum(u2[nz_inter] ** 2))) simi_score[0] = nume / deno simi_score[0] = max(min(simi_score[0], 1.0), 0.0) def getLogger(name, out_path, config_dir): config_dict = json.load(open(config_dir + '/log_config.json')) config_dict['handlers']['file_handler']['filename'] = f'{out_path}/log-{name}.txt' logging.config.dictConfig(config_dict) logger = logging.getLogger(name) std_out_format = '%(asctime)s - [%(levelname)s] - %(message)s' consoleHandler = logging.StreamHandler(sys.stdout) consoleHandler.setFormatter(logging.Formatter(std_out_format)) logger.addHandler(consoleHandler) return logger def evaluate(Dc, ds, features, gnn, classification, device, max_vali_f1, epoch, assigned=None): test_nodes = getattr(Dc, ds+'_test') if assigned is None: val_nodes = getattr(Dc, ds+'_val') else: val_nodes = 
assigned labels = getattr(Dc, ds+'_labels') if Dc.args.learn_method == 'rand2': labels_test = labels[test_nodes] logists = np.random.rand(len(labels_test), 2) predicts = np.zeros(len(labels_test)) logists_file = f'{Dc.args.out_path}/logists_test.txt' results = eval_n_save(test_nodes, labels_test, logists, predicts, logists_file) Dc.logger.info(results) return classification, results['f1'] elif Dc.args.learn_method == 'lr': train_nodes = getattr(Dc, ds+'_train_cls') features = features.numpy() feats_train = features[train_nodes] label_train = labels[train_nodes] feats_test = features[test_nodes] label_test = labels[test_nodes] clf = LogisticRegression(random_state=0).fit(feats_train, label_train) logists = clf.predict_proba(feats_test) logists_file = f'{Dc.args.out_path}/logists_test.txt' results = eval_n_save(test_nodes, label_test, logists,
np.round(logists[:,1])
numpy.round
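The 'lr' branch above converts predict_proba probabilities into hard labels by applying np.round to the positive-class column. A minimal sketch of that pattern on synthetic data (illustrative only; names here are not from the file above):

import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.RandomState(0)
X = rng.rand(100, 5)  # synthetic features
y = (X[:, 0] + 0.1 * rng.randn(100) > 0.5).astype(int)  # synthetic labels

clf = LogisticRegression(random_state=0).fit(X, y)
proba = clf.predict_proba(X)  # shape (100, 2): P(class 0), P(class 1)
# rounding the positive-class probability at 0.5 yields hard 0/1 predictions
predicts = np.round(proba[:, 1])
assert set(np.unique(predicts)) <= {0.0, 1.0}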
#!/usr/bin/env python3

import datetime
import logging
import os

import itchat
import numpy as np


class TSP:
    '''An implementation of MO-GA to solve the TSP problem.
    '''

    def __init__(self, generations, population, wechat_log=False):
        '''Init with data and several hyper-parameters.

        Args:
            generations: How many generations you want to evolve.
            population: The size of the population.
            wechat_log (optional): Whether to log to wechat or not. Default False.
        '''
        self.wechat_log = wechat_log
        if wechat_log:
            self.config_itchat()
        self.config_logger()
        self.log('*' * 120)
        self.log('-*- TSP INITIALIZATION STARTED -*-')
        self.data = np.loadtxt('ja9847.txt', skiprows=7, usecols=(1, 2))
        self.dist = np.load('dist.npy')
        self.m = len(self.data)
        self.n = population
        self.pm = 0.6
        self.q = round(population * 0.8)
        self.generations = generations
        self.population = np.zeros([self.n, self.m], dtype=int)
        self.best_min_dist = float('inf')
        self.best_solution = None
        self.best_generation = -1
        if __debug__:
            self.log('MODE:DEBUG')
        self.log('CITIES:{}'.format(self.m))
        self.log('POPULATION:{}'.format(self.n))
        self.log('PERMUTATION PROBABILITY:{:.2f}'.format(self.pm))
        self.log('ELITE COUNT:{}'.format(self.q))
        self.log('GENERATION:{}'.format(self.generations))
        self.init_population(0.4)
        self.log('-*- TSP INITIALIZATION FINISHED -*-\n')

    def config_itchat(self):
        '''Configure wechat log options.
        '''
        if __debug__:
            itchat.auto_login(enableCmdQR=2, hotReload=True)
        else:
            itchat.auto_login(hotReload=True)

    def log(self, message):
        '''Log message to the loggers that have been set up.

        Args:
            message: The message to log.
        '''
        logging.info(message)
        if self.wechat_log:
            itchat.send('{0}-{1}'.format(datetime.datetime.now(), message),
                        toUserName='filehelper')

    def config_logger(self):
        '''Config the logger.
        '''
        logging.basicConfig(filename='tsp.log',
                            level=logging.INFO,
                            format='%(asctime)s-%(levelname)s-%(message)s')
        formatter = logging.Formatter(
            '%(asctime)s-%(levelname)s-%(message)s')
        console = logging.StreamHandler()
        console.setLevel(logging.DEBUG)
        console.setFormatter(formatter)
        logging.getLogger('').addHandler(console)

    def save_best_solution(self):
        if os.path.isfile('min_dist.txt'):
            min_dist = np.loadtxt('min_dist.txt')
            if min_dist > self.best_min_dist:
                np.savetxt('min_dist.txt', [self.best_min_dist], '%.3f')
                np.save('min.npy', self.best_solution)
                self.log('HISTORY MIN VALUE REFRESHED TO {:.3f}'.format(
                    self.best_min_dist))
        else:
            np.savetxt('min_dist.txt', [self.best_min_dist], '%.3f')
            np.save('min.npy', self.best_solution)
            self.log('HISTORY MIN VALUE REFRESHED TO {:.3f}'.format(
                self.best_min_dist))

    def align(self, x):
        '''Align an individual to make it start from 1.

        Args:
            x: The individual that will be shifted.
        '''
        i, = np.where(x == 0)
        return np.roll(x, -i)

    def init_population(self, p_inherit):
        '''Initialize the population.

        Args:
            p_inherit: The proportion of the population inherited from history.
        '''
        for i in range(self.n):
            self.population[i, :] = np.random.permutation(self.m)
        if os.path.isfile('min.npy') and os.path.isfile('min_dist.txt'):
            history_min = np.load('min.npy')
            history_min_dist = np.loadtxt('min_dist.txt')
            replace_count = round(p_inherit * self.n)
            self.population[:replace_count] = history_min
            self.log('INHERIT {0} INDIVIDUALS WITH {1}'.format(
                replace_count, history_min_dist))
        else:
            self.log('INIT FROM PURE RANDOM STATE')
        self.population = self.sort(self.population)

    def evolve(self):
        '''Evolve the population.
        '''
        self.log('-*- EVOLUTION STARTED -*- ')
        for generation in range(self.generations):
            pts = self.choose_crossover_pt()
            offsprings = []
            offsprings = [self.crossover(p1, p2, pts)
                          for p1, p2 in self.pair_parents()]
            offsprings = np.array(offsprings)
            offsprings = np.reshape(offsprings, (4 * len(offsprings), self.m))
            offsprings = self.mutation(offsprings)
            self.select(offsprings)
            min_dist = self.path_dist(self.population[0])
            if self.best_min_dist > min_dist:
                self.best_min_dist = min_dist
                self.best_solution = self.population[0]
                self.best_generation = generation
            self.log('GENERATION {0:04d} - AVG: {1:.3f} - MIN: {2:.3f}'.format(
                generation, self.avg_dist(), min_dist))
            if generation % 50 == 1:
                self.save_best_solution()
        self.log('-' * 120)
        self.log('MIN DIST: {0:.3f} IN GENERATION {1}'.format(
            self.best_min_dist, self.best_generation))
        self.save_best_solution()
        self.log('-*- EVOLUTION FINISHED -*- ')
        self.log('*' * 120)

    def avg_dist(self):
        '''Calculate the average path distance of the current population.

        Returns:
            Return the average path distance.
        '''
        dist_list = np.array([self.path_dist(v) for v in self.population])
        return np.average(dist_list)

    def select(self, offsprings):
        '''Select the next generation of the population among the current q
        elitist individuals and the 2n offsprings.

        Args:
            offsprings: The offsprings of the current generation.
        '''
        selecting = self.population[0:self.q]
        selecting = np.vstack((selecting, offsprings))
        selecting = self.sort(selecting)
        fits = self.fitness(len(selecting))
        sum_fits = sum(fits)
        p = [f / sum_fits for f in fits]
        cul_p = np.array([sum(p[:i]) for i in range(len(selecting))])
        new_population = []
        for i in range(self.n):
            prob = np.random.random_sample()
            choice = np.argmax(cul_p > prob)
            new_population.append(selecting[choice])
        self.population = self.sort(new_population)

    def sort(self, population):
        '''Sort the whole population depending on path distance.

        Args:
            population: The population which is going to be sorted.

        Returns:
            Return the sorted population.
        '''
        population = np.array(population)
        dist_list = np.array([self.path_dist(v) for v in population])
        return population[dist_list.argsort()]

    def fitness(self, length):
        '''Calculate the fitness value of an individual.

        Args:
            length: The length of the population.

        Returns:
            Return a list of fitness values.
        '''
        beta = 0.2
        fitness_list = np.arange(length, dtype=float)
        fitness_list[::2] = np.apply_along_axis(
            lambda i: (length - i + 1) / length, 0, fitness_list[::2])
        fitness_list[1::2] = np.apply_along_axis(
            lambda i: beta * np.power(1 - beta, i - 1), 0, fitness_list[1::2])
        return fitness_list

    def path_dist(self, v):
        '''Calculate the whole distance of an individual.

        Args:
            v: An individual in the population.

        Returns:
            Return a summation of the whole distances.
        '''
        dist_sum = 0
        for i in range(self.m - 1):
            dist_sum += self.dist[v[i], v[i + 1]]
        dist_sum += self.dist[v[-1], v[0]]
        return dist_sum

    def choose_crossover_pt(self):
        '''Choose the positions to perform the crossover operation.
        '''
        pt1, pt2 = (0, 0)
        while pt1 == pt2:
            pt1, pt2 = sorted(
np.random.randint(1, self.m - 1, size=2)
numpy.random.randint
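choose_crossover_pt draws two distinct cut points and orders them before the crossover. A minimal standalone sketch of that sampling loop (the city count m is a hypothetical stand-in for the instance loaded above):

import numpy as np

m = 9847  # hypothetical number of cities

# draw until the two cut points differ, then order them, as choose_crossover_pt does
pt1, pt2 = 0, 0
while pt1 == pt2:
    pt1, pt2 = sorted(np.random.randint(1, m - 1, size=2))
print(pt1, pt2)  # 1 <= pt1 < pt2 <= m - 2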
import random
import glob
import paddle
import numpy as np
import xml.etree.ElementTree as ET


def iou(bbox, priors):
    """
    Compute the IOU (intersection over union) between one ground-truth box and k prior boxes (priors).
    bbox: ground-truth box as (width, height), where width and height are normalized relative values.
    priors: generated prior boxes with shape (k, 2), where k is the number of priors
    """
    x = np.minimum(priors[:, 0], bbox[0])
    y = np.minimum(priors[:, 1], bbox[1])
    if np.count_nonzero(x == 0) > 0 or np.count_nonzero(y == 0) > 0:
        raise ValueError("Invalid ground-truth box")

    intersection = x * y
    bbox_area = bbox[0] * bbox[1]
    priors_area = priors[:, 0] * priors[:, 1]

    iou_ = intersection / (bbox_area + priors_area - intersection)
    return iou_


def avg_iou(bboxes, priors):
    """
    Compute the mean IOU between the ground-truth boxes and the k priors.
    """
    return np.mean([np.max(iou(bboxes[i], priors)) for i in range(bboxes.shape[0])])


def kmeans(bboxes, k, dist=np.median):
    """
    K-means clustering using the IOU value as the distance metric.
    bboxes: ground-truth boxes with shape (n, 2), where n is the number of boxes and 2 is (width, height)
    k: number of cluster centers, i.e. the number of prior boxes to generate
    dist: function used to update the cluster centers; defaults to the median
    Returns k priors with shape (k, 2).
    """
    # number of ground-truth boxes
    n = bboxes.shape[0]
    # distance array recording the distance between each ground-truth box and the k priors
    distances = np.empty((n, k))
    # records which cluster center each ground-truth box belongs to, i.e. the index of the prior
    # with the largest IOU
    last_priors = np.zeros((n,))
    # initialize the cluster centers by randomly picking k of the n ground-truth boxes
    np.random.seed()
    priors = bboxes[np.random.choice(n, k, replace=False)]

    while True:
        # compute the distance between each ground-truth box and the k priors,
        # using 1 - IOU(box, priors) as the distance metric
        for i in range(n):
            distances[i] = 1 - iou(bboxes[i], priors)
        # for each ground-truth box, pick the prior with the smallest distance and record its index
        nearest_priors = np.argmin(distances, axis=1)
        # if the indices did not change, the clustering has converged
        if (last_priors == nearest_priors).all():
            break
        # update the cluster centers
        for j in range(k):
            priors[j] = dist(bboxes[nearest_priors == j], axis=0)
        # update last_priors
        last_priors = nearest_priors

    return priors


def load_dataset(path):
    '''
    path: folder containing the annotation (xml) files
    '''
    dataset = []
    for xml_file in glob.glob("{}/*xml".format(path)):
        tree = ET.parse(xml_file)
        # image height
        height = int(tree.findtext("./size/height"))
        # image width
        width = int(tree.findtext("./size/width"))

        for obj in tree.iter("object"):
            # relative values
            xmin = int(obj.findtext("bndbox/xmin")) / width
            ymin = int(obj.findtext("bndbox/ymin")) / height
            xmax = int(obj.findtext("bndbox/xmax")) / width
            ymax = int(obj.findtext("bndbox/ymax")) / height

            xmin = np.float64(xmin)
            ymin = np.float64(ymin)
            xmax = np.float64(xmax)
            ymax = np.float64(ymax)
            if xmax == xmin or ymax == ymin:
                print(xml_file)
            dataset.append([xmax - xmin, ymax - ymin])  # relative width and height

    return np.array(dataset)  # convert to a numpy array


def bbox2tensor(bbox, max_num=30):
    '''
    bbox: label info in the format [cls,x,y,w,h, cls,x,y,w,h, cls,x,y,w,h], 5 elements per object
    max_num: maximum number of objects in one image; by default at most 30 objects
    Returns the label info as a tensor.
    '''
    gt_bbox = paddle.zeros(shape=[max_num, 5], dtype='float32')
    for i in range(len(bbox)//5):
        gt_bbox[i, 0] = bbox[i*5]
        gt_bbox[i, 1] = bbox[i*5+1]
        gt_bbox[i, 2] = bbox[i*5+2]
        gt_bbox[i, 3] = bbox[i*5+3]
        gt_bbox[i, 4] = bbox[i*5+4]
        if i >= max_num:
            break
    return gt_bbox


def calculate_iou(bbox1, bbox2):
    """Compute the IOU of the two boxes bbox1=(x1,y1,x2,y2) and bbox2=(x3,y3,x4,y4)."""
    intersect_bbox = [0., 0., 0., 0.]
    # intersection of bbox1 and bbox2
    if bbox1[2] < bbox2[0] or bbox1[0] > bbox2[2] or bbox1[3] < bbox2[1] or bbox1[1] > bbox2[3]:
        pass
    else:
        intersect_bbox[0] = max(bbox1[0], bbox2[0])
        intersect_bbox[1] = max(bbox1[1], bbox2[1])
        intersect_bbox[2] = min(bbox1[2], bbox2[2])
        intersect_bbox[3] = min(bbox1[3], bbox2[3])

    area1 = (bbox1[2] - bbox1[0]) * (bbox1[3] - bbox1[1])  # area of bbox1
    area2 = (bbox2[2] - bbox2[0]) * (bbox2[3] - bbox2[1])  # area of bbox2
    area_intersect = (intersect_bbox[2] - intersect_bbox[0]) * (intersect_bbox[3] - intersect_bbox[1])  # intersection area

    if area_intersect > 0:
        return area_intersect / (area1 + area2 - area_intersect)  # compute the IOU
    else:
        return 0


# convert the network output [tx, ty, th, tw] into predicted box coordinates [x1, y1, x2, y2]
def get_yolo_box_xxyy(pred, anchors, num_classes):
    """
    Convert the network output [tx, ty, th, tw] into predicted box coordinates [x1, y1, x2, y2],
    i.e. the [top-left corner, bottom-right corner] format.
    pred: network output, tensor
    anchors: a list giving the anchor sizes.
             In the official YOLOv2 config, anchors = [0.57273, 0.677385, 1.87446, 2.06253, 3.33843,
             5.47434, 7.88282, 3.52778, 9.77052, 9.16828], i.e. 5 anchors; the first anchor size
             [w, h] is [0.57273, 0.677385] and the fifth is [9.77052, 9.16828].
             Anchor sizes are expressed on the 13x13 feature map.
    num_classes: number of classes
    Returns the predicted boxes pred_box, whose shape is [N, num_anchors, 4, H, W], where 4 holds
    the four position coordinates.
    """
    num_rows = pred.shape[-2]
    num_cols = pred.shape[-1]
    num_anchors = len(anchors) // 2

    # pred has shape [batchsize, C, H, W] with C = num_anchors * (5 + num_classes)
    # reshape pred
    pred = pred.reshape([-1, num_anchors, 5 + num_classes, num_rows, num_cols])
    pred = paddle.transpose(pred, perm=[0, 3, 4, 1, 2])
    # extract the position-related values
    pred_location = pred[:, :, :, :, 0:4]

    anchors_this = []
    for ind in range(num_anchors):
        anchors_this.append([anchors[ind * 2], anchors[ind * 2 + 1]])
    # anchors_this = np.array(anchors_this).astype('float32')
    anchors_this = paddle.to_tensor(anchors_this)

    pred_box = paddle.zeros(pred_location.shape)

    # for b in range(batchsize):
    for i in range(num_rows):
        for j in range(num_cols):
            for k in range(num_anchors):
                pred_box[:, i, j, k, 0] = j  # column
                pred_box[:, i, j, k, 1] = i  # row
                pred_box[:, i, j, k, 2] = anchors_this[k][0]  # anchor width
                pred_box[:, i, j, k, 3] = anchors_this[k][1]  # anchor height

    # relative coordinates are used here: pred_box values lie between 0. and 1.0,
    # relative to the feature map size
    pred_box[:, :, :, :, 0] = (paddle.nn.functional.sigmoid(pred_location[:, :, :, :, 0]) + pred_box[:, :, :, :, 0]) / num_cols
    pred_box[:, :, :, :, 1] = (paddle.nn.functional.sigmoid(pred_location[:, :, :, :, 1]) + pred_box[:, :, :, :, 1]) / num_rows
    pred_box[:, :, :, :, 2] = paddle.exp(pred_location[:, :, :, :, 2]) * pred_box[:, :, :, :, 2] / num_cols
    pred_box[:, :, :, :, 3] = paddle.exp(pred_location[:, :, :, :, 3]) * pred_box[:, :, :, :, 3] / num_rows

    # convert the coordinates from xywh to xyxy, i.e. the [top-left corner, bottom-right corner] format
    pred_box[:, :, :, :, 0] = pred_box[:, :, :, :, 0] - pred_box[:, :, :, :, 2] / 2.
    pred_box[:, :, :, :, 1] = pred_box[:, :, :, :, 1] - pred_box[:, :, :, :, 3] / 2.
    pred_box[:, :, :, :, 2] = pred_box[:, :, :, :, 0] + pred_box[:, :, :, :, 2]
    pred_box[:, :, :, :, 3] = pred_box[:, :, :, :, 1] + pred_box[:, :, :, :, 3]

    return pred_box


# build the labels
def get_label(pred, gt_bboxs, anchors, iou_threshold, step_less_12800, num_classes=6, rescore=False):
    '''
    pred: network output
    gt_bboxs: ground-truth box info, [class, x, y, w, h], where x, y, w, h are normalized values
    anchors: a list giving the anchor sizes.
             In the official YOLOv2 config, anchors = [0.57273, 0.677385, 1.87446, 2.06253, 3.33843,
             5.47434, 7.88282, 3.52778, 9.77052, 9.16828] (coco), i.e. 5 anchors; the first anchor
             size [w, h] is [0.57273, 0.677385] and the fifth is [9.77052, 9.16828].
             Anchor sizes are expressed on the 13x13 feature map.
    step_less_12800: whether the training step count is below 12800, bool
    num_classes: number of classes
    Returns:
        label_objectness_confidence, label_location, label_classification, scale_location,
        object_mask, noobject_mask
    '''
    batchsize, _, h, w = pred.shape  # h = w = 13, feature map size 13x13
    _, nums, c = gt_bboxs.shape  # nums is the maximum number of objects per image, c = 5
    num_anchors = len(anchors) // 2  # num_anchors = 5

    # get the predicted boxes; their coordinates are in (top-left, bottom-right) format
    pred_box = get_yolo_box_xxyy(pred, anchors, num_classes)
    # shape is [batchsize, 13, 13, num_anchors, 4]
    pred_box = pred_box.numpy()
    gt_bboxs = gt_bboxs.numpy()  # shape = (batchsize, nums, 5)

    anchors_copy = np.array(anchors).reshape((num_anchors, 2))  # shape = (num_anchors, 2)
    anchors_copy = np.expand_dims(anchors_copy, 0).repeat(batchsize, axis=0)  # shape = (batchsize, num_anchors, 2)
    # print(anchors_copy.shape)
    # print(anchors_copy)

    label_objectness_confidence = np.zeros(shape=(batchsize, h, w, num_anchors), dtype='float32')
    label_location = np.zeros(shape=(batchsize, h, w, num_anchors, 4), dtype='float32')
    label_classification = np.zeros(shape=(batchsize, h, w, num_anchors, num_classes), dtype='float32')
    scale_location = 0.01 * np.ones((batchsize, h, w, num_anchors), dtype='float32')  # weighting coefficient for the location loss
    object_mask = np.zeros(shape=(batchsize, h, w, num_anchors), dtype='float32')  # mask for positions with an object
    noobject_mask = np.ones(shape=(batchsize, h, w, num_anchors), dtype='float32')  # mask for positions without an object

    # for predicted boxes not responsible for an object: if their IOU with a ground-truth box is above
    # iou_threshold (default 0.6), they are excluded from all loss terms
    iou_above_thresh_indices = np.zeros((batchsize, h, w, num_anchors))

    # when the training step count is below 12800, the location loss between predicted boxes and priors is computed
    if (step_less_12800):
        label_location[:, :, :, :, 0] = 0.5
        label_location[:, :, :, :, 1] = 0.5

    gt_cls = gt_bboxs[:, :, 0].astype(np.int32)  # shape = (batchsize, nums,)
    gt_center_x = gt_bboxs[:, :, 1]  # shape = (batchsize * nums)
    gt_center_y = gt_bboxs[:, :, 2]
    gt_w = gt_bboxs[:, :, 3]  # shape = (batchsize, nums,)
    gt_h = gt_bboxs[:, :, 4]

    gtx_min = gt_center_x - gt_w / 2.0
    gtx_max = gt_center_x + gt_w / 2.0
    gty_min = gt_center_y - gt_h / 2.0
    gty_max = gt_center_y + gt_h / 2.0

    target_indexs = np.where(gt_bboxs[:, :, 3] != 0)  # indices of real objects (boxes with non-zero width), [batch][num]

    i_float = gt_center_y * h
    i = np.floor(i_float).astype(np.int32)  # row index, shape = (batchsize, nums,)
    i = i[target_indexs]  # take the corresponding i
    j_float = gt_center_x * w  # shape = (batchsize, nums,)
    j = np.floor(j_float).astype(np.int32)  # column index
    j = j[target_indexs]  # take the corresponding j

    gt_bboxs_copy = np.expand_dims(gt_bboxs, 1).repeat(h * w * num_anchors, axis=1)  # shape = (batchsize, h*w*num_anchors, nums, 5)
    gt_bboxs_copy = gt_bboxs_copy.reshape((batchsize, h, w, num_anchors, nums, 5))[:, :, :, :, :, 1:]  # shape = (batchsize, h, w, num_anchors, nums, 5)

    gtx_min_copy = gt_bboxs_copy[:, :, :, :, :, 0] - gt_bboxs_copy[:, :, :, :, :, 2] / 2.  # shape = (batchsize, h, w, num_anchors, nums)
    gty_min_copy = gt_bboxs_copy[:, :, :, :, :, 1] - gt_bboxs_copy[:, :, :, :, :, 3] / 2.
    gtx_max_copy = gt_bboxs_copy[:, :, :, :, :, 0] + gt_bboxs_copy[:, :, :, :, :, 2] / 2.
gty_max_copy = gt_bboxs_copy[:, :, :, :, :, 1] + gt_bboxs_copy[:, :, :, :, :, 3] / 2. ious = [] for a in range(num_anchors): bbox1 = np.zeros((batchsize, nums, 4)) # 将真实框的中心点移到原点 bbox1[:, :, 2] = gt_w # gt_w.shape = (batchsize,nums,) bbox1[:, :, 3] = gt_h # shape = (batchsize,nums,) anchor_w = anchors[a * 2] anchor_h = anchors[a * 2 + 1] # x1 = np.maximum(bbox1[:, :, 0], 0) # x1.shape = (batchsize,nums,) # y1 = np.maximum(bbox1[:, :, 1], 0) x2 = np.minimum(bbox1[:, :, 2], anchor_w) # x2.shape = (batchsize,nums,) y2 = np.minimum(bbox1[:, :, 3], anchor_h) intersection = np.maximum(x2, 0.) * np.maximum(y2, 0.) # intersection.shape = (batchsize,nums,) s1 = gt_w * gt_h s2 = anchor_w * anchor_h union = s2 + s1 - intersection iou = intersection / union # iou.shape = (batchsize,nums,) ious.append(iou) ious = np.array(ious) # ious.shape = (num_anchors,batchsize,nums) inds_anchor = np.argmax(ious, axis=0) # inds.shape = (batchsize,nums,) # 获取与目标真实框IOU最大的anchor索引值 inds_anchor = inds_anchor[target_indexs].astype(np.int32) # 取出对应anchor索引值 # 设置掩码 object_mask[target_indexs[0], i, j, inds_anchor] = 1. # 把掩码中的对应位置设为1 noobject_mask[target_indexs[0], i, j, inds_anchor] = 0 # 把掩码中的对应位置设为 # 设置位置标签 # 对于负责预测目标的预测框, 需要计算位置损失 dx_label = j_float[target_indexs] - j # x方向上的偏移量,tx的标签值 dy_label = i_float[target_indexs] - i # y方向上的偏移量,ty的标签值 dw_label = np.log(j_float[target_indexs] / anchors_copy[target_indexs[0], inds_anchor, 0]) # tw的标签值 dh_label = np.log(i_float[target_indexs] / anchors_copy[target_indexs[0], inds_anchor, 1]) # th的标签值 label_location[target_indexs[0], i, j, inds_anchor, 0] = dx_label label_location[target_indexs[0], i, j, inds_anchor, 1] = dy_label label_location[target_indexs[0], i, j, inds_anchor, 2] = dw_label label_location[target_indexs[0], i, j, inds_anchor, 3] = dh_label # scale_location用来调节不同尺寸的锚框对损失函数的贡献,作为加权系数与位置损失函数相乘 scale_location[target_indexs[0], i, j, inds_anchor] = 2.0 - gt_w[target_indexs] * gt_h[target_indexs] # 设置类别标签 c = gt_cls[target_indexs] label_classification[target_indexs[0], i, j, inds_anchor, c] = 1. # 设置置信度标签 if rescore: # 计算对应的预测框与真实框之间的IOU值 bbox_pred_xyxy = pred_box[target_indexs[0], i, j, inds_anchor, :] # bbox_gt_xyxy = np.zeros(bbox_pred_xyxy.shape) # bbox_gt_xyxy[:,0] = gtx_min[target_indexs] # bbox_gt_xyxy[:,1] = gty_min[target_indexs] # bbox_gt_xyxy[:,2] = gtx_max[target_indexs] # bbox_gt_xyxy[:,3] = gty_max[target_indexs] x1 = np.maximum(bbox_pred_xyxy[:, 0], gtx_min[target_indexs]) # x1.shape = (batchsize,nums,) y1 =
np.maximum(bbox_pred_xyxy[:, 1], gty_min[target_indexs])
numpy.maximum
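# --- Illustration: a minimal vectorized IoU sketch (our own code, not part of
# the dataset row above). np.maximum / np.minimum replace the scalar max/min
# branching; the helper name iou_xyxy and the sample boxes are assumptions.
import numpy as np

def iou_xyxy(bbox1, bbox2):
    """IoU of two boxes given in [x1, y1, x2, y2] format."""
    ix1 = np.maximum(bbox1[0], bbox2[0])  # top-left corner of the intersection
    iy1 = np.maximum(bbox1[1], bbox2[1])
    ix2 = np.minimum(bbox1[2], bbox2[2])  # bottom-right corner of the intersection
    iy2 = np.minimum(bbox1[3], bbox2[3])
    # clamping to zero handles non-overlapping boxes without an explicit branch
    area_intersect = np.maximum(ix2 - ix1, 0.) * np.maximum(iy2 - iy1, 0.)
    area1 = (bbox1[2] - bbox1[0]) * (bbox1[3] - bbox1[1])
    area2 = (bbox2[2] - bbox2[0]) * (bbox2[3] - bbox2[1])
    return area_intersect / (area1 + area2 - area_intersect)

print(iou_xyxy(np.array([0., 0., 2., 2.]), np.array([1., 1., 3., 3.])))  # 1/7, about 0.1429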
import numpy as np

a = np.array([0, 0, 30, 10, 10, 20])
print(a)  # [ 0  0 30 10 10 20]

print(np.unique(a))  # [ 0 10 20 30]
print(type(np.unique(a)))  # <class 'numpy.ndarray'>

l = [0, 0, 30, 10, 10, 20]
print(l)  # [0, 0, 30, 10, 10, 20]
print(np.unique(l))  # [ 0 10 20 30]
print(type(np.unique(l)))  # <class 'numpy.ndarray'>

print(np.unique(a).size)  # 4
print(len(np.unique(a)))  # 4

u, counts = np.unique(a, return_counts=True)
print(u)  # [ 0 10 20 30]
print(counts)  # [2 2 1 1]
print(u[counts == 1])  # [20 30]
print(u[counts != 1])  # [ 0 10]

print(np.unique(a, return_counts=True))  # (array([ 0, 10, 20, 30]), array([2, 2, 1, 1]))
print(type(np.unique(a, return_counts=True)))  # <class 'tuple'>

u, indices = np.unique(a, return_index=True)
print(u)  # [ 0 10 20 30]
print(indices)  # [0 3 5 2]
print(a)  # [ 0  0 30 10 10 20]
print(a[indices])  # [ 0 10 20 30]

u, inverse = np.unique(a, return_inverse=True)
numpy.unique
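# --- Illustration: the row above is cut off right after return_inverse=True.
# As our own continuation (not the dataset's stored completion beyond that
# point), the inverse array maps each element of a back to its index in u,
# so u[inverse] reconstructs the original array; a handy label encoder.
import numpy as np

a = np.array([0, 0, 30, 10, 10, 20])
u, inverse = np.unique(a, return_inverse=True)
print(u)           # [ 0 10 20 30]
print(inverse)     # [0 0 3 1 1 2]
print(u[inverse])  # [ 0  0 30 10 10 20]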
## WAVE EQ
""" this script solves the 1D-wave equations, i.e.
    du/dt = -d/dx(1/2*u^2 + g*h - nu*du/dx) + F
    dh/dt = -d/dx(u*h)."""

import numpy as np
import matplotlib.pyplot as plt
from scipy import sparse
import time as tictoc
from scipy.signal import tukey

## physical parameters
Lx = 1.     # D = (0,Lx) is the domain
H = 10.     # depth at rest
g = 10.     # gravitational acceleration

## numerical parameters
nx = 200    # number of grid points for the variable h
nu = nx-1   # number of grid points for u
nt = 1000   # number of time steps to integrate
cfl = 0.9   # cfl number, 1 is the limit

dx = Lx/nx                          # grid spacing
x_h = np.arange(dx/2.,Lx,dx)        # grid x on h-points
x_u = np.arange(dx,Lx-dx+dx/2.,dx)  # on u-points

cph = np.sqrt(g*H)   # long wave phase speed
dt = cfl * dx / cph  # dt based on desired cfl-number

## parameters
rho = 1
v = 0.008
F0 = 500.
F = -F0*np.sin(x_h/Lx*8*np.pi)*np.sin(x_h/Lx*np.pi)**2

## OPERATORS
#GTx = (sparse.diags(np.ones(nx-1),1,shape=(nu,nx)) +\
#       sparse.diags(-np.ones(nx-1),0,shape=(nu,nx))) / dx
GTx = (sparse.diags(np.ones(nx),1,shape=(nu,nx)) +\
       sparse.diags(-np.ones(nx)
numpy.ones
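# --- Illustration: the operator under construction when the row cuts off is a
# one-sided difference matrix mapping h-points to u-points. Below is a sketch
# of such an operator with scipy.sparse.diags, using length-(nx-1) diagonals as
# one consistent choice of sizes; not necessarily the exact form the truncated
# script goes on to use.
import numpy as np
from scipy import sparse

nx = 200
Lx = 1.
dx = Lx / nx
x_h = np.arange(dx / 2., Lx, dx)  # h-points (cell centres)

# forward difference (h[i+1] - h[i]) / dx, shape (nx-1, nx)
GTx = (sparse.diags(np.ones(nx - 1), 1, shape=(nx - 1, nx))
       - sparse.diags(np.ones(nx - 1), 0, shape=(nx - 1, nx))) / dx

h = 2 * x_h                      # a linear field with slope 2
print(np.allclose(GTx @ h, 2.))  # True: the derivative is recovered exactly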
""" ColECM: Collagen ExtraCellular Matrix Simulation SIMULATION 2D ROUTINE Created by: <NAME> Created on: 09/03/2018 Last Modified: 19/04/2018 """ import numpy as np import sys, os, pickle import utilities as ut def cos_sin_theta_2D(vector, r_vector): """ cos_sin_theta_2D(vector, r_vector) Returns cosine and sine of angles of intersecting vectors betwen even and odd indicies Parameters ---------- vector: array_like, (float); shape=(n_vector, n_dim) Array of displacement vectors between connecting beads r_vector: array_like, (float); shape=(n_vector) Array of radial distances between connecting beads Returns ------- cos_the: array_like (float); shape=(n_vector/2) Cosine of the angle between each pair of displacement vectors sin_the: array_like (float); shape=(n_vector/2) Sine of the angle between each pair of displacement vectors r_prod: array_like (float); shape=(n_vector/2) Product of radial distance between each pair of displacement vectors """ n_vector = int(vector.shape[0]) n_dim = vector.shape[1] temp_vector = np.reshape(vector, (int(n_vector/2), 2, n_dim)) "Calculate |rij||rjk| product for each pair of vectors" r_prod = np.prod(np.reshape(r_vector, (int(n_vector/2), 2)), axis = 1) "Form dot product of each vector pair rij*rjk in vector array corresponding to an angle" dot_prod = np.sum(np.prod(temp_vector, axis=1), axis=1) "Form pseudo-cross product of each vector pair rij*rjk in vector array corresponding to an angle" cross_prod = np.linalg.det(temp_vector) "Calculate cos(theta) for each angle" cos_the = dot_prod / r_prod "Calculate sin(theta) for each angle" sin_the = cross_prod / r_prod return cos_the, sin_the, r_prod def calc_energy_forces_2D(pos, cell_dim, bond_indices, angle_indices, angle_bond_indices, param): """ calc_energy_forces(pos, cell_dim, bond_indices, angle_indices, angle_bond_indices, vdw_coeff, param) Return tot potential energy and forces on each bead in simulation Parameters ---------- dxy: array_like (float); shape=(2, n_bead, n_bead) Displacement along x and y axis between each bead r2: array_like (float); shape=(n_bead, n_bead) Square of Radial disance between each bead bond_matrix: array_like (int); shape=(n_bead, n_bead) Matrix determining whether a bond is present between two beads verlet_list: array_like (int); shape=(n_bead, n_bead) Matrix determining whether two beads are within rc radial distance vdw_param: array_like (float); shape=(2) Sigma and epsilon paameters for Van de Waals forces bond_param: array_like (float); shape=(2) Equilibrium length and energy paameters for bonded forces angle_param: array_like (float); shape=(2) Equilibrium angle and energy paameters for angular forces rc: float Interaction cutoff radius for non-bonded forces bond_beads: array_like, (int); shape=(n_angle, 3) Array containing indicies in pos array all 3-bead angular interactions dxy_index: array_like, (int); shape=(n_bond, 2) Array containing indicies in dx and dy arrays of all bonded interactions r_index: array_like, (int); shape=(n_bond, 2) Array containing indicies in r array of all bonded interactions Returns ------- pot_energy: float Total potential energy of simulation cell frc_beads: array_like (float); shape=(n_beads, n_dim) Forces acting upon each bead due to positional array virial_tensor: array_like (float); shape=(n_dim, n_dim) Virial term of pressure tensor components """ f_beads = np.zeros((2, pos.shape[0])) pot_energy = 0 cut_frc = ut.force_vdw(param['rc']**2, param['vdw_sigma'], param['vdw_epsilon']) cut_pot = ut.pot_vdw(param['rc']**2, param['vdw_sigma'], 
param['vdw_epsilon']) virial_tensor = np.zeros((2, 2)) n_bond = bond_indices[0].shape[0] pair_dist = ut.get_distances(pos, cell_dim) pair_r2 = np.sum(pair_dist**2, axis=0) if n_bond > 0: "Bond Lengths" bond_r = np.sqrt(pair_r2[bond_indices]) #verlet_list_r0 = ut.check_cutoff(r_half, param['bond_r0']) #verlet_list_r1 = ut.check_cutoff(r_half, param['bond_r1']) bond_pot = ut.pot_harmonic(bond_r, param['bond_r0'], param['bond_matrix'][bond_indices])# * verlet_list_r0 #bond_pot_1 = ut.pot_harmonic(r_half, param['bond_r1'], param['bond_k1']) * verlet_list_r1 pot_energy += 0.5 * np.sum(bond_pot)# + np.sum(bond_pot_1) bond_frc = ut.force_harmonic(bond_r, param['bond_r0'], param['bond_matrix'][bond_indices])# * verlet_list_r0 #bond_frc_1 = ut.force_harmonic(r_half, param['bond_r1'], param['bond_k1']) * verlet_list_r1 temp_frc = np.zeros((2, pos.shape[0], pos.shape[0])) for i in range(2): temp_frc[i][bond_indices] += bond_frc * pair_dist[i][bond_indices] / bond_r f_beads[i] += np.sum(temp_frc[i], axis=1) #for i in range(2): # for j in range(2): virial_tensor[i][j] += np.sum(bond_frc / r_half * distances[i][indices_half] * distances[j][indices_half]) "Bond Angles" try: angle_dist = pair_dist.T[angle_bond_indices].T "Make array of vectors rij, rjk for all connected bonds" vector = np.stack((angle_dist[0], angle_dist[1]), axis=1) n_vector = int(vector.shape[0]) "Find |rij| values for each vector" r_vector = np.sqrt(pair_r2[angle_bond_indices]) cos_the, sin_the, r_prod = cos_sin_theta_2D(vector, r_vector) pot_energy += np.sum(param['angle_array'] * (cos_the + 1)) "Form arrays of |rij| vales, cos(theta) and |rij||rjk| terms same shape as vector array" r_array = np.reshape(np.repeat(r_vector, 2), vector.shape) sin_the_array = np.reshape(np.repeat(sin_the, 4), vector.shape) r_prod_array = np.reshape(np.repeat(r_prod, 4), vector.shape) "Form left and right hand side terms of (cos(theta) rij / |rij|^2 - rjk / |rij||rjk|)" r_left = vector / r_prod_array r_right = sin_the_array * vector / r_array**2 ij_indices = np.arange(0, n_vector, 2) jk_indices = np.arange(1, n_vector, 2) "Perfrom right hand - left hand term for every rij rkj pair" r_left[ij_indices] -= r_right[jk_indices] r_left[jk_indices] -= r_right[ij_indices] "Calculate forces upon beads i, j and k" frc_angle_ij = param['angle_k0'] * r_left frc_angle_k = -np.sum(np.reshape(frc_angle_ij, (int(n_vector/2), 2, 2)), axis=1) "Add angular forces to force array" for i in range(2): f_beads[i][angle_indices.T[0]] -= frc_angle_ij[ij_indices].T[i] f_beads[i][angle_indices.T[1]] -= frc_angle_k.T[i] f_beads[i][angle_indices.T[2]] -= frc_angle_ij[jk_indices].T[i] except IndexError: pass verlet_list = ut.check_cutoff(pair_r2, param['rc']**2) non_zero = np.nonzero(pair_r2 * verlet_list) nonbond_pot = ut.pot_vdw((pair_r2 * verlet_list)[non_zero], param['vdw_sigma'], param['vdw_matrix'][non_zero]) - cut_pot pot_energy += np.nansum(nonbond_pot) / 2 nonbond_frc = ut.force_vdw((pair_r2 * verlet_list)[non_zero], param['vdw_sigma'], param['vdw_matrix'][non_zero]) - cut_frc temp_xy = np.zeros(pair_dist.shape) for i in range(2): temp_xy[i][non_zero] += nonbond_frc * (pair_dist[i][non_zero] / pair_r2[non_zero]) for j in range(2): virial_tensor[i][j] += np.sum(np.triu(temp_xy[i] * pair_dist[i] * pair_dist[j])) f_beads[i] += np.sum(temp_xy[i], axis=0) frc = f_beads.T return frc, pot_energy, virial_tensor def calc_energy_forces_2D_mpi(pos, cell_dim, pos_indices, bond_indices, glob_indices, angle_indices, angle_bond_indices, angle_coeff, vdw_coeff, virial_indicies, param): """ 
calc_energy_forces(distances, r2, bond_matrix, vdw_matrix, verlet_list, bond_beads, dist_index, r_index, param) Return tot potential energy and forces on each bead in simulation Parameters ---------- dxy: array_like (float); shape=(2, n_bead, n_bead) Displacement along x and y axis between each bead r2: array_like (float); shape=(n_bead, n_bead) Square of Radial disance between each bead bond_matrix: array_like (int); shape=(n_bead, n_bead) Matrix determining whether a bond is present between two beads verlet_list: array_like (int); shape=(n_bead, n_bead) Matrix determining whether two beads are within rc radial distance vdw_param: array_like (float); shape=(2) Sigma and epsilon paameters for Van de Waals forces bond_param: array_like (float); shape=(2) Equilibrium length and energy paameters for bonded forces angle_param: array_like (float); shape=(2) Equilibrium angle and energy paameters for angular forces rc: float Interaction cutoff radius for non-bonded forces bond_beads: array_like, (int); shape=(n_angle, 3) Array containing indicies in pos array all 3-bead angular interactions dxy_index: array_like, (int); shape=(n_bond, 2) Array containing indicies in dx and dy arrays of all bonded interactions r_index: array_like, (int); shape=(n_bond, 2) Array containing indicies in r array of all bonded interactions Returns ------- pot_energy: float Total potential energy of simulation cell frc_beads: array_like (float); shape=(n_beads, n_dim) Forces acting upon each bead due to positional array virial_tensor: array_like (float); shape=(n_dim, n_dim) Virial term of pressure tensor components """ f_beads = np.zeros((2, pos.shape[0])) pot_energy = 0 cut_frc = ut.force_vdw(param['rc']**2, param['vdw_sigma'], param['vdw_epsilon']) cut_pot = ut.pot_vdw(param['rc']**2, param['vdw_sigma'], param['vdw_epsilon']) virial_tensor = np.zeros((2, 2)) n_bond = bond_indices[0].shape[0] pair_dist = ut.get_distances_mpi(pos, pos_indices, cell_dim) pair_r2 = np.sum(pair_dist**2, axis=0) if n_bond > 0: "Bond Lengths" bond_r = np.sqrt(pair_r2[bond_indices]) #verlet_list_r0 = ut.check_cutoff(r_half, param['bond_r0']) #verlet_list_r1 = ut.check_cutoff(r_half, param['bond_r1']) bond_pot = ut.pot_harmonic(bond_r, param['bond_r0'], param['bond_matrix'][glob_indices])# * verlet_list_r0 #bond_pot_1 = ut.pot_harmonic(r_half, param['bond_r1'], param['bond_k1']) * verlet_list_r1 pot_energy += 0.5 * np.sum(bond_pot)# + np.sum(bond_pot_1) bond_frc = ut.force_harmonic(bond_r, param['bond_r0'], param['bond_matrix'][glob_indices])# * verlet_list_r0 #bond_frc_1 = ut.force_harmonic(r_half, param['bond_r1'], param['bond_k1']) * verlet_list_r1 temp_frc = np.zeros((2, pos.shape[0], pos.shape[0])) for i in range(2): temp_frc[i][glob_indices] += bond_frc * pair_dist[i][bond_indices] / bond_r f_beads[i] += np.sum(temp_frc[i], axis=1) #for i in range(2): # for j in range(2): virial_tensor[i][j] += np.sum(bond_frc / r_half * distances[i][indices_half] * distances[j][indices_half]) "Bond Angles" try: angle_dist = (pos[angle_bond_indices[1]] - pos[angle_bond_indices[0]]).T for i in range(param['n_dim']): angle_dist[i] -= cell_dim[i] * np.array(2 * angle_dist[i] / cell_dim[i], dtype=int) angle_r2 = np.sum(angle_dist**2, axis=0) r_vector = np.sqrt(angle_r2) "Make array of vectors rij, rjk for all connected bonds" vector = np.stack((angle_dist[0], angle_dist[1]), axis=1) n_vector = int(vector.shape[0]) "Find |rij| values for each vector" cos_the, sin_the, r_prod = cos_sin_theta_2D(vector, r_vector) pot_energy += np.sum(angle_coeff * (cos_the + 
1)) "Form arrays of |rij| vales, cos(theta) and |rij||rjk| terms same shape as vector array" r_array = np.reshape(np.repeat(r_vector, 2), vector.shape) sin_the_array = np.reshape(np.repeat(sin_the, 4), vector.shape) r_prod_array = np.reshape(np.repeat(r_prod, 4), vector.shape) "Form left and right hand side terms of (cos(theta) rij / |rij|^2 - rjk / |rij||rjk|)" r_left = vector / r_prod_array r_right = sin_the_array * vector / r_array**2 ij_indices = np.arange(0, n_vector, 2) jk_indices = np.arange(1, n_vector, 2) "Perfrom right hand - left hand term for every rij rkj pair" r_left[ij_indices] -= r_right[jk_indices] r_left[jk_indices] -= r_right[ij_indices] "Calculate forces upon beads i, j and k" frc_angle_ij = param['angle_k0'] * r_left frc_angle_k = -np.sum(np.reshape(frc_angle_ij, (int(n_vector/2), 2, 2)), axis=1) "Add angular forces to force array" for i in range(2): f_beads[i][angle_indices.T[0]] -= frc_angle_ij[ij_indices].T[i] f_beads[i][angle_indices.T[1]] -= frc_angle_k.T[i] f_beads[i][angle_indices.T[2]] -= frc_angle_ij[jk_indices].T[i] except IndexError: pass verlet_list = ut.check_cutoff(pair_r2, param['rc']**2) non_zero = np.nonzero(pair_r2 * verlet_list) nonbond_pot = ut.pot_vdw((pair_r2 * verlet_list)[non_zero], param['vdw_sigma'], vdw_coeff[non_zero]) - cut_pot pot_energy += np.nansum(nonbond_pot) / 2 nonbond_frc = ut.force_vdw((pair_r2 * verlet_list)[non_zero], param['vdw_sigma'], vdw_coeff[non_zero]) - cut_frc temp_xy = np.zeros(pair_dist.shape) for i in range(2): temp_xy[i][non_zero] += nonbond_frc * (pair_dist[i][non_zero] / pair_r2[non_zero]) for j in range(2): virial_tensor[i][j] += np.sum(np.triu(temp_xy[i] * pair_dist[i] * pair_dist[j])[virial_indicies]) f_beads[i] += np.sum(temp_xy[i], axis=0) frc = f_beads.T return frc, pot_energy, virial_tensor def cos_sin_theta_3D(vector, r_vector): """ cos_sin_theta_3D(vector, r_vector) Returns cosine and sine of angles of intersecting vectors betwen even and odd indicies Parameters ---------- vector: array_like, (float); shape=(n_vector, n_dim) Array of displacement vectors between connecting beads r_vector: array_like, (float); shape=(n_vector) Array of radial distances between connecting beads Returns ------- cos_the: array_like (float); shape=(n_vector/2) Cosine of the angle between each pair of displacement vectors sin_the: array_like (float); shape=(n_vector/2) Sine of the angle between each pair of displacement vectors r_prod: array_like (float); shape=(n_vector/2) Product of radial distance between each pair of displacement vectors """ n_vector = int(vector.shape[0]) n_dim = vector.shape[1] temp_vector = np.reshape(vector, (int(n_vector/2), 2, n_dim)) "Calculate |rij||rjk| product for each pair of vectors" r_prod = np.prod(np.reshape(r_vector, (int(n_vector/2), 2)), axis = 1) "Form dot product of each vector pair rij*rjk in vector array corresponding to an angle" dot_prod = np.sum(np.prod(temp_vector, axis=1), axis=1) "Form pseudo-cross product of each vector pair rij*rjk in vector array corresponding to an angle" temp_vector = np.moveaxis(temp_vector, (1, 0, 2), (0, 1, 2)) cross_prod = np.cross(temp_vector[0], temp_vector[1]) "Calculate cos(theta) for each angle" cos_the = dot_prod / r_prod "Calculate sin(theta) for each angle" sin_the = cross_prod / np.reshape(np.repeat(r_prod, n_dim), cross_prod.shape) return cos_the, sin_the, r_prod def calc_energy_forces_3D(pos, cell_dim, bond_indices, angle_indices, angle_bond_indices, param): """ calc_energy_forces(pos, cell_dim, bond_indices, angle_indices, angle_bond_indices, 
vdw_coeff, param) Return tot potential energy and forces on each bead in simulation Parameters ---------- dxy: array_like (float); shape=(2, n_bead, n_bead) Displacement along x and y axis between each bead r2: array_like (float); shape=(n_bead, n_bead) Square of Radial disance between each bead bond_matrix: array_like (int); shape=(n_bead, n_bead) Matrix determining whether a bond is present between two beads verlet_list: array_like (int); shape=(n_bead, n_bead) Matrix determining whether two beads are within rc radial distance vdw_param: array_like (float); shape=(2) Sigma and epsilon paameters for Van de Waals forces bond_param: array_like (float); shape=(2) Equilibrium length and energy paameters for bonded forces angle_param: array_like (float); shape=(2) Equilibrium angle and energy paameters for angular forces rc: float Interaction cutoff radius for non-bonded forces bond_beads: array_like, (int); shape=(n_angle, 3) Array containing indicies in pos array all 3-bead angular interactions dxy_index: array_like, (int); shape=(n_bond, 2) Array containing indicies in dx and dy arrays of all bonded interactions r_index: array_like, (int); shape=(n_bond, 2) Array containing indicies in r array of all bonded interactions Returns ------- pot_energy: float Total potential energy of simulation cell frc_beads: array_like (float); shape=(n_beads, n_dim) Forces acting upon each bead due to positional array virial_tensor: array_like (float); shape=(n_dim, n_dim) Virial term of pressure tensor components """ f_beads = np.zeros((3, pos.shape[0])) pot_energy = 0 cut_frc = ut.force_vdw(param['rc']**2, param['vdw_sigma'], param['vdw_epsilon']) cut_pot = ut.pot_vdw(param['rc']**2, param['vdw_sigma'], param['vdw_epsilon']) virial_tensor = np.zeros((3, 3)) n_bond = bond_indices[0].shape[0] pair_dist = ut.get_distances(pos, cell_dim) pair_r2 = np.sum(pair_dist**2, axis=0) if n_bond > 0: "Bond Lengths" bond_r = np.sqrt(pair_r2[bond_indices]) #verlet_list_r0 = ut.check_cutoff(r_half, param['bond_r0']) #verlet_list_r1 = ut.check_cutoff(r_half, param['bond_r1']) bond_pot = ut.pot_harmonic(bond_r, param['bond_r0'], param['bond_matrix'][bond_indices])# * verlet_list_r0 #bond_pot_1 = ut.pot_harmonic(r_half, param['bond_r1'], param['bond_k1']) * verlet_list_r1 pot_energy += 0.5 * np.sum(bond_pot)# + np.sum(bond_pot_1) bond_frc = ut.force_harmonic(bond_r, param['bond_r0'], param['bond_matrix'][bond_indices])# * verlet_list_r0 #bond_frc_1 = ut.force_harmonic(r_half, param['bond_r1'], param['bond_k1']) * verlet_list_r1 temp_frc = np.zeros((3, pos.shape[0], pos.shape[0])) for i in range(3): temp_frc[i][bond_indices] += bond_frc * pair_dist[i][bond_indices] / bond_r f_beads[i] += np.sum(temp_frc[i], axis=1) #for i in range(3): # for j in range(3): virial_tensor[i][j] += np.sum(bond_frc / r_half * distances[i][indices_half] * distances[j][indices_half]) "Bond Angles" try: angle_dist = pair_dist.T[angle_bond_indices].T "Make array of vectors rij, rjk for all connected bonds" vector = np.stack((angle_dist[0], angle_dist[1], angle_dist[2]), axis=1) n_vector = int(vector.shape[0]) "Find |rij| values for each vector" r_vector = np.sqrt(pair_r2[angle_bond_indices]) cos_the, sin_the, r_prod = cos_sin_theta_3D(vector, r_vector) pot_energy += np.sum(param['angle_array'] * (cos_the + 1)) "Form arrays of |rij| vales, cos(theta) and |rij||rjk| terms same shape as vector array" r_array = np.reshape(np.repeat(r_vector, 3), vector.shape) sin_the_array = np.reshape(np.repeat(sin_the, 2), vector.shape) r_prod_array = 
np.reshape(np.repeat(r_prod, 6), vector.shape) "Form left and right hand side terms of (cos(theta) rij / |rij|^2 - rjk / |rij||rjk|)" r_left = vector / r_prod_array r_right = sin_the_array * vector / r_array**2 ij_indices =
np.arange(0, n_vector, 2)
numpy.arange
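# --- Illustration: the np.arange(0, n_vector, 2) / np.arange(1, n_vector, 2)
# pattern above splits an interleaved array of (rij, rjk) bond vectors into its
# even and odd members. A self-contained 2D sketch of the same angle
# computation on toy data (not taken from the simulation):
import numpy as np

# two angles, each defined by an interleaved (rij, rjk) pair of 2D vectors
vector = np.array([[1., 0.], [0., 1.],   # a 90-degree pair
                   [1., 0.], [1., 1.]])  # a 45-degree pair
n_vector = vector.shape[0]

ij_indices = np.arange(0, n_vector, 2)  # positions of the rij vectors
jk_indices = np.arange(1, n_vector, 2)  # positions of the rjk vectors

r = np.linalg.norm(vector, axis=1)
r_prod = r[ij_indices] * r[jk_indices]  # |rij||rjk| per angle
dot_prod = np.sum(vector[ij_indices] * vector[jk_indices], axis=1)
cross_prod = (vector[ij_indices, 0] * vector[jk_indices, 1]
              - vector[ij_indices, 1] * vector[jk_indices, 0])  # 2D determinant

cos_the = dot_prod / r_prod
sin_the = cross_prod / r_prod
print(np.degrees(np.arctan2(sin_the, cos_the)))  # [90. 45.]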
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. from os import getenv import numpy as np import torch from pyquaternion import Quaternion from transforms3d.euler import euler2mat, euler2quat, quat2mat, mat2euler from math_helper import rot_matrix_between_vectors, angle_between_vectors, quaternion_between_vectors, \ normalize_batch, rad2deg, normalize, rotation_between_triangles import quaternion from utils import torch_tile class KeyframeIdx: root = 0 spine0 = 1 spine1 = 2 spine2 = 3 spine3 = 4 neck = 5 head = 6 head_end = 7 rshoulder = 8 rscap = 9 rupperarm = 10 rlowerarm = 11 rwristtwist = 12 rwrist = 13 rindex1 = 14 rindex2 = 15 rindex3 = 16 rindex3_end = 17 rring1 = 18 rring2 = 19 rring3 = 20 rring3_end = 21 rmiddle1 = 22 rmiddle2 = 23 rmiddle3 = 24 rmiddle3_end = 25 rpinky1 = 26 rpinky2 = 27 rpinky3 = 28 rpinky3_end = 29 rthumb0 = 30 rthumb1 = 31 rthumb2 = 32 rthumb3 = 33 rthumb3_end = 34 lshoulder = 35 lscap = 36 lupperarm = 37 llowerarm = 38 lwristtwist = 39 lwrist = 40 lindex1 = 41 lindex2 = 42 lindex3 = 43 lindex3_end = 44 lring1 = 45 lring2 = 46 lring3 = 47 lring3_end = 48 lmiddle1 = 49 lmiddle2 = 50 lmiddle3 = 51 lmiddle3_end = 52 lpinky1 = 53 lpinky2 = 54 lpinky3 = 55 lpinky3_end = 56 lthumb0 = 57 lthumb1 = 58 lthumb2 = 59 lthumb3 = 60 lthumb3_end = 61 rupperleg = 62 rlowerleg = 63 rfoot = 64 rfootball = 65 rfootball_right = 66 rfootball_end = 67 lupperleg = 68 llowerleg = 69 lfoot = 70 lfootball = 71 lfootball_left = 72 lfootball_end = 73 all = { 'root': root, 'spine0': spine0, 'spine1': spine1, 'spine2': spine2, 'spine3': spine3, 'neck': neck, 'head': head, 'head_end': head_end, 'rshoulder': rshoulder, 'rscap': rscap, 'rupperarm': rupperarm, 'rlowerarm': rlowerarm, 'rwristtwist': rwristtwist, 'rwrist': rwrist, 'rindex1': rindex1, 'rindex2': rindex2, 'rindex3': rindex3, 'rindex3_end': rindex3_end, 'rring1': rring1, 'rring2': rring2, 'rring3': rring3, 'rring3_end': rring3_end, 'rmiddle1': rmiddle1, 'rmiddle2': rmiddle2, 'rmiddle3': rmiddle3, 'rmiddle3_end': rmiddle3_end, 'rpinky1': rpinky1, 'rpinky2': rpinky2, 'rpinky3': rpinky3, 'rpinky3_end': rpinky3_end, 'rthumb0': rthumb0, 'rthumb1': rthumb1, 'rthumb2': rthumb2, 'rthumb3': rthumb3, 'rthumb3_end': rthumb3_end, 'lshoulder': lshoulder, 'lscap': lscap, 'lupperarm': lupperarm, 'llowerarm': llowerarm, 'lwristtwist': lwristtwist, 'lwrist': lwrist, 'lindex1': lindex1, 'lindex2': lindex2, 'lindex3': lindex3, 'lindex3_end': lindex3_end, 'lring1': lring1, 'lring2': lring2, 'lring3': lring3, 'lring3_end': lring3_end, 'lmiddle1': lmiddle1, 'lmiddle2': lmiddle2, 'lmiddle3': lmiddle3, 'lmiddle3_end': lmiddle3_end, 'lpinky1': lpinky1, 'lpinky2': lpinky2, 'lpinky3': lpinky3, 'lpinky3_end': lpinky3_end, 'lthumb0': lthumb0, 'lthumb1': lthumb1, 'lthumb2': lthumb2, 'lthumb3': lthumb3, 'lthumb3_end': lthumb3_end, 'rupperleg': rupperleg, 'rlowerleg': rlowerleg, 'rfoot': rfoot, 'rfootball': rfootball, 'rfootball_right': rfootball_right, 'rfootball_end': rfootball_end, 'lupperleg': lupperleg, 'llowerleg': llowerleg, 'lfoot': lfoot, 'lfootball': lfootball, 'lfootball_left': lfootball_left, 'lfootball_end': lfootball_end, } @classmethod def exportable_joints(cls): result = {} for joint_name, joint_idx in cls.all.items(): if not joint_name.endswith("_end"): result[joint_name] = joint_idx return result @classmethod def get_hand_joints(cls): joint_l = [cls.lwrist] joint_r = [cls.rwrist] for j_name, j_idx in cls.all.items(): if 
"index" in j_name or "middle" in j_name or "ring" in j_name or "pinky" in j_name or "thumb" in j_name: if j_name.startswith("l"): joint_l.append(j_idx) else: joint_r.append(j_idx) return joint_l, joint_r @classmethod def get_finger_joints(cls): joint_l, joint_r = cls.get_hand_joints() return joint_l[1:], joint_r[1:] @classmethod def get_non_finger_joints(cls): l_hands, r_hands = cls.get_hand_joints() # keep wrists l_hands.remove(l_hands[0]) r_hands.remove(r_hands[0]) all = cls.all.copy() all_new = {} for j_name, j_idx in all.items(): if not (j_idx in l_hands or j_idx in r_hands): all_new[j_name] = j_idx return all_new class Bone: Idx = KeyframeIdx _joint_names = None def __init__(self, end, parent, normalized_length): self.end = end self.parent = parent self.children = [] self.normalized_length = normalized_length if self.parent is not None: self.start = parent.end self.parent.register_child(self) else: self.start = None @classmethod def build_hierarchy(cls): root = cls('root', None, 0) spine0 = cls('spine0', root, 0.0) spine1 = cls('spine1', spine0, 0.0) spine2 = cls('spine2', spine1, 0.0) spine3 = cls('spine3', spine2, 0.0) neck = cls('neck', spine3, 0.0) head = cls('head', neck, 0.0) head_end = cls('head_end', head, 0.0) rshoulder = cls('rshoulder', spine3, 0.0) rscap = cls('rscap', rshoulder, 0.0) rupperarm = cls('rupperarm', rscap, 0.0) rlowerarm = cls('rlowerarm', rupperarm, 0.0) rwristtwist = cls('rwristtwist', rlowerarm, 0.0) rwrist = cls('rwrist', rwristtwist, 0.0) rindex1 = cls('rindex1', rwrist, 0.0) rindex2 = cls('rindex2', rindex1, 0.0) rindex3 = cls('rindex3', rindex2, 0.0) rindex3_end = cls('rindex3_end', rindex3, 0.0) rring1 = cls('rring1', rwrist, 0.0) rring2 = cls('rring2', rring1, 0.0) rring3 = cls('rring3', rring2, 0.0) rring3_end = cls('rring3_end', rring3, 0.0) rmiddle1 = cls('rmiddle1', rwrist, 0.0) rmiddle2 = cls('rmiddle2', rmiddle1, 0.0) rmiddle3 = cls('rmiddle3', rmiddle2, 0.0) rmiddle3_end = cls('rmiddle3_end', rmiddle3, 0.0) rpinky1 = cls('rpinky1', rwrist, 0.0) rpinky2 = cls('rpinky2', rpinky1, 0.0) rpinky3 = cls('rpinky3', rpinky2, 0.0) rpinky3_end = cls('rpinky3_end', rpinky3, 0.0) rthumb0 = cls('rthumb0', rwrist, 0.0) rthumb1 = cls('rthumb1', rthumb0, 0.0) rthumb2 = cls('rthumb2', rthumb1, 0.0) rthumb3 = cls('rthumb3', rthumb2, 0.0) rthumb3_end = cls('rthumb3_end', rthumb3, 0.0) lshoulder = cls('lshoulder', spine3, 0.0) lscap = cls('lscap', lshoulder, 0.0) lupperarm = cls('lupperarm', lscap, 0.0) llowerarm = cls('llowerarm', lupperarm, 0.0) lwristtwist = cls('lwristtwist', llowerarm, 0.0) lwrist = cls('lwrist', lwristtwist, 0.0) lindex1 = cls('lindex1', lwrist, 0.0) lindex2 = cls('lindex2', lindex1, 0.0) lindex3 = cls('lindex3', lindex2, 0.0) lindex3_end = cls('lindex3_end', lindex3, 0.0) lring1 = cls('lring1', lwrist, 0.0) lring2 = cls('lring2', lring1, 0.0) lring3 = cls('lring3', lring2, 0.0) lring3_end = cls('lring3_end', lring3, 0.0) lmiddle1 = cls('lmiddle1', lwrist, 0.0) lmiddle2 = cls('lmiddle2', lmiddle1, 0.0) lmiddle3 = cls('lmiddle3', lmiddle2, 0.0) lmiddle3_end = cls('lmiddle3_end', lmiddle3, 0.0) lpinky1 = cls('lpinky1', lwrist, 0.0) lpinky2 = cls('lpinky2', lpinky1, 0.0) lpinky3 = cls('lpinky3', lpinky2, 0.0) lpinky3_end = cls('lpinky3_end', lpinky3, 0.0) lthumb0 = cls('lthumb0', lwrist, 0.0) lthumb1 = cls('lthumb1', lthumb0, 0.0) lthumb2 = cls('lthumb2', lthumb1, 0.0) lthumb3 = cls('lthumb3', lthumb2, 0.0) lthumb3_end = cls('lthumb3_end', lthumb3, 0.0) rupperleg = cls('rupperleg', root, 0.0) rlowerleg = cls('rlowerleg', rupperleg, 0.0) rfoot = 
cls('rfoot', rlowerleg, 0.0) rfootball = cls('rfootball', rfoot, 0.0) rfootball_right = cls('rfootball_right', rfootball, 0.0) rfootball_end = cls('rfootball_end', rfootball, 0.0) lupperleg = cls('lupperleg', root, 0.0) llowerleg = cls('llowerleg', lupperleg, 0.0) lfoot = cls('lfoot', llowerleg, 0.0) lfootball = cls('lfootball', lfoot, 0.0) lfootball_left = cls('lfootball_left', lfootball, 0.0) lfootball_end = cls('lfootball_end', lfootball, 0.0) return root @classmethod def get_shoulder_length(cls): return 0.15 @classmethod def get_upper_arm_length(cls): return 0.3 @classmethod def get_lower_arm_length(cls): return 0.3 @classmethod def get_neck_length(cls): return 0.1 def register_child(self, child): self.children.append(child) @classmethod def __recursive_add_joint(cls, joint, result): result.append([joint.start, joint.end]) for child in joint.children: cls.__recursive_add_joint(child, result) @classmethod def joint_names(cls): if cls._joint_names is None: result = [] cls.__recursive_add_joint(cls.build_hierarchy(), result) cls._joint_names = [] for bone in result: cls._joint_names.append(bone[1]) return cls._joint_names def __str__(self): return f"{self.end} (child of {self.start if self.start is not None else 'None'})" class Skeleton: Idx = KeyframeIdx Bone_set = Bone def __init__(self, global_positions): self.p = global_positions def height(self): return self.p[self.Idx.head][1] def shoulder_width(self): return np.linalg.norm(self.p[self.Idx.lshoulder] - self.p[self.Idx.rshoulder]) def arm_length(self): return np.linalg.norm(self.p[self.Idx.rshoulder] - self.p[self.Idx.rwrist]) def bone_offset_vector(self): def __recursive_add_child_offset(_v, bone): if bone.start is None: _v[self.Idx.all[bone.end]] = self.p[self.Idx.all[bone.end]] else: distance = self.p[self.Idx.all[bone.end]] - self.p[self.Idx.all[bone.start]] _v[self.Idx.all[bone.end]] = distance for child in bone.children: __recursive_add_child_offset(_v, child) v = np.zeros((len(self.Idx.all), 3)) hierarchy = self.Bone_set.build_hierarchy() __recursive_add_child_offset(v, hierarchy) return v @classmethod def parent_idx_vector(cls): def __recursive_add_children(bone, parent_index, result): result[cls.Idx.all[bone.end]] = parent_index bone_idx = cls.Idx.all[bone.end] for child in bone.children: __recursive_add_children(child, bone_idx, result) hierarchy = cls.Bone_set.build_hierarchy() parent_indices = np.zeros(len(cls.Idx.all), dtype=np.int32) __recursive_add_children(hierarchy, cls.Idx.all['head'], parent_indices) parent_indices[cls.Idx.all['head']] = -1 return parent_indices @classmethod def child_idx_vector(cls): parent_idx = cls.parent_idx_vector() child_idx = [] for i in range(len(parent_idx)): child_idx.append([]) for c_idx, p_idx in enumerate(parent_idx): if p_idx > -1 and c_idx > 0: child_idx[p_idx].append(c_idx) return child_idx # remove reference_p @classmethod def forward_kinematics_torch(cls, bone_offsets, parent_idx, root_offset, qrot): """ performs forward kinematics and returns global positions as vectors and orientations as quaternions """ p = torch.zeros((qrot.shape[0], qrot.shape[1], 3)).to(qrot.device) qrot_global = torch.zeros_like(qrot).to(qrot.device) _bone_offsets = torch.zeros_like(p).to(qrot.device) if type(bone_offsets) is np.ndarray: _bone_offsets[:] += torch.from_numpy(bone_offsets).type(torch.float32).to(qrot.device) else: _bone_offsets[:] += bone_offsets.float().to(qrot.device) for joint_idx, parent_idx in enumerate(parent_idx): if parent_idx < 0: qrot_global[:, joint_idx] = qrot[:, joint_idx] p[:, 
joint_idx] = root_offset else: qrot_global[:, joint_idx] = quaternion.qmul(qrot_global[:, parent_idx], qrot[:, joint_idx]) p[:, joint_idx] = p[:, parent_idx] + quaternion.qrot(qrot_global[:, parent_idx], _bone_offsets[:, joint_idx]) return p, qrot_global def positions_to_local_rot(self, p_g): r_l = torch.zeros_like(p_g) quat_g = torch.zeros((p_g.shape[0], p_g.shape[1], 4), device=p_g.device) quat_l = torch.zeros((p_g.shape[0], p_g.shape[1], 4), device=p_g.device) quat_g[:, :, 0] += 1.0 quat_l[:, :, 0] += 1.0 bone_offsets = torch.from_numpy(self.bone_offset_vector() / 100.0).float().to(p_g.device) child_idx_vector = self.child_idx_vector() parent_idx_vector = self.parent_idx_vector() depend_on_parent_idx = [1, 2, 3, 4] for joint_idx, parent_idx in enumerate(parent_idx_vector): parent_g = quat_g[:, parent_idx] use_par = joint_idx in depend_on_parent_idx my_pos = [torch.zeros_like(p_g[:, joint_idx])] ref_pos = [torch.zeros_like(p_g[:, joint_idx])] for child_idx in child_idx_vector[joint_idx]: ref_bone_dir = torch_tile(bone_offsets[child_idx][None, :].clone(), dim=0, n_tile=len(p_g)) if torch.norm(ref_bone_dir) == 0.0: continue my_pos.append(p_g[:, child_idx] - p_g[:, joint_idx]) if use_par: ref_bone_dir = quaternion.qrot(parent_g, ref_bone_dir) ref_pos.append(ref_bone_dir) finger_knuckles = [] # extrimities_start = [Id.rupperarm, Id.lupperarm, Id.rupperleg, Id.lupperleg] extrimities_start = [] if len(my_pos) == 2 and (joint_idx in extrimities_start or joint_idx in finger_knuckles): child_idx = child_idx_vector[joint_idx][0] grand_child_idx = child_idx_vector[child_idx][0] my_pos.append(p_g[:, grand_child_idx] - p_g[:, joint_idx]) b_off = bone_offsets[grand_child_idx] + bone_offsets[child_idx] ref_bone_dir = torch_tile(b_off[None, :], dim=0, n_tile=len(p_g)) ref_pos.append(ref_bone_dir) if len(my_pos) > 2: if use_par: quat_l[:, joint_idx] = rotation_between_triangles(ref_pos[:3], my_pos[:3]) r_l[:, joint_idx] = quaternion.qeuler(quat_l[:, joint_idx]) quat_g[:, joint_idx] = quaternion.qmul(parent_g, quat_l[:, joint_idx]) else: quat_g[:, joint_idx] = rotation_between_triangles(ref_pos[:3], my_pos[:3]) quat_l[:, joint_idx] = quaternion.qmul(quaternion.inverse(parent_g), quat_g[:, joint_idx]) r_l[:, joint_idx] = quaternion.qeuler(quat_l[:, joint_idx]) elif len(my_pos) == 2: ref_bone_dir = normalize_batch(ref_pos[1]) child_idx = child_idx_vector[joint_idx][0] anim_bone_dir = normalize_batch(p_g[:, child_idx] - p_g[:, joint_idx]) if use_par: quat_l[:, joint_idx] = quaternion.quaternion_between_vectors_torch(ref_bone_dir, anim_bone_dir) r_l[:, joint_idx] = quaternion.qeuler(quat_l[:, joint_idx]) quat_g[:, joint_idx] = quaternion.qmul(parent_g, quat_l[:, joint_idx]) else: quat_g[:, joint_idx] = quaternion.quaternion_between_vectors_torch(ref_bone_dir, anim_bone_dir) quat_l[:, joint_idx] = quaternion.qmul(quaternion.inverse(parent_g), quat_g[:, joint_idx]) r_l[:, joint_idx] = quaternion.qeuler(quat_l[:, joint_idx]) else: quat_g[:, joint_idx] = parent_g.clone() return rad2deg(r_l), quat_l @classmethod def parent_idx_vector(cls): def __recursive_add_children(bone, parent_index, result): result[cls.Idx.all[bone.end]] = parent_index bone_idx = cls.Idx.all[bone.end] for child in bone.children: __recursive_add_children(child, bone_idx, result) hierarchy = cls.Bone_set.build_hierarchy() parent_indices = np.zeros(len(cls.Idx.all), dtype=np.int32) __recursive_add_children(hierarchy, cls.Idx.all['root'], parent_indices) parent_indices[cls.Idx.all['root']] = -1 return parent_indices @classmethod def 
parent_idx_vector_reduced(cls): def __recursive_add_children(bone, parent_index, result): if bone.end in indices: result[indices[bone.end]] = parent_index bone_idx = indices[bone.end] else: # result[cls.Idx.all[bone.end]] = -1 bone_idx = parent_index for child in bone.children: __recursive_add_children(child, bone_idx, result) hierarchy = cls.Bone_set.build_hierarchy() # indices = cls.Idx.reduced_hands() indices = cls.Idx.all # parent_indices = np.zeros(len(indices), dtype=np.int32) parent_indices = {} __recursive_add_children(hierarchy, indices['root'], parent_indices) parent_indices[indices['root']] = -1 return parent_indices class Keyframe: Idx = KeyframeIdx __up = None __forward = None __right = None __up_local = None __forward_local = None __right_local = None # TODO remove default l/r_hand_visible params def __init__(self, global_positions, object_rotations, r_local, time, l_hand_visible=True, r_hand_visible=True): self.p = global_positions self.r = object_rotations self.r_local = r_local self.t = time self.l_hand_visible = l_hand_visible self.r_hand_visible = r_hand_visible @classmethod def __batch_torch_recursive_recover_local_rotation(cls, bone, skeleton, r, r_local): deg2rad = 3.1415 / 180 rad2deg = 180 / 3.1415 joint_idx = cls.Idx.all[bone.end] if bone.start is None: r_local[:, cls.Idx.all[bone.end]] = r[:, cls.Idx.all[bone.end]] else: # what is the bone vector - from parent joint to child joint parent_idx = cls.Idx.all[bone.start] parent_rot = quaternion.euler_to_quaternion_torch(r[:, parent_idx] * deg2rad, 'xyz') self_rot = quaternion.euler_to_quaternion_torch(r[:, joint_idx] * deg2rad, 'xyz') local_rot = quaternion.qmul(quaternion.inverse(parent_rot), self_rot) r_local[:, joint_idx] = rad2deg * quaternion.qeuler(local_rot, order="xyz") for child in bone.children: cls.__batch_torch_recursive_recover_local_rotation(child, skeleton, r, r_local) @classmethod def batch_torch_recover_local_rotations(cls, skeleton, r): r_local = torch.empty_like(r) root = skeleton.Bone_set.build_hierarchy() cls.__batch_torch_recursive_recover_local_rotation(root, skeleton, r, r_local) return r_local def __recursive_recover_local_rotation(self, bone): joint_idx = self.Idx.all[bone.end] if bone.start is None: self.r_local[self.Idx.all[bone.end]] = self.r[self.Idx.all[bone.end]] else: # what is the bone vector - from parent joint to child joint parent_idx = self.Idx.all[bone.start] parent_rot = quaternion.euler_to_quaternion(np.deg2rad(self.r[parent_idx]), 'xyz') self_rot = quaternion.euler_to_quaternion(np.deg2rad(self.r[joint_idx]), 'xyz') local_rot = quaternion.qmul_np(quaternion.inverse_np(parent_rot), self_rot) self.r_local[joint_idx] = np.rad2deg(quaternion.qeuler_np(local_rot, order="xyz")) for child in bone.children: self.__recursive_recover_local_rotation(child) def recover_local_rotations(self, skeleton): if self.r_local is None: self.r_local = np.empty_like(self.r) root = skeleton.Bone_set.build_hierarchy() self.__recursive_recover_local_rotation(root) @classmethod def from_numpy(cls, x): if x.shape[1] == 7: return cls(x[:, :3], x[:, 3:6], np.ones_like(x[:, 3:6]), x[0, 6], x[1, 6] > 0.00001, x[2, 6] > 0.00001) else: return cls(x[:, :3], x[:, 3:6], x[:, 6:9], x[0, 9], x[1, 9] > 0.00001, x[2, 9] > 0.00001) @classmethod def from_numpy_arr(cls, x_arr): for x in x_arr: if x.shape[1] == 7: yield cls(x[:, :3], x[:, 3:6], np.ones_like(x[:, 3:6]), x[0, 6], x[1, 6] > 0.00001, x[2, 6] > 0.00001) else: yield cls(x[:, :3], x[:, 3:6], x[:, 6:9], x[0, 9], x[1, 9] > 0.00001, x[2, 9] > 0.00001) def 
rotate_vector(self, v, local=False): rot = self.r_local if local else self.r result = np.empty((len(rot), 3)) for i, r in enumerate(rot): f = normalize(euler2mat(*np.deg2rad(r)).dot(v)) result[i] = f return result def reset_cache(self): self.__right = None self.__forward = None self.__up = None def forward(self, local=False): if local: if self.__forward_local is None: self.__forward_local = self.rotate_vector(np.array([0, 0, 1]), local) return self.__forward_local else: if self.__forward is None: self.__forward = self.rotate_vector(np.array([0, 0, 1]), local) return self.__forward def up(self, local=False): if local: if self.__up_local is None: self.__up_local = self.rotate_vector(np.array([0, 1, 0]), local) return self.__up_local else: if self.__up is None: self.__up = self.rotate_vector(np.array([0, 1, 0]), local) return self.__up def right(self, local=False): if local: if self.__right_local is None: self.__right_local = self.rotate_vector(np.array([1, 0, 0]), local) return self.__right_local else: if self.__right is None: self.__right = self.rotate_vector(np.array([1, 0, 0]), local) return self.__right # joint velocities def velocity(self, other): delta_t = max(0.007, self.t - other.t) delta_p = self.p - other.p return delta_p / delta_t # joint angular velocity quaternions def angular_speed(self, other): delta_t = max(0.0001, self.t - other.t) result = np.empty((len(self.r), 3)) for i, r in enumerate(self.r): quat_r = Quaternion(*euler2quat(*(np.deg2rad(r)))) quat_other = Quaternion(*euler2quat(*np.deg2rad(other.r[i]))) delta_r = quat_r.inverse * quat_other result[i] = delta_r.vector / delta_t return result def to_numpy(self): result = np.empty((len(self.p), 10), dtype=np.float32) result[:, :3] = self.p result[:, 3:6] = self.r result[:, 6:9] = self.r_local result[0, 9] = self.t result[1, 9] = 1 if self.l_hand_visible else 0 result[2, 9] = 1 if self.r_hand_visible else 0 return result def bone_p_diff(self, joints): return self.p[self.Idx.all[joints[1]]] - self.p[self.Idx.all[joints[0]]] def bone_direction(self, joints): p_diff = self.bone_p_diff(joints) if np.linalg.norm(p_diff) == 0: return p_diff return p_diff / np.linalg.norm(p_diff) def bone_length(self, joints): return np.linalg.norm(self.bone_p_diff(joints)) def position(self, joint_name): return self.p[self.Idx.all[joint_name]] def elbow_angle(self, shoulderIdx, elbowIdx, wristIdx): # shoulder hand distance s_h_diff = self.p[wristIdx] - self.p[shoulderIdx] s_h_axis = normalize(s_h_diff) inverse_s_h_rot = np.linalg.inv(rot_matrix_between_vectors(np.array([0, 0, 1]), s_h_axis)) s_h_mid = s_h_diff * 0.5 + self.p[shoulderIdx] elbow_dir = self.p[elbowIdx] - s_h_mid elbow_dir = normalize(inverse_s_h_rot.dot(elbow_dir)) elbow_ref_down = np.array([0, -1, 0]) angle = angle_between_vectors(elbow_ref_down, elbow_dir) elbow_ref_right =
np.array([1, 0, 0])
numpy.array
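# --- Illustration: a toy version of the forward-kinematics pattern used by
# Skeleton.forward_kinematics_torch above (walk the parent-index vector,
# accumulate global rotations, offset each child by the rotated bone vector).
# This 2D numpy sketch with made-up bone data uses rotation matrices instead
# of the quaternion helpers, which are not reproduced here.
import numpy as np

def rot2d(theta):
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, -s], [s, c]])

parent_idx = [-1, 0, 1]                                  # root -> elbow -> wrist
bone_offsets = np.array([[0., 0.], [1., 0.], [1., 0.]])  # offset from parent, in the parent frame
local_angles = np.array([0., np.pi / 2, 0.])             # local joint rotations

p = np.zeros((3, 2))   # global joint positions
R_global = [None] * 3  # accumulated global rotations
for joint, parent in enumerate(parent_idx):
    if parent < 0:
        R_global[joint] = rot2d(local_angles[joint])
        p[joint] = bone_offsets[joint]  # root offset
    else:
        R_global[joint] = R_global[parent] @ rot2d(local_angles[joint])
        p[joint] = p[parent] + R_global[parent] @ bone_offsets[joint]

print(p)  # [[0. 0.] [1. 0.] [1. 1.]]: the elbow bends the chain 90 degrees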
# -*- coding: utf-8 -*- ''' Data Handler Module This module contains a class for managing a data processing pipeline ''' from time import time from datetime import timedelta import numpy as np import pandas as pd from scipy.stats import mode, skew from scipy.interpolate import interp1d from sklearn.cluster import DBSCAN import cvxpy as cvx import matplotlib.pyplot as plt import matplotlib.cm as cm from pandas.plotting import register_matplotlib_converters register_matplotlib_converters() from solardatatools.time_axis_manipulation import make_time_series,\ standardize_time_axis from solardatatools.matrix_embedding import make_2d from solardatatools.data_quality import daily_missing_data_advanced from solardatatools.data_filling import zero_nighttime, interp_missing from solardatatools.clear_day_detection import find_clear_days from solardatatools.plotting import plot_2d from solardatatools.clear_time_labeling import find_clear_times from solardatatools.solar_noon import avg_sunrise_sunset from solardatatools.algorithms import CapacityChange, TimeShift, SunriseSunset class DataHandler(): def __init__(self, data_frame=None, raw_data_matrix=None, datetime_col=None, convert_to_ts=False, aggregate=None, how=lambda x: x.mean()): if data_frame is not None: if convert_to_ts: data_frame, keys = make_time_series(data_frame) self.keys = keys else: self.keys = list(data_frame.columns) self.data_frame_raw = data_frame.copy() if not isinstance(self.data_frame_raw.index, pd.DatetimeIndex): if datetime_col is not None: df = self.data_frame_raw df[datetime_col] = pd.to_datetime(df[datetime_col]) df.set_index(datetime_col, inplace=True) else: e = "Data frame must have a DatetimeIndex or" e += "the user must set the datetime_col kwarg." raise Exception(e) df_index = self.data_frame_raw.index if df_index.tz is not None: df_index = df_index.tz_localize(None) self.data_frame = None if aggregate is not None: new_data = how(self.data_frame_raw.resample(aggregate)) self.data_frame_raw = new_data else: self.data_frame_raw = None self.data_frame = None self.keys = None self.raw_data_matrix = raw_data_matrix if self.raw_data_matrix is not None: self.num_days = self.raw_data_matrix.shape[1] if self.raw_data_matrix.shape[0] <= 1400: self.data_sampling = int(24 * 60 / self.raw_data_matrix.shape[0]) else: self.data_sampling = 24 * 60 / self.raw_data_matrix.shape[0] else: self.num_days = None self.data_sampling = None self.filled_data_matrix = None self.use_column = None self.capacity_estimate = None self.start_doy = None self.day_index = None self.power_units = None # "Extra" data, i.e. 
additional columns to process from the table self.extra_matrices = {} # Matrix views of extra columns self.extra_quality_scores = {} # Relative quality: fraction of non-NaN values in column during daylight time periods, as defined by the main power columns # Scores for the entire data set self.data_quality_score = None # Fraction of days without data acquisition errors self.data_clearness_score = None # Fraction of days that are approximately clear/sunny # Flags for the entire data set self.inverter_clipping = None # True if there is inverter clipping, false otherwise self.num_clip_points = None # If clipping, the number of clipping set points self.capacity_changes = None # True if the apparent capacity seems to change over the data set self.normal_quality_scores = None # True if clustering of data quality scores are within decision boundaries self.time_shifts = None # True if time shifts detected and corrected in data set self.tz_correction = 0 # TZ correction factor (determined during pipeline run) # Daily scores (floats), flags (booleans), and boolean masks self.daily_scores = DailyScores() # 1D arrays of floats self.daily_flags = DailyFlags() # 1D arrays of Booleans self.boolean_masks = BooleanMasks() # 2D arrays of Booleans # Useful daily signals defined by the data set self.daily_signals = DailySignals() # Algorithm objects self.scsf = None self.capacity_analysis = None self.time_shift_analysis = None self.daytime_analysis = None # Private attributes self._ran_pipeline = False self._error_msg = '' self.__density_lower_threshold = None self.__density_upper_threshold = None self.__linearity_threshold = None self.__recursion_depth = 0 self.__initial_time = None self.__fix_dst_ran = False def run_pipeline(self, power_col=None, min_val=-5, max_val=None, zero_night=True, interp_day=True, fix_shifts=True, density_lower_threshold=0.6, density_upper_threshold=1.05, linearity_threshold=0.1, clear_day_smoothness_param=0.9, clear_day_energy_param=0.8, verbose=True, start_day_ix=None, end_day_ix=None, c1=None, c2=500., solar_noon_estimator='com', correct_tz=True, extra_cols=None, daytime_threshold=0.1, units='W'): self.daily_scores = DailyScores() self.daily_flags = DailyFlags() self.capacity_analysis = None self.time_shift_analysis = None self.extra_matrices = {} # Matrix views of extra columns self.extra_quality_scores = {} self.power_units = units if self.__recursion_depth == 0: self.tz_correction = 0 t = np.zeros(6) ###################################################################### # Preprocessing ###################################################################### t[0] = time() if self.data_frame_raw is not None: self.data_frame = standardize_time_axis(self.data_frame_raw, timeindex=True, verbose=verbose) if self.data_frame is not None: self.make_data_matrix(power_col, start_day_ix=start_day_ix, end_day_ix=end_day_ix) if max_val is not None: mat_copy = np.copy(self.raw_data_matrix) mat_copy[np.isnan(mat_copy)] = -9999 slct = mat_copy > max_val if np.sum(slct) > 0: self.raw_data_matrix[slct] = np.nan if min_val is not None: mat_copy = np.copy(self.raw_data_matrix) mat_copy[np.isnan(mat_copy)] = 9999 slct = mat_copy < min_val if np.sum(slct) > 0: self.raw_data_matrix[slct] = np.nan self.capacity_estimate = np.nanquantile(self.raw_data_matrix, 0.95) if self.capacity_estimate <= 500 and self.power_units == 'W': self.power_units = 'kW' self.boolean_masks.missing_values = np.isnan(self.raw_data_matrix) ss = SunriseSunset() ss.run_optimizer(self.raw_data_matrix, plot=False) 
self.boolean_masks.daytime = ss.sunup_mask_estimated self.daytime_analysis = ss ### TZ offset detection and correction ### # (1) Determine if there exists a "large" timezone offset error if power_col is None: power_col = self.data_frame.columns[0] if correct_tz: average_day = np.zeros(self.raw_data_matrix.shape[0]) all_nans = np.alltrue(np.isnan(self.raw_data_matrix), axis=1) average_day[~all_nans] = np.nanmean( self.raw_data_matrix[~all_nans, :], axis=1 ) average_day -= np.min(average_day) average_day /= np.max(average_day) ### Troubleshooting code # plt.plot(average_day) # plt.axhline(0.02, color='red', ls='--', linewidth=1) # plt.show() meas_per_hour = np.int(60 / self.data_sampling) cond1 = np.any(average_day[:meas_per_hour] > 0.02) cond2 = np.any(average_day[-meas_per_hour:] > 0.02) cond3 = self.__recursion_depth <= 2 if (cond1 or cond2) and cond3: if verbose: print( 'Warning: power generation at midnight. Attempting to correct...') # Catch values that are more than 4 hours from noon and make a # correction to the time axis (rough correction to avoid days # rolling over) rough_noon_est = np.nanmean( self.data_frame.groupby(pd.Grouper(freq='D')) \ .idxmax()[power_col].dt.time \ .apply(lambda x: 60 * x.hour + x.minute) ) / 60 self.tz_correction = 12 - np.round(rough_noon_est) self.data_frame.index = self.data_frame.index.shift( self.tz_correction, freq='H' ) if verbose: print('Done.\nRestarting the pipeline...') self.__recursion_depth += 1 if self.__initial_time is not None: self.__initial_time = t[0] self.run_pipeline( power_col=power_col, min_val=min_val, max_val=max_val, zero_night=zero_night, interp_day=interp_day, fix_shifts=fix_shifts, density_lower_threshold=density_lower_threshold, density_upper_threshold=density_upper_threshold, linearity_threshold=linearity_threshold, clear_day_smoothness_param=clear_day_smoothness_param, clear_day_energy_param=clear_day_energy_param, verbose=verbose, start_day_ix=start_day_ix, end_day_ix=end_day_ix, c1=c1, c2=c2, solar_noon_estimator=solar_noon_estimator, correct_tz=correct_tz, extra_cols=extra_cols, daytime_threshold=daytime_threshold, units=units ) return ###################################################################### # Cleaning ###################################################################### t[1] = time() self.make_filled_data_matrix(zero_night=zero_night, interp_day=interp_day) num_raw_measurements = np.count_nonzero( np.nan_to_num(self.raw_data_matrix, copy=True, nan=0.)[self.boolean_masks.daytime] ) num_filled_measurements = np.count_nonzero( np.nan_to_num(self.filled_data_matrix, copy=True, nan=0.)[self.boolean_masks.daytime] ) if num_raw_measurements > 0: ratio = num_filled_measurements / num_raw_measurements else: msg = 'Error: data set contains no non-zero values!' self._error_msg += '\n' + msg if verbose: print(msg) self.daily_scores = None self.daily_flags = None self.data_quality_score = 0.0 self.data_clearness_score = 0.0 self._ran_pipeline = True return if ratio < 0.9: msg = 'Error: data was lost during NaN filling procedure. ' msg += 'This typically occurs when\nthe time stamps are in the ' msg += 'wrong timezone. 
                       Please double check your data table.\n'
            self._error_msg += '\n' + msg
            if verbose:
                print(msg)
            self.daily_scores = None
            self.daily_flags = None
            self.data_quality_score = None
            self.data_clearness_score = None
            self._ran_pipeline = True
            return
        ### TZ offset detection and correction ###
        # (2) Determine if there is a "small" timezone offset error
        if correct_tz:
            average_noon = np.nanmean(
                avg_sunrise_sunset(self.filled_data_matrix, threshold=0.01)
            )
            tz_offset = int(np.round(12 - average_noon))
            if tz_offset != 0:
                self.tz_correction += tz_offset
                # Related to this bug fix:
                # https://github.com/slacgismo/solar-data-tools/commit/ae0037771c09ace08bff5a4904475da606e934da
                old_index = self.data_frame.index.copy()
                self.data_frame.index = self.data_frame.index.shift(
                    tz_offset, freq='H'
                )
                self.data_frame = self.data_frame.reindex(
                    index=old_index, method='nearest', limit=1
                ).fillna(0)
                meas_per_hour = self.filled_data_matrix.shape[0] / 24
                roll_by = int(meas_per_hour * tz_offset)
                self.filled_data_matrix = np.nan_to_num(
                    np.roll(self.filled_data_matrix, roll_by, axis=0), 0
                )
                self.raw_data_matrix = np.roll(
                    self.raw_data_matrix, roll_by, axis=0
                )
                self.boolean_masks.daytime = np.roll(
                    self.boolean_masks.daytime, roll_by, axis=0
                )
        ######################################################################
        # Scoring
        ######################################################################
        t[2] = time()
        t_clean = np.zeros(6)
        t_clean[0] = time()
        try:
            self.get_daily_scores(threshold=0.2)
        except:
            msg = 'Daily quality scoring failed.'
            self._error_msg += '\n' + msg
            if verbose:
                print(msg)
            self.daily_scores = None
        try:
            self.get_daily_flags(density_lower_threshold=density_lower_threshold,
                                 density_upper_threshold=density_upper_threshold,
                                 linearity_threshold=linearity_threshold)
        except:
            msg = 'Daily quality flagging failed.'
            self._error_msg += '\n' + msg
            if verbose:
                print(msg)
            self.daily_flags = None
        t_clean[1] = time()
        try:
            self.detect_clear_days(smoothness_threshold=clear_day_smoothness_param,
                                   energy_threshold=clear_day_energy_param)
        except:
            msg = 'Clear day detection failed.'
            self._error_msg += '\n' + msg
            if verbose:
                print(msg)
        t_clean[2] = time()
        try:
            self.clipping_check()
        except:
            msg = 'Clipping check failed.'
            self._error_msg += '\n' + msg
            if verbose:
                print(msg)
            self.inverter_clipping = None
        t_clean[3] = time()
        try:
            self.score_data_set()
        except:
            msg = 'Data set summary scoring failed.'
            self._error_msg += '\n' + msg
            if verbose:
                print(msg)
            self.data_quality_score = None
            self.data_clearness_score = None
        t_clean[4] = time()
        try:
            self.capacity_clustering()
        except TypeError:
            self.capacity_changes = None
        t_clean[5] = time()
        ######################################################################
        # Fix Time Shifts
        ######################################################################
        t[3] = time()
        if fix_shifts:
            try:
                self.auto_fix_time_shifts(c1=c1, c2=c2,
                                          estimator=solar_noon_estimator,
                                          threshold=daytime_threshold,
                                          periodic_detector=False)
            except Exception as e:
                msg = 'Fix time shift algorithm failed.'
                self._error_msg += '\n' + msg
                if verbose:
                    print(msg)
                    print('Error message:', e)
                    print('\n')
                self.time_shifts = None
        ######################################################################
        # Update daytime detection based on cleaned up data
        ######################################################################
        # self.daytime_analysis.run_optimizer(self.filled_data_matrix, plot=False)
        self.daytime_analysis.calculate_times(self.filled_data_matrix)
        self.boolean_masks.daytime = self.daytime_analysis.sunup_mask_estimated
        ######################################################################
        # Process Extra columns
        ######################################################################
        t[4] = time()
        if extra_cols is not None:
            freq = int(self.data_sampling * 60)
            new_index = pd.date_range(start=self.day_index[0].date(),
                                      end=self.day_index[-1].date() + timedelta(days=1),
                                      freq='{}s'.format(freq))[:-1]
            if isinstance(extra_cols, str):
                extra_cols = np.atleast_1d(extra_cols)
            elif isinstance(extra_cols, tuple):
                extra_cols = [extra_cols]
            for col in extra_cols:
                self.generate_extra_matrix(col, new_index=new_index)
        t[5] = time()
        times = np.diff(t, n=1)
        cleaning_times = np.diff(t_clean, n=1)
        total_time = t[-1] - t[0]
        # Cleanup
        self.__recursion_depth = 0
        if verbose:
            if self.__initial_time is not None:
                restart_msg = '{:.2f} seconds spent automatically localizing the time zone\n'
                restart_msg += 'Info for last pipeline run below:\n'
                restart_msg = restart_msg.format(t[0] - self.__initial_time)
                print(restart_msg)
            out = 'total time: {:.2f} seconds\n'
            out += '--------------------------------\n'
            out += 'Breakdown\n'
            out += '--------------------------------\n'
            out += 'Preprocessing {:.2f}s\n'
            out += 'Cleaning {:.2f}s\n'
            out += 'Filtering/Summarizing {:.2f}s\n'
            out += ' Data quality {:.2f}s\n'
            out += ' Clear day detect {:.2f}s\n'
            out += ' Clipping detect {:.2f}s\n'
            out += ' Capacity change detect {:.2f}s\n'
            if extra_cols is not None:
                out += 'Extra Column Processing {:.2f}s'
            print(out.format(
                total_time,
                times[0],
                times[1] + times[3],
                times[2],
                cleaning_times[0],
                cleaning_times[1],
                cleaning_times[2],
                cleaning_times[4],
                times[4]
            ))
        self._ran_pipeline = True
        return

    def report(self):
        try:
            if self.num_days >= 365:
                l1 = 'Length: {:.2f} years\n'.format(self.num_days / 365)
            else:
                l1 = 'Length: {} days\n'.format(self.num_days)
            if self.power_units == 'W':
                l1_a = 'Capacity estimate: {:.2f} kW\n'.format(self.capacity_estimate / 1000)
            elif self.power_units == 'kW':
                l1_a = 'Capacity estimate: {:.2f} kW\n'.format(self.capacity_estimate)
            else:
                l1_a = 'Capacity estimate: {:.2f} '.format(self.capacity_estimate)
                l1_a += self.power_units + '\n'
            if self.raw_data_matrix.shape[0] <= 1440:
                l2 = 'Data sampling: {} minute\n'.format(self.data_sampling)
            else:
                l2 = 'Data sampling: {} second\n'.format(int(self.data_sampling * 60))
            l3 = 'Data quality score: {:.1f}%\n'.format(self.data_quality_score * 100)
            l4 = 'Data clearness score: {:.1f}%\n'.format(self.data_clearness_score * 100)
            l5 = 'Inverter clipping: {}\n'.format(self.inverter_clipping)
            l6 = 'Time shifts corrected: {}\n'.format(self.time_shifts)
            if self.tz_correction != 0:
                l7 = 'Time zone correction: {} hours'.format(int(self.tz_correction))
            else:
                l7 = 'Time zone correction: None'
            p_out = l1 + l1_a + l2 + l3 + l4 + l5 + l6 + l7
            if self.capacity_changes:
                p_out += '\nWARNING: Changes in system capacity detected!'
            if self.num_clip_points > 1:
                p_out += '\nWARNING: {} clipping set points detected!'.format(
                    self.num_clip_points
                )
            if not self.normal_quality_scores:
                p_out += '\nWARNING: Abnormal clustering of data quality scores!'
            print(p_out)
            return
        except TypeError:
            if self._ran_pipeline:
                m1 = 'Pipeline failed, please check data set.\n'
                m2 = "Try running: self.plot_heatmap(matrix='raw')\n\n"
                if self.num_days >= 365:
                    l1 = 'Length: {:.2f} years\n'.format(self.num_days / 365)
                else:
                    l1 = 'Length: {} days\n'.format(self.num_days)
                if self.power_units == 'W':
                    l1_a = 'Capacity estimate: {:.2f} kW\n'.format(self.capacity_estimate / 1000)
                elif self.power_units == 'kW':
                    l1_a = 'Capacity estimate: {:.2f} kW\n'.format(self.capacity_estimate)
                else:
                    l1_a = 'Capacity estimate: {:.2f} '.format(self.capacity_estimate)
                    l1_a += self.power_units + '\n'
                if self.raw_data_matrix.shape[0] <= 1440:
                    l2 = 'Data sampling: {} minute\n'.format(self.data_sampling)
                else:
                    l2 = 'Data sampling: {} second\n'.format(int(self.data_sampling * 60))
                p_out = m1 + m2 + l1 + l1_a + l2
                print(p_out)
                print('\nError messages captured from pipeline:' + self._error_msg)
            else:
                print('Please run the pipeline first!')
            return

    def augment_data_frame(self, boolean_index, column_name):
        """
        Add a column to the data frame (tabular) representation of the data,
        containing True/False values at each time stamp.
        Boolean index is a 1-D or 2-D numpy array of True/False values. If 1-D,
        array should be of length N, where N is the number of days in the data
        set. If 2-D, the array should be of size M X N where M is the number of
        measurements each day and N is the number of days.

        :param boolean_index: Length N or size M X N numpy arrays of booleans
        :param column_name: Name for column
        :return:
        """
        if self.data_frame is None:
            print('This DataHandler object does not contain a data frame.')
            return
        if boolean_index is None:
            print('No mask available for ' + column_name)
            return
        m, n = self.raw_data_matrix.shape
        index_shape = boolean_index.shape
        cond1 = index_shape == (m, n)
        cond2 = index_shape == (n,)
        if not cond1 and not cond2:
            print('Boolean index shape does not match the data.')
        elif cond1:
            if self.time_shifts:
                ts = self.time_shift_analysis
                boolean_index = ts.invert_corrections(boolean_index)
            start = self.day_index[0]
            freq = '{}min'.format(self.data_sampling)
            periods = self.filled_data_matrix.size
            tindex = pd.date_range(start=start, freq=freq, periods=periods)
            series = pd.Series(data=boolean_index.ravel(order='F'), index=tindex)
            series.name = column_name
            if column_name in self.data_frame.columns:
                del self.data_frame[column_name]
            self.data_frame = self.data_frame.join(series)
            self.data_frame[column_name] = self.data_frame[column_name].fillna(False)
        elif cond2:
            slct_dates = self.day_index[boolean_index].date
            bix = np.isin(self.data_frame.index.date, slct_dates)
            self.data_frame[column_name] = False
            self.data_frame.loc[bix, column_name] = True
        if column_name in self.data_frame_raw.columns:
            del self.data_frame_raw[column_name]
        self.data_frame_raw = self.data_frame_raw.join(self.data_frame[column_name])

    def fix_dst(self):
        """
        Helper function for fixing data sets with known DST shift. This function
        works for data recorded anywhere in the United States. The choice of
        timezone (e.g. 'US/Pacific') does not matter, as long as the dates of
        the clock changes are the same.

        :return:
        """
        if not self.__fix_dst_ran:
            df = self.data_frame_raw
            df_localized = df.tz_localize('US/Pacific', ambiguous='NaT',
                                          nonexistent='NaT')
            df_localized = df_localized[df_localized.index == df_localized.index]
            df_localized = df_localized.tz_convert('Etc/GMT+8')
            df_localized = df_localized.tz_localize(None)
            self.data_frame_raw = df_localized
            self.__fix_dst_ran = True
            return
        else:
            print('DST correction already performed on this data set.')
            return

    def make_data_matrix(self, use_col=None, start_day_ix=None, end_day_ix=None):
        df = self.data_frame
        if use_col is None:
            use_col = df.columns[0]
        self.raw_data_matrix, day_index = make_2d(df, key=use_col,
                                                  return_day_axis=True)
        self.raw_data_matrix = self.raw_data_matrix[:, start_day_ix:end_day_ix]
        self.num_days = self.raw_data_matrix.shape[1]
        if self.raw_data_matrix.shape[0] <= 1400:
            self.data_sampling = int(24 * 60 / self.raw_data_matrix.shape[0])
        else:
            self.data_sampling = 24 * 60 / self.raw_data_matrix.shape[0]
        self.use_column = use_col
        self.day_index = day_index[start_day_ix:end_day_ix]
        self.start_doy = self.day_index.dayofyear[0]
        return

    def make_filled_data_matrix(self, zero_night=True, interp_day=True):
        self.filled_data_matrix = np.copy(self.raw_data_matrix)
        if zero_night:
            self.filled_data_matrix = zero_nighttime(
                self.raw_data_matrix, night_mask=~self.boolean_masks.daytime
            )
        if interp_day:
            self.filled_data_matrix = interp_missing(self.filled_data_matrix)
        else:
            msk = np.isnan(self.filled_data_matrix)
            self.filled_data_matrix[msk] = 0
        self.daily_signals.energy = np.sum(self.filled_data_matrix, axis=0) * \
            24 / self.filled_data_matrix.shape[1]
        return

    def generate_extra_matrix(self, column, new_index=None, key=None):
        if new_index is None:
            freq = self.data_sampling * 60
            end = self.day_index[-1].date() + timedelta(days=1)
            new_index = pd.date_range(start=self.day_index[0].date(),
                                      end=end,
                                      freq='{}s'.format(freq))[:-1]
        num_meas = self.filled_data_matrix.shape[0]
        new_view = self.data_frame[column].loc[new_index[0]:new_index[-1]]
        new_view = new_view.values.reshape(num_meas, -1, order='F')
        if self.time_shifts:
            ts = self.time_shift_analysis
            new_view = ts.apply_corrections(new_view)
        if key is None:
            key = column
        self.extra_matrices[key] = new_view
        self.extra_quality_scores[key] = (
            1 - np.sum(np.isnan(new_view[self.boolean_masks.daytime]))
            / np.sum(self.boolean_masks.daytime)
        )
        return

    def get_daily_scores(self, threshold=0.2):
        self.get_density_scores(threshold=threshold)
        self.get_linearity_scores()
        return

    def get_daily_flags(self, density_lower_threshold=0.6,
                        density_upper_threshold=1.05, linearity_threshold=0.1):
        self.daily_flags.density = np.logical_and(
            self.daily_scores.density > density_lower_threshold,
            self.daily_scores.density < density_upper_threshold
        )
        self.daily_flags.linearity = self.daily_scores.linearity < linearity_threshold
        self.daily_flags.flag_no_errors()
        scores = np.c_[self.daily_scores.density, self.daily_scores.linearity]
        db = DBSCAN(eps=.03,
                    min_samples=max(0.01 * scores.shape[0], 3)).fit(scores)
        # Count the number of days that cluster to the main group but fall
        # outside the decision boundaries
        day_counts = [np.logical_or(
            self.daily_scores.linearity[db.labels_ == lb] > linearity_threshold,
            np.logical_or(
                self.daily_scores.density[db.labels_ == lb] < density_lower_threshold,
                self.daily_scores.density[db.labels_ == lb] > density_upper_threshold
            )
        ) for lb in set(db.labels_)]
        self.normal_quality_scores = np.any([
            np.sum(day_count) <= max(5e-3 * self.num_days, 1)
            for day_count in day_counts
        ])
        self.__density_lower_threshold = density_lower_threshold
        self.__density_upper_threshold = density_upper_threshold
        self.__linearity_threshold = linearity_threshold
        self.daily_scores.quality_clustering = db.labels_

    def get_density_scores(self, threshold=0.2):
        if self.raw_data_matrix is None:
            print('Generate a raw data matrix first.')
            return
        self.daily_scores.density, self.daily_signals.density, \
            self.daily_signals.seasonal_density_fit = daily_missing_data_advanced(
                self.raw_data_matrix,
                threshold=threshold,
                return_density_signal=True,
                return_fit=True
            )
        return

    def get_linearity_scores(self):
        if self.capacity_estimate is None:
            self.capacity_estimate = np.quantile(self.filled_data_matrix, 0.95)
        if self.daily_signals.seasonal_density_fit is None:
            print('Run the density check first')
            return
        temp_mat = np.copy(self.filled_data_matrix)
        temp_mat[temp_mat < 0.005 * self.capacity_estimate] = np.nan
        difference_mat = np.round(temp_mat[1:] - temp_mat[:-1], 4)
        modes, counts = mode(difference_mat, axis=0, nan_policy='omit')
        n = self.filled_data_matrix.shape[0] - 1
        self.daily_scores.linearity = counts.data.squeeze() / (
            n * self.daily_signals.seasonal_density_fit
        )
        # Label detected infill points with a boolean mask
        infill = np.zeros_like(self.raw_data_matrix, dtype=np.bool)
        slct = self.daily_scores.linearity >= 0.1
        reference_diffs = np.tile(modes[0][slct],
                                  (self.filled_data_matrix.shape[0], 1))
        found_infill = np.logical_or(
            np.isclose(
                np.r_[np.zeros(self.num_days).reshape((1, -1)),
                      difference_mat][:, slct],
                reference_diffs),
            np.isclose(
                np.r_[difference_mat,
                      np.zeros(self.num_days).reshape((1, -1))][:, slct],
                reference_diffs),
        )
        infill[:, slct] = found_infill
        self.boolean_masks.infill = infill
        return

    def score_data_set(self):
        num_days = self.raw_data_matrix.shape[1]
        try:
            self.data_quality_score = np.sum(self.daily_flags.no_errors) / num_days
        except TypeError:
            self.data_quality_score = None
        try:
            self.data_clearness_score =
np.sum(self.daily_flags.clear)
numpy.sum
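A minimal standalone sketch (not solar-data-tools code; the `clear_flags` array is invented) of the pattern this row's completion relies on: `np.sum` over a boolean mask counts the True entries, so dividing by the number of days turns per-day flags into a 0-1 score.

import numpy as np

clear_flags = np.array([True, False, True, True, False])  # one flag per day
num_days = clear_flags.size
# True counts as 1 and False as 0, so this is the fraction of clear days
clearness_score = np.sum(clear_flags) / num_days
print(clearness_score)  # 0.6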
# Copyright 2020 Q-CTRL Pty Ltd & Q-CTRL Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
========================
Tests for Predefined DDS
========================
"""

import numpy as np
import pytest

from qctrlopencontrols import new_predefined_dds
from qctrlopencontrols.dynamic_decoupling_sequences import (
    CARR_PURCELL,
    CARR_PURCELL_MEIBOOM_GILL,
    PERIODIC_SINGLE_AXIS,
    QUADRATIC,
    SPIN_ECHO,
    UHRIG_SINGLE_AXIS,
    WALSH_SINGLE_AXIS,
    X_CONCATENATED,
    XY_CONCATENATED,
)
from qctrlopencontrols.exceptions import ArgumentsValueError

SIGMA_X = np.array([[0.0, 1.0], [1.0, 0.0]])
SIGMA_Y = np.array([[0.0, -1.0j], [1.0j, 0.0]])
SIGMA_Z = np.array([[1.0, 0.0], [0.0, -1.0]])


def test_ramsey():
    """Tests the Ramsey sequence"""
    duration = 10.0

    sequence = new_predefined_dds(scheme="Ramsey", duration=duration)

    _offsets = np.array([])
    _rabi_rotations = np.array([])
    _azimuthal_angles = np.array([])
    _detuning_rotations = np.array([])

    assert np.allclose(_offsets, sequence.offsets)
    assert np.allclose(_rabi_rotations, sequence.rabi_rotations)
    assert np.allclose(_azimuthal_angles, sequence.azimuthal_angles)
    assert np.allclose(_detuning_rotations, sequence.detuning_rotations)

    sequence = new_predefined_dds(
        scheme="Ramsey", duration=duration, pre_post_rotation=True
    )

    _rabi_rotations = np.array([np.pi / 2, np.pi / 2])
    _azimuthal_angles = np.array([0.0, np.pi])
    _detuning_rotations = np.array([0.0, 0.0])

    assert np.allclose(_rabi_rotations, sequence.rabi_rotations)
    assert np.allclose(_azimuthal_angles, sequence.azimuthal_angles)
    assert np.allclose(_detuning_rotations, sequence.detuning_rotations)


def test_spin_echo():
    """Test for the Spin Echo sequence"""
    duration = 10.0

    sequence = new_predefined_dds(scheme=SPIN_ECHO, duration=duration)

    _offsets = np.array([duration / 2.0])
    _rabi_rotations = np.array([np.pi])
    _azimuthal_angles = np.array([0])
    _detuning_rotations = np.array([0])

    assert np.allclose(_offsets, sequence.offsets)
    assert np.allclose(_rabi_rotations, sequence.rabi_rotations)
    assert np.allclose(_azimuthal_angles, sequence.azimuthal_angles)
    assert np.allclose(_detuning_rotations, sequence.detuning_rotations)

    sequence = new_predefined_dds(
        scheme=SPIN_ECHO, duration=duration, pre_post_rotation=True
    )

    _offsets = np.array([0, duration / 2.0, duration])
    _rabi_rotations = np.array([np.pi / 2, np.pi, np.pi / 2])
    _azimuthal_angles = np.array([0, 0, 0])
    _detuning_rotations = np.array([0, 0, 0])

    assert np.allclose(_offsets, sequence.offsets)
    assert np.allclose(_rabi_rotations, sequence.rabi_rotations)
    assert np.allclose(_azimuthal_angles, sequence.azimuthal_angles)
    assert np.allclose(_detuning_rotations, sequence.detuning_rotations)


def test_curr_purcell():
    """Test for the Carr-Purcell (CP) sequence"""
    duration = 10.0
    number_of_offsets = 4

    sequence = new_predefined_dds(
        scheme=CARR_PURCELL, duration=duration, number_of_offsets=number_of_offsets
    )

    _spacing = duration / number_of_offsets
    _offsets = np.array(
        [
            _spacing * 0.5,
            _spacing * 0.5 + _spacing,
            _spacing * 0.5 + 2 * _spacing,
            _spacing * 0.5 + 3 * _spacing,
        ]
    )
    _rabi_rotations = np.array([np.pi, np.pi, np.pi, np.pi])
    _azimuthal_angles = np.array([0, 0, 0, 0])
    _detuning_rotations = np.array([0, 0, 0, 0])

    assert np.allclose(_offsets, sequence.offsets)
    assert np.allclose(_rabi_rotations, sequence.rabi_rotations)
    assert np.allclose(_azimuthal_angles, sequence.azimuthal_angles)
    assert np.allclose(_detuning_rotations, sequence.detuning_rotations)

    sequence = new_predefined_dds(
        scheme=CARR_PURCELL,
        duration=duration,
        number_of_offsets=number_of_offsets,
        pre_post_rotation=True,
    )

    _offsets = np.array(
        [
            0,
            _spacing * 0.5,
            _spacing * 0.5 + _spacing,
            _spacing * 0.5 + 2 * _spacing,
            _spacing * 0.5 + 3 * _spacing,
            duration,
        ]
    )
    _rabi_rotations = np.array([np.pi / 2, np.pi, np.pi, np.pi, np.pi, np.pi / 2])
    _azimuthal_angles = np.array([0, 0, 0, 0, 0, np.pi])
    _detuning_rotations = np.array([0, 0, 0, 0, 0, 0])

    assert np.allclose(_offsets, sequence.offsets)
    assert np.allclose(_rabi_rotations, sequence.rabi_rotations)
    assert np.allclose(_azimuthal_angles, sequence.azimuthal_angles)
    assert np.allclose(_detuning_rotations, sequence.detuning_rotations)


def test_curr_purcell_meiboom_sequence():
    """Test for the Carr-Purcell-Meiboom-Gill (CPMG) sequence"""
    duration = 10.0
    number_of_offsets = 4

    sequence = new_predefined_dds(
        scheme=CARR_PURCELL_MEIBOOM_GILL,
        duration=duration,
        number_of_offsets=number_of_offsets,
    )

    _spacing = duration / number_of_offsets
    _offsets = np.array(
        [
            _spacing * 0.5,
            _spacing * 0.5 + _spacing,
            _spacing * 0.5 + 2 * _spacing,
            _spacing * 0.5 + 3 * _spacing,
        ]
    )
    _rabi_rotations = np.array([np.pi, np.pi, np.pi, np.pi])
    _azimuthal_angles = np.array([np.pi / 2, np.pi / 2, np.pi / 2, np.pi / 2])
    _detuning_rotations = np.array([0, 0, 0, 0])

    assert np.allclose(_offsets, sequence.offsets)
    assert np.allclose(_rabi_rotations, sequence.rabi_rotations)
    assert np.allclose(_azimuthal_angles, sequence.azimuthal_angles)
    assert np.allclose(_detuning_rotations, sequence.detuning_rotations)

    sequence = new_predefined_dds(
        scheme=CARR_PURCELL_MEIBOOM_GILL,
        duration=duration,
        number_of_offsets=number_of_offsets,
        pre_post_rotation=True,
    )

    _offsets = np.array(
        [
            0,
            _spacing * 0.5,
            _spacing * 0.5 + _spacing,
            _spacing * 0.5 + 2 * _spacing,
            _spacing * 0.5 + 3 * _spacing,
            duration,
        ]
    )
    _rabi_rotations = np.array([np.pi / 2, np.pi, np.pi, np.pi, np.pi, np.pi / 2])
    _azimuthal_angles = np.array([0, np.pi / 2, np.pi / 2, np.pi / 2, np.pi / 2, np.pi])
    _detuning_rotations = np.array([0, 0, 0, 0, 0, 0])

    assert np.allclose(_offsets, sequence.offsets)
    assert np.allclose(_rabi_rotations, sequence.rabi_rotations)
    assert np.allclose(_azimuthal_angles, sequence.azimuthal_angles)
    assert np.allclose(_detuning_rotations, sequence.detuning_rotations)


def test_uhrig_single_axis_sequence():
    """Test for the Uhrig Single Axis sequence"""
    duration = 10.0
    number_of_offsets = 4

    sequence = new_predefined_dds(
        scheme=UHRIG_SINGLE_AXIS, duration=duration, number_of_offsets=number_of_offsets
    )

    constant = 0.5 / (number_of_offsets + 1)
    _delta_positions = [
        duration * (np.sin(np.pi * (k + 1) * constant)) ** 2
        for k in range(number_of_offsets)
    ]
    _offsets = np.array(_delta_positions)
    _rabi_rotations = np.array([np.pi, np.pi, np.pi, np.pi])
    _azimuthal_angles = np.array([np.pi / 2, np.pi / 2, np.pi / 2, np.pi / 2])
    _detuning_rotations = np.array([0, 0, 0, 0])

    assert np.allclose(_offsets, sequence.offsets)
    assert np.allclose(_rabi_rotations, sequence.rabi_rotations)
    assert np.allclose(_azimuthal_angles, sequence.azimuthal_angles)
    assert np.allclose(_detuning_rotations, sequence.detuning_rotations)

    sequence = new_predefined_dds(
        scheme=UHRIG_SINGLE_AXIS,
        duration=duration,
        number_of_offsets=number_of_offsets,
        pre_post_rotation=True,
    )

    _offsets = np.array(_delta_positions)
    _offsets = np.insert(_offsets, [0, _offsets.shape[0]], [0, duration],)
    _rabi_rotations = np.array([np.pi / 2, np.pi, np.pi, np.pi, np.pi, np.pi / 2])
    _azimuthal_angles = np.array(
        [0.0, np.pi / 2, np.pi / 2, np.pi / 2, np.pi / 2, np.pi]
    )
    _detuning_rotations = np.array([0.0, 0, 0, 0, 0, 0.0])

    assert np.allclose(_offsets, sequence.offsets)
    assert np.allclose(_rabi_rotations, sequence.rabi_rotations)
    assert np.allclose(_azimuthal_angles, sequence.azimuthal_angles)
    assert np.allclose(_detuning_rotations, sequence.detuning_rotations)


def test_periodic_single_axis_sequence():
    """Test for the Periodic Single Axis sequence"""
    duration = 10.0
    number_of_offsets = 4

    sequence = new_predefined_dds(
        scheme=PERIODIC_SINGLE_AXIS,
        duration=duration,
        number_of_offsets=number_of_offsets,
    )

    constant = 1 / (number_of_offsets + 1)
    # prepare the offsets for delta comb
    _delta_positions = [
        duration * k * constant for k in range(1, number_of_offsets + 1)
    ]
    _offsets = np.array(_delta_positions)
    _rabi_rotations = np.array([np.pi, np.pi, np.pi, np.pi])
    _azimuthal_angles = np.array([0, 0, 0, 0])
    _detuning_rotations = np.array([0, 0, 0, 0])

    assert np.allclose(_offsets, sequence.offsets)
    assert np.allclose(_rabi_rotations, sequence.rabi_rotations)
    assert np.allclose(_azimuthal_angles, sequence.azimuthal_angles)
    assert np.allclose(_detuning_rotations, sequence.detuning_rotations)

    sequence = new_predefined_dds(
        scheme=PERIODIC_SINGLE_AXIS,
        duration=duration,
        number_of_offsets=number_of_offsets,
        pre_post_rotation=True,
    )

    _offsets = np.array(_delta_positions)
    _offsets = np.insert(_offsets, [0, _offsets.shape[0]], [0, duration],)
    _rabi_rotations = np.array([np.pi / 2, np.pi, np.pi, np.pi, np.pi, np.pi / 2])
    _azimuthal_angles = np.array([0, 0, 0, 0, 0, np.pi])
    _detuning_rotations = np.array([0, 0, 0, 0, 0, 0])

    assert np.allclose(_offsets, sequence.offsets)
    assert np.allclose(_rabi_rotations, sequence.rabi_rotations)
    assert np.allclose(_azimuthal_angles, sequence.azimuthal_angles)
    assert np.allclose(_detuning_rotations, sequence.detuning_rotations)


def test_walsh_single_axis_sequence():
    """Test for the Walsh Single Axis sequence"""
    duration = 10.0
    paley_order = 20

    sequence = new_predefined_dds(
        scheme=WALSH_SINGLE_AXIS, duration=duration, paley_order=paley_order
    )

    hamming_weight = 5
    samples = 2 ** hamming_weight
    relative_offset = np.arange(1.0 / (2 * samples), 1.0, 1.0 / samples)
    binary_string = np.binary_repr(paley_order)
    binary_order = [int(binary_string[i]) for i in range(hamming_weight)]
    walsh_array = np.ones([samples])
    for i in range(hamming_weight):
        walsh_array *= (
            np.sign(np.sin(2 ** (i + 1) * np.pi * relative_offset))
            ** binary_order[hamming_weight - 1 - i]
        )

    walsh_relative_offsets = []
    for i in range(samples - 1):
        if walsh_array[i] != walsh_array[i + 1]:
            walsh_relative_offsets.append((i + 1) * (1.0 / samples))
    walsh_relative_offsets = np.array(walsh_relative_offsets, dtype=np.float)
    _offsets = duration * walsh_relative_offsets
    _offsets = np.array(_offsets)
    _rabi_rotations = np.pi * np.ones(_offsets.shape)
    _azimuthal_angles = np.zeros(_offsets.shape)
    _detuning_rotations = np.zeros(_offsets.shape)

    assert np.allclose(_offsets, sequence.offsets)
    assert np.allclose(_rabi_rotations, sequence.rabi_rotations)
    assert np.allclose(_azimuthal_angles, sequence.azimuthal_angles)
    assert np.allclose(_detuning_rotations, sequence.detuning_rotations)

    sequence = new_predefined_dds(
        scheme=WALSH_SINGLE_AXIS,
        duration=duration,
        paley_order=paley_order,
        pre_post_rotation=True,
    )

    _offsets = np.insert(_offsets, [0, _offsets.shape[0]], [0, duration],)
    _rabi_rotations = np.insert(
        _rabi_rotations, [0, _rabi_rotations.shape[0]], [np.pi / 2, np.pi / 2]
    )
    _azimuthal_angles = np.zeros(_offsets.shape)
    _azimuthal_angles[-1] = np.pi
    _detuning_rotations = np.zeros(_offsets.shape)

    assert np.allclose(_offsets, sequence.offsets)
    assert np.allclose(_rabi_rotations, sequence.rabi_rotations)
    assert np.allclose(_azimuthal_angles, sequence.azimuthal_angles)
    assert np.allclose(_detuning_rotations, sequence.detuning_rotations)


def test_quadratic_sequence():
    """Test for the Quadratic sequence"""
    duration = 10.0
    number_inner_offsets = 4
    number_outer_offsets = 4

    sequence = new_predefined_dds(
        scheme=QUADRATIC,
        duration=duration,
        number_inner_offsets=number_inner_offsets,
        number_outer_offsets=number_outer_offsets,
    )

    _offsets = np.zeros((number_outer_offsets + 1, number_inner_offsets + 1))

    constant = 0.5 / (number_outer_offsets + 1)
    _delta_positions = [
        duration * (
np.sin(np.pi * (k + 1) * constant)
numpy.sin
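A hedged sketch of the Uhrig offset formula these tests exercise, offset_k = duration * sin(pi*(k+1)/(2*(n+1)))**2, vectorized with numpy; the names are illustrative only.

import numpy as np

duration = 10.0
n = 4  # number_of_offsets
constant = 0.5 / (n + 1)
# k + 1 runs from 1 to n, exactly as in the list comprehension above
offsets = duration * np.sin(np.pi * np.arange(1, n + 1) * constant) ** 2
print(offsets)  # increasing, and offsets[k] + offsets[n-1-k] == duration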
#!/usr/bin/python

from __future__ import division
from __future__ import print_function

import sys
import os
import re
import datetime
import zipfile
import tempfile
import argparse
import math
import warnings
import json
import csv
import numpy as np
import scipy.stats as scp
from lxml import etree as et


def get_rdml_lib_version():
    """Return the version string of the RDML library.

    Returns:
        The version string of the RDML library.
    """
    return "1.0.0"


class NpEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, np.bool_):
            return bool(obj)
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return super(NpEncoder, self).default(obj)


class RdmlError(Exception):
    """Basic exception for errors raised by the RDML-Python library"""
    def __init__(self, message):
        Exception.__init__(self, message)
    pass


class secondError(RdmlError):
    """Just to have, not used yet"""
    def __init__(self, message):
        RdmlError.__init__(self, message)
    pass


def _get_first_child(base, tag):
    """Get a child element of the base node with a given tag.

    Args:
        base: The base node element. (lxml node)
        tag: Child elements group tag used to select the elements. (string)

    Returns:
        The first child lxml node element found or None.
    """
    for node in base:
        if node.tag.replace("{http://www.rdml.org}", "") == tag:
            return node
    return None


def _get_first_child_text(base, tag):
    """Get a child element of the base node with a given tag.

    Args:
        base: The base node element. (lxml node)
        tag: Child elements group tag used to select the elements. (string)

    Returns:
        The text of the first child node element found or an empty string.
    """
    for node in base:
        if node.tag.replace("{http://www.rdml.org}", "") == tag:
            return node.text
    return ""


def _get_first_child_bool(base, tag, triple=True):
    """Get a child element of the base node with a given tag.

    Args:
        base: The base node element. (lxml node)
        tag: Child elements group tag used to select the elements. (string)
        triple: If True, None is returned if not found, if False, False

    Returns:
        The bool value of tag or, if triple is True, None.
    """
    for node in base:
        if node.tag.replace("{http://www.rdml.org}", "") == tag:
            return _string_to_bool(node.text, triple)
    if triple is False:
        return False
    else:
        return None


def _get_step_sort_nr(elem):
    """Get the number of the step, e.g. for sorting.

    Args:
        elem: The node element. (lxml node)

    Returns:
        The int value of the step node nr.
    """
    if elem is None:
        raise RdmlError('A step element must be provided for sorting.')
    ret = _get_first_child_text(elem, "nr")
    if ret == "":
        raise RdmlError('A step element must have a \"nr\" element for sorting.')
    return int(ret)


def _sort_list_int(elem):
    """Get the first element of the array as int, for sorting.

    Args:
        elem: The 2d list

    Returns:
        The int value of the first list element.
    """
    return int(elem[0])


def _sort_list_float(elem):
    """Get the first element of the array as float, for sorting.

    Args:
        elem: The 2d list

    Returns:
        The float value of the first list element.
    """
    return float(elem[0])


def _sort_list_digital_PCR(elem):
    """Get the first column of the list as int, for sorting.

    Args:
        elem: The list

    Returns:
        The int value of the first list element.
    """
    arr = elem.split("\t")
    return int(arr[0]), arr[4]


def _string_to_bool(value, triple=True):
    """Translates a string into a bool value or None.

    Args:
        value: The string value to evaluate. (string)
        triple: If True, None is returned if not found, if False, False

    Returns:
        The bool value of tag or, if triple is True, None.
    """
    if value is None or value == "":
        if triple is True:
            return None
        else:
            return False
    if type(value) is bool:
        return value
    if type(value) is int:
        if value != 0:
            return True
        else:
            return False
    if type(value) is str:
        if value.lower() in ['false', '0', 'f', '-', 'n', 'no']:
            return False
        else:
            return True
    return


def _value_to_booldic(value):
    """Translates a string, list or dic to a dictionary with true/false.

    Args:
        value: The string value to evaluate. (string)

    Returns:
        A dictionary mapping each key to its bool value.
    """
    ret = {}
    if type(value) is str:
        ret[value] = True
    if type(value) is list:
        for ele in value:
            ret[ele] = True
    if type(value) is dict:
        for key, val in value.items():
            ret[key] = _string_to_bool(val, triple=False)
    return ret


def _get_first_child_by_pos_or_id(base, tag, by_id, by_pos):
    """Get a child element of the base node with a given tag and position or id.

    Args:
        base: The base node element. (lxml node)
        tag: Child elements group tag used to select the elements. (string)
        by_id: The unique id to search for. (string)
        by_pos: The position of the element in the list (int)

    Returns:
        The child node element found, or raises an error.
    """
    if by_id is None and by_pos is None:
        raise RdmlError('Either an ' + tag + ' id or a position must be provided.')
    if by_id is not None and by_pos is not None:
        raise RdmlError('Only an ' + tag + ' id or a position can be provided.')
    allChildren = _get_all_children(base, tag)
    if by_id is not None:
        for node in allChildren:
            if node.get('id') == by_id:
                return node
        raise RdmlError('The ' + tag + ' id: ' + by_id + ' was not found in RDML file.')
    if by_pos is not None:
        if by_pos < 0 or by_pos > len(allChildren) - 1:
            raise RdmlError('The ' + tag + ' position ' + str(by_pos) + ' is out of range.')
        return allChildren[by_pos]


def _add_first_child_to_dic(base, dic, opt, tag):
    """Adds the first child element with a given tag to a dictionary.

    Args:
        base: The base node element. (lxml node)
        dic: The dictionary to add the element to (dictionary)
        opt: If false and id is not found in base, the element is added with an empty string (Bool)
        tag: Child elements group tag used to select the elements. (string)

    Returns:
        The dictionary with the added element.
    """
    for node in base:
        if node.tag.replace("{http://www.rdml.org}", "") == tag:
            dic[tag] = node.text
            return dic
    if not opt:
        dic[tag] = ""
    return dic


def _get_all_children(base, tag):
    """Get a list of all child elements with a given tag.

    Args:
        base: The base node element. (lxml node)
        tag: Child elements group tag used to select the elements. (string)

    Returns:
        A list with all child node elements found or an empty list.
    """
    ret = []
    for node in base:
        if node.tag.replace("{http://www.rdml.org}", "") == tag:
            ret.append(node)
    return ret


def _get_all_children_id(base, tag):
    """Get a list of ids of all child elements with a given tag.

    Args:
        base: The base node element. (lxml node)
        tag: Child elements group tag used to select the elements. (string)

    Returns:
        A list with all child id strings found or an empty list.
    """
    ret = []
    for node in base:
        if node.tag.replace("{http://www.rdml.org}", "") == tag:
            ret.append(node.get('id'))
    return ret


def _get_number_of_children(base, tag):
    """Count all child elements with a given tag.

    Args:
        base: The base node element. (lxml node)
        tag: Child elements group tag used to select the elements. (string)

    Returns:
        The int number of child elements found with the given tag.
    """
    counter = 0
    for node in base:
        if node.tag.replace("{http://www.rdml.org}", "") == tag:
            counter += 1
    return counter


def _check_unique_id(base, tag, id):
    """Find all child elements with a given group and check if the id is already used.

    Args:
        base: The base node element. (lxml node)
        tag: Child elements group tag used to select the elements. (string)
        id: The unique id to search for. (string)

    Returns:
        False if the id is already used, True if not.
    """
    for node in base:
        if node.tag.replace("{http://www.rdml.org}", "") == tag:
            if node.get('id') == id:
                return False
    return True


def _create_new_element(base, tag, id):
    """Create a new element with a given tag and id.

    Args:
        base: The base node element. (lxml node)
        tag: Child elements group tag. (string)
        id: The unique id of the new element. (string)

    Returns:
        The new lxml node element.
    """
    if id is None or id == "":
        raise RdmlError('An ' + tag + ' id must be provided.')
    if not _check_unique_id(base, tag, id):
        raise RdmlError('The ' + tag + ' id "' + id + '" must be unique.')
    return et.Element(tag, id=id)


def _add_new_subelement(base, basetag, tag, text, opt):
    """Create a new subelement with a given tag and text.

    Args:
        base: The base node element. (lxml node)
        basetag: Child elements group tag. (string)
        tag: Child elements own tag, to be created. (string)
        text: The text content of the new element. (string)
        opt: If true, the element is optional (Bool)

    Returns:
        Nothing, the base lxml element is modified.
    """
    if opt is False:
        if text is None or text == "":
            raise RdmlError('An ' + basetag + ' ' + tag + ' must be provided.')
        et.SubElement(base, tag).text = text
    else:
        if text is not None and text != "":
            et.SubElement(base, tag).text = text


def _change_subelement(base, tag, xmlkeys, value, opt, vtype, id_as_element=False):
    """Change the value of the element with a given tag.

    Args:
        base: The base node element. (lxml node)
        tag: Child elements own tag, to be created. (string)
        xmlkeys: The list of possible keys in the right order for xml (list strings)
        value: The text content of the new element.
        opt: If true, the element is optional (Bool)
        vtype: The value type of the element ("string", "int", "float")
        id_as_element: If true, handle tag "id" as element, else as attribute

    Returns:
        Nothing, the base lxml element is modified.
    """
    # Todo validate values with vtype
    goodVal = value
    if vtype == "bool":
        ev = _string_to_bool(value, triple=True)
        if ev is None or ev == "":
            goodVal = ""
        else:
            if ev:
                goodVal = "true"
            else:
                goodVal = "false"
    if opt is False:
        if goodVal is None or goodVal == "":
            raise RdmlError('A value for ' + tag + ' must be provided.')
    if tag == "id" and id_as_element is False:
        if base.get('id') != goodVal:
            par = base.getparent()
            groupTag = base.tag.replace("{http://www.rdml.org}", "")
            if not _check_unique_id(par, groupTag, goodVal):
                raise RdmlError('The ' + groupTag + ' id "' + goodVal + '" is not unique.')
            base.attrib['id'] = goodVal
        return
    # Check if the tag already exists
    elem = _get_first_child(base, tag)
    if elem is not None:
        if goodVal is None or goodVal == "":
            base.remove(elem)
        else:
            elem.text = goodVal
    else:
        if goodVal is not None and goodVal != "":
            new_node = et.Element(tag)
            new_node.text = goodVal
            place = _get_tag_pos(base, tag, xmlkeys, 0)
            base.insert(place, new_node)


def _get_or_create_subelement(base, tag, xmlkeys):
    """Get the element with a given tag; if not present, create it.

    Args:
        base: The base node element. (lxml node)
        tag: Child elements own tag, to be created. (string)
        xmlkeys: The list of possible keys in the right order for xml (list strings)

    Returns:
        The node element with the tag.
    """
    # Check if the tag already exists
    if _get_first_child(base, tag) is None:
        new_node = et.Element(tag)
        place = _get_tag_pos(base, tag, xmlkeys, 0)
        base.insert(place, new_node)
    return _get_first_child(base, tag)


def _remove_irrelevant_subelement(base, tag):
    """If the element with a given tag has no children, remove it.

    Args:
        base: The base node element. (lxml node)
        tag: Child elements own tag. (string)

    Returns:
        Nothing, the base lxml element is modified.
    """
    # Check if the tag already exists
    elem = _get_first_child(base, tag)
    if elem is None:
        return
    if len(elem) == 0:
        base.remove(elem)


def _move_subelement(base, tag, id, xmlkeys, position):
    """Move the element with a given tag and id to a new position.

    Args:
        base: The base node element. (lxml node)
        tag: Child elements group tag. (string)
        id: The unique id of the element to move. (string)
        xmlkeys: The list of possible keys in the right order for xml (list strings)
        position: the new position of the element (int)

    Returns:
        Nothing, the base lxml element is modified.
    """
    pos = _get_tag_pos(base, tag, xmlkeys, position)
    ele = _get_first_child_by_pos_or_id(base, tag, id, None)
    base.insert(pos, ele)


def _move_subelement_pos(base, tag, oldpos, xmlkeys, position):
    """Move the element with a given tag and position to a new position.

    Args:
        base: The base node element. (lxml node)
        tag: Child elements group tag. (string)
        oldpos: The old position of the element. (int)
        xmlkeys: The list of possible keys in the right order for xml (list strings)
        position: the new position of the element (int)

    Returns:
        Nothing, the base lxml element is modified.
    """
    pos = _get_tag_pos(base, tag, xmlkeys, position)
    ele = _get_first_child_by_pos_or_id(base, tag, None, oldpos)
    base.insert(pos, ele)


def _get_tag_pos(base, tag, xmlkeys, pos):
    """Returns a position where to add a subelement with the given tag, incl. pos offset.

    Args:
        base: The base node element. (lxml node)
        tag: The tag to search for. (string)
        xmlkeys: The list of possible keys in the right order for xml (list strings)
        pos: The position relative to the tag elements (int)

    Returns:
        The int number of where to add the element with the tag.
    """
    count = _get_number_of_children(base, tag)
    offset = pos
    if pos is None or pos < 0:
        offset = 0
        pos = 0
    if pos > count:
        offset = count
    return _get_first_tag_pos(base, tag, xmlkeys) + offset


def _get_first_tag_pos(base, tag, xmlkeys):
    """Returns a position where to add a subelement with the given tag.

    Args:
        base: The base node element. (lxml node)
        tag: The tag to search for. (string)
        xmlkeys: The list of possible keys in the right order for xml (list strings)

    Returns:
        The int number of where to add the element with the tag.
    """
    listrest = xmlkeys[xmlkeys.index(tag):]
    counter = 0
    for node in base:
        if node.tag.replace("{http://www.rdml.org}", "") in listrest:
            return counter
        counter += 1
    return counter


def _writeFileInRDML(rdmlName, fileName, data):
    """Writes a file in the RDML zip, even if it existed before.

    Args:
        rdmlName: The name of the RDML zip file
        fileName: The name of the file to write into the zip
        data: The data string of the file

    Returns:
        Nothing, modifies the RDML file.
    """
    needRewrite = False
    if os.path.isfile(rdmlName):
        with zipfile.ZipFile(rdmlName, 'r') as RDMLin:
            for item in RDMLin.infolist():
                if item.filename == fileName:
                    needRewrite = True
    if needRewrite:
        tempFolder, tempName = tempfile.mkstemp(dir=os.path.dirname(rdmlName))
        os.close(tempFolder)
        # copy everything except the filename
        with zipfile.ZipFile(rdmlName, 'r') as RDMLin:
            with zipfile.ZipFile(tempName, mode='w', compression=zipfile.ZIP_DEFLATED) as RDMLout:
                RDMLout.comment = RDMLin.comment
                for item in RDMLin.infolist():
                    if item.filename != fileName:
                        RDMLout.writestr(item, RDMLin.read(item.filename))
                if data != "":
                    RDMLout.writestr(fileName, data)
        os.remove(rdmlName)
        os.rename(tempName, rdmlName)
    else:
        with zipfile.ZipFile(rdmlName, mode='a', compression=zipfile.ZIP_DEFLATED) as RDMLout:
            RDMLout.writestr(fileName, data)


def _lrp_linReg(xIn, yUse):
    """A function which calculates the slope or the intercept by linear regression.

    Args:
        xIn: The numpy array of the cycles
        yUse: The numpy array that contains the fluorescence

    Returns:
        An array with the slope and intercept.
    """
    counts = np.ones(yUse.shape)
    xUse = xIn.copy()
    xUse[np.isnan(yUse)] = 0
    counts[np.isnan(yUse)] = 0

    cycSqared = xUse * xUse
    cycFluor = xUse * yUse
    sumCyc = np.nansum(xUse, axis=1)
    sumFluor =
np.nansum(yUse, axis=1)
numpy.nansum
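A small self-contained sketch (invented arrays, not the RDML library itself) of the NaN-masked accumulation pattern in `_lrp_linReg`: cycles and counts are zeroed wherever the fluorescence is NaN, and `np.nansum` then yields per-row sums that ignore the missing readings.

import numpy as np

cycles = np.tile(np.arange(1.0, 6.0), (2, 1))  # 2 wells x 5 cycles
fluor = np.array([[1.0, 2.0, np.nan, 4.0, 5.0],
                  [2.0, np.nan, 6.0, 8.0, 10.0]])
counts = np.ones(fluor.shape)
x = cycles.copy()
x[np.isnan(fluor)] = 0
counts[np.isnan(fluor)] = 0
sum_cyc = np.nansum(x, axis=1)        # sum of cycles with valid readings
sum_fluor = np.nansum(fluor, axis=1)  # NaNs contribute nothing
n_valid = np.nansum(counts, axis=1)   # number of valid readings per well
print(sum_cyc, sum_fluor, n_valid)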
#
# TODO:
# - Add progress bar

from PyQt5.QtWidgets import QApplication
from PyQt5.QtGui import QPalette, QColor, QFont

from silx.gui.plot import PlotWindow, Plot2D
from silx.gui.plot.StackView import StackViewMainWindow

import numpy

from orangewidget import gui
from orangewidget.settings import Setting
from orangecontrib.wofry.widgets.gui.ow_wofry_widget import WofryWidget
from orangewidget import widget
from oasys.widgets import congruence
from oasys.widgets import gui as oasysgui

from wofry.propagator.wavefront2D.generic_wavefront import GenericWavefront2D
from wofryimpl.beamline.beamline import WOBeamline

from comsyl.autocorrelation.CompactAFReader import CompactAFReader
from orangecontrib.wofry.util.wofry_objects import WofryData
from orangecontrib.comsyl.util.light_source import WOLightSourceCOMSYL
from oasys.util.oasys_util import TriggerIn, TriggerOut


class OWModesSelector(WofryWidget):

    name = "ModesSelector"
    id = "orangecontrib.comsyl.widgets.applications.comsyl_modes_viewer"
    description = ""
    icon = "icons/selector.png"
    author = ""
    maintainer_email = "<EMAIL>"
    priority = 45
    category = ""
    keywords = ["COMSYL", "coherent modes"]

    inputs = [("COMSYL modes", CompactAFReader, "setCompactAFReader"),
              ("Trigger", TriggerOut, "receive_trigger_signal")]

    outputs = [{"name": "WofryData",
                "type": WofryData,
                "doc": "WofryData",
                "id": "WofryData"},
               {"name": "Trigger",
                "type": TriggerIn,
                "doc": "Feedback signal to start a new beam simulation",
                "id": "Trigger"},
               {"name": "COMSYL modes",
                "type": CompactAFReader,
                "doc": "COMSYL modes",
                "id": "COMSYL modes"},]

    NORMALIZATION = Setting(1)       # 0=No, 1=With eigenvalues
    TYPE_PRESENTATION = Setting(0)   # 0=intensity, 1=real, 2=phase
    INDIVIDUAL_MODES = Setting(False)
    MODE_INDEX = Setting(0)
    REFERENCE_SOURCE = Setting(0)

    # IS_DEVELOP = True

    _input_available = False

    def __init__(self):
        super().__init__(is_automatic=True, show_view_options=True, show_script_tab=True)

        self.runaction = widget.OWAction("Generate Wavefront", self)
        self.runaction.triggered.connect(self.do_plot_and_send_mode)
        self.addAction(self.runaction)

        gui.separator(self.controlArea)
        gui.separator(self.controlArea)
        button_box = oasysgui.widgetBox(self.controlArea, "", addSpace=False, orientation="horizontal")
        button = gui.button(button_box, self, "Plot and Send mode", callback=self.do_plot_and_send_mode)
        font = QFont(button.font())
        font.setBold(True)
        button.setFont(font)
        palette = QPalette(button.palette())  # make a copy of the palette
        palette.setColor(QPalette.ButtonText, QColor('Dark Blue'))
        button.setPalette(palette)  # assign new palette
        button.setFixedHeight(45)

        gui.separator(self.controlArea)

        self.controlArea.setFixedWidth(self.CONTROL_AREA_WIDTH)

        tabs_setting = oasysgui.tabWidget(self.controlArea)
        tabs_setting.setFixedHeight(self.TABS_AREA_HEIGHT + 50)
        tabs_setting.setFixedWidth(self.CONTROL_AREA_WIDTH - 5)

        self.tab_settings = oasysgui.createTabPage(tabs_setting, "Settings")

        #
        # Settings
        #
        gui.comboBox(self.tab_settings, self, "NORMALIZATION",
                     label="Renormalize modes ", addSpace=False,
                     items=["No (pure eigenvectors)", "Yes (to carry intensity from eigenvalues)"],
                     valueType=int, orientation="horizontal", callback=self.do_plot_and_send_mode)

        gui.comboBox(self.tab_settings, self, "TYPE_PRESENTATION",
                     label="Display coherent mode ", addSpace=False,
                     items=self.list_TYPE_PRESENTATION(),
                     valueType=int, orientation="horizontal", callback=self.do_plot_and_send_mode)

        gui.comboBox(self.tab_settings, self, "INDIVIDUAL_MODES",
                     label="Load all modes in memory ", addSpace=False,
                     items=['No [Fast, Recommended]', 'Yes [Slow, Memory hungry]',],
                     valueType=int, orientation="horizontal", callback=self.do_plot_and_send_mode)

        gui.comboBox(self.tab_settings, self, "REFERENCE_SOURCE",
                     label="Display reference source ", addSpace=False,
                     items=['No', 'Yes',],
                     valueType=int, orientation="horizontal", callback=self.do_plot_and_send_mode)

        mode_index_box = oasysgui.widgetBox(self.tab_settings, "", addSpace=True, orientation="horizontal")
        left_box_5 = oasysgui.widgetBox(mode_index_box, "", addSpace=True, orientation="horizontal", )
        oasysgui.lineEdit(left_box_5, self, "MODE_INDEX", "Send mode", labelWidth=200, valueType=int,
                          tooltip="mode_index", orientation="horizontal", callback=self.do_plot_and_send_mode)
        gui.button(left_box_5, self, "+1", callback=self.increase_mode_index, width=30)
        gui.button(left_box_5, self, "-1", callback=self.decrease_mode_index, width=30)
        gui.button(left_box_5, self, "0", callback=self.reset_mode_index, width=30)

    def list_TYPE_PRESENTATION(self):
        return ['intensity', 'modulus', 'real part', 'imaginary part', 'angle [rad]']

    def get_light_source(self):
        return WOLightSourceCOMSYL(name=self.name,
                                   filename=self.af._filename,
                                   mode_index=self.MODE_INDEX,
                                   normalize_with_eigenvalue=self.NORMALIZATION)

    def initializeTabs(self):
        size = len(self.tab)
        indexes = range(0, size)
        for index in indexes:
            self.tabs.removeTab(size - 1 - index)

        self.tab = []
        self.plot_canvas = []

        self.set_tab_titles()

        for index in range(0, len(self.tab_titles)):
            self.tab.append(gui.createTabPage(self.tabs, self.tab_titles[index]))
            self.plot_canvas.append(None)

        for tab in self.tab:
            tab.setFixedHeight(self.IMAGE_HEIGHT)
            tab.setFixedWidth(self.IMAGE_WIDTH)

    def setCompactAFReader(self, data):
        if not data is None:
            self.af = data
            self._input_available = True
            self.wofry_output.setText(self.af.info(list_modes=False))
            self.main_tabs.setCurrentIndex(1)
            self.initializeTabs()
            self.do_plot_and_send_mode()

    def receive_trigger_signal(self, trigger):
        if trigger and trigger.new_object == True:
            if trigger.has_additional_parameter("variable_name"):
                variable_name = trigger.get_additional_parameter("variable_name").strip()
                variable_display_name = trigger.get_additional_parameter("variable_display_name").strip()
                variable_value = trigger.get_additional_parameter("variable_value")
                variable_um = trigger.get_additional_parameter("variable_um")

                if "," in variable_name:
                    variable_names = variable_name.split(",")
                    for variable_name in variable_names:
                        setattr(self, variable_name.strip(), variable_value)
                else:
                    setattr(self, variable_name, variable_value)

                self.do_plot_and_send_mode()
            else:
                self.increase_mode_index()

    def increase_mode_index(self):
        if self.MODE_INDEX + 1 >= self.af.number_of_modes():
            pass
        else:
            self.MODE_INDEX += 1
            self.do_plot_and_send_mode()

    def decrease_mode_index(self):
        if self.MODE_INDEX - 1 < 0:
            pass
        else:
            self.MODE_INDEX -= 1
            self.do_plot_and_send_mode()

    def reset_mode_index(self):
        self.MODE_INDEX = 0
        self.do_plot_and_send_mode()

    def _square_modulus(self, array1):
        return (numpy.absolute(array1)) ** 2

    def _intensity_times_eigenvalue(self, array1):
        s = array1.shape
        if len(s) == 3:  # stack
            for i in range(s[0]):
                array1[i] *= numpy.sqrt(self.af.eigenvalue(i).real)
        else:
            array1 *= numpy.sqrt(self.af.eigenvalue(self.MODE_INDEX).real)
        return (numpy.absolute(array1)) ** 2

    def plot_data2D(self, data2D, dataX, dataY, plot_canvas_index, title="", xtitle="", ytitle=""):
        xmin = numpy.min(dataX)
        xmax = numpy.max(dataX)
        ymin = numpy.min(dataY)
        ymax =
numpy.max(dataY)
numpy.max
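A hedged sketch of the axis-range computation that `plot_data2D` starts with: `numpy.min`/`numpy.max` give the data extent, from which an image origin and pixel scale can be derived. The origin/scale convention shown is an assumption modeled on silx-style image plots, and the arrays are invented.

import numpy

dataX = numpy.linspace(-2.0, 2.0, 101)
dataY = numpy.linspace(-1.0, 1.0, 51)
xmin, xmax = numpy.min(dataX), numpy.max(dataX)
ymin, ymax = numpy.min(dataY), numpy.max(dataY)
origin = (xmin, ymin)  # lower-left corner of the image
scale = ((xmax - xmin) / dataX.size, (ymax - ymin) / dataY.size)
print(origin, scale)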
# BWB-450.py
#
# Created:  Aug 2014, SUAVE Team
# Modified: Jan 2017, <NAME>
#           Jul 2017, <NAME>
#           Jan 2018, <NAME>

# ----------------------------------------------------------------------
#   Imports
# ----------------------------------------------------------------------

import SUAVE
from SUAVE.Core import Units

import numpy as np
import pylab as plt

import copy, time

from SUAVE.Core import (
    Data, Container
)

#from SUAVE.Input_Output.OpenVSP import write
#from SUAVE.Input_Output.OpenVSP.get_vsp_areas import get_vsp_areas

import sys
sys.path.append('../Vehicles')
# the analysis functions

from Boeing_BWB_450 import vehicle_setup, configs_setup

# ----------------------------------------------------------------------
#   Main
# ----------------------------------------------------------------------

def main():
    configs, analyses = full_setup()

    simple_sizing(configs)

    configs.finalize()
    analyses.finalize()

    # weight analysis
    weights = analyses.configs.base.weights

    # mission analysis
    mission = analyses.missions.base
    results = mission.evaluate()

    final_mass = results.segments[-1].conditions.weights.total_mass[-1, 0] / Units.lb
    final_mass_true = 563022.9267107359  # [lbs]
    print(final_mass)

    # Error Calculation
    error_final_mass = np.abs(final_mass - final_mass_true) / final_mass_true
    print(error_final_mass)
    assert error_final_mass < 1e-6

    return

# ----------------------------------------------------------------------
#   Analysis Setup
# ----------------------------------------------------------------------

def full_setup():
    # vehicle data
    vehicle = vehicle_setup()
    configs = configs_setup(vehicle)

    # vehicle analyses
    configs_analyses = analyses_setup(configs)

    # mission analyses
    mission = mission_setup(configs_analyses)
    missions_analyses = missions_setup(mission)

    analyses = SUAVE.Analyses.Analysis.Container()
    analyses.configs = configs_analyses
    analyses.missions = missions_analyses

    return configs, analyses

# ----------------------------------------------------------------------
#   Define the Vehicle Analyses
# ----------------------------------------------------------------------

def analyses_setup(configs):
    analyses = SUAVE.Analyses.Analysis.Container()

    # build a base analysis for each config
    for tag, config in list(configs.items()):
        analysis = base_analysis(config)
        analyses[tag] = analysis

    return analyses

def base_analysis(vehicle):
    # ------------------------------------------------------------------
    #   Initialize the Analyses
    # ------------------------------------------------------------------
    analyses = SUAVE.Analyses.Vehicle()

    # ------------------------------------------------------------------
    #   Basic Geometry Relations
    sizing = SUAVE.Analyses.Sizing.Sizing()
    sizing.features.vehicle = vehicle
    analyses.append(sizing)

    # ------------------------------------------------------------------
    #   Weights
    weights = SUAVE.Analyses.Weights.Weights_BWB()
    weights.vehicle = vehicle
    analyses.append(weights)

    # ------------------------------------------------------------------
    #   Aerodynamics Analysis
    aerodynamics = SUAVE.Analyses.Aerodynamics.SU2_Euler()
    #aerodynamics = SUAVE.Analyses.Aerodynamics.Fidelity_Zero()
    aerodynamics.geometry = vehicle
    aerodynamics.settings.span_efficiency = 0.95
    #aerodynamics.process.compute.lift.inviscid.settings.parallel = True
    aerodynamics.process.compute.lift.inviscid.settings.processors = 12
    aerodynamics.process.compute.lift.inviscid.training.Mach = np.array([.3, .5, .7, .85])
    aerodynamics.process.compute.lift.inviscid.training.angle_of_attack =
np.array([0.,3.,6.])
numpy.array
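A short sketch of the surrogate-training grid idea behind these settings: the inviscid lift solver is trained at a handful of Mach numbers and angles of attack held in plain numpy arrays, and the full training set is their Cartesian product. The values mirror the test above; treating the angles as bare numbers is for illustration only (SUAVE normally attaches its Units system).

import numpy as np

training_mach = np.array([.3, .5, .7, .85])
training_aoa = np.array([0., 3., 6.])
# Cartesian product of the two grids -> one row per CFD training point
grid = np.array(np.meshgrid(training_mach, training_aoa)).T.reshape(-1, 2)
print(grid.shape)  # (12, 2)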
""" Preprocess SYSU Dataset """ import os import argparse import numpy as np from PIL import Image parser = argparse.ArgumentParser(description="SYSU-MM01 Preprocessing") parser.add_argument("--data-path", type=str, default="Define your own path/sysu",\ help="path to SYSU-MM01 dataset folder") args = parser.parse_args() data_path = args.data_path rgb_cameras = ['cam1', 'cam2', 'cam4', 'cam5'] ir_cameras = ['cam3', 'cam6'] # load id info file_path_train = os.path.join(data_path, 'exp/train_id.txt') file_path_val = os.path.join(data_path, 'exp/val_id.txt') with open(file_path_train, 'r', encoding="utf-8") as file: ids = file.read().splitlines() ids = [int(y) for y in ids[0].split(',')] id_train = [f"{x:0>4d}" for x in ids] with open(file_path_val, 'r', encoding="utf-8") as file: ids = file.read().splitlines() ids = [int(y) for y in ids[0].split(',')] id_val = [f"{x:0>4d}" for x in ids] # combine train and val split id_train.extend(id_val) files_rgb = [] files_ir = [] for id_ in sorted(id_train): for cam in rgb_cameras: img_dir = os.path.join(data_path, cam, id_) if os.path.isdir(img_dir): new_files = sorted([img_dir + '/' + i for i in os.listdir(img_dir)]) files_rgb.extend(new_files) for cam in ir_cameras: img_dir = os.path.join(data_path, cam, id_) if os.path.isdir(img_dir): new_files = sorted([img_dir + '/' + i for i in os.listdir(img_dir)]) files_ir.extend(new_files) # relabel pid_container = set() for img_path in files_ir: pid = int(img_path[-13:-9]) pid_container.add(pid) pid2label = {pid: label for label, pid in enumerate(pid_container)} fix_image_width = 144 fix_image_height = 288 def read_imgs(train_image): """ read_imgs """ train_img_ = [] train_label_ = [] for img_path_ in train_image: # img img = Image.open(img_path_) img = img.resize((fix_image_width, fix_image_height), Image.ANTIALIAS) pix_array =
np.array(img)
numpy.array
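A minimal sketch of the PIL-to-numpy conversion used in `read_imgs`, run on a synthetic image instead of a dataset file: resizing to the fixed (width, height) and calling `np.array` yields an (H, W, 3) uint8 block ready to stack into a training tensor.

import numpy as np
from PIL import Image

img = Image.new('RGB', (640, 480), color=(128, 64, 32))  # stand-in for a file
img = img.resize((144, 288), Image.LANCZOS)  # LANCZOS is the modern ANTIALIAS
pix_array = np.array(img)
print(pix_array.shape, pix_array.dtype)  # (288, 144, 3) uint8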
import numpy as np
import scipy.spatial.distance as ssd
from gym import spaces
from gym.utils import seeding

from malib.environments.base_game import BaseGame
from malib.spaces import Box, MASpace, MAEnvSpec


class Agent(object):
    def __new__(cls, *args, **kwargs):
        agent = super(Agent, cls).__new__(cls)
        return agent

    @property
    def observation_space(self):
        raise NotImplementedError()

    @property
    def action_space(self):
        raise NotImplementedError()

    def __str__(self):
        return "<{} instance>".format(type(self).__name__)


class Archea(Agent):
    def __init__(
        self, idx, radius, n_sensors, sensor_range, addid=True, speed_features=True
    ):
        self._idx = idx
        self._radius = radius
        self._n_sensors = n_sensors
        self._sensor_range = sensor_range
        # Number of observation coordinates from each sensor
        self._sensor_obscoord = 4
        if speed_features:
            self._sensor_obscoord += 3
        self._obscoord_from_sensors = self._n_sensors * self._sensor_obscoord
        self._obs_dim = self._obscoord_from_sensors + 2  # + 1  # 2 for type, 1 for id
        if addid:
            self._obs_dim += 1

        self._position = None
        self._velocity = None

        # Sensors
        angles_K = np.linspace(0.0, 2.0 * np.pi, self._n_sensors + 1)[:-1]
        sensor_vecs_K_2 = np.c_[np.cos(angles_K), np.sin(angles_K)]
        self._sensors = sensor_vecs_K_2

    @property
    def observation_space(self):
        return spaces.Box(low=-10, high=10, shape=(self._obs_dim,))

    @property
    def action_space(self):
        return spaces.Box(low=-1, high=1, shape=(2,))

    @property
    def position(self):
        assert self._position is not None
        return self._position

    @property
    def velocity(self):
        assert self._velocity is not None
        return self._velocity

    def set_position(self, x_2):
        assert x_2.shape == (2,)
        self._position = x_2

    def set_velocity(self, v_2):
        assert v_2.shape == (2,)
        self._velocity = v_2

    @property
    def sensors(self):
        assert self._sensors is not None
        return self._sensors

    def sensed(self, objx_N_2, same=False):
        """Whether `obj` would be sensed by the pursuers"""
        relpos_obj_N_2 = objx_N_2 - np.expand_dims(self.position, 0)
        sensorvals_K_N = self.sensors.dot(relpos_obj_N_2.T)
        sensorvals_K_N[
            (sensorvals_K_N < 0)
            | (sensorvals_K_N > self._sensor_range)
            | (
                (relpos_obj_N_2 ** 2).sum(axis=1)[None, :] - sensorvals_K_N ** 2
                > self._radius ** 2
            )
        ] = np.inf
        if same:
            sensorvals_K_N[:, self._idx - 1] = np.inf
        return sensorvals_K_N


class MAWaterWorld_mod(BaseGame):
    def __init__(
        self,
        n_pursuers,
        n_evaders,
        n_coop=2,
        n_poison=10,
        radius=0.015,
        obstacle_radius=0.2,
        obstacle_loc=np.array([0.5, 0.5]),
        ev_speed=0.01,
        poison_speed=0.01,
        n_sensors=30,
        sensor_range=0.2,
        action_scale=0.01,
        poison_reward=-1.0,
        food_reward=1.0,
        encounter_reward=0.05,
        control_penalty=-0.5,
        reward_mech="global",
        addid=True,
        speed_features=True,
        **kwargs
    ):
        self.n_pursuers = n_pursuers
        self.n_evaders = n_evaders
        self.n_coop = n_coop
        self.n_poison = n_poison
        self.obstacle_radius = obstacle_radius
        self.obstacle_loc = obstacle_loc
        self.poison_speed = poison_speed
        self.radius = radius
        self.ev_speed = ev_speed
        self.n_sensors = n_sensors
        self.sensor_range = np.ones(self.n_pursuers) * sensor_range
        self.action_scale = action_scale
        self.poison_reward = poison_reward
        self.food_reward = food_reward
        self.control_penalty = control_penalty
        self.encounter_reward = encounter_reward

        self.n_obstacles = 1
        self._reward_mech = reward_mech
        self._addid = addid
        self._speed_features = speed_features
        self.seed()

        self._pursuers = [
            Archea(
                npu + 1,
                self.radius * 3 / 4,
                self.n_sensors,
                self.sensor_range[npu],
                addid=self._addid,
                speed_features=self._speed_features,
            )
            for npu in range(self.n_pursuers)
        ]
        self._evaders = [
            Archea(
                nev + 1,
                self.radius * 3 / 4,
                self.n_pursuers,
                self.sensor_range.mean() / 2,
            )
            for nev in range(self.n_evaders)
        ]
        self._poisons = [
            Archea(npo + 1, self.radius * 3 / 4, self.n_poison, 0)
            for npo in range(self.n_poison)
        ]

        self.observation_spaces = MASpace(
            tuple(pursuer.observation_space for pursuer in self._pursuers)
        )
        self.action_spaces = MASpace(
            tuple(pursuer.action_space for pursuer in self._pursuers)
        )
        self.env_specs = MAEnvSpec(self.observation_spaces, self.action_spaces)

    @property
    def reward_mech(self):
        return self._reward_mech

    @property
    def timestep_limit(self):
        return 1000

    @property
    def agents(self):
        return self._pursuers

    def get_param_values(self):
        return self.__dict__

    def seed(self, seed=None):
        self.np_random, seed_ = seeding.np_random(seed)
        return [seed_]

    def _respawn(self, objx_2, radius):
        while (
            ssd.cdist(objx_2[None, :], self.obstaclesx_No_2)
            <= radius * 2 + self.obstacle_radius
        ):
            objx_2 = self.np_random.rand(2)
        return objx_2

    def reset(self):
        self._timesteps = 0
        # Initialize obstacles
        if self.obstacle_loc is None:
            self.obstaclesx_No_2 = self.np_random.rand(self.n_obstacles, 2)
        else:
            self.obstaclesx_No_2 = self.obstacle_loc[None, :]
        self.obstaclesv_No_2 = np.zeros((self.n_obstacles, 2))

        # Initialize pursuers
        for pursuer in self._pursuers:
            pursuer.set_position(self.np_random.rand(2))
            # Avoid spawning where the obstacles lie
            pursuer.set_position(self._respawn(pursuer.position, pursuer._radius))
            pursuer.set_velocity(np.zeros(2))

        # Initialize evaders
        for evader in self._evaders:
            evader.set_position(self.np_random.rand(2))
            evader.set_position(self._respawn(evader.position, evader._radius))
            evader.set_velocity(
                (self.np_random.rand(2) - 0.5) * self.ev_speed
            )  # TODO policies

        # Initialize poisons
        for poison in self._poisons:
            poison.set_position(self.np_random.rand(2))
            poison.set_position(self._respawn(poison.position, poison._radius))
            poison.set_velocity((self.np_random.rand(2) - 0.5) * self.ev_speed)

        return self.step(np.zeros((self.n_pursuers, 2)))[0]

    @property
    def is_terminal(self):
        if self._timesteps >= self.timestep_limit:
            return True
        return False

    def _caught(self, is_colliding_N1_N2, n_coop):
        """
        Check whether a collision results in catching the object.
        This is because you need exactly `n_coop` agents to collide with the
        object to actually catch it.
        """
        # number of N1 colliding with given N2
        n_collisions_N2 = is_colliding_N1_N2.sum(axis=0)
        is_caught_cN2 = np.where(n_collisions_N2 == n_coop)[0]

        # number of N2 colliding with given N1
        who_collisions_N1_cN2 = is_colliding_N1_N2[:, is_caught_cN2]
        who_caught_cN1 = np.where(who_collisions_N1_cN2 >= 1)[0]

        return is_caught_cN2, who_caught_cN1

    def _closest_dist(self, closest_obj_idx_Np_K, sensorvals_Np_K_N):
        """Closest distances according to `idx`"""
        sensorvals = []
        for inp in range(self.n_pursuers):
            sensorvals.append(
                sensorvals_Np_K_N[inp, ...][
                    np.arange(self.n_sensors), closest_obj_idx_Np_K[inp, ...]
                ]
            )
        return np.c_[sensorvals]

    def _extract_speed_features(
        self, objv_N_2, closest_obj_idx_N_K, sensedmask_obj_Np_K
    ):
        sensorvals = []
        for pursuer in self._pursuers:
            sensorvals.append(
                pursuer.sensors.dot((objv_N_2 - np.expand_dims(pursuer.velocity, 0)).T)
            )
        sensed_objspeed_Np_K_N = np.c_[sensorvals]

        sensed_objspeedfeatures_Np_K = np.zeros((self.n_pursuers, self.n_sensors))

        sensorvals = []
        for inp in range(self.n_pursuers):
            sensorvals.append(
                sensed_objspeed_Np_K_N[inp, :, :][
                    np.arange(self.n_sensors), closest_obj_idx_N_K[inp, :]
                ]
            )
        sensed_objspeedfeatures_Np_K[sensedmask_obj_Np_K] = np.c_[sensorvals][
            sensedmask_obj_Np_K
        ]

        return sensed_objspeedfeatures_Np_K

    def step(self, action_Np2):
        action_Np2 = np.asarray(action_Np2)
        action_Np_2 = action_Np2.reshape((self.n_pursuers, 2))
        # Players
        actions_Np_2 = action_Np_2 * self.action_scale

        rewards = np.zeros((self.n_pursuers,))
        assert action_Np_2.shape == (self.n_pursuers, 2)

        for npu, pursuer in enumerate(self._pursuers):
            pursuer.set_velocity(pursuer.velocity + actions_Np_2[npu])
            pursuer.set_position(pursuer.position + pursuer.velocity)

        # Penalize large actions
        if self.reward_mech == "global":
            rewards += self.control_penalty * (actions_Np_2 ** 2).sum()
        else:
            rewards += self.control_penalty * (actions_Np_2 ** 2).sum(axis=1)

        # Players stop on hitting a wall
        for npu, pursuer in enumerate(self._pursuers):
            clippedx_2 = np.clip(pursuer.position, 0, 1)
            vel_2 = pursuer.velocity
            vel_2[pursuer.position != clippedx_2] = 0
            pursuer.set_velocity(vel_2)
            pursuer.set_position(clippedx_2)

        obstacle_coll_Np =
np.zeros(self.n_pursuers)
numpy.zeros
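A standalone sketch (invented positions and radii) of the per-agent accumulator pattern the completion sets up: `np.zeros(n_pursuers)` allocates one slot per pursuer, which a vectorized collision test then fills in.

import numpy as np

n_pursuers = 3
obstacle_radius, agent_radius = 0.2, 0.01
obstacle = np.array([0.5, 0.5])
positions = np.array([[0.5, 0.69], [0.1, 0.1], [0.5, 0.3]])
obstacle_coll = np.zeros(n_pursuers)
dists = np.linalg.norm(positions - obstacle, axis=1)
obstacle_coll[dists <= obstacle_radius + agent_radius] = 1
print(obstacle_coll)  # [1. 0. 1.]: agents 0 and 2 overlap the obstacle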
# -*- coding: utf-8 -*-
# Copyright 2017-2018 Orbital Insight Inc., all rights reserved.
# Contains confidential and trade secret information.
# Government Users: Commercial Computer Software - Use governed by
# terms of Orbital Insight commercial license agreement.

"""
Created on Tue Jun 25 20:55:47 2019

Everything is computed in ecliptic plane coordinates.
z-hat points toward the ecliptic pole.
"""

from __future__ import print_function
from __future__ import division

from pdb import set_trace as debug

import matplotlib.pyplot as plt
import matplotlib.patches as mpatch
import matplotlib as mpl
import numpy as np


def computeSolarAngleFromDoy(doy):
    pass


def computeEarthUnitVector(alpha_rad):
    """Compute vector from sun to Earth given earth orbit angle, alpha"""
    return np.array([np.cos(alpha_rad), np.sin(alpha_rad), 0])


def computeTelescopeUnitVector(alpha_rad, rho_rad):
    """
    alpha is earth orbital phase angle,
    rho is telescope orbital phase angle
    """
    x = -np.sin(alpha_rad) * np.cos(rho_rad)
    y = np.cos(alpha_rad) * np.cos(rho_rad)
    z = np.sin(rho_rad)
    return np.array([x, y, z])


def computeStarUnitVector(ecliptic_lng_deg, ecliptic_lat_deg):
    lng_rad, lat_rad = np.radians([ecliptic_lng_deg, ecliptic_lat_deg])
    x = np.cos(lat_rad) * np.cos(lng_rad)
    y = np.cos(lat_rad) * np.sin(lng_rad)
    z = np.sin(lat_rad)
    return np.array([x, y, z])


def computeDutyCycle(alpha_rad, starUnitVec, maxAntiSolarAngle_rad, maxZenithAngle_rad):
    earthUnitVec = computeEarthUnitVector(alpha_rad)
    if np.dot(earthUnitVec, starUnitVec) < np.cos(maxAntiSolarAngle_rad):
        return 0

    try:
        angles_rad = computeExtremaOfOrbitalPhase_rad(alpha_rad, starUnitVec,
                                                      maxZenithAngle_rad)
    except ValueError:
        # Star is never observable
        return 0

    # Compute fraction of circle subtended by angles
    twopi = 2 * np.pi
    dutyCycle = np.diff(np.sort(angles_rad))[0] / twopi
    assert 0 <= dutyCycle and dutyCycle <= 1

    cos_zeta =
np.cos(maxZenithAngle_rad)
numpy.cos
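# A small worked example of the anti-solar-angle gate at the top of
# computeDutyCycle() above: the star is observable only if it lies within
# maxAntiSolarAngle of the anti-solar direction (the sun->Earth unit vector).
# The angles below are made-up test values, not mission parameters.
import numpy as np


def star_unit_vector(lng_deg, lat_deg):
    # Same convention as computeStarUnitVector above.
    lng, lat = np.radians([lng_deg, lat_deg])
    return np.array([np.cos(lat) * np.cos(lng),
                     np.cos(lat) * np.sin(lng),
                     np.sin(lat)])


alpha_rad = 0.0                                     # Earth at longitude 0
earth_unit_vec = np.array([np.cos(alpha_rad), np.sin(alpha_rad), 0.0])
star = star_unit_vector(10.0, 5.0)                  # near the anti-solar point
max_anti_solar_rad = np.radians(30.0)

# dot(e, s) >= cos(theta_max) <=> angular separation <= theta_max.
observable = np.dot(earth_unit_vec, star) >= np.cos(max_anti_solar_rad)
print(observable)  # True: separation ~11 deg, inside the 30 deg cone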
# !/usr/bin/env python # Created by "Thieu" at 10:08, 02/03/2021 ----------% # Email: <EMAIL> % # Github: https://github.com/thieu1995 % # --------------------------------------------------% import numpy as np from mealpy.optimizer import Optimizer class OriginalHC(Optimizer): """ The original version of: Hill Climbing (HC) Notes ~~~~~ + The number of neighbour solutions are equal to user defined + The step size to calculate neighbour is randomized Hyper-parameters should fine tuned in approximate range to get faster convergence toward the global optimum: + neighbour_size (int): [pop_size/2, pop_size], fixed parameter, sensitive exploitation parameter, Default: 50 Examples ~~~~~~~~ >>> import numpy as np >>> from mealpy.math_based.HC import OriginalHC >>> >>> def fitness_function(solution): >>> return np.sum(solution**2) >>> >>> problem_dict1 = { >>> "fit_func": fitness_function, >>> "lb": [-10, -15, -4, -2, -8], >>> "ub": [10, 15, 12, 8, 20], >>> "minmax": "min", >>> } >>> >>> epoch = 1000 >>> pop_size = 50 >>> neighbour_size = 50 >>> model = OriginalHC(problem_dict1, epoch, pop_size, neighbour_size) >>> best_position, best_fitness = model.solve() >>> print(f"Solution: {best_position}, Fitness: {best_fitness}") References ~~~~~~~~~~ [1] <NAME>., <NAME>. and <NAME>., 1993. When will a genetic algorithm outperform hill climbing. Advances in neural information processing systems, 6. """ def __init__(self, problem, epoch=10000, pop_size=100, neighbour_size=50, **kwargs): """ Args: problem (dict): The problem dictionary epoch (int): maximum number of iterations, default = 10000 pop_size (int): number of population size, default = 100 neighbour_size (int): fixed parameter, sensitive exploitation parameter, Default: 50 """ super().__init__(problem, kwargs) self.epoch = self.validator.check_int("epoch", epoch, [1, 100000]) self.pop_size = self.validator.check_int("pop_size", pop_size, [10, 10000]) self.neighbour_size = self.validator.check_int("neighbour_size", neighbour_size, [2, self.pop_size]) self.nfe_per_epoch = self.pop_size self.sort_flag = False def evolve(self, epoch): """ The main operations (equations) of algorithm. Inherit from Optimizer class Args: epoch (int): The current iteration """ self.nfe_per_epoch = self.neighbour_size step_size = np.mean(self.problem.ub - self.problem.lb) * np.exp(-2 * (epoch + 1) / self.epoch) pop_neighbours = [] for i in range(0, self.neighbour_size): pos_new = self.g_best[self.ID_POS] + np.random.normal(0, 1, self.problem.n_dims) * step_size pos_new = self.amend_position(pos_new, self.problem.lb, self.problem.ub) pop_neighbours.append([pos_new, None]) self.pop = self.update_target_wrapper_population(pop_neighbours) class BaseHC(OriginalHC): """ My changed version of: Swarm-based Hill Climbing (S-HC) Notes ~~~~~ + Based on swarm-of people are trying to climb on the mountain idea + The number of neighbour solutions are equal to population size + The step size to calculate neighbour is randomized and based on rank of solution. + The guys near on top of mountain will move slower than the guys on bottom of mountain. + Imagination: exploration when far from global best, and exploitation when near global best + Who on top of mountain first will be the winner. 
(global optimal) Hyper-parameters should fine tuned in approximate range to get faster convergence toward the global optimum: + neighbour_size (int): [pop_size/2, pop_size], fixed parameter, sensitive exploitation parameter, Default: 50 Examples ~~~~~~~~ >>> import numpy as np >>> from mealpy.math_based.HC import BaseHC >>> >>> def fitness_function(solution): >>> return np.sum(solution**2) >>> >>> problem_dict1 = { >>> "fit_func": fitness_function, >>> "lb": [-10, -15, -4, -2, -8], >>> "ub": [10, 15, 12, 8, 20], >>> "minmax": "min", >>> } >>> >>> epoch = 1000 >>> pop_size = 50 >>> neighbour_size = 50 >>> model = BaseHC(problem_dict1, epoch, pop_size, neighbour_size) >>> best_position, best_fitness = model.solve() >>> print(f"Solution: {best_position}, Fitness: {best_fitness}") """ def __init__(self, problem, epoch=10000, pop_size=100, neighbour_size=50, **kwargs): """ Args: epoch (int): maximum number of iterations, default = 10000 pop_size (int): number of population size, default = 100 neighbour_size (int): fixed parameter, sensitive exploitation parameter, Default: 50 """ super().__init__(problem, epoch, pop_size, neighbour_size, **kwargs) self.nfe_per_epoch = self.pop_size self.sort_flag = True def evolve(self, epoch): """ Args: epoch (int): The current iteration """ ranks = np.array(list(range(1, self.pop_size + 1))) ranks = ranks / sum(ranks) step_size =
np.mean(self.problem.ub - self.problem.lb)
numpy.mean
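# A compact, self-contained sketch of the neighbourhood sampling used by
# OriginalHC.evolve() above: the step size shrinks exponentially with the
# epoch, so early epochs explore widely and late epochs exploit locally.
# The sphere objective and bounds are illustrative, not from mealpy.
import numpy as np

rng = np.random.default_rng(0)
lb, ub = np.array([-10.0, -10.0]), np.array([10.0, 10.0])
best = rng.uniform(lb, ub)
epochs, neighbour_size = 100, 20


def fitness(x):
    return np.sum(x ** 2)


for epoch in range(epochs):
    # Same schedule as in evolve(): mean search range times exp(-2*(t+1)/T).
    step_size = np.mean(ub - lb) * np.exp(-2 * (epoch + 1) / epochs)
    for _ in range(neighbour_size):
        candidate = np.clip(best + rng.normal(0, 1, best.shape) * step_size, lb, ub)
        if fitness(candidate) < fitness(best):
            best = candidate

print(best, fitness(best))  # converges toward the origin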
from typing import Union

import numpy as np
import xarray as xr


def mse(
        forecast_field: Union[np.ndarray, xr.DataArray],
        analysis_field: Union[np.ndarray, xr.DataArray],
        latitudes: Union[np.ndarray, xr.DataArray]
) -> Union[np.ndarray, xr.DataArray]:
    """
    Mean Square Error (MSE), weighted by cos(latitude).

    Parameters
    ----------
    forecast_field: np.ndarray or xr.DataArray
    analysis_field: np.ndarray or xr.DataArray
    latitudes: np.ndarray or xr.DataArray
        Latitudes in degrees, used as area weights.

    Returns
    -------
    np.ndarray or xr.DataArray
    """
    result = np.sum(
        np.power(forecast_field - analysis_field, 2) * np.cos(latitudes * np.pi / 180.0)
    ) / np.sum(np.cos(latitudes * np.pi / 180.0))
    return result


def me(
        forecast_field: Union[np.ndarray, xr.DataArray],
        analysis_field: Union[np.ndarray, xr.DataArray],
        latitudes: Union[np.ndarray, xr.DataArray]
) -> Union[np.ndarray, xr.DataArray]:
    """
    Mean Error (ME), also called Bias, weighted by cos(latitude).

    Parameters
    ----------
    forecast_field: np.ndarray or xr.DataArray
    analysis_field: np.ndarray or xr.DataArray
    latitudes: np.ndarray or xr.DataArray

    Returns
    -------
    np.ndarray or xr.DataArray
    """
    result = np.sum(
        (forecast_field - analysis_field) * np.cos(latitudes * np.pi / 180.0)
    ) / np.sum(np.cos(latitudes * np.pi / 180.0))
    return result


def mae(
        forecast_field: Union[np.ndarray, xr.DataArray],
        analysis_field: Union[np.ndarray, xr.DataArray],
        latitudes: Union[np.ndarray, xr.DataArray]
) -> Union[np.ndarray, xr.DataArray]:
    """
    Mean Absolute Error (MAE), weighted by cos(latitude).
    """
    result = np.sum(
        np.abs(forecast_field - analysis_field) *
np.cos(latitudes * np.pi / 180.0)
numpy.cos
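# A worked toy example of the cos(latitude) weighting used by mse()/me()
# above: grid cells near the poles cover less area, so their errors are
# down-weighted by cos(lat). The values here are made up for illustration.
import numpy as np

latitudes = np.array([0.0, 30.0, 60.0])   # degrees
forecast = np.array([1.0, 2.0, 3.0])
analysis = np.array([0.0, 2.0, 5.0])

w = np.cos(latitudes * np.pi / 180.0)     # [1.0, 0.866, 0.5]
weighted_mse = np.sum((forecast - analysis) ** 2 * w) / np.sum(w)
plain_mse = np.mean((forecast - analysis) ** 2)

print(weighted_mse)  # ~1.268: the big high-latitude error (4.0) counts only half
print(plain_mse)     # ~1.667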
#!/usr/bin/env python3 import os import glob import re import sys import math TIMEOUT = 100 # use cases and their directory names tests = [ "CP3-4.8.5", "CP1-4.8.5", "CP3-4.8.9", "CP1-4.8.9", "noSeqCon-CP3-4.8.5", "noSeqCon-CP1-4.8.5", "noSeqCon-CP3-4.8.9", "noSeqCon-CP1-4.8.9", "nolambda-CP3-4.8.5", "nolambda-CP1-4.8.5", "nolambda-CP3-4.8.9", "nolambda-CP1-4.8.9" ] loc_orig_5 = os.path.join('Logs_DLL_8.20', 'Logs_orig_4.8.5', '*.trace') loc_orig_9 = os.path.join('Logs_DLL_8.20', 'Logs_orig_4.8.9', '*.trace') loc_noseqcon_5 = os.path.join('Logs_DLL_8.20', 'Logs_noseqcon_4.8.5', '*.trace') loc_noseqcon_9 = os.path.join('Logs_DLL_8.20', 'Logs_noseqcon_4.8.9', '*.trace') loc_nolambda_5 = os.path.join('Logs_DLL_8.20', 'Logs_nolambda_4.8.5', '*.trace') loc_nolambda_9 = os.path.join('Logs_DLL_8.20', 'Logs_nolambda_4.8.9', '*.trace') file_orig_5 = glob.glob(loc_orig_5) file_orig_9 = glob.glob(loc_orig_9) file_noseqcon_5 = glob.glob(loc_noseqcon_5) file_noseqcon_9 = glob.glob(loc_noseqcon_9) file_nolambda_5 = glob.glob(loc_nolambda_5) file_nolambda_9 = glob.glob(loc_nolambda_9) allinfo_Expand = {} allinfo_Remove = {} allinfo_InsertAfter = {} allinfo_InsertBefore = {} def get_time (files, index): for f in files: outfile = open(f, 'r') data = outfile.readlines() outfile.close() for i in range(0, len(data)): if 'Verifying Impl$$_module.__default.Expand ...' in data[i]: time = re.findall("\[([0-9.]*) s, ([0-9.]*) proof obligations\] ([a-z]+)", data[i + 1]) if len(time) > 0: if time[0][2] == "verified": if 'CP3' in f: allinfo_Expand[tests[index]] = allinfo_Expand.get(tests[index], []) allinfo_Expand[tests[index]] += [float(time[0][0])] else: allinfo_Expand[tests[index+1]] = allinfo_Expand.get(tests[index+1], []) allinfo_Expand[tests[index+1]] += [float(time[0][0])] else: if time[0][2] == "timed": if 'CP3' in f: allinfo_Expand[tests[index]] = allinfo_Expand.get(tests[index], []) allinfo_Expand[tests[index]] += [float(TIMEOUT)] else: allinfo_Expand[tests[index+1]] = allinfo_Expand.get(tests[index+1], []) allinfo_Expand[tests[index+1]] += [float(TIMEOUT)] else: allinfo_Expand[tests[index]] = allinfo_Expand.get(tests[index], []) allinfo_Expand[tests[index+1]] = allinfo_Expand.get(tests[index+1], []) if 'Verifying Impl$$_module.__default.Remove ...' in data[i]: time = re.findall("\[([0-9.]*) s, ([0-9.]*) proof obligations\] ([a-z]+)", data[i + 1]) if len(time) > 0: if time[0][2] == "verified": if 'CP3' in f: allinfo_Remove[tests[index]] = allinfo_Remove.get(tests[index], []) allinfo_Remove[tests[index]] += [float(time[0][0])] else: allinfo_Remove[tests[index+1]] = allinfo_Remove.get(tests[index+1], []) allinfo_Remove[tests[index+1]] += [float(time[0][0])] else: if time[0][2] == "timed": if 'CP3' in f: allinfo_Remove[tests[index]] = allinfo_Remove.get(tests[index], []) allinfo_Remove[tests[index]] += [float(TIMEOUT)] else: allinfo_Remove[tests[index+1]] = allinfo_Remove.get(tests[index+1], []) allinfo_Remove[tests[index+1]] += [float(TIMEOUT)] else: allinfo_Remove[tests[index]] = allinfo_Remove.get(tests[index], []) allinfo_Remove[tests[index+1]] = allinfo_Remove.get(tests[index+1], []) if 'Verifying Impl$$_module.__default.InsertAfter ...' 
in data[i]: time = re.findall("\[([0-9.]*) s, ([0-9.]*) proof obligations\] ([a-z]+)", data[i + 1]) if len(time) > 0: if time[0][2] == "verified": if 'CP3' in f: allinfo_InsertAfter[tests[index]] = allinfo_InsertAfter.get(tests[index], []) allinfo_InsertAfter[tests[index]] += [float(time[0][0])] else: allinfo_InsertAfter[tests[index+1]] = allinfo_InsertAfter.get(tests[index+1], []) allinfo_InsertAfter[tests[index+1]] += [float(time[0][0])] else: if time[0][2] == "timed": if 'CP3' in f: allinfo_InsertAfter[tests[index]] = allinfo_InsertAfter.get(tests[index], []) allinfo_InsertAfter[tests[index]] += [float(TIMEOUT)] else: allinfo_InsertAfter[tests[index+1]] = allinfo_InsertAfter.get(tests[index+1], []) allinfo_InsertAfter[tests[index+1]] += [float(TIMEOUT)] else: allinfo_InsertAfter[tests[index]] = allinfo_InsertAfter.get(tests[index], []) allinfo_InsertAfter[tests[index+1]] = allinfo_InsertAfter.get(tests[index+1], []) if 'Verifying Impl$$_module.__default.InsertBefore ...' in data[i]: time = re.findall("\[([0-9.]*) s, ([0-9.]*) proof obligations\] ([a-z]+)", data[i + 1]) if len(time) > 0: if time[0][2] == "verified": if 'CP3' in f: allinfo_InsertBefore[tests[index]] = allinfo_InsertBefore.get(tests[index], []) allinfo_InsertBefore[tests[index]] += [float(time[0][0])] else: allinfo_InsertBefore[tests[index+1]] = allinfo_InsertBefore.get(tests[index+1], []) allinfo_InsertBefore[tests[index+1]] += [float(time[0][0])] else: if time[0][2] == "timed": if 'CP3' in f: allinfo_InsertBefore[tests[index]] = allinfo_InsertBefore.get(tests[index], []) allinfo_InsertBefore[tests[index]] += [float(TIMEOUT)] else: allinfo_InsertBefore[tests[index+1]] = allinfo_InsertBefore.get(tests[index+1], []) allinfo_InsertBefore[tests[index+1]] += [float(TIMEOUT)] else: allinfo_InsertBefore[tests[index]] = allinfo_InsertBefore.get(tests[index], []) allinfo_InsertBefore[tests[index+1]] = allinfo_InsertBefore.get(tests[index+1], []) get_time(file_orig_5, 0) get_time(file_orig_9, 2) get_time(file_noseqcon_5, 4) get_time(file_noseqcon_9, 6) get_time(file_nolambda_5, 8) get_time(file_nolambda_9, 10) # print(allinfo_Expand) # print(allinfo_Remove) # print(allinfo_InsertAfter) # print(allinfo_InsertBefore) # print a CSV def show_csv(allinfo, info): for test in tests: if test in allinfo: times = allinfo[test] print(test + ", " + info), for i in times: print(", " + str(i)), print ("\n"), # show_csv(allinfo_Expand, "Expand") # show_csv(allinfo_Remove, "Remove") # show_csv(allinfo_InsertAfter, "InsertAfter") # show_csv(allinfo_InsertBefore, "InsertBefore") import numpy as np import matplotlib import matplotlib.pyplot as plt matplotlib.rcParams.update({'font.size': 20}) Expand_cp3_5 = np.array(allinfo_Expand[tests[0]]) Expand_cp1_5 = np.array(allinfo_Expand[tests[1]]) Expand_cp3_9 = np.array(allinfo_Expand[tests[2]]) Expand_cp1_9 = np.array(allinfo_Expand[tests[3]]) Expand_noseqcon_cp3_5 = np.array(allinfo_Expand[tests[4]]) Expand_noseqcon_cp1_5 = np.array(allinfo_Expand[tests[5]]) Expand_noseqcon_cp3_9 = np.array(allinfo_Expand[tests[6]]) Expand_noseqcon_cp1_9 = np.array(allinfo_Expand[tests[7]]) Expand_nolambda_cp3_5 = np.array(allinfo_Expand[tests[8]]) Expand_nolambda_cp1_5 = np.array(allinfo_Expand[tests[9]]) Expand_nolambda_cp3_9 = np.array(allinfo_Expand[tests[10]]) Expand_nolambda_cp1_9 = np.array(allinfo_Expand[tests[11]]) Expand_cp3_5_mean = np.mean(Expand_cp3_5) Expand_cp3_5_std = np.std(Expand_cp3_5) Expand_cp1_5_mean = np.mean(Expand_cp1_5) Expand_cp1_5_std = np.std(Expand_cp1_5) Expand_cp3_9_mean = 
np.mean(Expand_cp3_9) Expand_cp3_9_std = np.std(Expand_cp3_9) Expand_cp1_9_mean = np.mean(Expand_cp1_9) Expand_cp1_9_std = np.std(Expand_cp1_9) Expand_noseqcon_cp3_5_mean = np.mean(Expand_noseqcon_cp3_5) Expand_noseqcon_cp3_5_std = np.std(Expand_noseqcon_cp3_5) Expand_noseqcon_cp1_5_mean = np.mean(Expand_noseqcon_cp1_5) Expand_noseqcon_cp1_5_std = np.std(Expand_noseqcon_cp1_5) Expand_noseqcon_cp3_9_mean = np.mean(Expand_noseqcon_cp3_9) Expand_noseqcon_cp3_9_std = np.std(Expand_noseqcon_cp3_9) Expand_noseqcon_cp1_9_mean = np.mean(Expand_noseqcon_cp1_9) Expand_noseqcon_cp1_9_std = np.std(Expand_noseqcon_cp1_9) Expand_nolambda_cp3_5_mean = np.mean(Expand_nolambda_cp3_5) Expand_nolambda_cp3_5_std = np.std(Expand_nolambda_cp3_5) Expand_nolambda_cp1_5_mean =
np.mean(Expand_nolambda_cp1_5)
numpy.mean
from fractions import Fraction

from numpy import diff


def _bjorklund(subsequences):
    """
    Distribute onsets as evenly as possible by modifying subsequences
    """
    while True:
        remainder = subsequences[-1]
        distributed = []
        while subsequences and subsequences[-1] == remainder:
            distributed.append(subsequences.pop())
        if not subsequences or len(distributed) <= 1:
            subsequences.extend(distributed)
            return subsequences
        for i in range(min(len(distributed), len(subsequences))):
            subsequences[i].extend(distributed.pop())
        subsequences.extend(distributed)


def euclidean_rhythm(num_onsets, num_beats):
    """
    Evenly distributes a given number of onsets in a grid of the given size
    """
    sequence = [True] * num_onsets + [False] * (num_beats - num_onsets)
    return sum(_bjorklund([[b] for b in sequence]), [])


def rotate_sequence(sequence):
    return sequence[1:] + sequence[:1]


def rotate_to_onset(sequence, num_iter):
    if not any(sequence):
        return sequence
    for _ in range(num_iter):
        sequence = rotate_sequence(sequence)
        while not sequence[0]:
            sequence = rotate_sequence(sequence)
    return sequence


def sequence_to_time_duration(sequence):
    result = []
    time = None
    duration = 0
    for i, b in enumerate(sequence):
        if b:
            if time is not None:
                result.append((time, duration))
            duration = 0
            time = i
        duration += 1
    result.append((time, duration))
    return result


def time_duration_to_sequence(times_durations):
    end_time = 0
    for t, d in times_durations:
        end_time = max(end_time, t + d)
    sequence = [False] * end_time
    for t, _ in times_durations:
        sequence[t] = True
    return sequence


def sequence_to_string(sequence):
    return "".join([".x"[int(b)] for b in sequence])


def rotate_string(string):
    return string[-1] + string[:-1]


def pergen_rhythm(num_onsets, generator, period=1):
    beats = sorted([(generator * i) % period for i in range(num_onsets)] + [period])
    times = beats[:num_onsets]
    durations = diff(beats)
    return list(zip(times, durations))


def geometric_rhythm(num_onsets, initial, factor):
    """
    Onsets in a geometric progression
    """
    time = initial
    times = []
    for _ in range(num_onsets + 1):
        times.append(time)
        time *= factor
    times.sort()
    result = []
    time = Fraction(0)
    for duration in
diff(times)
numpy.diff
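# Usage sketch for the rhythm functions above, assuming the module is
# importable (or the snippet is appended to it). E(3, 8) spreads 3 onsets
# over 8 beats as evenly as possible -- the classic tresillo -- though the
# exact rotation returned can differ between Bjorklund implementations.
pattern = euclidean_rhythm(3, 8)
print(sequence_to_string(pattern))         # e.g. 'x..x..x.'
print(sequence_to_time_duration(pattern))  # e.g. [(0, 3), (3, 3), (6, 2)]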
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Fri Jan 31 15:50:31 2020 @author: Dr. <NAME> European Space Agency (ESA) European Space Research and Technology Centre (ESTEC) Keplerlaan 1, 2201 AZ Noordwijk, The Netherlands Email: <EMAIL> GitHub: mnguenther Twitter: m_n_guenther Web: www.mnguenther.com """ from __future__ import print_function, division, absolute_import #::: plotting settings import seaborn as sns sns.set(context='paper', style='ticks', palette='deep', font='sans-serif', font_scale=1.5, color_codes=True) sns.set_style({"xtick.direction": "in","ytick.direction": "in"}) sns.set_context(rc={'lines.markeredgewidth': 1}) #::: modules import numpy as np import matplotlib.pyplot as plt import os import warnings #::: specific modules try: from wotan import flatten except ImportError: pass #::: my modules import allesfitter from allesfitter.lightcurves import eclipse_width_smart from allesfitter.exoworlds_rdx.lightcurves.index_transits import get_tmid_observed_transits ############################################################################### #::: prepare TTV fit (if chosen) ############################################################################### def prepare_ttv_fit(datadir, ax=None): ''' this must be run *after* reduce_phot_data() ''' ax0 = ax alles = allesfitter.allesclass(datadir) window = alles.settings['fast_fit_width'] if not os.path.exists( os.path.join(datadir,'ttv_preparation') ): os.makedirs(os.path.join(datadir,'ttv_preparation')) with open(os.path.join(datadir,'ttv_preparation','ttv_initial_guess_params.csv'),'w') as f: f.write('') def plot_all_transits_color_coded(): for inst in alles.settings['inst_phot']: time = alles.data[inst]['time'] for companion in alles.settings['companions_phot']: ind = [] for i, t in enumerate(alles.data[companion+'_tmid_observed_transits']): ind += list( np.where((time >= (t - window/2.)) & (time <= (t + window/2.)))[0] ) ax.plot( alles.data[inst]['time'][ind], alles.data[inst]['flux'][ind], ls='none', marker='.', label=companion ) for companion in alles.settings['companions_phot']: with open(os.path.join(datadir,'ttv_preparation','ttv_initial_guess_params.csv'),'a') as f: f.write('#TTV companion '+companion+',,,,,\n') #---------------------------------------------------------------------- #::: get combined data from all instruments #---------------------------------------------------------------------- all_times = [] all_flux = [] for inst in alles.settings['inst_phot']: all_times += list(alles.data[inst]['time']) all_flux += list(alles.data[inst]['flux']) ind_sort = np.argsort(all_times) all_times = np.array(all_times)[ind_sort] all_flux = np.array(all_flux)[ind_sort] #---------------------------------------------------------------------- #::: get eclipse window #---------------------------------------------------------------------- alles.initial_guess_params_median[companion+'_epoch'] eclipse_width = eclipse_width_smart(alles.initial_guess_params_median[companion+'_period'], alles.initial_guess_params_median[companion+'_rr'], alles.initial_guess_params_median[companion+'_rsuma'], alles.initial_guess_params_median[companion+'_cosi'], alles.initial_guess_params_median[companion+'_f_s'], alles.initial_guess_params_median[companion+'_f_c'], )[0] #---------------------------------------------------------------------- #::: compute tmid, ttv_guess and make per-transit-plots #---------------------------------------------------------------------- tmids = [] alles.data[companion+'_tmid_observed_transits'] = 
get_tmid_observed_transits(all_times,alles.initial_guess_params_median[companion+'_epoch'],alles.initial_guess_params_median[companion+'_period'],alles.settings['fast_fit_width']) N = len(alles.data[companion+'_tmid_observed_transits']) fig, axes = plt.subplots(N, 1, figsize=(6,4*N), sharey=True, tight_layout=True) for i, t in enumerate(alles.data[companion+'_tmid_observed_transits']): ind_tr1 = np.where((all_times >= (t - window/2.)) & (all_times <= (t + window/2.)))[0] tr_times = all_times[ind_tr1] tr_flux = all_flux[ind_tr1] t_exp = np.median(np.diff(tr_times)) N_points_in_eclipse = int(eclipse_width/t_exp) try: trend = flatten(tr_times, tr_flux, window_length=eclipse_width/2., method='biweight', return_trend=True)[1] tmid = np.median( tr_times[ np.argsort(trend)[0:int(N_points_in_eclipse/2.)] ] ) except: warnings.warn('Install wotan for improved performance of prepare_ttv_fit().') trend = None tmid = np.median( tr_times[ np.argsort(tr_times)[0:int(N_points_in_eclipse/2.)] ] ) ttv_guess = tmid - t tmids.append(tmid) ax = axes[i] ax.plot(tr_times, tr_flux, 'b.', rasterized=True) if trend is not None: ax.plot(tr_times, trend, 'r-') ax.axvline(t,c='grey',ls='--',label='linear prediction') ax.axvline(tmid,c='r',ls='--',label='flux minimum') ax.set(xlabel='Time', ylabel='Flux', xlim=[t-window/2., t+window/2.]) ax.text(0.95,0.95,'Transit '+str(i+1), va='top', ha='right', transform=ax.transAxes) with open(os.path.join(datadir,'ttv_preparation','ttv_initial_guess_params.csv'),'a') as f: f.write(companion+'_ttv_transit_'+str(i+1)+','+np.format_float_positional(ttv_guess,4)+',1,uniform '+np.format_float_positional(ttv_guess-0.01,4)+' '+np.format_float_positional(ttv_guess+0.01,4)+',TTV$_\mathrm{'+companion+';'+str(i+1)+'}$,d\n') axes[0].legend() fig.savefig(os.path.join(datadir,'ttv_preparation','ttv_preparation_'+companion+'_per_transit.pdf'), bbox_inches='tight') plt.close(fig) tmids = np.array(tmids) #---------------------------------------------------------------------- #::: ttv guess 0-C plot #---------------------------------------------------------------------- nr = np.array([ int(np.round( (t-tmids[0]) / alles.initial_guess_params_median[companion+'_period'] )) for t in tmids ]) #get corresponding transit number nr -= int(nr[-1]/2.) 
#shift into the middle of the data set period_mean, epoch_mean = np.polyfit(nr, tmids, 1) fig, axes = plt.subplots(2,1,figsize=(6,8),tight_layout=True,sharex=True) axes[0].plot(nr, tmids, 'bo', label='Companion '+companion) axes[0].plot(nr, epoch_mean + nr * period_mean, 'b-') axes[0].set(xlabel='Nr.', ylabel='Transit mid-time') axes[0].legend() axes[1].plot(nr, tmids-(epoch_mean + nr * period_mean), 'bo') axes[1].axhline(0,c='grey',ls='--') fig.savefig(os.path.join(datadir,'ttv_preparation','ttv_preparation_'+companion+'_oc.pdf'), bbox_inches='tight') period_dev = np.abs( (period_mean-alles.initial_guess_params_median[companion+'_period'])/alles.initial_guess_params_median[companion+'_period'] ) epoch_dev = np.abs( (epoch_mean-alles.initial_guess_params_median[companion+'_epoch'])/alles.initial_guess_params_median[companion+'_epoch'] ) print('\nCompanion', companion) print('Initial guess for mean period and epoch:') print(np.format_float_positional(alles.initial_guess_params_median[companion+'_period']), np.format_float_positional(alles.initial_guess_params_median[companion+'_epoch'])) print('New estimate for mean period and epoch:') print(np.format_float_positional(period_mean,4), np.format_float_positional(epoch_mean,4)) # print('Deviation from another:') # print(np.format_float_positional(period_dev,4), # np.format_float_positional(epoch_dev,4)) if (period_dev > 0.01) or (epoch_dev > 0.01): print('\n! Consider updating your initial guess to these new estimated mean values.') print('\n! If you do, then you must rerun this code.') else: print('\n! Looks great! You are ready to fit.') #---------------------------------------------------------------------- #::: full lightcurve plot #---------------------------------------------------------------------- flux_min =
np.nanmin(all_flux)
numpy.nanmin
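# A stripped-down sketch of the O-C ("observed minus calculated") step in
# prepare_ttv_fit() above: fit a linear ephemeris to observed transit
# mid-times and inspect the residuals. The mid-times below are synthetic.
import numpy as np

period_true, epoch_true = 3.14, 2458000.0
nr = np.arange(-3, 4)                         # transit numbers
tmids = epoch_true + nr * period_true + np.random.normal(0, 1e-3, nr.size)

# Same call as in the source: slope = mean period, intercept = mean epoch.
period_mean, epoch_mean = np.polyfit(nr, tmids, 1)
oc = tmids - (epoch_mean + nr * period_mean)  # O-C residuals [days]

print(period_mean, epoch_mean)
print(oc)  # scatter ~1e-3 d; a coherent trend would hint at real TTVs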
import numpy as np
import math


def make_triangle(sx, sy, dx, dy, delta):
    '''Create a right triangle, aligned with the X and Y axes, with
    x-side size sx and y-side size sy. The triangle is offset by dx and dy.'''
    x1 = np.arange(0, sx, delta)
    y1 = np.zeros(x1.shape)
    y2 = np.arange(0, sy, delta)
    x2 = np.zeros(y2.shape)
    x3 = np.arange(0, sx, delta)
    y3 = sy - x3 * sy / sx
    xs = np.hstack([x1, x2, x3]) - dx
    ys = np.hstack([y1, y2, y3]) - dy
    return xs, ys


def make_tri_pyramid(sx, sy, sz, dx, dy, dz, delta):
    '''Create a right triangular pyramid, aligned with the X and Y axes,
    with base x-side of size sx and base y-side of size sy. The pyramid
    has height sz and is offset by dx, dy and dz.'''
    points = []
    for z in np.arange(0, sz, delta):
        ai = sx - z * sx / sz
        bi = sy - z * sy / sz
        xs, ys = make_triangle(ai, bi, dx, dy, delta)
        points.append((xs, ys, z * np.ones(xs.shape)))
    xs = np.hstack([x for x, y, z in points])
    ys = np.hstack([y for x, y, z in points])
    zs = np.hstack([z for x, y, z in points]) - dz
    points = np.vstack([xs, ys, zs]).T
    return points


def make_tri_pyramid_footprint(sx, sy, sz, dx, dy, dz):
    '''Create the footprint of a pyramid created by make_tri_pyramid'''
    footprint = np.array([
        [0, 0, 0],
        [0, sy, 0],
        [sx, 0, 0],
        [0, 0, 0],
    ])
    footprint[:, 0] -= dx
    footprint[:, 1] -= dy
    footprint[:, 2] -= dz
    return footprint


def make_tri_pyramid_with_base(side, delta, offset):
    '''Create a pyramid as per make_tri_pyramid, surrounded by a
    triangular flat base.'''
    rng = np.random.RandomState(0)
    sx = side / 2
    sy = side
    sz = side / 4
    dx = offset[0] + side / 2
    dy = offset[1] + side / 2
    dz = offset[2]
    points = make_tri_pyramid(sx, sy, sz, dx, dy, dz, delta)
    _add_noise(points, 0.1, rng)
    for s in np.arange(0, side * 0.05, delta):
        xs, ys = make_triangle(sx * (1 + s), sy * (1 + s), dx + s, dy + s, delta)
        zs = np.zeros(xs.shape) - dz
        tmp = np.vstack([xs, ys, zs]).T
        points = np.vstack([points, tmp])
    footprint = make_tri_pyramid_footprint(sx, sy, sz, dx, dy, dz)
    return points, footprint


def _add_noise(points, size, rng):
    '''Add uniform noise of the given size to an array of points'''
    points += (rng.rand(points.shape[0], points.shape[1]) - 0.5) * size


def perpendicular_2d(a):
    '''Create a vector perpendicular to the original'''
    b = np.zeros(a.shape)
    b[0] = -a[1]
    b[1] = a[0]
    return b


def rotation_around_axis(axis, theta):
    '''
    Return the rotation matrix associated with counterclockwise rotation
    about the given axis by theta radians.
''' axis = np.asarray(axis) theta = np.asarray(theta) axis = axis / math.sqrt(np.dot(axis, axis)) a = math.cos(theta / 2) b, c, d = -axis * math.sin(theta / 2) aa, bb, cc, dd = a * a, b * b, c * c, d * d bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)], [2 * (bc - ad), aa+cc-bb-dd, 2 * (cd + ab)], [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]]) def make_half_red_stick(point_from, point_to, width=0.04, num_pts_per_line=50, num_lines_per_stick=25): ''' Make a hollow red-white-red-white stick ''' point_from = np.asarray(point_from, dtype=float)[:3] point_to = np.asarray(point_to, dtype=float)[:3] length = np.linalg.norm(point_to - point_from) width = length * 0.04 axis = (point_to - point_from) * 1.0 / length origin = perpendicular_2d(axis) * width points = np.zeros((num_pts_per_line * num_lines_per_stick, 6)) points[:, 3:6] = 255 idx = 0 unitline = np.linspace(0, 1, num_pts_per_line) for theta in np.linspace(0, math.pi, num_lines_per_stick): src = np.dot(rotation_around_axis(axis, theta), origin) # straight slope [0, 1] line =
np.array((unitline, unitline, unitline))
numpy.array
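# A quick numerical check of rotation_around_axis() above (the quaternion
# form of the axis-angle / Rodrigues rotation): rotating x-hat by 90 degrees
# counterclockwise about z-hat must give y-hat. Assumes the module above is
# importable.
import math
import numpy as np

R = rotation_around_axis([0, 0, 1], math.pi / 2)
v = np.dot(R, np.array([1.0, 0.0, 0.0]))
print(np.round(v, 6))  # [0. 1. 0.]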
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 27 14:12:12 2018

Switchback square

@author: oddvi
"""
import matplotlib.pyplot as plt
import numpy as np
import shapely.geometry
import shapely.affinity
import shapely.ops

import patternGenerators as gen


def make_square_switchback_gen_reg(cut_width, flexure_width, junction_length,
                                   edge_space, num_flex, side_cut='default'):
    """Generate the vertices of one unit cell of a square switchback cut
    pattern from the cut width, flexure width, junction length and edge
    spacing."""
    a = cut_width; b = flexure_width; c = junction_length; d = edge_space
    if side_cut == 'default':  # x displacement along diagonal cut
        ax = cut_width / (2**0.5) / 2
    else:
        ax = side_cut
    dx = a + b  # displacement in x direction
    dy = dx     # displacement in y direction
    h0 = a + b/2 + c  # height in triangle
    l1 = b/2          # height baseline -> flexure bottom
    l2 = a + b/2      # height baseline -> flexure top
    x = np.array([])
    y = np.array([])
    x = np.append(x, 0)            # 0
    y = np.append(y, h0)           # 0
    x = np.append(x, -h0+l2+ax/2)  # 1
    y = np.append(y, l2+ax/2)      # 1
    x = np.append(x, -h0+l2+ax)    # 2
    y = np.append(y, l2)           # 2
    x = np.append(x, -h0+ax)       # 3
    y = np.append(y, 0)            # 3
    x =
np.append(x, h0-ax)
numpy.append
""" Mallett and Yuksel (2019) - Reflectance Recovery ================================================ Defines the objects for reflectance recovery, i.e. spectral upsampling, using *Mallett and Yuksel (2019)* method: - :func:`colour.recovery.spectral_primary_decomposition_Mallett2019` - :func:`colour.recovery.RGB_to_sd_Mallett2019` References ---------- - :cite:`Mallett2019` : <NAME>., & <NAME>. (2019). Spectral Primary Decomposition for Rendering with sRGB Reflectance. Eurographics Symposium on Rendering - DL-Only and Industry Track, 7 pages. doi:10.2312/SR.20191216 """ from __future__ import annotations import numpy as np from scipy.linalg import block_diag from scipy.optimize import Bounds, LinearConstraint, minimize from colour.colorimetry import ( MultiSpectralDistributions, SpectralDistribution, handle_spectral_arguments, ) from colour.models import RGB_Colourspace from colour.hints import ArrayLike, Callable, Dict, Optional, Tuple from colour.recovery import MSDS_BASIS_FUNCTIONS_sRGB_MALLETT2019 from colour.utilities import to_domain_1 __author__ = "Colour Developers" __copyright__ = "Copyright 2013 Colour Developers" __license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause" __maintainer__ = "Colour Developers" __email__ = "<EMAIL>" __status__ = "Production" __all__ = [ "spectral_primary_decomposition_Mallett2019", "RGB_to_sd_Mallett2019", ] def spectral_primary_decomposition_Mallett2019( colourspace: RGB_Colourspace, cmfs: Optional[MultiSpectralDistributions] = None, illuminant: Optional[SpectralDistribution] = None, metric: Callable = np.linalg.norm, metric_args: Tuple = tuple(), optimisation_kwargs: Optional[Dict] = None, ) -> MultiSpectralDistributions: """ Perform the spectral primary decomposition as described in *Mallett and Yuksel (2019)* for given *RGB* colourspace. Parameters ---------- colourspace *RGB* colourspace. cmfs Standard observer colour matching functions, default to the *CIE 1931 2 Degree Standard Observer*. illuminant Illuminant spectral distribution, default to *CIE Standard Illuminant D65*. metric Function to be minimised, i.e. the objective function. ``metric(basis, *metric_args) -> float`` where ``basis`` is three reflectances concatenated together, each with a shape matching ``shape``. metric_args Additional arguments passed to ``metric``. optimisation_kwargs Parameters for :func:`scipy.optimize.minimize` definition. Returns ------- :class:`colour.MultiSpectralDistributions` Basis functions for given *RGB* colourspace. References ---------- :cite:`Mallett2019` Notes ----- - In-addition to the *BT.709* primaries used by the *sRGB* colourspace, :cite:`Mallett2019` tried *BT.2020*, *P3 D65*, *Adobe RGB 1998*, *NTSC (1987)*, *Pal/Secam*, *ProPhoto RGB*, and *Adobe Wide Gamut RGB* primaries, every one of which encompasses a larger (albeit not-always-enveloping) set of *CIE L\\*a\\*b\\** colours than BT.709. Of these, only *Pal/Secam* produces a feasible basis, which is relatively unsurprising since it is very similar to *BT.709*, whereas the others are significantly larger. Examples -------- >>> from colour import MSDS_CMFS, SDS_ILLUMINANTS, SpectralShape >>> from colour.models import RGB_COLOURSPACE_PAL_SECAM >>> from colour.utilities import numpy_print_options >>> cmfs = ( ... MSDS_CMFS['CIE 1931 2 Degree Standard Observer']. ... copy().align(SpectralShape(360, 780, 10)) ... ) >>> illuminant = SDS_ILLUMINANTS['D65'].copy().align(cmfs.shape) >>> msds = spectral_primary_decomposition_Mallett2019( ... 
RGB_COLOURSPACE_PAL_SECAM, cmfs, illuminant, optimisation_kwargs={ ... 'options': {'ftol': 1e-5} ... } ... ) >>> with numpy_print_options(suppress=True): ... print(msds) # doctest: +SKIP [[ 360. 0.3395134... 0.3400214... 0.3204650...] [ 370. 0.3355246... 0.3338028... 0.3306724...] [ 380. 0.3376707... 0.3185578... 0.3437715...] [ 390. 0.3178866... 0.3351754... 0.3469378...] [ 400. 0.3045154... 0.3248376... 0.3706469...] [ 410. 0.2935652... 0.2919463... 0.4144884...] [ 420. 0.1875740... 0.1853729... 0.6270530...] [ 430. 0.0167983... 0.054483 ... 0.9287186...] [ 440. 0. ... 0. ... 1. ...] [ 450. 0. ... 0. ... 1. ...] [ 460. 0. ... 0. ... 1. ...] [ 470. 0. ... 0.0458044... 0.9541955...] [ 480. 0. ... 0.2960917... 0.7039082...] [ 490. 0. ... 0.5042592... 0.4957407...] [ 500. 0. ... 0.6655795... 0.3344204...] [ 510. 0. ... 0.8607541... 0.1392458...] [ 520. 0. ... 0.9999998... 0.0000001...] [ 530. 0. ... 1. ... 0. ...] [ 540. 0. ... 1. ... 0. ...] [ 550. 0. ... 1. ... 0. ...] [ 560. 0. ... 0.9924229... 0. ...] [ 570. 0. ... 0.9970703... 0.0025673...] [ 580. 0.0396002... 0.9028231... 0.0575766...] [ 590. 0.7058973... 0.2941026... 0. ...] [ 600. 1. ... 0. ... 0. ...] [ 610. 1. ... 0. ... 0. ...] [ 620. 1. ... 0. ... 0. ...] [ 630. 1. ... 0. ... 0. ...] [ 640. 0.9835925... 0.0100166... 0.0063908...] [ 650. 0.7878949... 0.1265097... 0.0855953...] [ 660. 0.5987994... 0.2051062... 0.1960942...] [ 670. 0.4724493... 0.2649623... 0.2625883...] [ 680. 0.3989806... 0.3007488... 0.3002704...] [ 690. 0.3666586... 0.3164003... 0.3169410...] [ 700. 0.3497806... 0.3242863... 0.3259329...] [ 710. 0.3563736... 0.3232441... 0.3203822...] [ 720. 0.3362624... 0.3326209... 0.3311165...] [ 730. 0.3245015... 0.3365982... 0.3389002...] [ 740. 0.3335520... 0.3320670... 0.3343808...] [ 750. 0.3441287... 0.3291168... 0.3267544...] [ 760. 0.3343705... 0.3330132... 0.3326162...] [ 770. 0.3274633... 0.3305704... 0.3419662...] [ 780. 0.3475263... 0.3262331... 0.3262404...]] """ cmfs, illuminant = handle_spectral_arguments(cmfs, illuminant) N = len(cmfs.shape) R_to_XYZ = np.transpose( illuminant.values[..., np.newaxis] * cmfs.values / (np.sum(cmfs.values[:, 1] * illuminant.values)) ) R_to_RGB = np.dot(colourspace.matrix_XYZ_to_RGB, R_to_XYZ) basis_to_RGB = block_diag(R_to_RGB, R_to_RGB, R_to_RGB) primaries = np.identity(3).reshape(9) # Ensure that the reflectances correspond to the correct RGB colours. colour_match = LinearConstraint(basis_to_RGB, primaries, primaries) # Ensure that the reflectances are bounded by [0, 1]. energy_conservation = Bounds(np.zeros(3 * N), np.ones(3 * N)) # Ensure that the sum of the three bases is bounded by [0, 1]. 
sum_matrix = np.transpose(np.tile(np.identity(N), (3, 1))) sum_constraint = LinearConstraint(sum_matrix, np.zeros(N), np.ones(N)) optimisation_settings = { "method": "SLSQP", "constraints": [colour_match, sum_constraint], "bounds": energy_conservation, "options": { "ftol": 1e-10, }, } if optimisation_kwargs is not None: optimisation_settings.update(optimisation_kwargs) result = minimize( metric, args=metric_args, x0=np.zeros(3 * N), **optimisation_settings ) basis_functions = np.transpose(result.x.reshape(3, N)) return MultiSpectralDistributions( basis_functions, cmfs.shape.range(), name=f"Basis Functions - {colourspace.name} - Mallett (2019)", labels=("red", "green", "blue"), ) def RGB_to_sd_Mallett2019( RGB: ArrayLike, basis_functions: MultiSpectralDistributions = MSDS_BASIS_FUNCTIONS_sRGB_MALLETT2019, ) -> SpectralDistribution: """ Recover the spectral distribution of given *RGB* colourspace array using *Mallett and Yuksel (2019)* method. Parameters ---------- RGB *RGB* colourspace array. basis_functions Basis functions for the method. The default is to use the built-in *sRGB* basis functions, i.e. :attr:`colour.recovery.MSDS_BASIS_FUNCTIONS_sRGB_MALLETT2019`. Returns ------- :class:`colour.SpectralDistribution` Recovered reflectance. References ---------- :cite:`Mallett2019` Notes ----- - In-addition to the *BT.709* primaries used by the *sRGB* colourspace, :cite:`Mallett2019` tried *BT.2020*, *P3 D65*, *Adobe RGB 1998*, *NTSC (1987)*, *Pal/Secam*, *ProPhoto RGB*, and *Adobe Wide Gamut RGB* primaries, every one of which encompasses a larger (albeit not-always-enveloping) set of *CIE L\\*a\\*b\\** colours than BT.709. Of these, only *Pal/Secam* produces a feasible basis, which is relatively unsurprising since it is very similar to *BT.709*, whereas the others are significantly larger. Examples -------- >>> from colour import MSDS_CMFS, SDS_ILLUMINANTS, XYZ_to_sRGB >>> from colour.colorimetry import sd_to_XYZ_integration >>> from colour.recovery import SPECTRAL_SHAPE_sRGB_MALLETT2019 >>> from colour.utilities import numpy_print_options >>> XYZ = np.array([0.20654008, 0.12197225, 0.05136952]) >>> RGB = XYZ_to_sRGB(XYZ, apply_cctf_encoding=False) >>> cmfs = ( ... MSDS_CMFS['CIE 1931 2 Degree Standard Observer']. ... copy().align(SPECTRAL_SHAPE_sRGB_MALLETT2019) ... ) >>> illuminant = SDS_ILLUMINANTS['D65'].copy().align(cmfs.shape) >>> sd = RGB_to_sd_Mallett2019(RGB) >>> with numpy_print_options(suppress=True): ... sd # doctest: +ELLIPSIS SpectralDistribution([[ 380. , 0.1735531...], [ 385. , 0.1720357...], [ 390. , 0.1677721...], [ 395. , 0.1576605...], [ 400. , 0.1372829...], [ 405. , 0.1170849...], [ 410. , 0.0895694...], [ 415. , 0.0706232...], [ 420. , 0.0585765...], [ 425. , 0.0523959...], [ 430. , 0.0497598...], [ 435. , 0.0476057...], [ 440. , 0.0465079...], [ 445. , 0.0460337...], [ 450. , 0.0455839...], [ 455. , 0.0452872...], [ 460. , 0.0450981...], [ 465. , 0.0448895...], [ 470. , 0.0449257...], [ 475. , 0.0448987...], [ 480. , 0.0446834...], [ 485. , 0.0441372...], [ 490. , 0.0417137...], [ 495. , 0.0373832...], [ 500. , 0.0357657...], [ 505. , 0.0348263...], [ 510. , 0.0341953...], [ 515. , 0.0337683...], [ 520. , 0.0334979...], [ 525. , 0.0332991...], [ 530. , 0.0331909...], [ 535. , 0.0332181...], [ 540. , 0.0333387...], [ 545. , 0.0334970...], [ 550. , 0.0337381...], [ 555. , 0.0341847...], [ 560. , 0.0346447...], [ 565. , 0.0353993...], [ 570. , 0.0367367...], [ 575. , 0.0392007...], [ 580. , 0.0445902...], [ 585. , 0.0625633...], [ 590. , 0.2965381...], [ 595. 
, 0.4215576...], [ 600. , 0.4347139...], [ 605. , 0.4385134...], [ 610. , 0.4385184...], [ 615. , 0.4385249...], [ 620. , 0.4374694...], [ 625. , 0.4384672...], [ 630. , 0.4368251...], [ 635. , 0.4340867...], [ 640. , 0.4303219...], [ 645. , 0.4243257...], [ 650. , 0.4159482...], [ 655. , 0.4057443...], [ 660. , 0.3919874...], [ 665. , 0.3742784...], [ 670. , 0.3518421...], [ 675. , 0.3240127...], [ 680. , 0.2955145...], [ 685. , 0.2625658...], [ 690. , 0.2343423...], [ 695. , 0.2174830...], [ 700. , 0.2060461...], [ 705. , 0.1977437...], [ 710. , 0.1916846...], [ 715. , 0.1861020...], [ 720. , 0.1823908...], [ 725. , 0.1807923...], [ 730. , 0.1795571...], [ 735. , 0.1785623...], [ 740. , 0.1775758...], [ 745. , 0.1771614...], [ 750. , 0.1767431...], [ 755. , 0.1764319...], [ 760. , 0.1762597...], [ 765. , 0.1762209...], [ 770. , 0.1761803...], [ 775. , 0.1761195...], [ 780. , 0.1760763...]], interpolator=SpragueInterpolator, interpolator_kwargs={}, extrapolator=Extrapolator, extrapolator_kwargs={...}) >>> sd_to_XYZ_integration(sd, cmfs, illuminant) / 100 ... # doctest: +ELLIPSIS array([ 0.2065436..., 0.1219996..., 0.0513764...]) """ RGB = to_domain_1(RGB) sd = SpectralDistribution( np.dot(RGB,
np.transpose(basis_functions.values)
numpy.transpose
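# A shape-level sketch of the linear system assembled in
# spectral_primary_decomposition_Mallett2019() above. With N wavelength
# bins, each basis function is an N-vector; stacking the three of them
# gives a 3N-vector, and block_diag(R_to_RGB, ...) maps it to the 9 RGB
# coordinates of the three primaries. The matrices here are random
# placeholders with the right shapes, not real CMF/illuminant data.
import numpy as np
from scipy.linalg import block_diag

N = 43                                 # e.g. 360..780 nm in 10 nm steps
R_to_RGB = np.random.rand(3, N)        # stands in for the CMF/illuminant map
basis_to_RGB = block_diag(R_to_RGB, R_to_RGB, R_to_RGB)

basis = np.random.rand(3 * N)          # three concatenated reflectances
print(basis_to_RGB.shape)              # (9, 129)
print(basis_to_RGB @ basis)            # RGB of the red, green, blue primaries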
import numpy as np from numpy.testing import assert_equal from .common import get_four_level_data from glmtools.io.traversal import OneToManyTraversal def get_four_level_data_traversal(): d = get_four_level_data() entity_vars = ('storm_id', 'flash_id', 'stroke_id', 'trig_id') parent_vars = ('flash_parent_storm_id', 'stroke_parent_flash_id', 'trig_parent_stroke_id') traversal = OneToManyTraversal(d, entity_vars, parent_vars) return d, traversal def test_count_children(): d, traversal = get_four_level_data_traversal() # validation data storm_child_count = d['storm_child_flash_count'].data flash_child_count = d['flash_child_stroke_count'].data stroke_child_count = d['stroke_child_trig_count'].data storm_child_stroke_count = d['storm_child_stroke_count'].data storm_child_trig_count = d['storm_child_trig_count'].data n_storms = traversal.count_children('storm_id')[0] assert_equal(storm_child_count, n_storms) n_flashes = traversal.count_children('flash_id')[0] assert_equal(flash_child_count, n_flashes) n_strokes = traversal.count_children('stroke_id')[0] assert_equal(stroke_child_count, n_strokes) all_counts = traversal.count_children('storm_id', 'trig_id') assert_equal(storm_child_count, all_counts[0]) assert_equal(flash_child_count, all_counts[1]) assert_equal(stroke_child_count, all_counts[2]) grouper = d.groupby('trig_parent_storm_id').groups count = [len(grouper[eid]) if (eid in grouper) else 0 for eid in d['storm_id'].data] assert_equal(storm_child_trig_count, count) def test_replicate_parent_ids(): d, traversal = get_four_level_data_traversal() trig_parent_storm_ids = traversal.replicate_parent_ids('storm_id', 'trig_parent_stroke_id') trig_parent_flash_ids = traversal.replicate_parent_ids('flash_id', 'trig_parent_stroke_id') trig_parent_stroke_ids = traversal.replicate_parent_ids('stroke_id', 'trig_parent_stroke_id') assert_equal(d['trig_parent_storm_id'].data, trig_parent_storm_ids) assert_equal(d['trig_parent_flash_id'].data, trig_parent_flash_ids) assert_equal(d['trig_parent_stroke_id'].data, trig_parent_stroke_ids) def test_prune_from_middle(): d, traversal = get_four_level_data_traversal() reduced_stroke_id = [13,15,23] d = traversal.reduce_to_entities('stroke_id', reduced_stroke_id) reduced_storm_id = [2,] reduced_flash_id = [4,8] reduced_trig_id = [18,19,23,31] assert_equal(d['storm_id'].data, reduced_storm_id) assert_equal(d['flash_id'].data, reduced_flash_id) assert_equal(d['stroke_id'].data, reduced_stroke_id) assert_equal(d['trig_id'].data, reduced_trig_id) def test_prune_from_bottom(): d, traversal = get_four_level_data_traversal() trig_idx = slice(7,10) reduced_storm_id =
np.unique(d['trig_parent_storm_id'][trig_idx].data)
numpy.unique
import pytest
import numpy as np
import numpy.testing as npt


def test_shrink_mask():
    from ..apod import shrink_mask

    kernel_size = 3
    mask_size = 48

    mask =
np.ones((2 * mask_size, 2 * mask_size), bool)
numpy.ones
''' Turbulence analysis of incompressible fluids. ''' import networkx as nx import numpy as np import pdb import os import shutil from itertools import count import colorcet as cc import random import matplotlib.pyplot as plt from matplotlib.colors import LogNorm, Normalize import seaborn as sns from scipy.spatial.distance import pdist, squareform from scipy.signal import stft from scipy.fft import rfftn import statsmodels.api as sm import gds from gds.types import * from .fluid_projected import * folder = 'runs/turbulence' def solve(T=20, dt=0.01): if os.path.isdir(folder): shutil.rmtree(folder) os.mkdir(folder) n_triangles = list(range(2, 7)) energies = np.logspace(-1, 1.5, 5) for N in n_triangles: os.mkdir(f'{folder}/{N}') G = gds.triangular_lattice(m=1, n=N) N_e = len(G.edges()) y0 = np.random.uniform(low=1, high=2, size=N_e) for KE in energies: V, P = euler(G) y0_ = V.leray_project(y0) y0_ *= np.sqrt(N_e * KE / np.dot(y0_, y0_)) V.set_initial(y0=lambda e: y0_[V.X[e]]) sys = gds.couple({'V': V, 'P': P}) time, data = sys.solve(T, dt) with open(f'{folder}/{N}/{KE}.npy', 'wb') as f: np.save(f, data['V']) def analyze(foreach: Callable): if not os.path.isdir(folder): raise Exception('no data') n_triangles = [int(s) for s in os.listdir(folder)] energies = [float(s[:-4]) for s in os.listdir(f'{folder}/{n_triangles[0]}')] fig, axs = plt.subplots(nrows=len(n_triangles), ncols=len(energies), figsize=(len(energies)*2, len(n_triangles)*2)) for fig_i, N in enumerate(sorted(n_triangles)): for fig_j, KE in enumerate(sorted(energies)): with open(f'{folder}/{N}/{KE}.npy', 'rb') as f: data =
np.load(f)
numpy.load
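# A small sketch of the kinetic-energy rescaling used in solve() above:
# after projecting the initial velocity, it is scaled so that the mean
# squared edge velocity equals the target KE. Plain numpy stand-in, with
# no Leray projection here.
import numpy as np

N_e, KE = 12, 3.0
y0 = np.random.uniform(1, 2, N_e)
y0 *= np.sqrt(N_e * KE / np.dot(y0, y0))

print(np.dot(y0, y0) / N_e)  # 3.0: mean squared component equals KE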
#!/usr/bin/env python from __future__ import division, absolute_import, print_function import numpy as np import jams.const as const def cuntz_gleixner(idecdate, iGPP, iRd, iCa, iRa, igtot, sunrise, Vcyt=None, date0=False, V0starch=const.eps, R0starch=const.R13VPDB, R0cyt=const.R13VPDB, daynight=None, daylength=57600, Phi=0.3, s_resid=const.eps, betas=None, betap=0.75, epsa=4.4e-3, epsb=29.5e-3, epsg=20.0e-3, epst=-4.4e-3, epss=10.0e-3, epsp=1.0e-3, steady=False, Rass=False, Rm=False, Rchl=False, Rcyt=False, Rstarch=False, Rpyr=False, Rbio=False, Rphloem=False, Vstarch=False, ass13=False, disc=False, Rnew_starch=False, Rnew_cyt=False, fullmodel=True, julian=True, nocheck=False, starch_mol2g=None, V0starchg=const.eps): """ Calculates the Cuntz-Gleixner steady state and non-steady state models of 13C discrimiantion in the Calvin cycle. Definition ---------- def cuntz_gleixner(idecdate, iGPP, iRd, iCa, iRa, igtot, sunrise, Vcyt=None, date0=False, V0starch=const.eps, R0starch=const.R13VPDB, R0cyt=const.R13VPDB, daynight=None, daylength=57600, Phi=0.3, s_resid=const.eps, betas=None, betap=0.75, epsa=4.4e-3, epsb=29.5e-3, epsg=20.0e-3, epst=-4.4e-3, epss=10.0e-3, epsp=1.0e-3, steady=False, Rass=False, Rm=False, Rchl=False, Rcyt=False, Rstarch=False, Rpyr=False, Rbio=False, Rphloem=False, Vstarch=False, ass13=False, disc=False, Rnew_starch=False, Rnew_cyt=False, fullmodel=True, julian=True, nocheck=False, starch_mol2g=None, V0starchg=const.eps): Input ----- idecdate decimal date iGPP Gross Photosynthesis: GPP = A + Rd [umol(CO2)/m2s] iRd Leaf respiration: GPP - A [umol(CO2)/m2s] iCa Outside CO2 concentration [ppm=umol(CO2)/umol(air)] iRa 13C/12C ratio of outside CO2 concentration igtot Total conductance for CO2 from outside air to chloroplast [mol(CO2)/m2s] sunrise decial date of first sunrise in data set Input (only nss model) ----- Vcyt C-concentration of sucrose pool in cytoplasm [umol(C)/m2(leaf)] Optional Input -------------- date0 Start date of 1st time step (default: False) If False, take same time step as first time step in idecdate V0starch Initial C-concentration in Starch [umol(C)/m2(leaf)] (default: 1e-6) V0starchg Initial C-concentration in Starch [g(C)/gDW] (default: 1e-6) starch_mol2g Conversion factor from [umol(C)/m2(leaf)] to [g(C)/gDW] used for starch (default: None) If given and Vstarch==True then Vstarchg will be returned as well R0starch Initial 13C/12C ratio of starch pool (default: PDB) R0cyt Initial 13C/12C ratio of C in cytoplasm (default: PDB) daynight 1/0 array of day or night (default: False) If False, day is when gpp>0 daylength length of daylight [s] (default: 57600 = 16h) Phi Vc/Vo, ratio of carboxylation to oxygenation of Rudisco (default: 0.3) s_resid Residual starch concentration at end of night [umol(C)/m2(leaf)] (default: 1e-6) betas factor of leaf respiration transferred to biosynthesis (default: False) If False, betas is 3*gpp/max(gpp) Note: betas*(1-betap) <= 1: if betap=2/3 -> betas<3: if betap=5/6 -> betas < 6 betap fraction of respiration occuring during biosynthesis: min=2/3; max=4/5 (default: 3/4) epsa effective fractionation along gtot (default: 4.4e-3) epsb fractionation of Rubisco (default: 29.5e-3) epsg fractionation of photorespiration (default: 20e-3) epst equilibrium fractionation value for starch synthesis (default: -4.4e-3) epss fractionation of biosynthesis production (default: 10e-3) epsp fractionation of biosynthesis bifurcation (default: 1e-3) Parameter --------- steady If True, steady-state instead of non-steady-state model (default: 
False) Rass If True, output 13C/12C ratio of assimilated carbon (default: False) Rm If True, output 13C/12C ratio of chloroplast CO2 (default: False) Rchl If True, output 13C/12C ratio of sugars in chloroplast (default: False) Rcyt If True, output 13C/12C ratio of sugars in cytoplasm (default: False) Rstarch If True, output 13C/12C ratio of starch (default: False) Rpyr If True, output 13C/12C ratio of sugars at pyruvate pathway (default: False) Rbio If True, output 13C/12C ratio of biosynthesis products before bifurcation (default: False) Rphloem If True, output 13C/12C ratio of new phloem products (sugars & biosynthesis products) (default: False) Vstarch If True, output C-concentration in Starch [umol(C)/m2(leaf)] (default: False) ass13 If True, output 13C assimilation rate [umol(13C)/m2s] (default: False) disc If True, output Discrimination 1-Rass/Ra (default: False) Rnew_starch If True, output 13C/12C ratio of newly produced starch (default: False) Rnew_cyt If True, output 13C/12C ratio of newly produced sugars in cytoplasm (default: False) fullmodel If True, output all in the above order (default: True) julian If True, dates are given as Julian days, otherwise as decimal year (default: True) nocheck If True, do not check betap and betas ranges (default: False) Output ------ if fullmodel=True Rass, Rm, Rchl, Rcyt, Rstarch, Rpyr, Rbio, Rphloem, Vstarch, ass13, disc, Rnew_starch, Rnew_cyt and if Vstarch=True and starch_mol2g!=None Vstarchg Restrictions ------------ If at least one individual output parameter is True then fullmode=False. References ---------- <NAME>, <NAME>, <NAME> & <NAME>, Theoretical considerations about carbon isotope distribution in glucose of C3 plants, Functional Plant Biology 31, 857-877, 2004 <NAME>, <NAME>, <NAME>, <NAME> & <NAME>, Experimental evidence for diel variations of the carbon isotope composition in leaf, stem and phloem sap organic matter in Ricinus communis, Plant, Cell and Environment 31, 941-953, 2004 Examples -------- # steady state >>> adecdate = np.array([2008.918658925319050,2008.918772768671033,2008.918886612022106, ... 2008.919000455374089,2008.919114298724935,2008.919228142076918, ... 2008.919341985427991,2008.919455828779974,2008.919569672131956, ... 2008.919683515483030,2008.919797358835012,2008.919911202186086, ... 2008.920025045538068,2008.920138888888914,2008.920252732240897, ... 2008.920366575591970,2008.920480418943953,2008.920556314511941, ... 2008.920594262295026,2008.920708105647009,2008.920821948998992, ... 2008.920935792350065,2008.921049635702047,2008.921163479052893, ... 2008.921277322405103,2008.921391165755949,2008.921505009107932]) >>> gpp = np.array([0.000000000000,23.700827991217,22.449718259243,21.253578109071,20.222525197027, ... 19.503625355216,18.797132965271,18.102416224453,17.780887860470,17.491607940331, ... 17.207072197663,17.089915139494,17.995854647885,18.901914959729,19.681631460738, ... 19.681631460738,19.681631460738,0.000000000000,0.000000000000,0.000000000000, ... 0.000000000000,0.000000000000,0.000000000000,0.000000000000,0.000000000000, ... 0.000000000000] ) >>> Rd = np.array([0.511900000000,2.361686144687,2.743373026721,3.180029474251,3.476842651940, ... 3.259038512076,3.053641828083,2.860020793216,2.958750580931,3.083603451827, ... 3.213200496886,3.331826587704,3.352936200975,3.374166608865,3.392531460738, ... 3.392531460738,3.392531460738,1.025929405070,0.829676977143,0.633424549217, ... 0.437172122858,0.303515021488,0.547877741613,0.792240464668,1.036603184794, ... 
1.280965907360] ) >>> CO2air = np.array([620.902600000000,537.510500000000,608.806500000000,671.251000000000, ... 652.204000000000,560.157800000000,427.130100000000,395.276000000000, ... 427.000400000000,410.953300000000,386.943500000000,500.417500000000, ... 552.776800000000,515.865800000000,542.450400000000,692.503500000000, ... 656.423500000000,588.844100000000,675.156500000000,725.101900000000, ... 664.837000000000,598.080600000000,610.713600000000,487.087000000000, ... 531.921300000000,675.177700000000] ) >>> Ra = np.array([0.011067265443,0.011083081802,0.011071245659,0.011060401761,0.011063313320, ... 0.011080216316,0.011111970396,0.011122420992,0.011111174802,0.011116914764, ... 0.011125605614,0.011097923896,0.011079382516,0.011087211473,0.011083896499, ... 0.011057329511,0.011062335683,0.011072518834,0.011061590657,0.011053508863, ... 0.011061281634,0.011071628848,0.011069690431,0.011093962783,0.011086022577, ... 0.011059558971] ) >>> gtot = np.array([0.064395001124,0.074054920058,0.078085762302,0.078484156864,0.078127160737, ... 0.085209848990,0.088685679784,0.089611189047,0.088528095110,0.086087621579, ... 0.081901616151,0.076984314568,0.080693530135,0.084173028182,0.087005780100, ... 0.087005780100,0.087005780100,0.046798889383,0.042324852911,0.037583815518, ... 0.032460459750,0.028193059760,0.031985237181,0.035564641600,0.038983725824, ... 0.042282334176] ) >>> ndecdate = 2008.918772768670806 >>> V0starch = 60498.901260546168 >>> R0starch = 0.010949362493 >>> daynight = np.array([0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0]) >>> daylength = 56400. >>> Phi = np.array([0.081107124487,0.228537911761,0.178664767959,0.161131279216,0.173373369802, ... 0.195820137312,0.276226485398,0.291675965181,0.259480203313,0.285111049806, ... 0.331368464761,0.228636707479,0.199019858196,0.224041328002,0.209848472684, ... 0.147530335706,0.158874834596,0.090595209673,0.077545142771,0.070693599170, ... 0.075277039328,0.082213420279,0.081562178107,0.102842605886,0.095940191411, ... 0.077688517811] ) >>> s_resid = 40532.561476983901 >>> betas = np.array([0.000000000000,4.999999000000,4.736061020188,4.483719593421,4.266205481166, ... 4.114544146364,3.965500325995,3.818940732998,3.751110114569,3.690082727626, ... 3.630056187040,3.605340371581,3.796460173119,3.987605459727,4.152096865111, ... 4.152096865111,4.152096865111,0.000000000000,0.000000000000,0.000000000000, ... 0.000000000000,0.000000000000,0.000000000000,0.000000000000,0.000000000000, ... 0.000000000000] ) >>> betap = 0.8 >>> epsa = np.array([0.002995512907,0.003039740417,0.003192366495,0.003375544906,0.003479516543, ... 0.003318658489,0.003187204366,0.003078822327,0.003144673535,0.003235857882, ... 0.003343598868,0.003447509132,0.003408830551,0.003373557382,0.003345605523, ... 0.003345605523,0.003345605523,0.003500242606,0.003551923813,0.003615033761, ... 0.003693231875,0.003766948014,0.003706681514,0.003655727827,0.003612282052, ... 0.003574980671] ) >>> epsb = 0.029 >>> epsg = 0.0185 >>> epst = -0.004 >>> epss = 0.01 >>> epsp = 0.003 >>> [Vstarch] = cuntz_gleixner(adecdate[1:], gpp, Rd, CO2air, Ra, gtot, ndecdate, ... date0=adecdate[0], V0starch=V0starch, R0starch=R0starch, daynight=daynight, ... daylength=daylength, Phi=Phi, s_resid=s_resid, ... betas=betas, betap=betap, epsa=epsa, epsb=epsb, ... epsg=epsg, epst=epst, epss=epss, epsp=epsp, ... 
steady=True, Vstarch=True, julian=False) >>> from autostring import astr >>> print(astr(Vstarch[0:6],5,pp=True)) [' 40532.56148' ' 70158.59647' ' 98220.74429' '124787.71693' '150065.87342' '174445.40512'] >>> [Rass,Rm,Rchl,Rcyt,Rstarch,Rpyr,Rbio,Rphloem,Vstarch,ass13,disc,Rnew_starch,Rnew_cyt,Vstarchg] = cuntz_gleixner( ... adecdate[1:], gpp, Rd, CO2air, Ra, gtot, ndecdate, ... date0=adecdate[0], V0starch=V0starch, R0starch=R0starch, daynight=daynight, ... daylength=daylength, Phi=Phi, s_resid=s_resid, ... betas=betas, betap=betap, epsa=epsa, epsb=epsb, ... epsg=epsg, epst=epst, epss=epss, epsp=epsp, ... steady=True, fullmodel=True, julian=False, ... starch_mol2g=1., V0starchg=V0starch) >>> print(astr(Rass[0:6],5,pp=True)) ['0.01095' '0.01092' '0.01087' '0.01083' '0.01083' '0.01085'] >>> print(astr(Rm[0:6],5,pp=True)) ['0.01107' '0.01123' '0.01119' '0.01116' '0.01116' '0.01118'] >>> print(astr(Rchl[0:6],5,pp=True)) ['0.01095' '0.01092' '0.01087' '0.01084' '0.01084' '0.01086'] >>> print(astr(Rcyt[0:6],5,pp=True)) ['0.01095' '0.01092' '0.01087' '0.01084' '0.01084' '0.01086'] >>> print(astr(Rstarch[0:6],5,pp=True)) ['0.01095' '0.01095' '0.01094' '0.01093' '0.01092' '0.01092'] >>> print(astr(Rpyr[0:6],5,pp=True)) ['0.01095' '0.01105' '0.01100' '0.01097' '0.01097' '0.01099'] >>> print(astr(Rbio[0:6],5,pp=True)) ['0.01095' '0.01094' '0.01089' '0.01086' '0.01086' '0.01088'] >>> print(astr(Rphloem[0:6],5,pp=True)) ['0.01095' '0.01091' '0.01086' '0.01082' '0.01082' '0.01084'] >>> print(astr(Vstarch[0:6],5,pp=True)) [' 40532.56148' ' 70158.59647' ' 98220.74429' '124787.71693' '150065.87342' '174445.40512'] >>> print(astr(ass13[0:6],5,pp=True)) ['-0.00560' ' 0.23301' ' 0.21418' ' 0.19580' ' 0.18136' ' 0.17631'] >>> print(astr(Rnew_starch[0:6],5,pp=True)) ['0.01095' '0.01096' '0.01091' '0.01088' '0.01088' '0.01090'] >>> print(astr(Rnew_cyt[0:6],5,pp=True)) ['0.01095' '0.01092' '0.01087' '0.01084' '0.01084' '0.01086'] >>> print(astr(Vstarchg[0:6],5,pp=True)) [' 40532.56148' ' 70158.59647' ' 98220.74429' '124787.71693' '150065.87342' '174445.40512'] # non-steady state >>> R0cyt = 0.010911449304 >>> Vcyt = np.array([135000.,135000.,135000.,135000.,135000.,135000.,135000., ... 135000.,135000.,135000.,135000.,135000.,135000.,135000., ... 135000.,135000.,135000.,135000.,135000.,135000.,135000., ... 135000.,135000.,135000.,135000.,135000.]) >>> [Rass,Rm,Rchl,Rcyt,Rstarch,Rpyr,Rbio,Rphloem,Vstarch,ass13,disc,Rnew_starch,Rnew_cyt] = cuntz_gleixner( ... adecdate[1:], gpp, Rd, CO2air, Ra, gtot, ndecdate, Vcyt=Vcyt, ... date0=adecdate[0], V0starch=V0starch, R0starch=R0starch, R0cyt=R0cyt, ... daynight=daynight, ... daylength=daylength, Phi=Phi, s_resid=s_resid, ... betas=betas, betap=betap, epsa=epsa, epsb=epsb, ... epsg=epsg, epst=epst, epss=epss, epsp=epsp, ... 
steady=False, fullmodel=True, julian=False) >>> print(astr(Vstarch[0:6],5,pp=True)) [' 40532.56148' ' 70158.59647' ' 98220.74429' '124787.71693' '150065.87342' '174445.40512'] >>> print(astr(ass13[0:6],5,pp=True)) ['-0.00559' ' 0.23302' ' 0.21413' ' 0.19571' ' 0.18129' ' 0.17629'] >>> print(astr(Rass[0:6],5,pp=True)) ['0.01092' '0.01092' '0.01087' '0.01083' '0.01083' '0.01085'] >>> print(astr(Rm[0:6],5,pp=True)) ['0.01107' '0.01123' '0.01119' '0.01116' '0.01116' '0.01118'] >>> print(astr(Rchl[0:6],5,pp=True)) ['0.01095' '0.01092' '0.01087' '0.01084' '0.01084' '0.01086'] >>> print(astr(Rstarch[0:6],5,pp=True)) ['0.01095' '0.01095' '0.01094' '0.01093' '0.01092' '0.01092'] >>> print(astr(Rcyt[0:6],5,pp=True)) ['0.01092' '0.01092' '0.01090' '0.01088' '0.01087' '0.01087'] >>> print(astr(Rpyr[0:6],5,pp=True)) ['0.01092' '0.01105' '0.01103' '0.01101' '0.01100' '0.01099'] >>> print(astr(Rbio[0:6],5,pp=True)) ['0.01092' '0.01094' '0.01092' '0.01090' '0.01089' '0.01088'] >>> print(astr(Rnew_cyt[0:6],5,pp=True)) ['0.01095' '0.01092' '0.01087' '0.01084' '0.01084' '0.01086'] >>> print(astr(Rnew_starch[0:6],5,pp=True)) ['0.01095' '0.01096' '0.01091' '0.01088' '0.01088' '0.01090'] >>> print(astr(Rphloem[0:6],5,pp=True)) ['0.01092' '0.01091' '0.01089' '0.01087' '0.01085' '0.01085'] >>> from dec2date import dec2date >>> from date2dec import date2dec >>> aa = dec2date(adecdate, ascii=True, calendar='decimal') >>> jadecdate = date2dec(ascii=aa) >>> ndecdate = 2008.918772768670806 >>> bb = dec2date(ndecdate, ascii=True, calendar='decimal') >>> jndecdate = date2dec(ascii=bb) >>> [Rass,Rm,Rchl,Rcyt,Rstarch,Rpyr,Rbio,Rphloem,Vstarch,ass13,disc,Rnew_starch,Rnew_cyt] = cuntz_gleixner( ... jadecdate[1:], gpp, Rd, CO2air, Ra, gtot, jndecdate, Vcyt=Vcyt, ... date0=jadecdate[0], V0starch=V0starch, R0starch=R0starch, R0cyt=R0cyt, ... daynight=daynight, ... daylength=daylength, Phi=Phi, s_resid=s_resid, ... betas=betas, betap=betap, epsa=epsa, epsb=epsb, ... epsg=epsg, epst=epst, epss=epss, epsp=epsp, ... steady=False, fullmodel=True, julian=True) >>> # There are slight differences due to precision of dates >>> print(astr(Rphloem[0:6],5,pp=True)) ['0.01092' '0.01091' '0.01089' '0.01087' '0.01085' '0.01085'] >>> [Rass,Rm,Rchl,Rcyt,Rstarch,Rpyr,Rbio,Rphloem,Vstarch,ass13,disc,Rnew_starch,Rnew_cyt] = cuntz_gleixner( ... jadecdate[1:], gpp, Rd, CO2air, Ra, gtot, jndecdate, Vcyt=Vcyt, ... date0=jadecdate[0], V0starch=V0starch, R0starch=R0starch, R0cyt=R0cyt, ... daynight=daynight, ... daylength=daylength, Phi=Phi, s_resid=s_resid, ... betas=betas, betap=2./3.-0.1, epsa=epsa, epsb=epsb, ... epsg=epsg, epst=epst, epss=epss, epsp=epsp, ... steady=False, fullmodel=True, julian=True, nocheck=True) >>> print(astr(Rphloem[0:6],5,pp=True)) ['0.01092' '0.00956' '0.00932' '0.00906' '0.00889' '0.00909'] License ------- This file is part of the JAMS Python package, distributed under the MIT License. The JAMS Python package originates from the former UFZ Python library, Department of Computational Hydrosystems, Helmholtz Centre for Environmental Research - UFZ, Leipzig, Germany. 
Copyright (c) 2012-2016 <NAME> - mc (at) macu (dot) de Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. History ------- Written, MC, Jan 2012 Modified, MC, Mar 2012 - julian MC, May 2012 - nocheck MC, May 2012 - Vcyt, daynight and betas=None default MC, Feb 2013 - starch_mol2g, V0starchg MC, Feb 2013 - ported to Python 3 MC, Nov 2016 - const.tiny -> const.eps """ # # Checks nss = False if not steady: nss = True nd = idecdate.size ng = iGPP.size nr = iRd.size nc = iCa.size na = iRa.size ns = igtot.size if ((nd != ng) | (nd != nr) | (nd != nc) | (nd != na) | (nd != ns)): raise ValueError('not all input sizes are equal') if (Rass | Rm | Rchl | Rcyt | Rstarch | Rpyr | Rbio | Rphloem | Vstarch | ass13 | disc | Rnew_starch | Rnew_cyt): fullmodel = False if fullmodel: Rass = True Rm = True Rchl = True Rcyt = True Rstarch = True Rpyr = True Rbio = True Rphloem = True Vstarch = True ass13 = True disc = True Rnew_starch = True Rnew_cyt = True # Defaults # Day (1) or night (0) if not np.any(daynight is not None): daynight = np.where(iGPP > 0., 1, 0) isarr = np.ndim(daynight) if (isarr==0): idaynight = np.ones(nd, dtype=np.int) * daynight else: idaynight = daynight nn = idaynight.size if (nn != nd): raise ValueError('daynight must be size 1 or size(idecdate)') # length of day [s] isarr = np.ndim(daylength) if (isarr==0): idaylength =
np.ones(nd)
numpy.ones
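# ----------------------------------------------------------------------
# Editor's note: the input checks above broadcast scalar keywords such as
# daynight/daylength to full-length arrays with np.ones(nd)*value. Below
# is a minimal, self-contained sketch of that pattern; the helper name
# `as_full_array` is hypothetical and not part of the JAMS package, and
# np.full is used instead of the deprecated np.int construction.
import numpy as np

def as_full_array(value, n, dtype=float):
    """Broadcast a scalar to length n, or validate an array of length n."""
    if np.ndim(value) == 0:                # scalar given
        return np.full(n, value, dtype=dtype)
    arr = np.asarray(value, dtype=dtype)   # array-like given
    if arr.size != n:
        raise ValueError('input must be size 1 or size n')
    return arr

# usage mirroring the checks above (variable names assumed from context):
# idaynight  = as_full_array(daynight, nd, dtype=int)
# idaylength = as_full_array(daylength, nd)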
import os import warnings import unittest import numpy as np import pandas as pd from pyseer.lmm import initialise_lmm from pyseer.lmm import fit_lmm from pyseer.lmm import fit_lmm_block from pyseer.classes import LMM DATA_DIR = 'tests' P_BINARY = os.path.join(DATA_DIR, 'subset.pheno') S = os.path.join(DATA_DIR, 'similarity_subset.tsv.gz') COV = os.path.join(DATA_DIR, 'covariates.txt') C = os.path.join(DATA_DIR, 'lmm_cache.npz') K = os.path.join(DATA_DIR, 'unit_tests_data', 'k.txt') M = os.path.join(DATA_DIR, 'unit_tests_data', 'm.txt') def eq_lmm(s1, s2): """Test whether two LMM objects are the same""" diff = set() for p in ['kmer', 'pattern', 'kstrains', 'nkstrains', 'notes', 'prefilter', 'filter']: x = getattr(s1, p) y = getattr(s2, p) if x != y: diff.add(p) for p in ['af', 'prep', 'pvalue', 'kbeta', 'bse', 'frac_h2']: x = getattr(s1, p) y = getattr(s2, p) if not np.isfinite(x) and not np.isfinite(y): continue if np.isfinite(x) and not np.isfinite(y): diff.add(p) if np.isfinite(y) and not np.isfinite(x): diff.add(p) if abs(x - y) > 1E-7: diff.add(p) if s1.max_lineage is not None and s2.max_lineage is not None: p = 'max_lineage' x = getattr(s1, p) y = getattr(s2, p) if not np.isfinite(x) and not np.isfinite(y): pass else: if np.isfinite(x) and not np.isfinite(y): diff.add(p) if np.isfinite(y) and not np.isfinite(x): diff.add(p) if x != y: diff.add(p) elif s1.max_lineage is None and s2.max_lineage is None: pass else: diff.add('max_lineage') return diff class TestInitialiseLmm(unittest.TestCase): def test_initialise_lmm(self): p = pd.read_table(P_BINARY, index_col=0)['binary'] cov = pd.DataFrame([]) x, y, z = initialise_lmm(p, cov, S, lmm_cache_in=None, lmm_cache_out=None) self.assertEqual(x.shape[0], 50) self.assertAlmostEqual(y.findH2()['nLL'][0], 35.7033778) self.assertAlmostEqual(z, 0.0) # covariates cov = pd.read_table(COV, index_col=0, header=None) x, y, z = initialise_lmm(p, cov, S, lmm_cache_in=None, lmm_cache_out=None) self.assertEqual(x.shape[0], 50) self.assertAlmostEqual(y.findH2()['nLL'][0], 34.55403861) self.assertAlmostEqual(z, 0.0) # sample names not matching b = pd.Series(np.random.random(100), index=['test_%d' % x for x in range(100)]) with warnings.catch_warnings(): warnings.simplefilter('ignore') x, y, z = initialise_lmm(b, cov, S, lmm_cache_in=None, lmm_cache_out=None) self.assertEqual(x.shape[0], 0) self.assertTrue(not np.isfinite(y.findH2()['nLL'][0])) self.assertAlmostEqual(z, 0.0) # save cache x, y, z = initialise_lmm(p, cov, S, lmm_cache_in=None, lmm_cache_out=C) # load cache x, y, z = initialise_lmm(p, cov, S, lmm_cache_in=C, lmm_cache_out=None) self.assertEqual(x.shape[0], 50) self.assertAlmostEqual(y.findH2()['nLL'][0], 34.55403861) self.assertAlmostEqual(z, 0.0) # different sizes b = pd.Series(np.random.random(10), index=['test_%d' % x for x in range(10)]) with self.assertRaises(SystemExit) as cm: initialise_lmm(b, cov, S, lmm_cache_in=C, lmm_cache_out=None) self.assertEqual(cm.exception.code, 1) # matching lineage samples cov = pd.DataFrame([]) s = pd.read_table(S, index_col=0) x, y, z = initialise_lmm(p, cov, S, lmm_cache_in=None, lmm_cache_out=None, lineage_samples=s.index) # non-matching lineage samples with self.assertRaises(SystemExit) as cm: x, y, z = initialise_lmm(p, cov, S, lmm_cache_in=None, lmm_cache_out=None, lineage_samples=s.index[:-1]) class TestFitLmm(unittest.TestCase): def test_fit_lmm(self): p = pd.read_table(P_BINARY, index_col=0)['binary'] cov = pd.DataFrame([]) x, y, z = initialise_lmm(p, cov, S, lmm_cache_in=None, lmm_cache_out=None) 
        var = LMM('variant', 'pattern', 0.2,
                  np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,
                  ['k%d' % x for x in range(p[p == 1].shape[0])],
                  ['nk%d' % x for x in range(p[p == 0].shape[0])],
                  set(), True, True)
        k =
np.loadtxt(K)
numpy.loadtxt
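# Editor's note: a minimal sketch (not part of pyseer) of the NaN-aware
# float comparison that eq_lmm() above performs attribute by attribute:
# two values match when both are non-finite, or when both are finite and
# agree within an absolute tolerance of 1e-7.
import numpy as np

def floats_match(x, y, tol=1e-7):
    if not np.isfinite(x) and not np.isfinite(y):
        return True                      # both nan/inf: treated as equal
    if np.isfinite(x) != np.isfinite(y):
        return False                     # only one value is finite
    return abs(x - y) <= tol             # both finite: tolerance check

assert floats_match(np.nan, np.nan)
assert not floats_match(np.nan, 1.0)
assert floats_match(0.1 + 0.2, 0.3)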
#!/usr/bin/env python import numpy as np import spatialmath.base.argcheck as argcheck import cv2 as cv import scipy as sp from scipy import signal class ImageProcessingKernelMixin: """ Image processing kernel operations on the Image class """ @staticmethod def kgauss(sigma, hw=None): """ Gaussian kernel :param sigma: standard deviation of Gaussian kernel :type sigma: float :param hw: width of the kernel :type hw: integer :return k: kernel :rtype: numpy array (N,H) - ``IM.kgauss(sigma)`` is a 2-dimensional Gaussian kernel of standard deviation ``sigma``, and centred within the matrix ``k`` whose half-width is ``hw=2*sigma`` and ``w=2*hw+1``. - ``IM.kgauss(sigma, hw)`` as above but the half-width ``hw`` is specified. Example: .. runblock:: pycon .. note:: - The volume under the Gaussian kernel is one. """ # make sure sigma, w are valid input if hw is None: hw = np.ceil(3 * sigma) wi = np.arange(-hw, hw + 1) x, y = np.meshgrid(wi, wi) m = 1.0 / (2.0 * np.pi * sigma ** 2) * \ np.exp(-(np.power(x, 2) + np.power(y, 2)) / 2.0 / sigma ** 2) # area under the curve should be 1, but the discrete case is only # an approximation return m / np.sum(m) @staticmethod def klaplace(): r""" Laplacian kernel :return k: kernel :rtype: numpy array (3,3) - ``IM.klaplace()`` is the Laplacian kernel: .. math:: K = \begin{bmatrix} 0 & 1 & 0 \\ 1 & -4 & 1 \\ 0 & 1 & 0 \end{bmatrix} Example: .. runblock:: pycon .. note:: - This kernel has an isotropic response to image gradient. """ return np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]]) @staticmethod def ksobel(): r""" Sobel edge detector :return k: kernel :rtype: numpy array (3,3) - ``IM.ksobel()`` is the Sobel x-derivative kernel: .. math:: K = \frac{1}{8} \begin{bmatrix} 1 & 0 & -1 \\ 2 & 0 & -2 \\ 1 & 0 & -1 \end{bmatrix} .. note:: - This kernel is an effective vertical-edge detector - The y-derivative (horizontal-edge) kernel is K' """ return np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]]) / 8.0 @staticmethod def kdog(sigma1, sigma2=None, hw=None): """ Difference of Gaussians kernel :param sigma1: standard deviation of first Gaussian kernel :type sigma1: float :param sigma2: standard deviation of second Gaussian kernel :type sigma2: float :param hw: half-width of Gaussian kernel :type hw: integer :return k: kernel :rtype: numpy array - ``IM.kdog(sigma1)`` is a 2-dimensional difference of Gaussian kernel equal to ``kgauss(sigma1) - kgauss(sigma2)``, where ``sigma1`` > ``sigma2. By default, ``sigma2 = 1.6 * sigma1``. The kernel is centred within the matrix ``k`` whose half-width ``hw = 3xsigma1`` and full width of the kernel is ``2xhw+1``. - ``IM.kdog(sigma1, sigma2)`` as above but sigma2 is specified directly. - ``IM.kdog(sigma1, sigma2, hw)`` as above but the kernel half-width is specified Example: .. runblock:: pycon .. note:: - This kernel is similar to the Laplacian of Gaussian and is often used as an efficient approximation. 
""" # sigma1 > sigma2 if sigma2 is None: sigma2 = 1.6 * sigma1 else: if sigma2 > sigma1: t = sigma1 sigma1 = sigma2 sigma2 = t # thus, sigma2 > sigma1 if hw is None: hw = np.ceil(3.0 * sigma1) m1 = self.kgauss(sigma1, hw) # thin kernel m2 = self.kgauss(sigma2, hw) # wide kernel return m2 - m1 @staticmethod def klog(sigma, hw=None): """ Laplacian of Gaussian kernel :param sigma1: standard deviation of first Gaussian kernel :type sigma1: float :param hw: half-width of kernel :type hw: integer :return k: kernel :rtype: numpy array (2 * 3 * sigma + 1, 2 * 3 * sigma + 1) - ``IM.klog(sigma)`` is a 2-dimensional Laplacian of Gaussian kernel of width (standard deviation) sigma and centred within the matrix ``k`` whose half-width is ``hw=3xsigma``, and ``w=2xhw+1``. - ``IM.klog(sigma, hw)`` as above but the half-width ``w`` is specified. Example: .. runblock:: pycon """ if hw is None: hw = np.ceil(3.0 * sigma) wi = np.arange(-hw, hw + 1) x, y = np.meshgrid(wi, wi) return 1.0 / (np.pi * sigma ** 4.0) * \ ((np.power(x, 2) + np.power(y, 2)) / (2.0 * sigma ** 2) - 1) * \ np.exp(-(np.power(x, 2) + np.power(y, 2)) / (2.0 * sigma ** 2)) @staticmethod def kdgauss(sigma, hw=None): """ Derivative of Gaussian kernel :param sigma1: standard deviation of first Gaussian kernel :type sigma1: float :param hw: half-width of kernel :type hw: integer :return k: kernel :rtype: numpy array (2 * 3 * sigma + 1, 2 * 3 * sigma + 1) - ``IM.kdgauss(sigma)`` is a 2-dimensional derivative of Gaussian kernel ``(w,w)`` of width (standard deviation) sigma and centred within the matrix ``k`` whose half-width ``hw = 3xsigma`` and ``w=2xhw+1``. - ``IM.kdgauss(sigma, hw)`` as above but the half-width is explictly specified. Example: .. runblock:: pycon .. note:: - This kernel is the horizontal derivative of the Gaussian, dG/dx. - The vertical derivative, dG/dy, is k'. - This kernel is an effective edge detector. """ if hw is None: hw =
np.ceil(3.0 * sigma)
numpy.ceil
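# Editor's note: a short usage sketch for the kernels above, assuming the
# ImageProcessingKernelMixin class defined earlier is available in this
# scope. It builds a Gaussian kernel, checks the unit-volume property
# stated in the kgauss docstring, and smooths a random image with
# scipy.signal.convolve2d.
import numpy as np
from scipy import signal

sigma = 2.0
k = ImageProcessingKernelMixin.kgauss(sigma)   # (2*hw+1, 2*hw+1), hw = ceil(3*sigma)
assert abs(k.sum() - 1.0) < 1e-10              # volume under the kernel is one
im = np.random.rand(64, 64)
smoothed = signal.convolve2d(im, k, mode='same', boundary='symm')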
import numpy as np
import random


def bagging_balance_weight(X, y):
    # Rebalance an imbalanced classification dataset by bagging a new one.
    # input:
    #     X, y
    #     X may be 2-D or 3-D, as long as the first axis is sample_num;
    #     y may be a column vector or a one-hot encoded multi-column matrix
    # output:
    #     X_result, y_result
    #     returned in the same format as the input X, y
    drop_th = 0.01  # classes whose ratio falls below this threshold are treated as absent
    max_subsample_ratio = 1  # record the largest class count (max_n_subsample) in the
                             # original data; every class in the bagged set then gets
                             # max_n_subsample * this parameter samples

    if y.ndim == 1:
        y_label = y
    else:
        y_label = np.zeros(y.shape[0])
        for i in range(y.shape[0]):
            y_label[i] = np.where(y[i] == 1)[0][0]

    unique = np.unique(y_label)
    num_class = len(unique)
    unique_ratio = np.zeros(num_class)
    for i in range(num_class):
        unique_ratio[i] = sum(y_label == unique[i]) / len(y_label)
    unique_ratio[unique_ratio < drop_th] = 0

    n_bagging = int(max(unique_ratio) * len(y) * max_subsample_ratio)
    X_result = []
    y_result = []
    for i in range(num_class):
        if unique_ratio[i] == 0:
            continue
        else:
            sub_X = X[y_label == unique[i]]
            sub_y = y[y_label == unique[i]]
            for j in range(n_bagging):
                index = random.randint(0, sub_X.shape[0] - 1)
                X_result.append(sub_X[index])
                y_result.append(sub_y[index])
    X_result = np.array(X_result)
    y_result =
np.array(y_result)
numpy.array
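# Editor's note: a small usage sketch for bagging_balance_weight() above,
# on a toy 2-class imbalanced set (90 samples of class 0, 10 of class 1),
# assuming the truncated function body ends by returning X_result, y_result
# as its docstring describes. After bagging, both classes contribute the
# same number of rows (n_bagging each).
import numpy as np

X = np.vstack([np.zeros((90, 3)), np.ones((10, 3))])
y = np.array([0] * 90 + [1] * 10)
Xb, yb = bagging_balance_weight(X, y)          # assumed return signature
assert (yb == 0).sum() == (yb == 1).sum()      # classes now balanced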
import random import cv2 import numpy as np from aug import Operation, perform_randomly from aug import SaltNoise from aug.ops import utils @perform_randomly class Erosion(Operation): def __init__(self, kernel_size=5, reversed=False): self._kernel_size = kernel_size self._reversed = reversed def apply_on_image(self, image): self._kernel_size = self._kernel_size if self._kernel_size % 2 != 0 else self._kernel_size + 1 kernel = np.ones((self._kernel_size, self._kernel_size), np.uint8) er, dil = cv2.erode, cv2.dilate if self._reversed: er, dil = dil, er image[:, :, 0] = er(image[:, :, 0], kernel, iterations=1) image[:, :, 1] = er(image[:, :, 1], kernel, iterations=1) image[:, :, 2] = er(image[:, :, 2], kernel, iterations=1) if image.shape[2] > 3: image[:, :, 3] = dil(image[:, :, 3], kernel, iterations=1) return image @perform_randomly class Dilation(Operation): def __init__(self, kernel_size=3): self._kernel_size = kernel_size def apply_on_image(self, image): return Erosion(kernel_size=self._kernel_size, reversed=True).apply_on_image(image) class BoundingBoxesFinder(object): """ Find bounding boxes of letters. """ def apply_on_image(self, in_image): last_empty = None last_letter = None top_border = None bottom_border = None borders = [] image = in_image.copy() if len(image.shape) == 3: image = cv2.cvtColor(image, cv2.COLOR_RGBA2GRAY) im_height, im_width = image.shape[:2] # Find top/bottom border for i in range(im_height): column_sum = sum(image[i, :]) if column_sum != 255 * im_width and top_border is None: top_border = i column_sum = sum(image[-i, :]) if column_sum != 255 * im_width and bottom_border is None: bottom_border = im_height - i if top_border is not None and bottom_border is not None: break # Find vertical borders for i in range(im_width): column_sum = sum(image[:, i]) if column_sum != 255 * im_height: if last_letter != i - 1: borders.append(i) last_letter = i else: if last_empty is not None and last_empty != i - 1: borders.append(i) last_empty = i vertical_borders = sorted(borders) crop_borders = [] for i in range(len(vertical_borders), 2): crop_borders.append( [top_border, bottom_border, vertical_borders[i], vertical_borders[i + 1]]) return crop_borders class SeparatedLettersErosionOrDilation: EROSION_MODE = 0 DILATION_MODE = 1 MIX_MODE = 2 # padding - distance between countour and box borders def __init__(self, mode=MIX_MODE, padding=6, iterations=(1, 6), kernel_size=(5, 5), salt_noise=True): assert mode in [self.EROSION_MODE, self.DILATION_MODE, self.MIX_MODE] self._padding = padding self._mode = mode self._iterations = iterations self._kernel_size = kernel_size self._salt_noise = salt_noise def apply_on_image(self, image): im_height, im_width = image.shape[:2] if self._salt_noise: image = SaltNoise(p=1., percent=random.uniform(0.0001, 0.001)).apply_on_image(image) imgray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) ret, thresh = cv2.threshold(imgray, 75, 150, 3) thresh = 255 - thresh img, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) dst = image for i in range(len(contours)): mask = np.zeros_like(img) cv2.drawContours(mask, contours, i, 255, -1) x, y = np.where(mask == 255) topx, topy = np.min(x), np.min(y) bottomx, bottomy = np.max(x), np.max(y) out = image[topx - self._padding:bottomx + self._padding, topy - self._padding:bottomy + self._padding] # out = 255 - out kernel = cv2.getStructuringElement( random.choice([cv2.MORPH_ELLIPSE, cv2.MORPH_CROSS, cv2.MORPH_RECT]), self._kernel_size) if not self._mode == self.MIX_MODE: if self._mode 
== self.EROSION_MODE: transformed = cv2.erode(out, kernel, iterations=random.randint(*self._iterations)) elif self._mode == self.DILATION_MODE: transformed = cv2.dilate(out, kernel, iterations=random.randint(*self._iterations)) else: raise Exception('Unknown mode') else: if random.randint(0, 1): transformed = cv2.erode(out, kernel, iterations=random.randint(*self._iterations)) else: transformed = cv2.dilate(out, kernel, iterations=random.randint(*self._iterations)) transformed = 255 - transformed dst[topx - self._padding:bottomx + self._padding, topy - self._padding:bottomy + self._padding] = transformed dst = cv2.resize(dst, (im_width, im_height), interpolation=cv2.INTER_CUBIC) return dst @perform_randomly class ScatterLetters(Operation): def __init__(self, max_dev_ox=0.02, max_dev_oy=0.15): self._max_devx = max_dev_ox self._max_devy = max_dev_oy def apply_on_image(self, image): im_height, im_width = image.shape[:2] fill_color = (255, 255, 255, 0) h = int(self._max_devy * im_height + 1) w = int(self._max_devx * im_width + 1) image = cv2.copyMakeBorder(image, h, h, w, w, cv2.BORDER_CONSTANT, value=fill_color) borders = BoundingBoxesFinder().apply_on_image(image) for b in borders: y1, y2, x1, x2 = b ox_dev = int(random.uniform(-self._max_devx, self._max_devx) * im_width) / 2 oy_dev = int(random.uniform(-self._max_devy, self._max_devy) * im_height) / 2 tmp_x1, tmp_x2 = x1 + ox_dev, x2 + ox_dev tmp_y1, tmp_y2 = y1 + oy_dev, y2 + oy_dev tmp_tensor = image[y1:y2, x1:x2].copy() image[max(0, y1 - 1):min(image.shape[0], y2 + 1), max(0, x1 - 1):min(image.shape[1], x2 + 1)] = fill_color image[tmp_y1:tmp_y2, tmp_x1:tmp_x2] = tmp_tensor return cv2.resize(image, (im_width, im_height), interpolation=cv2.INTER_CUBIC) @perform_randomly class Noise(Operation): def __init__(self, mode='normal'): self._mode = mode assert self._mode in ['dotted', 'normal'] def noise(self, mask, image, color_diff=10, percent=0.05, radius=10): im_height, im_width = image.shape[:2] tmp = image.copy() tmp = 255 - tmp tmp = cv2.cvtColor(tmp, cv2.COLOR_RGBA2GRAY) _, tmp = cv2.threshold(tmp, 1, 255, cv2.THRESH_BINARY) number = int(percent * im_height * im_width) for _ in range(number): c = random.randint(0, color_diff) color = [c, c, c, 255] oy = random.randint(0, im_height - 1) ox = random.randint(0, im_width - 1) if mask[oy, ox]: cv2.circle(image, (ox, oy), 0, color, radius) return image def apply_noises(self, img, configs): mask = img.copy() mask = cv2.cvtColor(mask, cv2.COLOR_RGBA2GRAY) mask = 255 - mask img2 = np.zeros(img.shape, dtype=np.uint8) img2[:, :, :3] = 255 config = random.choice(configs) for params in config: img2 = self.noise(mask, img2, *params) return img2 def apply_noises_dotted_font(self, img): """ Apply different kinds of noises defined in configs (dotted fonts) Single config row: [x, y, z] x - max deviation from base color y - density of noise in percent z - radius of single dot """ configs = [[ [20, 2.2, 1], ]] return self.apply_noises(img, configs) def apply_noises_normal_font(self, img): """ Apply different kinds of noises defined in configs (normal fonts) Single config row: [x, y, z] x - max deviation from base color y - density of noise in percent z - radius of single dot """ configs = [[ [20, 0.7, 1], [100, 0.01, 7], [70, 0.05, 4], ], [ [20, 0.25, 3], [40, 0.2, 2], [130, 0.01, 2], ], [ [20, 2.2, 1], ]] return self.apply_noises(img, configs) def apply_on_image(self, image): if self._mode == 'normal': return self.apply_noises_normal_font(image) if self._mode == 'dotted': return 
self.apply_noises_dotted_font(image) @perform_randomly class RandomSizeBorder(Operation): def __init__(self, max_border=.1, horizontal_sides_probability=.5, vertical_sides_probability=.5): self._max_border = max_border self._horizontal_sides_probability = horizontal_sides_probability self._vertical_sides_probability = vertical_sides_probability def apply_on_image(self, image): im_height, im_width = image.shape[:2] borders = [ 0 if random.random() < self._horizontal_sides_probability else int( random.uniform(0., self._max_border * im_height)) for _ in range(2) ] borders.extend([ 0 if random.random() < self._vertical_sides_probability else int( random.uniform(0., self._max_border * im_width)) for _ in range(2) ]) image = cv2.copyMakeBorder(image, *borders, borderType=cv2.BORDER_CONSTANT, value=(255, 255, 255, 0)) return cv2.resize(image, (im_width, im_height), interpolation=cv2.INTER_CUBIC) @perform_randomly class HorizontalCut(Operation): def __init__(self, left=.1, right=.1, rescale=True, horizontal=True): self._left = left self._right = right self._rescale = rescale self._horizontal = horizontal def apply_on_image(self, image): im_height, im_width = image.shape[:2] if self._horizontal: left = int(im_width * self._left) right = int(im_width * self._right) image = image[:, left:im_width - right] else: top = int(im_height * self._left) bottom = int(im_height * self._right) image = image[top:im_height - bottom, :] if self._rescale: image = cv2.resize(image, (im_width, im_height), interpolation=cv2.INTER_CUBIC) return image @perform_randomly class VerticalCut(Operation): def __init__(self, top=.1, bottom=.1, rescale=True): self._top = top self._bottom = bottom self._rescale = rescale def apply_on_image(self, image): return HorizontalCut(self._top, self._bottom, self._rescale, horizontal=False).apply_on_image(image) @perform_randomly class Scratches(Operation): """ Scratches will be drawn only in box witch coords: left_top_corner=(min_x, min_y) and right_bottom_corner=(max_x, max_y) if corods will be not set, the scratches will be drawn on whole image """ def __init__(self, num_scratches=20, alpha=None): self._min_x = None self._max_x = None self._min_y = None self._max_y = None self._num_scratches = num_scratches self._alpha = alpha if alpha is not None else .5 def test_probability(self, prob): n = random.randint(0, 100) return n <= prob def apply_on_image(self, image): h, w = image.shape[:2] min_x, min_y = 0, 0 max_x, max_y = 2 * w, 2 * h scratches = np.zeros((max_y, max_x, 3), np.uint8) scratches[:] = 0 # main scratch for i in range(0, self._num_scratches): x1 = random.randint(min_x, max_x) x2 = random.randint(min_x, max_x) y1 = random.randint(min_y, max_y) y2 = random.randint(min_y, max_y) color = tuple([random.randint(0, 255)] * 3) cv2.line(scratches, (x1, y1), (x2, y2), color, thickness=1, lineType=cv2.LINE_AA) # additional scratches for main scratch num_additional_scratches = random.randint(1, 4) for j in range(0, num_additional_scratches): if self.test_probability(35): new_color = random.randint(15, 70) param_x1 = random.randint(1, 5) param_x2 = random.randint(1, 5) param_y1 = random.randint(1, 5) param_y2 = random.randint(1, 5) cv2.line(scratches, (x1 - param_x1, y1 - param_x2), (x2 - param_y1, y2 - param_y2), (new_color, new_color, new_color), thickness=1, lineType=cv2.LINE_AA) top, bottom = h // 2, scratches.shape[0] - (h - h // 2) left, right = w // 2, scratches.shape[1] - (w - w // 2) scratches = scratches[top:bottom, left:right] dst = cv2.addWeighted(image[:, :, :3], 1.0, 
scratches, self._alpha, 0.0) return cv2.resize(dst, (w, h), interpolation=cv2.INTER_CUBIC) @perform_randomly class TextureModification(Operation): """ Creates effect of dirt/dust. """ def __init__(self, blur_kernel=(3, 3), emboss_kernel_size=None, alpha=None): self._blur_kernel = blur_kernel self._emboss_kernel_size = random.choice([9, 11]) if \ emboss_kernel_size is None else emboss_kernel_size self._alpha = random.uniform(0.4, 0.7) if alpha is None else alpha def apply_on_image(self, image): def create_emboss_kernel_top_down(size): assert size % 2 == 1, "Kernel must be of an uneven size!" k = np.ones((size, size), dtype=np.int) for i in range(size): for j in range(size): k[i][j] = -1 if i > (size - 1) / 2: k[i][j] = 1 if i == (size - 1) / 2: k[i][j] = 0 return k h, w = image.shape[:2] k_size = max(int((h + w) // 300), 3) self._blur_kernel = k_size, k_size # creating 'dirt' random_noise = np.random.randint(0, 256, (h, w, 3), dtype=np.uint8) dirt_kernel = create_emboss_kernel_top_down(self._emboss_kernel_size) dirt_colour = cv2.filter2D(random_noise, -1, dirt_kernel) gray_dirt = cv2.cvtColor(dirt_colour, cv2.COLOR_BGR2GRAY) # back to 3 channels (can't use addWeighted() to add images that have different number of channels) gray_dirt_3_channels = cv2.cvtColor(gray_dirt, cv2.COLOR_GRAY2BGR) blurred_dirt = cv2.blur(gray_dirt_3_channels, self._blur_kernel) blurred_dirt = utils.unify_num_of_channels(image, blurred_dirt) final = cv2.addWeighted(image, 1.0, blurred_dirt, self._alpha, 0.0) return final @perform_randomly class Jitter(Operation): def __init__(self, magnitude=.25): super().__init__() self._magnitude = magnitude def apply_on_image(self, image): if image.ndim == 3: w, h, c = image.shape[:3] else: w, h = image.shape[:2] c = 1 magnitude = int(min(w, h) / 10 * self._magnitude) noise_x = np.random.randint(magnitude, size=w * h) - magnitude // 2 noise_y = np.random.randint(magnitude, size=w * h) - magnitude // 2 indices_x = np.clip(noise_x + np.arange(w * h), 0, w * h - 1) indices_y = np.clip(noise_y +
np.arange(w * h)
numpy.arange
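# Editor's note: a self-contained sketch of the resampling idea behind
# Jitter.apply_on_image() above: every output pixel is read from a
# randomly displaced, clipped index of the input. This variant collapses
# the x/y displacements into one flat index; it is an illustration, not
# the aug library's implementation.
import numpy as np

def jitter(image, magnitude=4):
    h, w = image.shape[:2]
    n = h * w
    dx = np.random.randint(magnitude, size=n) - magnitude // 2
    dy = np.random.randint(magnitude, size=n) - magnitude // 2
    idx = np.clip(np.arange(n) + dx + dy * w, 0, n - 1)   # flat, clipped indices
    return image.reshape(n, -1)[idx].reshape(image.shape)

out = jitter(np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8))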
#!/usr/bin/env python # -*- coding: utf-8 -*- # This file is part of the # Apode Project (https://github.com/ngrion/apode). # Copyright (c) 2020, <NAME> and <NAME> # License: MIT # Full Text: https://github.com/ngrion/apode/blob/master/LICENSE.txt # ============================================================================= # DOCS # ============================================================================= """Data simulation tools for Apode.""" # ============================================================================= # IMPORTS # ============================================================================= from apode.basic import ApodeData import numpy as np import pandas as pd # ============================================================================= # FUNCTIONS # ============================================================================= def make_pareto(seed=None, a=5, size=100, c=200, nbin=None): """Pareto Distribution. Parameters ---------- seed: int, optional(default=None) a: float, optional(default=5) size: int, optional(default=100) c: int, optional(default=200) nbin: int, optional(default=None) Return ------ out: float array Array of random numbers. """ random = np.random.RandomState(seed=seed) y = c * random.pareto(a=a, size=size) df = pd.DataFrame({"x": y}) if nbin is None: return ApodeData(df, income_column="x") else: df = binning(df, nbin=nbin) return ApodeData(df, income_column="x") def make_uniform(seed=None, size=100, mu=100, nbin=None): """Uniform Distribution. Parameters ---------- seed: int, optional(default=None) size: int, optional(default=100) mu: float, optional(default=100) nbin: int, optional(default=None) Return ------ out: float array Array of random numbers. """ random = np.random.RandomState(seed=seed) y = random.uniform(size=size) * mu df = pd.DataFrame({"x": y}) if nbin is None: return ApodeData(df, income_column="x") else: df = binning(df, nbin=nbin) return ApodeData(df, income_column="x") def make_lognormal(seed=None, size=100, sigma=1.0, nbin=None): """Lognormal Distribution. Parameters ---------- seed: int, optional(default=None) size: int, optional(default=100) sigma: float, optional(default=1.0) nbin: int, optional(default=None) Return ------ out: float array Array of random numbers. """ random = np.random.RandomState(seed=seed) y = random.lognormal(mean=3.3, sigma=sigma, size=size) df = pd.DataFrame({"x": y}) if nbin is None: return ApodeData(df, income_column="x") else: df = binning(df, nbin=nbin) return ApodeData(df, income_column="x") def make_chisquare(seed=None, size=100, df=5, c=10, nbin=None): """Chisquare Distribution. Parameters ---------- seed: int, optional(default=None) size: int, optional(default=100) df: float, optional(default=5) c: float, optional(default=10) nbin: int, optional(default=None) Return ------ out: float array Array of random numbers. """ random = np.random.RandomState(seed=seed) y = c * random.chisquare(df=df, size=size) df = pd.DataFrame({"x": y}) if nbin is None: return ApodeData(df, income_column="x") else: df = binning(df, nbin=nbin) return ApodeData(df, income_column="x") def make_gamma(seed=None, size=100, shape=1, scale=50.0, nbin=None): """Gamma Distribution. Parameters ---------- seed: int, optional(default=None) size: int, optional(default=100) shape: float, optional(default=1.0) scale: float, optional(default=50.0) nbin: int, optional(default=None) Return ------ out: float array Array of random numbers. 
""" random = np.random.RandomState(seed=seed) y = random.gamma(shape=shape, scale=scale, size=size) df = pd.DataFrame({"x": y}) if nbin is None: return ApodeData(df, income_column="x") else: df = binning(df, nbin=nbin) return ApodeData(df, income_column="x") def make_weibull(seed=None, size=100, a=1.5, c=50, nbin=None): """Weibull Distribution. Parameters ---------- seed: int, optional(default=None) size: int, optional(default=100) a: float, optional(default=1.5) c: float, optional(default=50) nbin: int, optional(default=None) Return ------ out: float array Array of random numbers. """ random = np.random.RandomState(seed=seed) y = c * random.weibull(a=a, size=size) df = pd.DataFrame({"x": y}) if nbin is None: return ApodeData(df, income_column="x") else: df = binning(df, nbin=nbin) return ApodeData(df, income_column="x") def make_exponential(seed=None, size=100, scale=1, c=50, nbin=None): """Exponential Distribution. Parameters ---------- seed: int, optional(default=None) size: int, optional(default=100) scale: float, optional(default=1.0) c: float, optional(default=50) nbin: int, optional(default=None) Return ------ out: float array Array of random numbers. """ random =
np.random.RandomState(seed=seed)
numpy.random.RandomState
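# Editor's note: every make_* generator above uses the same seeding idiom.
# A minimal demonstration that np.random.RandomState(seed) makes the draws
# reproducible, which is what the `seed` keyword of these functions
# guarantees.
import numpy as np

a = np.random.RandomState(seed=42).pareto(a=5, size=5)
b = np.random.RandomState(seed=42).pareto(a=5, size=5)
assert np.allclose(a, b)    # identical seeds give identical draws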
import sys,glob,os,time,copy import numpy as np import pickle as pickle import multiprocessing as mp from .. import get_img_info, corrections, visual_tools, spot_tools, domain_tools from .. import _correction_folder, _corr_channels, _temp_folder,_distance_zxy,\ _sigma_zxy,_image_size, _allowed_colors, _num_buffer_frames, _num_empty_frames, _image_dtype from ..External import Fitting_v3 from scipy import ndimage, stats from scipy.spatial.distance import pdist,cdist,squareform from skimage import morphology from skimage.segmentation import random_walker from functools import partial import matplotlib import matplotlib.pyplot as plt import h5py import ast _allowed_kwds = {'combo': 'c', 'decoded':'d', 'unique': 'u', 'relabeled_unique':'l', 'merfish': 'm', #'rna-unique':'r', 'rna': 'r', # long term used label, because "-" is creating issue in python 'gene':'g', 'protein':'p', } _max_num_seeds = 4000 _min_num_seeds = 50 _spot_seeding_th = 200 from . import batch_functions from . import field_of_view # initialize pool init_dic = {} def _init_unique_pool(_ic_profile_dic, _cac_profile_dic, _ic_shape, _cac_shape): """initialize pool, function used to put data into shared memory""" print(f"- Initialize core with illumination correction profiles for {list(_ic_profile_dic.keys())}") init_dic['illumination'] = _ic_profile_dic print(f"- Initialize core with chromatic correction profiles for {list(_cac_profile_dic.keys())}") init_dic['chromatic'] = _cac_profile_dic init_dic['ic_shape'] = _ic_shape init_dic['cac_shape'] = _cac_shape def _fit_single_image(_im, _id, _chrom_coords, _seeding_args, _fitting_args, _check_fitting=True, _normalization=True, _verbose=False): if _verbose: print(f"+++ fitting for region:{_id}") _spots_for_chrom = [] if _normalization: _norm_cst = np.nanmedian(_im) for _chrom_coord in _chrom_coords: if _im is None: _spots_for_chrom.append(np.array([])) else: # seeding _seeds = visual_tools.get_seed_in_distance(_im, _chrom_coord, *_seeding_args) if len(_seeds) == 0: # no seed found, return empty array _spots_for_chrom.append(np.array([])) continue # fit _fitter = Fitting_v3.iter_fit_seed_points( _im, _seeds.T, *_fitting_args) _fitter.firstfit() # if check-fitting if _check_fitting: _fitter.repeatfit() #_fits = visual_tools.fit_multi_gaussian(_im, _seeds, *_fitting_args) _spots = np.array(_fitter.ps) if _normalization: _spots[:,0] = _spots[:,0] / _norm_cst _spots_for_chrom.append(_spots) return _spots_for_chrom # function to allow multi-processing pick spots def _pick_spot_in_batch(_cell, _data_type='unique', _pick_type='EM', _use_chrom_coords=True, _sel_ids=None, _num_iters=10, _terminate_th=0.003, _intensity_th=1, _hard_intensity_th=True, _spot_num_th=100, _ref_spot_list=None, _ref_spot_ids=None, _ref_pick_type='EM', _ignore_ids=False, _ref_dist_metric='median', _score_metric='linear', _local_size=5, _w_ctdist=2, _w_lcdist=1, _w_int=1, _w_nbdist=2, _save_inter_plot=False, _save_to_info=True, _save_plot=True, _check_spots=True, _check_th=-3.5, _check_percentile=10., _hard_dist_th=6000, _distance_limits=[0, np.inf], _ignore_nan=True, _nan_mask=0., _inf_mask=-1000., _chrom_share_spots=False, _plot_limits=[0, 1500], _cmap='seismic_r', _fig_dpi=300, _fig_size=4, _overwrite=False, _verbose=True): """_cell: Cell_Data class""" if _verbose: print(f"-- {_pick_type} pick spots for fov:{_cell.fov_id}, cell:{_cell.cell_id}") # notice: always load in attributes, never return indices in batch format _picked_spots = _cell._pick_spots(_data_type=_data_type, _pick_type=_pick_type, 
_use_chrom_coords=_use_chrom_coords, _sel_ids=_sel_ids, _num_iters=_num_iters, _terminate_th=_terminate_th, _intensity_th=_intensity_th, _hard_intensity_th=_hard_intensity_th, _spot_num_th=_spot_num_th, _ref_spot_list=_ref_spot_list, _ref_spot_ids=_ref_spot_ids, _ref_pick_type=_ref_pick_type, _ignore_ids=_ignore_ids, _ref_dist_metric=_ref_dist_metric, _score_metric=_score_metric, _local_size=_local_size, _w_ctdist=_w_ctdist, _w_lcdist=_w_lcdist, _w_int=_w_int, _w_nbdist=_w_nbdist, _distance_limits=_distance_limits, _ignore_nan=_ignore_nan, _nan_mask=_nan_mask, _inf_mask=_inf_mask, _chrom_share_spots=_chrom_share_spots, _check_spots=_check_spots, _check_th=_check_th, _check_percentile=_check_percentile, _hard_dist_th=_hard_dist_th, _save_inter_plot=_save_inter_plot, _save_to_attr=True, _save_to_info=_save_to_info, _return_indices=False, _overwrite=_overwrite, _verbose=_verbose) _distmaps = _cell._generate_distance_map(_data_type=_data_type, _pick_type=_pick_type, _sel_ids=_sel_ids, _save_info=_save_to_info, _save_plot=_save_plot, _limits=_plot_limits, _cmap=_cmap, _fig_dpi=_fig_dpi, _fig_size=_fig_size, _overwrite=_overwrite, _verbose=_verbose) return _cell def _load_cell_in_batch(_cell, _data_type='all', _save_folder=None, _decoded_flag=None, _distmap_data='unique', _distmap_pick='EM', _load_attrs=[], _exclude_attrs=[], _overwrite=False, _verbose=True): """Function to allow batch loading""" _cell._load_from_file(_data_type=_data_type, _save_folder=_save_folder, _decoded_flag=_decoded_flag, _distmap_data=_distmap_data, _distmap_pick=_distmap_pick, _load_attrs=_load_attrs, _exclude_attrs=_exclude_attrs, _overwrite=_overwrite, _verbose=_verbose) return _cell def _save_cell_in_batch(_cell, _data_type='cell_info', _save_dic={}, _save_folder=None, _unsaved_attrs=None, _clear_old_attrs=False, _overwrite=False, _verbose=True): """Function to allow batch saving""" _cell._save_to_file(_data_type=_data_type, _save_dic=_save_dic, _save_folder=_save_folder, _unsaved_attrs=_unsaved_attrs, _clear_old_attrs=_clear_old_attrs, _overwrite=_overwrite, _verbose=_verbose) # batch merge cells def _merge_RNA_to_DNA_in_batch(_cell, _source_cell_data, _merge_type='cell_info', _attr_feature='rna-', _load_in_ram=True, _save_to_file=True, _overwrite=False, _verbose=True): """Function to allow batch cell_data merging""" _cell._merge_RNA_to_DNA(_source_cell_data=_source_cell_data, _merge_type=_merge_type, _attr_feature=_attr_feature, _load_in_ram=_load_in_ram, _save_to_file=_save_to_file, _overwrite=_overwrite, _verbose=_verbose) return _cell class Cell_List(): """ Class Cell_List: this is a typical data structure of cells within one chromosome with images in multiple independent color-channels and decoding-groups. 
""" # initialize def __init__(self, parameters, _chosen_fovs=[], _exclude_fovs=[], _load_all_attr=False, _load_reference_info=True, _color_filename='Color_Usage'): if not isinstance(parameters, dict): raise TypeError('wrong input type of parameters, should be a dictionary containing essential info.') ## required parameters: data folder (list) if isinstance(parameters['data_folder'], list): self.data_folder = [str(_fd) for _fd in parameters['data_folder']] else: self.data_folder = [str(parameters['data_folder'])] ## extract hybe folders and field-of-view names self.folders = [] for _fd in self.data_folder: _hyb_fds, _fovs = get_img_info.get_folders(_fd, feature='H', verbose=True) self.folders += _hyb_fds self.fovs = _fovs ## experiment_type, default is DNA if 'experiment_type' in parameters: setattr(self, 'experiment_type', parameters['experiment_type']) else: setattr(self, 'experiment_type', 'DNA') # experiment_folder if 'experiment_folder' in parameters: self.experiment_folder = parameters['experiment_folder'] else: self.experiment_folder = os.path.join(self.data_folder[0], 'Experiment') # experiment type if 'experiment_type' in parameters: self.experiment_type = parameters['experiment_type'] else: self.experiment_type = 'DNA' ## analysis_folder, segmentation_folder, save_folder, correction_folder,map_folder if 'analysis_folder' in parameters: self.analysis_folder = str(parameters['analysis_folder']) else: self.analysis_folder = self.data_folder[0]+os.sep+'Analysis' if 'segmentation_folder' in parameters: self.segmentation_folder = parameters['segmentation_folder'] else: self.segmentation_folder = self.analysis_folder+os.sep+'segmentation' if 'save_folder' in parameters: self.save_folder = parameters['save_folder'] else: self.save_folder = self.analysis_folder+os.sep+'5x10' if 'correction_folder' in parameters: self.correction_folder = parameters['correction_folder'] else: self.correction_folder = _correction_folder if 'drift_folder' in parameters: self.drift_folder = parameters['drift_folder'] else: self.drift_folder = self.analysis_folder+os.sep+'drift' if 'map_folder' in parameters: self.map_folder = parameters['map_folder'] else: self.map_folder = self.analysis_folder+os.sep+'distmap' # number of num_threads if 'num_threads' in parameters: self.num_threads = parameters['num_threads'] else: self.num_threads = int(os.cpu_count() / 4) # default: use one third of cpus. 
# other shared_parameters for imaging processing, etc if "shared_parameters" in parameters: self.shared_parameters = parameters['shared_parameters'] else: self.shared_parameters = {} ## if loading all remaining attr in parameter if _load_all_attr: for _key, _value in parameters.items(): if not hasattr(self, _key): setattr(self, _key, _value) ## list to store Cell_data self.cells = [] # distance from pixel to nm: if 'distance_zxy' not in self.shared_parameters: self.shared_parameters['distance_zxy'] = _distance_zxy if 'sigma_zxy' not in self.shared_parameters: self.shared_parameters['sigma_zxy'] = _sigma_zxy if 'single_im_size' not in self.shared_parameters: self.shared_parameters['single_im_size'] = _image_size if 'num_buffer_frames' not in self.shared_parameters: self.shared_parameters['num_buffer_frames'] = _num_buffer_frames if 'num_empty_frames' not in self.shared_parameters: self.shared_parameters['num_empty_frames'] = _num_empty_frames if 'normalization' not in self.shared_parameters: self.shared_parameters['normalization'] = False if 'corr_bleed' not in self.shared_parameters: self.shared_parameters['corr_bleed'] = True if 'corr_Z_shift' not in self.shared_parameters: self.shared_parameters['corr_Z_shift'] = True if 'corr_hot_pixel' not in self.shared_parameters: self.shared_parameters['corr_hot_pixel'] = True if 'corr_illumination' not in self.shared_parameters: self.shared_parameters['corr_illumination'] = True if 'corr_chromatic' not in self.shared_parameters: self.shared_parameters['corr_chromatic'] = True if 'allowed_kwds' not in self.shared_parameters: self.shared_parameters['allowed_data_types'] = _allowed_kwds ## chosen field of views if len(_chosen_fovs) == 0: # no specification _chosen_fovs = np.arange(len(_fovs)) if len(_chosen_fovs) > 0: # there are specifications _chosen_fovs = [_i for _i in _chosen_fovs if _i <= len(_fovs)] _chosen_fovs = list(np.array(np.unique(_chosen_fovs), dtype=np.int)) # exclude fovs if len(_exclude_fovs) > 0: #exclude any fov: for _i in _exclude_fovs: if _i in _chosen_fovs: _chosen_fovs.pop(_chosen_fovs.index(_i)) # save values to the class self.fov_ids = _chosen_fovs self.chosen_fovs = list(np.array(self.fovs)[np.array(self.fov_ids, dtype=np.int)]) # read color-usage and encodding-scheme if not hasattr(self, 'color_dic') or not hasattr(self, 'channels'): self._load_color_info(_color_filename=_color_filename) # load extra info for DNA / RNA if _load_reference_info: if getattr(self, 'experiment_type') == 'RNA' and not hasattr(self, 'rna-info_dic'): self._load_rna_info() elif getattr(self, 'experiment_type') == 'DNA' and not hasattr(self, 'region_dic'): self._load_genomic_regions() # get annotated folders by color usage self.annotated_folders = [] for _hyb_fd, _info in self.color_dic.items(): _matches = [_fd for _fd in self.folders if _hyb_fd == _fd.split(os.sep)[-1]] if len(_matches)==1: self.annotated_folders.append(_matches[0]) print(f"{len(self.annotated_folders)} folders are found according to color-usage annotation.") # tool for iteration self.index = 0 # allow print info of Cell_List def __str__(self): if hasattr(self, 'data_folder'): print("Data folder:", self.data_folder) if hasattr(self, 'cells'): print("Number of cells in this list:", len(self.cells)) return 'test' # allow iteration of Cell_List def __iter__(self): return self.cells def __next__(self): if not hasattr(self, 'cells') or not not hasattr(self, 'index'): raise StopIteration elif self.index == 0: raise StopIteration else: self.index -= 1 return self.cells[self.index] ## Load 
basic info def _load_color_info(self, _color_filename='Color_Usage', _color_format='csv', _save_color_dic=True): _color_dic, _use_dapi, _channels = get_img_info.Load_Color_Usage(self.analysis_folder, color_filename=_color_filename, color_format=_color_format, return_color=True) # need-based store color_dic if _save_color_dic: self.color_dic = _color_dic # store other info self.use_dapi = _use_dapi self.channels = [str(ch) for ch in _channels] # channel for beads _bead_channel = get_img_info.find_bead_channel(_color_dic) self.bead_channel_index = _bead_channel _dapi_channel = get_img_info.find_dapi_channel(_color_dic) self.dapi_channel_index = _dapi_channel return _color_dic ## load RNA def _load_rna_info(self, _filename='RNA_Info', _table_format='csv', _match_to_genomic_region=True, _verbose=True): """Load RNA information""" _rna_dic = get_img_info.Load_RNA_Info(self.analysis_folder, filename=_filename, table_format=_table_format, verbose=_verbose) if _match_to_genomic_region: _region_dic = self._load_genomic_regions(_verbose=_verbose) _rna_dic = get_img_info.match_RNA_to_DNA(_rna_dic, _region_dic) # set to attribute setattr(self, 'rna-info_dic', _rna_dic) return _rna_dic ## load Gene def _load_gene_info(self, _filename='Gene_Info', _table_format='csv', _match_to_genomic_region=True, _verbose=True): """Load RNA information""" _gene_dic = get_img_info.Load_Gene_Info(self.analysis_folder, filename=_filename, table_format=_table_format, verbose=_verbose) if _match_to_genomic_region: _region_dic = self._load_genomic_regions(_verbose=_verbose) _gene_dic = get_img_info.match_Gene_to_DNA(_gene_dic, _region_dic) # set to attribute setattr(self, 'gene_dic', _gene_dic) return _gene_dic ## load genomic regions def _load_genomic_regions(self, _filename='Region_Positions', _table_format='csv', _verbose=True): """Function to load Genomic Positions etc.""" _region_dic = get_img_info.Load_Region_Positions(self.analysis_folder, filename=_filename, table_format=_table_format, verbose=_verbose) setattr(self, 'region_dic', _region_dic) return _region_dic def _load_encoding_scheme(self, _encoding_filename='Encoding_Scheme', _encoding_format='csv', _save_encoding_scheme=True): _encoding_scheme, self.hyb_per_group, self.reg_per_group, \ self.encoding_colors, self.encoding_group_nums \ = get_img_info.Load_Encoding_Scheme(self.analysis_folder, encoding_filename=_encoding_filename, encoding_format=_encoding_format, return_info=True) # need-based encoding scheme saving if _save_encoding_scheme: self.encoding_scheme = _encoding_scheme return _encoding_scheme ## Pick segmentations info for all fovs def _pick_cell_segmentations(self, _num_threads=None, _allow_manual=True, _min_shape_ratio=0.036, _signal_cap_ratio=0.2, _denoise_window=5, _shrink_percent=13, _max_conv_th=0, _min_boundary_th=0.48, _load_in_ram=True, _save=True, _save_npy=True, _save_postfix='_segmentation', _cell_coord_fl='cell_coords.pkl', _overwrite=False, _verbose=True): ## load segmentation # check attributes if not hasattr(self, 'channels') or not hasattr(self, 'color_dic'): self._load_color_info() if _num_threads is None: if not hasattr(self, 'num_threads'): raise AttributeError('No num_threads given in funtion kwds and class attributes') else: _num_threads = self.num_threads # find the folder name for dapi _select_dapi = False # not select dapi fd yet for _fd, _info in self.color_dic.items(): if len(_info) >= self.dapi_channel_index+1 and _info[self.dapi_channel_index] == 'DAPI': _dapi_fd = [_full_fd for _full_fd in self.annotated_folders if 
os.path.basename(_full_fd) == _fd] if len(_dapi_fd) == 1: if _verbose: print(f"-- choose dapi images from folder: {_dapi_fd[0]}.") _dapi_fd = _dapi_fd[0] _select_dapi = True # successfully selected dapi if not _select_dapi: raise ValueError("No DAPI folder detected in annotated_folders, stop!") # prepare filenames for images to do segmentation if _verbose: print(f"{len(self.chosen_fovs)} of field-of-views are selected to load segmentation.") _chosen_files = [os.path.join(_dapi_fd, _fov) for _fov in self.chosen_fovs] # do segmentation _segmentation_labels, _dapi_ims = visual_tools.DAPI_convoluted_segmentation( _chosen_files, self.channels[self.dapi_channel_index], num_threads=_num_threads, single_im_size=self.shared_parameters['single_im_size'], all_channels=self.channels, num_buffer_frames=self.shared_parameters['num_buffer_frames'], num_empty_frames=self.shared_parameters['num_empty_frames'], correction_folder=self.correction_folder, illumination_correction=self.shared_parameters['corr_illumination'], min_shape_ratio=_min_shape_ratio, signal_cap_ratio=_signal_cap_ratio, denoise_window=_denoise_window, shrink_percent=_shrink_percent, max_conv_th=_max_conv_th, min_boundary_th=_min_boundary_th, make_plot=False, return_images=True, save=_save, save_npy=_save_npy, save_folder=self.segmentation_folder, save_postfix=_save_postfix, force=_overwrite, verbose=_verbose) ## pick(exclude) cells from previous result if _allow_manual: # generate coordinates _coord_list, _index_list = [],[] for _i, _label in enumerate(_segmentation_labels): for _j in range(np.max(_label)): _center = np.round(ndimage.measurements.center_of_mass(_label==_j+1)) _center = list(np.flipud(_center)) _center.append(_dapi_ims[0].shape[0]/2) _coord_list.append(_center) _index_list.append(_i) # wrap into a dic _cell_coord_dic = {'coords': _coord_list, 'class_ids': _index_list, 'pfits':{}, 'dec_text':{}, } self.cell_coord_dic = copy.deepcopy(_cell_coord_dic) # use visual tools to pick _cell_coord_savefile = self.segmentation_folder + os.sep + _cell_coord_fl _cell_viewer = visual_tools.imshow_mark_3d_v2(_dapi_ims, image_names=self.chosen_fovs, save_file=_cell_coord_savefile, given_dic=_cell_coord_dic) return _cell_viewer else: return _segmentation_labels, _dapi_ims def _update_cell_segmentations(self, _cell_coord_fl='cell_coords.pkl', _overwrite_segmentation=False, _marker_displace_th = 50, _append_new=True, _append_radius=100, _overlap_percent=60, _save_npy=True, _save_postfix="_segmentation", _make_plot=True, _return_all=False, _verbose=True): """Function to update cell segmentation info from saved file, - usually do this after automatic segmentation Inputs: _cell_coord_fl: cell coordinate file generated by _pick_cell_segmentations, str _overwrite_segmentation: whether overwrite previous segmentation files, bool (default: True) _marker_displace_th: overall displacement of picked cellcenter to previous ones, int (default:300) _append_new: whether append manually picked spots, bool (default: True) _append_radius: the radii of circled-shape label appended manually, int (default:90) _overlap_percent: percentage of manual labels allowed to overlap with existing labels, float (default:60) _save_npy: whether save .npy file or .pkl file, bool (default: True) _save_postfix: filename postfix for saved segmentation files, str _make_plot: whether make plots for new segmentation labels, bool (default: True) _return_all: whether return all info, bool (default: False) _verbose: say something!, bool (default: True) Outputs: _new_seg_labels, 
_remove_cts, _append_cts""" ## decide save_handle if _save_npy: _file_type = '.npy' else: _file_type = '.pkl' print(f"- Update segmentation information for file type: {_file_type}") ## check saved cell_coord.pkl file, which was generated by _pick_cell_segmentations _cell_coord_savefile = self.segmentation_folder + os.sep + _cell_coord_fl if not os.path.exists(_cell_coord_savefile): raise IOError(f'{_cell_coord_savefile} doesnot exist, exit') # open cell_coord.pkl with open(_cell_coord_savefile, 'rb') as handle: _new_cell_coord_dic = pickle.load(handle) # parse _new_ccd = visual_tools.partition_map(_new_cell_coord_dic['coords'], _new_cell_coord_dic['class_ids']) ## check if cell_coord for automatic file existed, otherwise load if not hasattr(self, 'cell_coord_dic'): # check if all segmentation files exists _segmentation_filenames = [os.path.join(self.segmentation_folder, _fov.replace('.dax', _save_postfix + _file_type)) for _fov in self.chosen_fovs] _missed_segmentation_files = [_fl for _fl in _segmentation_filenames if not os.path.isfile(_fl)] if len(_missed_segmentation_files) > 0: raise IOError(f"Not full segmentation results were found, {_missed_segmentation_files} are missing!") else: # generate coordinates _coord_list, _index_list = [],[] for _i, _label_file in enumerate(_segmentation_filenames): # load segmentation labels _label = np.load(_label_file) # get centers for _j in range(np.max(_label)): _center = np.round(ndimage.measurements.center_of_mass(_label==_j+1)) _center = list(np.flipud(_center)) _center.append(_image_size[0]/2) _coord_list.append(_center) _index_list.append(_i) # wrap into a dic _cell_coord_dic = {'coords': _coord_list, 'class_ids': _index_list, 'pfits':{}, 'dec_text':{}, } # save to cell-list self.cell_coord_dic = _cell_coord_dic # parse _ccd = visual_tools.partition_map(self.cell_coord_dic['coords'], self.cell_coord_dic['class_ids']) # initialize _new_seg_labels, _dapi_ims = [], [] _remove_cts, _append_cts = [], [] for _i, (_cell_coords, _new_cell_coords) in enumerate(zip(_ccd, _new_ccd)): # now we are taking care of one specific field of view if _verbose: print(f"-- fov-{_i}, match manually picked cell with sgementation ") # load fov image _seg_file = os.path.join(self.segmentation_folder, self.chosen_fovs[_i].replace('.dax', _save_postfix+_file_type)) if _save_npy: _seg_label = np.load(_seg_file) if not _overwrite_segmentation: # save original seg label into another file _old_seg_folder = os.path.join(os.path.dirname(_seg_file), 'old') if not os.path.exists(_old_seg_folder): os.makedirs(_old_seg_folder) _old_seg_file = os.path.join(os.path.dirname(_seg_file), 'old', os.path.basename(_seg_file).replace(_save_postfix+_file_type, _save_postfix)) # notice: _file_type .npy was not added to _old_seg_file because np.save automatically adds postfix np.save(_old_seg_file, _seg_label) else: _seg_label, _dapi_im = pickle.load(open(_seg_file, 'rb')) if not _overwrite_segmentation: # save original seg label into another file _old_seg_file = _seg_file.replace(_save_postfix+_file_type, _save_postfix+'_old'+_file_type) pickle.dump([_seg_label, _dapi_im], open(_old_seg_file, 'wb')) # keep record of removed labels _remove = 0 # keep cells in original segmentation with markers for _l, _coord in enumerate(_cell_coords): _dist = [np.sum((_c-_coord)**2) for _c in _new_cell_coords] _match = [_d < _marker_displace_th for _d in _dist] if sum(_match) == 0: _seg_label[_seg_label==_l+1-_remove] = -1 _seg_label[_seg_label >_l+1-_remove] -= 1 _remove += 1 if _append_new: _append = 0 if 
_verbose: print(f"-- Appending manually added markers with radius={_append_radius}") # local function used to add a new marker to label def _add_round_marker(_label, _center, _radius, _overlap_percent=60, overwrite_marker=False): """Function to add round-marker with given center and radius""" if len(_label.shape) != len(_center): raise ValueError( "Dimension of label and center doesn't match") # convert format _center = np.array(_center, dtype=np.int) _radius = np.int(_radius) # generate mask _shape_lst = (list(range(_label.shape[i])) for i in range(len(_label.shape))) _coord_lst = np.meshgrid(*_shape_lst, indexing='ij') _dist = np.sqrt(np.sum(np.stack( [(_coords - _ct)**2 for _coords, _ct in zip(_coord_lst, _center)]), axis=0)) _new_mask = np.array(_dist <= _radius, dtype=np.int) if not overwrite_marker: _new_mask *= np.array(_label <= 0, dtype=np.int) # check overlap percentage of new mask to previous ones _overlap = np.array(_new_mask * (_label > 0), dtype=np.int) if np.float(np.sum(_overlap)) / np.sum(_new_mask) > _overlap_percent / 100.0: print(np.float(np.sum(_overlap)) / np.sum(_new_mask)) return _label else: # create new label _new_label = _label.copy() _new_label[_new_mask > 0] = int(np.max(_label))+1 return _new_label for _l, _new_coord in enumerate(_new_cell_coords): _dist = [np.sum((_c-_new_coord)**2) for _c in _cell_coords] _match = [_d < _marker_displace_th for _d in _dist] if sum(_match) == 0: if _verbose: print(f"--- adding manually picked new label in {_i}, label={np.max(_seg_label)+1} ") _seg_label = _add_round_marker(_seg_label, np.flipud(_new_coord)[-len(_seg_label.shape):], _append_radius, _overlap_percent=_overlap_percent) _append += 1 _append_cts.append(_append) if _verbose: print(f"--- {_remove} label(s) got removed!") _new_seg_labels.append(_seg_label) #_dapi_ims.append(_dapi_im) _remove_cts.append(_remove) if _make_plot: plt.figure() plt.imshow(_seg_label) plt.colorbar() plt.title(f"Updated segmentation: {os.path.basename(_seg_file)}") plt.show() # save if _verbose: print(f"--- save updated segmentation to {os.path.basename(_seg_file)}") if _save_npy: np.save(_seg_file.replace(_save_postfix+_file_type, _save_postfix), _seg_label) else: pickle.dump([_seg_label, _dapi_im], open(_seg_file, 'wb')) #return _new_seg_labels, _dapi_ims, _remove_cts, _append_cts if _return_all: return _new_seg_labels, _remove_cts, _append_cts else: # just return numbers of removed and append cells return _remove_cts, _append_cts ## translate from a previous segmentation def _translate_old_segmentations(self, old_segmentation_folder, old_dapi_folder, rotation_mat, _old_correction_folder=_correction_folder, _new_correction_folder=_correction_folder, _num_threads=12, _fft_gb=0, _fft_max_disp=200, _save=True, _save_postfix='_segmentation', _save_npy=True, _return_all=False, _force=False, _verbose=True): """Function to translate segmenation from a previous experiment given old_segmentation_folder and rotation matrix""" # number of threads if hasattr(self, 'num_threads'): _num_threads = max(_num_threads, self.num_threads) # decide filetype if _save_npy: _file_postfix = '.npy' else: _file_postfix = '.pkl' if _verbose: print( f"+ Start translating {_file_postfix} segmentation labels from folder:{old_segmentation_folder}") # find old segmentation files if not os.path.isdir(old_segmentation_folder): raise IOError( f"old_segmentation_folder:{old_segmentation_folder} doesn't exist, exit!") old_seg_filenames = glob.glob(os.path.join( old_segmentation_folder, '*' + _file_postfix)) # find old_dapi_folder 
if not os.path.isdir(old_dapi_folder): raise IOError( f"old_dapi_folder:{old_dapi_folder} doesn't exist, exit!") # create new segmentation folder if necessary if not os.path.exists(self.segmentation_folder): os.makedirs(self.segmentation_folder) # find the folder name for dapi _select_dapi = False # not select dapi fd yet for _fd, _info in self.color_dic.items(): if len(_info) >= self.dapi_channel_index+1 and _info[self.dapi_channel_index] == 'DAPI': _dapi_fd = [_full_fd for _full_fd in self.annotated_folders if os.path.basename( _full_fd) == _fd] if len(_dapi_fd) == 1: if _verbose: print(f"-- choose dapi images from folder: {_dapi_fd[0]}.") _dapi_fd = _dapi_fd[0] _select_dapi = True # successfully selected dapi if not _select_dapi: raise ValueError("No DAPI folder detected in annotated_folders, stop!") # translate segmentation file _seg_args, _seg_fls = [], [] # list for multi-processing _new_filenames, _new_labels, _dapi_ims = [], [], [] # list for final results for _old_fl in old_seg_filenames: _new_fl = os.path.join(self.segmentation_folder, os.path.basename(_old_fl)) _dapi_im_name = os.path.basename(_old_fl).replace( _save_postfix+_file_postfix, '.dax') # translate new segmentation if it doesn't exists or force to generate new ones if _force or not os.path.exists(_new_fl): if _verbose: print(f"++ prepare translating segmentation label:{_old_fl}") # prepare args for multi-processing _arg = (_old_fl, os.path.join(old_dapi_folder, _dapi_im_name), os.path.join(_dapi_fd, _dapi_im_name), rotation_mat, None, '405', self.channels, self.shared_parameters['num_buffer_frames'], self.shared_parameters['num_empty_frames'], _old_correction_folder, _new_correction_folder, _fft_gb, _fft_max_disp, _return_all, _verbose) _seg_args.append(_arg) _seg_fls.append(_new_fl) else: if _verbose: print(f"++ directly loading segmentation label:{_new_fl}") if _save_npy: _new_label = np.load(_new_fl) if _return_all: _dapi_im = corrections.correct_single_image(os.path.join( _dapi_fd, _dapi_im_name), self.channels[self.dapi_channel_index], correction_folder=self.correction_folder, single_im_size=self.shared_parameters['single_im_size'], all_channels=self.channels, num_buffer_frames=self.shared_parameters['num_buffer_frames'], num_empty_frames=self.shared_parameters['num_empty_frames'], ) else: _new_label, _dapi_im = pickle.load(open(_new_fl, 'rb')) _new_labels.append(_new_label) _dapi_ims.append(_dapi_im) ## multi-processing for translating segmentation with mp.Pool(_num_threads,) as _seg_pool: if _verbose: print(f"+ Start multi-processing of translate_segmentation for {len(_seg_args)} fovs!") # Multi-proessing! 
_seg_result = _seg_pool.starmap(visual_tools.translate_segmentation, _seg_args, chunksize=1) # close multiprocessing _seg_pool.close() _seg_pool.join() _seg_pool.terminate() # clear batch_functions.killchild() del(_seg_args) # extract result _new_filenames += _seg_fls # filenames if _return_all: _new_labels += [_r[0] for _r in _seg_result] # segmentation_label _dapi_ims += [_r[1] for _r in _seg_result] # dapi_im else: _new_labels += _seg_result # segmentation_label only ## save if _save: if _verbose: print(f"++ saving segmentation result to file:{_new_fl}") if _save_npy or not _return_all: [np.save(_new_fl.replace('.npy', ''), _new_label) for _new_fl, _new_label in zip(_new_filenames, _new_labels)] else: [pickle.dump([_new_label, _dapi_im], open(_new_fl, 'wb')) for _new_fl, _new_label in zip(_new_filenames, _new_labels)] # return if _return_all: return _new_labels, _dapi_ims else: return True def _create_cell(self, _parameter, _load_info=True, _color_filename='Color_Usage', _load_segmentation=True, _load_drift=True, _drift_size=500, _drift_ref=0, _drift_postfix='_sequential_current_cor.pkl', _dynamic=True, _load_cell=True, _exclude_attrs=[], _save=False, _append_cell_list=False, _verbose=True): """Function to create one cell_data object""" if _verbose: print(f"+ creating cell for fov:{_parameter['fov_id']}, cell:{_parameter['cell_id']}") _cell = Cell_Data(_parameter, _load_all_attr=True, _load_reference_info=False) if _load_info: if not hasattr(_cell, 'color_dic') or not hasattr(_cell, 'channels'): _cell._load_color_info(_color_filename=_color_filename) # load segmentation if _load_segmentation and (not hasattr(_cell, 'segmentation_label') or not hasattr(_cell, 'segmentation_crop')): _cell._load_segmentation(_load_in_ram=True) # load drift v if _load_drift and not _cell._check_drift(_verbose=False): _cell._load_drift(_num_threads=self.num_threads, _size=_drift_size, _ref_id=_drift_ref, _drift_postfix=_drift_postfix, _dynamic=_dynamic, _force=False, _verbose=_verbose) # load cell_info if _load_cell and os.path.exists(os.path.join(_cell.save_folder, 'cell_info.pkl')): _cell._load_from_file('cell_info', _exclude_attrs=_exclude_attrs, _overwrite=False, _verbose=_verbose) if _save: _cell._save_to_file('cell_info') # whether directly store if _append_cell_list: self.cells.append(_cell) return _cell def _create_cells_fov(self, _fov_ids, _num_threads=None, _sequential_mode=False, _plot_segmentation=True, _load_segmentation=True, _load_exist_info=True, _exclude_attrs=[], _color_filename='Color_Usage', _load_annotated_only=True, _drift_size=500, _drift_ref=0, _drift_postfix='_current_cor.pkl', _coord_sel=None, _dynamic=True, _save=False, _force_drift=False, _stringent=True, _verbose=True): """Create Cele_data objects for one field of view""" if not _num_threads: _num_threads = int(self.num_threads) if isinstance(_fov_ids, int): _fov_ids = [_fov_ids] for _fov_id in _fov_ids: if _fov_id not in self.fov_ids: raise ValueError("Wrong fov_id kwd given! 
\ this should be a real fov-number that was allowed during initiation of the class.") if _verbose: print(f"+ Create Cell_Data objects for field of view: {_fov_ids}") print("++ preparing variables") # whether load annotated hybs only if _load_annotated_only: _folders = self.annotated_folders else: _folders = self.folders # check attributes if not hasattr(self, 'channels') or not hasattr(self, 'color_dic'): self._load_color_info(_color_filename=_color_filename) # find the folder name for dapi _select_dapi = False # not select dapi fd yet for _fd, _info in self.color_dic.items(): if len(_info) >= self.dapi_channel_index+1 and _info[self.dapi_channel_index] == 'DAPI': _dapi_fd = [_full_fd for _full_fd in _folders if os.path.basename(_full_fd) == _fd] if len(_dapi_fd) == 1: if _verbose: print(f"++ choose dapi images from folder: {_dapi_fd[0]}.") _dapi_fd = _dapi_fd[0] _select_dapi = True # successfully selected dapi if not _select_dapi: raise ValueError("No DAPI folder detected in annotated_folders, stop!") ## load segmentation for this fov _args = [] for _fov_id in _fov_ids: if _verbose: print("+ Load segmentation for fov", _fov_id) # do segmentation if necessary, or just load existing segmentation file _fov_segmentation_labels = visual_tools.DAPI_convoluted_segmentation( os.path.join(_dapi_fd, self.fovs[_fov_id]), self.channels[self.dapi_channel_index], single_im_size=self.shared_parameters['single_im_size'], all_channels=self.channels, num_buffer_frames=self.shared_parameters['num_buffer_frames'], num_empty_frames=self.shared_parameters['num_empty_frames'], illumination_correction=self.shared_parameters['corr_illumination'], correction_folder=self.correction_folder, num_threads=_num_threads, make_plot=_plot_segmentation, return_images=False, save=_save, save_npy=True, save_folder=self.segmentation_folder, force=False,verbose=_verbose) # extract result segmentation and image _fov_segmentation_label = _fov_segmentation_labels[0] # make plot if necessary if _plot_segmentation: plt.figure() plt.imshow(_fov_segmentation_label) plt.colorbar() plt.title(f"Segmentation result for fov:{self.fovs[_fov_id]}") plt.show() # check whether can directly load drift _direct_load_drift = False _drift_filename = os.path.join(self.drift_folder, self.fovs[_fov_id].replace('.dax', _drift_postfix)) if os.path.isfile(_drift_filename): _drift = pickle.load(open(_drift_filename, 'rb')) _exist = [os.path.join(os.path.basename(_fd),self.fovs[_fov_id]) for _fd in _folders \ if os.path.join(os.path.basename(_fd),self.fovs[_fov_id]) in _drift] if len(_exist) == len(self.annotated_folders): _direct_load_drift = True if not _direct_load_drift: if _verbose: print(f"+ Generate drift correction profile for fov:{self.fovs[_fov_id]}") _drift, _failed_count = corrections.Calculate_Bead_Drift(_folders, self.fovs, _fov_id, num_threads=_num_threads, sequential_mode=_sequential_mode, single_im_size=self.shared_parameters['single_im_size'], all_channels=self.channels, num_buffer_frames=self.shared_parameters['num_buffer_frames'], num_empty_frames=self.shared_parameters['num_empty_frames'], illumination_corr=self.shared_parameters['corr_illumination'], correction_folder=self.correction_folder, ref_id=_drift_ref, drift_size=_drift_size, save_postfix=_drift_postfix, coord_sel=_coord_sel, stringent=_stringent, ref_seed_per=90, overwrite=_force_drift, verbose=_verbose) # create cells in parallel _cell_ids = np.array(np.unique(_fov_segmentation_label[_fov_segmentation_label>0])-1, dtype=int) if _verbose: print(f"+ Create cell_data objects,
num_of_cell:{len(_cell_ids)}") _params = [{'fov_id': _fov_id, 'cell_id': _cell_id, 'folders': self.folders, 'fovs': self.fovs, 'data_folder': self.data_folder, 'color_dic': self.color_dic, 'use_dapi': self.use_dapi, 'channels': self.channels, 'bead_channel_index': self.bead_channel_index, 'dapi_channel_index': self.dapi_channel_index, 'annotated_folders': self.annotated_folders, 'experiment_folder': self.experiment_folder, 'analysis_folder':self.analysis_folder, 'save_folder': self.save_folder, 'segmentation_folder': self.segmentation_folder, 'correction_folder': self.correction_folder, 'drift_folder': self.drift_folder, 'map_folder': self.map_folder, 'shared_parameters': self.shared_parameters, 'experiment_type': self.experiment_type, } for _cell_id in _cell_ids] if not _direct_load_drift: for _p in _params: _p['drift'] = _drift if self.experiment_type == 'RNA': for _p in _params: _p['rna-info_dic'] = getattr(self, 'rna-info_dic') _args += [(_p, True, _color_filename, _load_segmentation, _direct_load_drift, _drift_size, _drift_ref, _drift_postfix, _dynamic, _load_exist_info, _exclude_attrs, _save, False, _verbose) for _p in _params] del(_fov_segmentation_label, _params, _cell_ids) ## do multi-processing to create cells! if _verbose: print(f"+ Creating {len(_args)} cells with {_num_threads} threads.") _cell_pool = mp.Pool(_num_threads) _cells = _cell_pool.starmap(self._create_cell, _args, chunksize=1) _cell_pool.close() _cell_pool.terminate() _cell_pool.join() # clear batch_functions.killchild() del(_args, _cell_pool) # load self.cells += _cells ## If not directly load drift, do them here: for _cell in self.cells: if not hasattr(_cell, 'drift'): _cell._load_drift(_num_threads=self.num_threads, _size=_drift_size, _ref_id=_drift_ref, _drift_postfix=_drift_postfix,_load_annotated_only=_load_annotated_only, _sequential_mode=_sequential_mode, _force=_force_drift, _dynamic=_dynamic, _verbose=_verbose) if _save: _cell._save_to_file('cell_info', _verbose=_verbose) # function to do cropping def _crop_image_for_cells(self, _data_type='unique', _load_in_ram=False, _load_annotated_only=True, _extend_dim=20, _corr_drift=True, _save=True, _force=False, _overwrite_cell_info=False, _verbose=True): """Load images for all cells in this cell_list Inputs: _data_type: loading type for this """ ## check inputs # check whether cells and segmentation,drift info exists if _verbose: print (f"+ Load images for {len(self.cells)} cells in this cell list") if not hasattr(self, 'cells'): raise ValueError("No cells loaded in cell_list") if len(self.cells) == 0: print("cell_list is empty, exit.") # check type _data_type = _data_type.lower() if _data_type not in self.shared_parameters['allowed_data_types']: raise ValueError(f"Wrong _data_type kwd, {_data_type} is given, {self.shared_parameters['allowed_data_types']} are expected") # whether load annotated hybs only if _load_annotated_only: _folders = self.annotated_folders else: _folders = self.folders ## Start to generate temp_files # collect field of views _used_fov_ids = [] for _cell in self.cells: if _cell.fov_id not in _used_fov_ids: _used_fov_ids.append(_cell.fov_id) if _data_type in self.shared_parameters['allowed_data_types']: if _verbose: print(f"+ generating unique images for field-of-view:{_used_fov_ids}") for _fov_id in _used_fov_ids: _fov_cells = [_cell for _cell in self.cells if _cell.fov_id==_fov_id] for _cell in _fov_cells: # if not all unique exists for this cell: if not _cell._check_full_set(_data_type) or _force: if _verbose: print(f"+ Crop unique images 
for fov:{_cell.fov_id}, cell:{_cell.cell_id}") _cell._crop_images(_data_type, _num_threads=self.num_threads, _load_in_ram=_load_in_ram, _extend_dim=_extend_dim, _save=_save, _overwrite=_force, _overwrite_cell_info=_overwrite_cell_info, _verbose=_verbose) else: if _verbose: print(f"+ unique info exists for fov:{_cell.fov_id}, cell:{_cell.cell_id}, skip") def _crop_image_by_fov(self, _data_type='unique', _num_threads=None, _load_in_ram=False, _load_annotated_only=True, _sel_folders=None, _extend_dim=20, _corr_drift=True, _shift_order=1, _save=True, _force=False, _overwrite_cell_info=False, _verbose=True): """Function to crop image for the whole cell_list by field-of-view Inputs: Outputs: """ ## check inputs from ..corrections import multi_correct_one_dax # check whether cells and segmentation,drift info exists if _verbose: print (f"+ Load images for {len(self.cells)} cells in this cell list") if not hasattr(self, 'cells'): raise ValueError("No cells loaded in cell_list") if len(self.cells) == 0: print("+ cell_list is empty, exit.") return # check type _data_type = _data_type.lower() if _data_type not in self.shared_parameters['allowed_data_types']: raise ValueError(f"Wrong _data_type kwd, {_data_type} is given, {self.shared_parameters['allowed_data_types']} are expected") else: # generate attribute names _im_attr = _data_type + '_' + 'ims' _id_attr = _data_type + '_' + 'ids' _channel_attr = _data_type + '_' + 'channels' # whether load annotated hybs only if _load_annotated_only: _folders = self.annotated_folders else: _folders = self.folders # if specified selected folders if _sel_folders is not None: if not isinstance(_sel_folders, list): raise TypeError(f"_sel_folders should be a list but {type(_sel_folders)} is given.") # collect field of views _used_fov_ids = [] for _cell in self.cells: if _cell.fov_id not in _used_fov_ids: _used_fov_ids.append(_cell.fov_id) ## Start to loop through field-of-view if _verbose: print(f"++ generating unique images for field-of-view:{_used_fov_ids}") _fov_start = time.time() for _fov_id in _used_fov_ids: # load data_type data for this fov self._load_cells_from_files(_data_type=_data_type, _sel_fovs=[_fov_id]) # first get cells _fov_cells = [_cell for _cell in self.cells if _cell.fov_id==_fov_id] print('fov', _fov_id, len(_fov_cells)) _fov_name = self.fovs[_fov_cells[0].fov_id] # get segmentation crops for cells for _cell in _fov_cells: if not hasattr(_cell, 'segmentation_crop'): # check segmentation crops _cell._load_segmentation() _crops = [_cell.segmentation_crop for _cell in _fov_cells] # get corresponding ids for fov_cells _reg_id_list = [] for _cell in _fov_cells: _reg_id_list.append(getattr(_cell, _id_attr, [])) # for all corresponding folders and colors, pick the ones to be corrected _corr_colors = [] _corr_ids = [] _corr_folders = [] for _fd in _folders: # skip if sel_folders not including this folder if _sel_folders is not None and _fd not in _sel_folders: continue # loop through colors _base_fd = os.path.basename(_fd) _colors_to_process = [] _ids_to_process = [] for _c, _info in zip(self.channels, self.color_dic[_base_fd]): if self.shared_parameters['allowed_data_types'][_data_type] in _info: _rid = int( _info.split(self.shared_parameters['allowed_data_types'][_data_type])[1] ) # check whether this exist or not in all cells, if not exists in all, append _exist_in_cells = [_rid in _ids for _ids in _reg_id_list] if np.sum(_exist_in_cells) < len(_exist_in_cells): # not exist in all cells _colors_to_process.append(_c) _ids_to_process.append(_rid) # 
after loop through colors, if there's any color to process, append if len(_colors_to_process) > 0: _corr_folders.append(_fd) _corr_colors.append(_colors_to_process) _corr_ids.append(_ids_to_process) # start multi-processing _corr_args = [] for _fd, _colors in zip(_corr_folders, _corr_colors): _dft = _fov_cells[0].drift _dft_q = os.path.join(os.path.basename(_fd), _fov_name) if _dft_q not in _dft: raise KeyError(f"drift for {_dft_q} doesn't exist in fov:{_fov_cells[0].fov_id}") _corr_args.append( (os.path.join(_fd, _fov_name), _colors, _crops, None, _extend_dim, self.shared_parameters['single_im_size'],self.channels, self.shared_parameters['num_buffer_frames'], self.shared_parameters['num_empty_frames'], _dft[_dft_q], _shift_order, None, self.correction_folder, self.shared_parameters['normalization'], self.shared_parameters['corr_bleed'], self.shared_parameters['corr_Z_shift'], self.shared_parameters['corr_hot_pixel'], self.shared_parameters['corr_illumination'], self.shared_parameters['corr_chromatic'], False, _verbose,) ) if _num_threads is None: _num_threads = self.num_threads with mp.Pool(_num_threads) as _corr_pool: if _verbose: print(f"++ start multi-processing with {_num_threads} threads for {len(_fov_cells)} cells in fov:{_fov_cells[0].fov_id}") print(f"++ number of jobs: {len(_corr_args)}") _start_time = time.time() _cropped_ims_for_cells = _corr_pool.starmap(multi_correct_one_dax, _corr_args, chunksize=1) _corr_pool.close() _corr_pool.join() _corr_pool.terminate() if _verbose: print(f"+++ time spent in multiprocessing cropping:{time.time()-_start_time}s") print('result length:', len(_cropped_ims_for_cells)) # summarize for each of the cells if _verbose: print(f"++ summarize cropped {_data_type} images for cells in fov:{_fov_id}") _summ_start = time.time() for _cell_id, _cell in enumerate(_fov_cells): # initialize _cell_ims = getattr(_fov_cells[_cell_id], _im_attr, []) _cell_ids = list(getattr(_fov_cells[_cell_id], _id_attr, [])) _cell_channels = getattr(_fov_cells[_cell_id], _channel_attr, []) # loop through result and append for _im_cells_list, _id_list, _color_list in zip(_cropped_ims_for_cells, _corr_ids, _corr_colors): _im_list = _im_cells_list[_cell_id] # loop through multiple colors for _im, _id, _color in zip(_im_list, _id_list, _color_list): # case1, completely new id, append if _id not in list(_cell_ids): _cell_ims.append(_im) _cell_ids.append(_id) _cell_channels.append(_color) print("append_id", _id) # case2, exist and not force, skip elif _id in list(_cell_ids) and not _force: print("skip id", _id) continue # case3, exist and force, overwrite else: _index = list(_cell_ids).index(_id) _cell_ims[_index] = _im _cell_channels[_index] = _color print("replace_id", _id) print('cell_size', len(_cell_ids), len(_cell_ims), len(_cell_channels)) # sort _tp = [(_id, _im, _ch) for _id, _im, _ch in sorted(zip(_cell_ids, _cell_ims, _cell_channels))] _sorted_ids = [_t[0] for _t in _tp] _sorted_ims = [_t[1] for _t in _tp] _sorted_channels = [_t[2] for _t in _tp] print('size', len(_sorted_ids), len(_sorted_ims), len(_sorted_channels)) # append to attributes setattr(_fov_cells[_cell_id], _im_attr, _sorted_ims) setattr(_fov_cells[_cell_id], _id_attr, _sorted_ids) setattr(_fov_cells[_cell_id], _channel_attr, _sorted_channels) # replace the one in cell_list for _old_id, _old_cell in enumerate(self.cells): if _old_cell.fov_id == _fov_cells[_cell_id].fov_id \ and _old_cell.cell_id == _fov_cells[_cell_id].cell_id: # update cell_list self.cells[_old_id] = _fov_cells[_cell_id] if _verbose: 
print(f"+++ time spent in summarizing: {time.time()-_summ_start}s") # save to file if specified: if _save: if _verbose: print(f"++ save result to {_data_type} file and cell_info:") _save_start = time.time() self._save_cells_to_files(_data_type=_data_type, _sel_fovs=[_fov_id], _overwrite=_force, _verbose=_verbose) self._save_cells_to_files('cell_info', _sel_fovs=[_fov_id], _overwrite=_overwrite_cell_info, _verbose=_verbose) if _verbose: print(f"+++ time spent in saving:{time.time()-_save_start}s") # remove if load_in_ram is false if not _load_in_ram: for _cell in _fov_cells: delattr(_cell, _im_attr) if _verbose: print(f"++ time spent for fov:{_fov_id}: {time.time()-_fov_start}s") # load processed cell info/unique/decoded/merfish from files def _load_cells_from_files(self, _data_type='cell_info', _num_threads=None, _sel_fovs=None, _save_folder=None, _decoded_flag=None, _distmap_data='unique', _distmap_pick='EM', _load_attrs=[], _exclude_attrs=[], _overwrite=False, _verbose=True): """Function to load cells from existing files""" if _num_threads is None: _num_threads = getattr(self, 'num_threads') if _verbose: print(f"+ Load {_data_type} for cells from existing files.") if not hasattr(self, 'cells') or len(self.cells) == 0: raise ValueError( 'No cell information provided, should create cells first!') # check fov_id input _loading_args = [] # prepare args for _cell in self.cells: if _sel_fovs is None or (_sel_fovs is not None and _cell.fov_id in _sel_fovs): _loading_args.append((_cell, _data_type, _save_folder, _decoded_flag, _distmap_data, _distmap_pick, _load_attrs, _exclude_attrs, _overwrite, _verbose)) if _verbose: print(f"++ {len(_loading_args)} of {_data_type} loading jobs planned.") _start_time = time.time() # load info by multi-processing! with mp.Pool(_num_threads) as _loading_pool: # Multi-processing! _updated_cells = _loading_pool.starmap(_load_cell_in_batch, _loading_args, chunksize=1) # close multiprocessing _loading_pool.close() _loading_pool.join() _loading_pool.terminate() # update _updated_cell_infos = [(_cell.fov_id, _cell.cell_id) for _cell in _updated_cells] for _cid, _cell in enumerate(self.cells): if (_cell.fov_id, _cell.cell_id) in _updated_cell_infos: self.cells[_cid] = _updated_cells[_updated_cell_infos.index((_cell.fov_id, _cell.cell_id))] if _verbose: print(f"+++ time spent in loading: {time.time()-_start_time}s") # save processed cell info/unique/decoded/merfish to files def _save_cells_to_files(self, _data_type='cell_info', _num_threads=None, _sel_fovs=None, _save_folder=None, _save_list=[], _unsaved_attrs=None, _clear_old_attrs=False, _overwrite=False, _verbose=True): """Function to save cells to files""" if _num_threads is None: _num_threads = getattr(self, 'num_threads') if _verbose: print(f"+ Save {_data_type} for cells to files.") if not hasattr(self, 'cells') or len(self.cells) == 0: raise ValueError( 'No cell information provided, should create cells first!') # check fov_id input _saving_args = [] # prepare args for _cell in self.cells: if _sel_fovs is None or (_sel_fovs is not None and _cell.fov_id in _sel_fovs): # generate a temp save_dic _save_dic = {_k: getattr(_cell, _k) for _k in _save_list if hasattr(_cell, _k)} # append save_arg _saving_args.append((_cell, _data_type, _save_dic, _save_folder, _unsaved_attrs, _clear_old_attrs, _overwrite, _verbose)) if _verbose: print(f"++ {len(_saving_args)} of {_data_type} saving jobs submitted to {_num_threads} threads.") # save info by multi-processing!
with mp.Pool(_num_threads) as _saving_pool: # Multi-proessing! _updated_cells = _saving_pool.starmap(_save_cell_in_batch, _saving_args, chunksize=1) # close multiprocessing _saving_pool.close() _saving_pool.join() _saving_pool.terminate() # generate chromosome coordinates def _get_chromosomes_for_cells(self, _source='unique', _max_count= 90, _gaussian_size=2, _cap_percentile=1, _seed_dim=3, _th_percentile=99.5, _min_obj_size=125, _coord_filename='chrom_coords.pkl', _overwrite=False, _verbose=True): """Function to generate chromosome and chromosome coordinates, open a picker to correct for it Inputs: _source: image source to generate chromosome image, combo requires "combo_gorups", unique requires 'unique_ims', 'combo'/'unique' (default: 'combo') _max_count: maximum image count to generate chromosome profile, int (default:30) Outputs: _chrom_viewer: chromosome viewer object, used to click""" # check attribute if not hasattr(self, 'cells') or len(self.cells) == 0: raise ValueError('No cells are generated in this cell list!') if _verbose: print("+ Generate chromosomes for cells.") # chromsome savefile # _fov_ids = [_cell.fov_id for _cell in self.cells] _fov_ids = np.unique(_fov_ids) _filename = '_'+str(min(_fov_ids)) + '-' + str(max(_fov_ids))+'.pkl' _chrom_savefile = os.path.join(self.save_folder, _coord_filename.replace('.pkl', _filename)) # loop through cells to generate chromosome _chrom_ims = [] _chrom_dims = [] _coord_dic = {'coords': [], 'class_ids': [], 'pfits':{}, 'dec_text':{}, } # initialize _coord_dic for picking for _i, _cell in enumerate(self.cells): # first try to load chrom_im if not exist right now if not hasattr(_cell, 'chrom_im') and not _overwrite: _cell._load_from_file('cell_info', _load_attrs=['chrom_im'], _verbose=_verbose) # directly use chrom_im in cell_data if hasattr(_cell, 'chrom_im') and not _overwrite: _cim = _cell.chrom_im # else create a new chrom_im else: _cim = _cell._generate_chromosome_image(_source=_source, _max_count=_max_count, _verbose=_verbose) _cell.chrom_im = _cim _chrom_ims.append(_cim) _chrom_dims.append(np.array(np.shape(_cim))) if not hasattr(_cell, 'chrom_coords') and not _overwrite: _cell._load_from_file('cell_info', _load_attrs=['chrom_coords'], _verbose=_verbose) # directly use chrom_coords in cell_data if hasattr(_cell, 'chrom_coords') and not _overwrite: _chrom_coords = _cell.chrom_coords # else try to generate automatically else: _chrom_coords = _cell._identify_chromosomes(_gaussian_size=_gaussian_size, _cap_percentile=_cap_percentile, _seed_dim=_seed_dim, _th_percentile=_th_percentile, _min_obj_size=_min_obj_size,_verbose=_verbose) # build chrom_coord_dic _coord_dic['coords'] += [np.flipud(_coord) for _coord in _chrom_coords] _coord_dic['class_ids'] += list(np.ones(len(_chrom_coords),dtype=np.uint8)*int(_i)) # create existing coord_dic file if _verbose: print("++ dumping existing info to file:", _chrom_savefile) pickle.dump(_coord_dic, open(_chrom_savefile, 'wb')) # convert to the same dimension _max_dim = np.max(np.concatenate([_d[np.newaxis,:] for _d in _chrom_dims]), axis=0) if _verbose: print("Maximum dimension for these images:", _max_dim) _converted_ims = [np.ones(_max_dim) * np.min(_cim) for _cim in _chrom_ims] for _im, _d, _cim in zip(_converted_ims, _chrom_dims, _chrom_ims): _im[:_d[0], :_d[1],:_d[2]] = _cim _chrom_viewer = visual_tools.imshow_mark_3d_v2(_converted_ims, image_names=[f"fov:{_cell.fov_id}, cell:{_cell.cell_id}" for _cell in self.cells], save_file=_chrom_savefile) _chrom_viewer.load_coords() return _chrom_viewer def 
_update_chromosomes_for_cells(self, _coord_filename='chrom_coords.pkl', _force_save_to_combo=False, _save=True, _verbose=True): # check attribute if not hasattr(self, 'cells') or len(self.cells) == 0: raise ValueError('No cells are generated in this cell list!') if _verbose: print("+ Update manually picked chromosomes to cells") # chromosome savefile # _fov_ids = [_cell.fov_id for _cell in self.cells] _fov_ids = np.unique(_fov_ids) _filename = '_'+str(min(_fov_ids)) + '-' + str(max(_fov_ids))+'.pkl' _chrom_savefile = os.path.join( self.save_folder, _coord_filename.replace('.pkl', _filename)) # load from chrom-coord and partition it _coord_dic = pickle.load(open(_chrom_savefile, 'rb')) _coord_list = visual_tools.partition_map(_coord_dic['coords'], _coord_dic['class_ids'], enumerate_all=True) if len(_coord_list) > len(self.cells): raise ValueError(f'Number of cells does not match between cell-list and {_chrom_savefile}') elif len(_coord_list) < len(self.cells): print("++ fewer picked chromosome sets discovered than number of cells, append with empty lists.") for _i in range(len(self.cells) - len(_coord_list)): _coord_list.append([]) # save to attribute first for _cell, _coords in zip(self.cells, _coord_list): _chrom_coords = [np.flipud(_coord) for _coord in _coords] _cell.chrom_coords = _chrom_coords if _verbose: print(f"++ matching {len(_chrom_coords)} chromosomes for fov:{_cell.fov_id}, cell:{_cell.cell_id}") # then update files if specified if _save: _cell._save_to_file('cell_info', _save_dic={'chrom_coords': _cell.chrom_coords, 'chrom_im':_cell.chrom_im,}, _verbose=_verbose) if hasattr(_cell, 'combo_groups') or _force_save_to_combo: if _cell._check_full_set('combo'): if not hasattr(_cell, 'combo_groups'): _cell._load_from_file('combo', _verbose=_verbose) _load_mk = True else: _load_mk = False _cell._save_to_file('combo', _overwrite=True, _verbose=_verbose) # remove temporarily loaded combo_groups if _load_mk: delattr(_cell, 'combo_groups') else: if _verbose: print(f"++ Combo info not complete for fov:{_cell.fov_id}, cell:{_cell.cell_id}, skip") def _translate_chromosome_coords(self, _source_cell_list, _num_threads=12, _rotation_mat=None, _rotation_ref_file=None, _rotation_order='reverse', _border_lim=10, _save=True, _overwrite=False, _verbose=True): """Function to translate chromosome coordinates from source_cell_list Inputs: _source_cell_list: source cell_list whose chrom_coords are translated _num_threads: number of threads, int (default:12) _rotation_mat: rotation matrix, if provided, np.2darray (default:None) _rotation_ref_file: file for rotation matrix, string (default:None) _rotation_order: whether rotation_mat is forward or reverse, (default:'reverse') _border_lim: limit to judge whether close to border, int (default:10) _overwrite: whether overwrite existing chrom_coords in this cell_list, bool (default:False) _verbose: say something!, bool (default:True) """ from copy import copy if _verbose: print(f"+ Start translating chromosome coordinates from other cell_list:{_source_cell_list}") # load rotation matrix if _rotation_mat is None or len(np.shape(_rotation_mat)) != 2 or (np.array(np.shape(_rotation_mat)) - 2).any(): if _rotation_ref_file is None: _rotation_ref_file = os.path.join(self.experiment_folder, 'rotation.npy') _rotation_mat = np.load(_rotation_ref_file) if _verbose: print(f"++ neither rotation_mat nor rotation_ref_file is given, load from default:\n\t{_rotation_ref_file}") else: if not os.path.isfile(_rotation_ref_file): raise IOError(f"Wrong input rotation_ref_file:{_rotation_ref_file}") # start recording args _trans_args = [] _trans_ids = [] for _i, _cell in
enumerate(self.cells): # find matched cell_data _matched_cell = [_src_cell for _src_cell in _source_cell_list.cells if getattr(_src_cell,'fov_id')==getattr(_cell,'fov_id') and getattr(_src_cell,'cell_id')==getattr(_cell,'cell_id') ] # unique match if len(_matched_cell) == 1: _trans_args.append((copy(_matched_cell[0]), copy(_cell), _rotation_mat, None, _rotation_order, self.shared_parameters['single_im_size'], _border_lim, _overwrite, True, _verbose)) _trans_ids.append(_i) else: if _verbose: print(f"++ cell from fov:{_cell.fov_id}, cell:{_cell.cell_id} \ doesn't have uniquely matched source cell, skip") # multiprocessing for translating chrom_coords if hasattr(self, 'num_threads'): _num_threads = getattr(self, 'num_threads') if _verbose: print( f"++ start translating chromosomes for {len(_trans_args)} cells with {_num_threads} threads") with mp.Pool(_num_threads) as _trans_pool: _new_coords = _trans_pool.starmap(visual_tools.translate_chromosome_coordinates, _trans_args) _trans_pool.close() _trans_pool.join() _trans_pool.terminate() batch_functions.killchild() # save for _i, _cell in enumerate(self.cells): if _i in _trans_ids: _coords = _new_coords[_trans_ids.index(_i)] if _overwrite or not hasattr(_cell, 'chrom_coords'): setattr(_cell, 'chrom_coords', _coords) if _save: if _overwrite: _cell._save_to_file('cell_info',_save_dic={'chrom_coords':_coords}, _verbose=_verbose) else: _cell._save_to_file('cell_info',_save_dic={'chrom_coords':_coords}, _verbose=_verbose) # multi-gaussian fitting def _spot_finding_for_cells(self, _data_type='unique', _decoded_flag='diff', _max_fitting_threads=12, _clear_image=False, _normalize_image=True, _use_chrom_coords=True, _seed_by_per=True, _th_seed_percentile=90, _max_filt_size=3, _max_seed_count=6, _min_seed_count=3, _fit_window=40, _expect_weight=1000, _min_height=100, _max_iter=10, _th_to_end=1e-6, _save=True, _overwrite=False, _verbose=True): """Function to allow multi-fitting in cell_list""" ## Check attributes for _cell_id, _cell in enumerate(self.cells): _clear_image_for_cell = _clear_image # whether clear image for this cell _im_attr = _data_type + '_' + 'ims' _id_attr = _data_type + '_' + 'ids' _result_attr = _data_type + '_' + 'spots' if not hasattr(self, _im_attr) or not hasattr(self, _id_attr): _clear_image_for_cell = True try: _cell._load_from_file(_data_type, _verbose=_verbose) except: raise IOError(f"Cannot load {_data_type} files") # do multi_fitting _cell._multi_fitting_for_chromosome(_data_type=_data_type, _decoded_flag=_decoded_flag, _normalization=_normalize_image, _use_chrom_coords=_use_chrom_coords, _num_threads=max(_max_fitting_threads, self.num_threads), _seed_by_per=_seed_by_per, _th_seed_percentile=_th_seed_percentile,_max_filt_size=_max_filt_size, _max_seed_count=_max_seed_count, _min_seed_count=_min_seed_count, _fit_window=_fit_window, _expect_weight=_expect_weight, _min_height=_min_height, _max_iter=_max_iter, _save=_save, _overwrite=_overwrite, _verbose=_verbose) if _clear_image_for_cell: if _verbose: print(f"++ clear images for {_data_type} in fov:{_cell.fov_id}, cell:{_cell.cell_id}") delattr(_cell, _im_attr) # new version for batch pick spots def _pick_spots_for_cells(self, _data_type='unique', _pick_type='EM', decoded_flag='diff', _num_threads=12, _use_chrom_coords=True, _sel_ids=None, _num_iters=10, _terminate_th=0.0025, _intensity_th=1., _hard_intensity_th=True, _spot_num_th=100, _ref_spot_list=None, _ref_spot_ids=None, _ref_pick_type='EM', _ignore_ids=False, _ref_dist_metric='median', _score_metric='linear', _local_size=5, 
_w_ctdist=1, _w_lcdist=0.1, _w_int=1, _w_nbdist=3, _save_inter_plot=False, _save_to_info=True, _save_plot=True, _check_spots=True, _check_th=-1.5, _check_percentile=10., _hard_dist_th=6000, _distance_limits=[0,np.inf], _ignore_nan=True, _nan_mask=0., _inf_mask=-1000., _chrom_share_spots=False, _plot_limits=[0, 1500], _cmap='seismic_r', _fig_dpi=100, _fig_size=4, _release_ram=False, _overwrite=False, _verbose=True): """Function to pick spots given candidates in batch""" ## Check Inputs if _verbose: print(f"+ Pick spots and convert to distmap, use_chrom_coords:{_use_chrom_coords}") _start_time = time.time() if _pick_type not in ['dynamic', 'naive', 'EM']: raise ValueError( f"Wrong _pick_type kwd given ({_pick_type}), should be dynamic or naive.") # check num_threads if _num_threads is None: _num_threads = self.num_threads if _save_inter_plot: print( "++ _save_inter_plot is ON for now, which may requires long time to finish.") # decide references if _ref_spot_list is None or isinstance(_ref_spot_list, str): _ref_spot_list = [_ref_spot_list for _cell in self.cells] _ref_id_list = [None for _cell in self.cells] elif isinstance(_ref_spot_list, list): if len(_ref_spot_list) != len(self.cells): raise IndexError(f"Wrong length of _ref_spot_list as list:{len(_ref_spot_list)}, should be same as number of cells:{len(self.cells)} ") if _ref_spot_ids is None: _ref_id_list = [None for _cell in self.cells] else: _ref_id_list = [_ref_spot_ids for _cell in self.cells] ## start generate multi-processing args _pick_args = [] for _i, _cell in enumerate(self.cells): # extract references _ref_spots = _ref_spot_list[_i] _ref_ids = _ref_id_list[_i] _pick_args.append((_cell, _data_type, _pick_type, _use_chrom_coords, _sel_ids, _num_iters, _terminate_th, _intensity_th, _hard_intensity_th, _spot_num_th, _ref_spots, _ref_ids, _ref_pick_type, _ignore_ids, _ref_dist_metric, _score_metric, _local_size, _w_ctdist, _w_lcdist, _w_int, _w_nbdist, _save_inter_plot, _save_to_info, _save_plot, _check_spots, _check_th, _check_percentile, _hard_dist_th, _distance_limits, _ignore_nan, _nan_mask, _inf_mask, _chrom_share_spots, _plot_limits, _cmap, _fig_dpi, _fig_size, _overwrite, _verbose)) # create folder to save distmaps ahead if _save_plot: _distmap_fd = os.path.join(_cell.map_folder, _cell.fovs[_cell.fov_id].replace('.dax','')) if not os.path.exists(_distmap_fd): if _verbose: print(f"+++ create distance map folder:{_distmap_fd}") os.makedirs(_distmap_fd) with mp.Pool(_num_threads) as _pick_pool: _pick_start = time.time() if _verbose: print(f"++ start multi-processing picking spots by {_pick_type} for {len(self.cells)} cells") # feed in args _updated_cells = _pick_pool.starmap(_pick_spot_in_batch, _pick_args, chunksize=1) # close multi-processing _pick_pool.close() _pick_pool.join() _pick_pool.terminate() # clear batch_functions.killchild() del(_pick_args) if not _release_ram or not _save_to_info: if _verbose: print("") self.cells = _updated_cells else: for _cell in _updated_cells: for _attr in dir(_cell): if _attr[0] != '_' and 'distance_map' in _attr: delattr(_cell, _attr) self.cells = _updated_cells if _verbose: print(f"+++ finish in {time.time()-_start_time:.2f}s.") # Calculate population median / contact map def _calculate_population_map(self, _data_type='unique', _pick_type='EM', _stat_type='median', _max_loss_prob=0.2, _pick_flag=None, _contact_th=200, _make_plot=True, _save_plot=True, _save_name='distance_map', _cmap='seismic', _fig_dpi=300, _fig_size=4, _gfilt_size=0.75, _plot_limits=[0,2000], _release_ram=False, 
_return_all_maps=False, _verbose=True): """Calculate 'averaged' map for all cells in this list Inputs: _data_type: unique or decoded _max_loss_prob: maximum tolerated fraction of failed (all-NaN) regions per chromosome, float (default:0.2) """ ## check inputs: if _data_type not in self.shared_parameters['allowed_data_types']: raise ValueError(f"Wrong _data_type kwd given, should be {self.shared_parameters['allowed_data_types'].keys()}, {_data_type} is given!") _allowed_pick_types = ['EM', 'dynamic', 'naive'] if _pick_type not in _allowed_pick_types: raise ValueError( f"Wrong _pick_type kwd given ({_pick_type}), should be among {_allowed_pick_types}.") if _stat_type not in ['median', 'mean', 'contact']: raise ValueError(f"Wrong _stat_type({_stat_type}) kwd is given!") if _cmap not in ['seismic', 'Reds']: raise ValueError(f"Wrong input _cmap:{_cmap}, exit!") # check _pick_flag if _pick_flag is not None: if len(_pick_flag) != len(self.cells): raise ValueError(f"_pick_flag should have exactly the same length as cells!") # get distmap attr if _pick_type != '': _distmap_attr = str(_pick_type) + '_' + str(_data_type) + '_' + 'distance_map' else: _distmap_attr = str(_data_type) + '_' + 'distance_map' # detect distmap shape _distmap_shape=[] for _cell_id, _cell in enumerate(self.cells): if hasattr(_cell, _distmap_attr): # loop through distmaps to get shape for _distmap in getattr(_cell, _distmap_attr): if np.shape(_distmap)[0] not in _distmap_shape: _distmap_shape.append(np.shape(_distmap)[0]) else: # try to load distmap _cell._load_from_file('distance_map', _distmap_data=_data_type, _distmap_pick=_pick_type, _verbose=False) # then do the same: loop through distmaps to get shape if hasattr(_cell, _distmap_attr): for _distmap in getattr(_cell, _distmap_attr): if np.shape(_distmap)[0] not in _distmap_shape: _distmap_shape.append(np.shape(_distmap)[0]) # check if _pick_flag is fully given if _pick_flag is not None and len(_pick_flag[_cell_id]) < len(getattr(_cell, _distmap_attr)): raise IndexError(f"Wrong _pick_flag for cell-num:{_cell_id}, not enough flags are given") if len(_distmap_shape) == 0: print("No distance map loaded, return.") return None, 0 if _verbose: print(f"+++ maximum distance-map size is {max(_distmap_shape)}") _cand_distmaps = [] ## check and collect distance maps for _cell_id, _cell in enumerate(self.cells): if not hasattr(_cell, _distmap_attr): if _verbose: print(f"+++ fov:{_cell.fov_id}, cell:{_cell.cell_id} doesn't have {_distmap_attr}, skip!") else: for _chrom_id, _distmap in enumerate(getattr(_cell, _distmap_attr)): # calculate failed entries _chr_failure_rate = np.sum(np.isnan(_distmap).sum(0) == len(_distmap)-1)/len(_distmap) # screen out by flag if _pick_flag is not None and np.max(_pick_flag[_cell_id][_chrom_id]) < 1: if _verbose: print( f"+++ filtered out by pick_flag, fov:{_cell.fov_id}, cell:{_cell.cell_id}, chrom:{_chrom_id}") # screen out by failure rate elif _chr_failure_rate > _max_loss_prob: if _verbose: print(f"+++ filtered out by loss probability, fov:{_cell.fov_id}, cell:{_cell.cell_id}, chrom:{_chrom_id}") continue # screen out by shape elif np.shape(_distmap)[0] != max(_distmap_shape): if _verbose: print(f"+++ filtered out by dist-map shape, fov:{_cell.fov_id}, cell:{_cell.cell_id}, chrom:{_chrom_id}") continue else: _cand_distmaps.append(_distmap.astype(float)) ## calculate averaged map # acquire total map _total_map = np.array(_cand_distmaps, dtype=float) _region_failure_rate = np.sum(np.sum(np.isnan(_total_map),axis=1) >= \ np.shape(_total_map)[2]-1, axis=0) / len(_total_map) # calculate averaged map if _stat_type == 'median':
_averaged_map = np.nanmedian(_total_map, axis=0) _cmap+= '_r' elif _stat_type == 'mean': _averaged_map = np.nanmean(_total_map, axis=0) _cmap += '_r' elif _stat_type == 'contact': _averaged_map =
np.nansum(_total_map < _contact_th, axis=0)
numpy.nansum
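# A minimal sketch (hypothetical data) of the population-map reduction used above:
# np.nansum over a boolean stack counts, per region pair, how many chromosomes lie
# closer than _contact_th; NaN distances compare False and so never count as contacts.
# The normalization by valid observations shown here is an assumption, since the
# original normalization step falls outside this excerpt.
import numpy as np

rng = np.random.default_rng(0)
# stack of per-chromosome distance maps, shape (n_chrom, n_region, n_region)
dist_stack = rng.uniform(50., 1500., size=(100, 30, 30))
dist_stack[rng.random(dist_stack.shape) < 0.1] = np.nan  # simulate lost spots

contact_th = 200.  # same role as _contact_th
contact_counts = np.nansum(dist_stack < contact_th, axis=0)
valid_counts = np.sum(~np.isnan(dist_stack), axis=0)   # non-NaN observations per entry
contact_freq = contact_counts / np.maximum(valid_counts, 1)

median_map = np.nanmedian(dist_stack, axis=0)  # the 'median' branch above
mean_map = np.nanmean(dist_stack, axis=0)      # the 'mean' branch above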
""" Signals and Systems Function Module Copyright (c) March 2017, <NAME> All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. The views and conclusions contained in the software and documentation are those of the authors and should not be interpreted as representing official policies, either expressed or implied, of the FreeBSD Project. Notes ----- The primary purpose of this function library is to support the book Signals and Systems for Dummies. Beyond that it should be useful to anyone who wants to use Pylab for general signals and systems modeling and simulation. There is a good collection of digital communication simulation primitives included in the library. More enhancements are planned over time. The formatted docstrings for the library follow. Click index in the upper right to get an alphabetical listing of the library functions. In all of the example code given it is assumed that ssd has been imported into your workspace. See the examples below for import options. Examples -------- >>> import sk_dsp_comm.sigsys as ssd >>> # Commands then need to be prefixed with ssd., i.e., >>> ssd.tri(t,tau) >>> # A full import of the module, to avoid the the need to prefix with ssd, is: >>> from sk_dsp_comm.sigsys import * Function Catalog ---------------- """ from matplotlib import pylab import numpy as np from numpy import fft import matplotlib.pyplot as plt from scipy import signal from scipy.io import wavfile from logging import getLogger log = getLogger(__name__) import warnings def cic(m, k): """ A functional form implementation of a cascade of integrator comb (CIC) filters. Parameters ---------- m : Effective number of taps per section (typically the decimation factor). k : The number of CIC sections cascaded (larger K gives the filter a wider image rejection bandwidth). Returns ------- b : FIR filter coefficients for a simple direct form implementation using the filter() function. Notes ----- Commonly used in multirate signal processing digital down-converters and digital up-converters. A true CIC filter requires no multiplies, only add and subtract operations. The functional form created here is a simple FIR requiring real coefficient multiplies via filter(). 
<NAME> July 2013 """ if k == 1: b = np.ones(m) else: h = np.ones(m) b = h for i in range(1, k): b = signal.convolve(b, h) # cascade by convolving impulse responses # Make filter have unity gain at DC return b / np.sum(b) def ten_band_eq_filt(x,GdB,Q=3.5): """ Filter the input signal x with a ten-band equalizer having octave gain values in ndarray GdB. The signal x is filtered using octave-spaced peaking filters starting at 31.25 Hz and stopping at 16 kHz. The Q of each filter is 3.5, but can be changed. The sampling rate is assumed to be 44.1 kHz. Parameters ---------- x : ndarray of the input signal samples GdB : ndarray containing ten octave band gain values [G0dB,...,G9dB] Q : Quality factor vector for each of the NB peaking filters Returns ------- y : ndarray of output signal samples Examples -------- >>> # Test with white noise >>> w = randn(100000) >>> y = ten_band_eq_filt(x,GdB) >>> psd(y,2**10,44.1) """ fs = 44100.0 # Hz NB = len(GdB) if not NB == 10: raise ValueError("GdB length not equal to ten") Fc = 31.25*2**np.arange(NB) B = np.zeros((NB,3)) A = np.zeros((NB,3)) # Create matrix of cascade coefficients for k in range(NB): [b,a] = peaking(GdB[k],Fc[k],Q) B[k,:] = b A[k,:] = a # Pass signal x through the cascade of ten filters y = np.zeros(len(x)) for k in range(NB): if k == 0: y = signal.lfilter(B[k,:],A[k,:],x) else: y = signal.lfilter(B[k,:],A[k,:],y) return y def ten_band_eq_resp(GdB,Q=3.5): """ Create a frequency response magnitude plot in dB of a ten band equalizer using a semilogplot (semilogx()) type plot Parameters ---------- GdB : Gain vector for 10 peaking filters [G0,...,G9] Q : Quality factor for each peaking filter (default 3.5) Returns ------- Nothing : two plots are created Examples -------- >>> import matplotlib.pyplot as plt >>> from sk_dsp_comm import sigsys as ss >>> ss.ten_band_eq_resp([0,10.0,0,0,-1,0,5,0,-4,0]) >>> plt.show() """ fs = 44100.0 # Hz NB = len(GdB) if not NB == 10: raise ValueError("GdB length not equal to ten") Fc = 31.25*2**np.arange(NB) B = np.zeros((NB,3)); A = np.zeros((NB,3)); # Create matrix of cascade coefficients for k in range(NB): b,a = peaking(GdB[k],Fc[k],Q,fs) B[k,:] = b A[k,:] = a # Create the cascade frequency response F = np.logspace(1,np.log10(20e3),1000) H = np.ones(len(F))*np.complex(1.0,0.0) for k in range(NB): w,Htemp = signal.freqz(B[k,:],A[k,:],2*np.pi*F/fs) H *= Htemp plt.figure(figsize=(6,4)) plt.subplot(211) plt.semilogx(F,20*np.log10(abs(H))) plt.axis([10, fs/2, -12, 12]) plt.grid() plt.title('Ten-Band Equalizer Frequency Response') plt.xlabel('Frequency (Hz)') plt.ylabel('Gain (dB)') plt.subplot(212) plt.stem(
np.arange(NB)
numpy.arange
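# A minimal usage sketch for the cic() idea above: a K-section CIC filter is the
# K-fold convolution of a length-M boxcar, normalized afterwards so the cascade
# has unity gain at DC. The parameter values here are arbitrary examples.
import numpy as np
from scipy import signal

m, k = 8, 3
h = np.ones(m)
b = h.copy()
for _ in range(1, k):
    b = signal.convolve(b, h)  # cascade sections by convolving impulse responses
b = b / np.sum(b)              # normalize for unity gain at DC

w, H = signal.freqz(b, 1, worN=512)
assert np.isclose(np.abs(H[0]), 1.0)  # response at w=0 is 0 dB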
import numpy as np class Madgwick: """ Madgwick filter for sensor fusion of IMU The class fuses the roll, pitch and yaw from accelrometer and magneotmeter with gyroscope. reference article : https://www.x-io.co.uk/res/doc/madgwick_internal_report.pdf refer to examples of the git repo """ def __init__(self, b = 0.1): """ Initialises all the variables. The option of setting your own values is given in the form of set functions """ GyroMeasError = np.pi * (40.0 / 180.0) self.beta = np.sqrt(3.0 / 4.0) * GyroMeasError # self.beta = b self.q = np.array([1.0, 0.0, 0.0, 0.0]) self.roll = 0 self.pitch = 0 self.yaw = 0 def computeOrientation(self, q): """ Computes euler angles from quaternion Parameter --------- q: array containing quaternion vals """ self.yaw = np.degrees(np.arctan2(2*q[1]*q[2] + 2*q[0]*q[3],\ q[0]*q[0] + q[1]*q[1] - q[2]*q[2] -q[3]*q[3])) self.pitch = np.degrees(-1*np.arcsin(2*(q[1]*q[3] - q[0]*q[2]))) self.roll = np.degrees(np.arctan2(2*q[0]*q[1] + 2*q[2]*q[3],\ q[0]*q[0] + q[3]*q[3] - q[1]*q[1] - q[2]*q[2])) def quaternionMul(self, q1, q2): """ Provides quaternion multiplication Parameters ---------- q1: array containing quaternion vals q2: array containing quaternion vals Return ------ finalq: new quaternion obtained from q1*q2 """ mat1 = np.array([[0,1,0,0],[-1,0,0,0],[0,0,0,1],[0,0,-1,0]]) mat2 = np.array([[0,0,1,0],[0,0,0,-1],[-1,0,0,0],[0,1,0,0]]) mat3 = np.array([[0,0,0,1],[0,0,1,0],[0,-1,0,0],[-1,0,0,0]]) k1 = np.matmul(q1,mat1)[np.newaxis,:].T k2 = np.matmul(q1,mat2)[np.newaxis,:].T k3 = np.matmul(q1,mat3)[np.newaxis,:].T k0 = q1[np.newaxis,:].T mat =
np.concatenate((k0,k1,k2,k3), axis = 1)
numpy.concatenate
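# A cross-check sketch for quaternionMul() above: the matrix formulation should
# agree with the direct Hamilton product, assuming the [w, x, y, z] component
# ordering implied by self.q = [1, 0, 0, 0]. The sample quaternions are arbitrary
# unit rotations chosen for illustration.
import numpy as np

def hamilton_product(q1, q2):
    """Direct component form of the quaternion product q1*q2."""
    w1, x1, y1, z1 = q1
    w2, x2, y2, z2 = q2
    return np.array([
        w1*w2 - x1*x2 - y1*y2 - z1*z2,
        w1*x2 + x1*w2 + y1*z2 - z1*y2,
        w1*y2 - x1*z2 + y1*w2 + z1*x2,
        w1*z2 + x1*y2 - y1*x2 + z1*w2,
    ])

s = np.sqrt(0.5)
qx = np.array([s, s, 0., 0.])    # 90 deg rotation about x
qy = np.array([s, 0., s, 0.])    # 90 deg rotation about y
print(hamilton_product(qx, qy))  # composed rotation, still unit norm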
import numpy as np import os from scipy.io import loadmat from scipy.special import kv, iv from numpy import pi, real, imag, exp, sqrt, sum, sin, cos # see <NAME>., and <NAME>. "Stokes flow due to a Stokeslet in a pipe." # Journal of Fluid Mechanics 86.04 (1978): 727-744. # class containing functions for detailed expression # noinspection PyTypeChecker class detail: def __init__(self, threshold, b): self._threshold = threshold self._b = b self._k = np.zeros([0]) self._n = np.zeros([0]) self._xn = np.zeros([0]) self._yn = np.zeros([0]) self._DmyD_xn = np.zeros([0]) self._DmyD_yn = np.zeros([0]) self._xn_k0 = np.zeros([0]) self._yn_k0 = np.zeros([0]) self._DmyD_xn_k0 = np.zeros([0]) self._DmyD_yn_k0 = np.zeros([0]) self._psi_xn1 = np.zeros([0]) self._psi_xn2 = np.zeros([0]) self._psi_xn3 = np.zeros([0]) self._pi_xn1 = np.zeros([0]) self._pi_xn2 = np.zeros([0]) self._pi_xn3 = np.zeros([0]) self._omega_xn1 = np.zeros([0]) self._omega_xn2 = np.zeros([0]) self._omega_xn3 = np.zeros([0]) self._psi_yn1 = np.zeros([0]) self._psi_yn2 = np.zeros([0]) self._psi_yn3 = np.zeros([0]) self._pi_yn1 = np.zeros([0]) self._pi_yn2 = np.zeros([0]) self._pi_yn3 = np.zeros([0]) self._omega_yn1 = np.zeros([0]) self._omega_yn2 = np.zeros([0]) self._omega_yn3 = np.zeros([0]) self._psi_xn1_k0 = np.zeros([0]) self._psi_xn3_k0 = np.zeros([0]) self._pi_xn1_k0 = np.zeros([0]) self._pi_xn3_k0 = np.zeros([0]) self._omega_xn1_k0 = np.zeros([0]) self._omega_xn3_k0 = np.zeros([0]) self._psi_yn2_k0 = np.zeros([0]) self._pi_yn2_k0 = np.zeros([0]) self._omega_yn2_k0 = np.zeros([0]) self._finish_xyk = False # run _set_xyk first self._finish_xn = False # run _solve_prepare_xn first self._finish_yn = False # run _solve_prepare_yn first self._finish1 = False # run _solve_prepare1 first self._finish2 = False # run _solve_prepare2 first self._finish3 = False # run _solve_prepare3 first def _set_xyk(self): threshold = self._threshold kmax = int(threshold - 2) nmax = int(threshold / 2) n_use, k_use = np.meshgrid(np.arange(1, nmax + 1), np.arange(-kmax, kmax + 1)) INDEX = (np.abs(k_use) + 2 * n_use) <= threshold INDEX[kmax, :] = 0 k_use = k_use[INDEX] n_use = n_use[INDEX] t_path = os.path.dirname(os.path.abspath(__file__)) full_path = os.path.normpath(t_path + '/' + 'xn.mat') mat_contents = loadmat(full_path) xn = mat_contents['xn'] full_path = os.path.normpath(t_path + '/' + 'yn.mat') mat_contents = loadmat(full_path) yn = mat_contents['yn'] xn_use = np.vstack((xn[kmax:0:-1, 0: nmax], xn[0: kmax + 1, 0: nmax])) yn_use = np.vstack((yn[kmax:0:-1, 0: nmax], yn[0: kmax + 1, 0: nmax])) xn_use = xn_use[INDEX] yn_use = yn_use[INDEX] xn_k0 = xn[0, 0:nmax] yn_k0 = yn[0, 0:nmax] self._k = k_use self._n = n_use self._xn = xn_use self._yn = yn_use self._xn_k0 = xn_k0 self._yn_k0 = yn_k0 self._finish_xyk = True return True def get_b(self): return self._b def _solve_prepare_xn(self): err_msg = 'run _set_xyk first. ' assert self._finish_xyk, err_msg DmyD = lambda k, s: 2 * s ** (-2) * iv(k, s) * ( (-1) * s * ((-4) + k ** 2 + s ** 2) * iv((-1) + k, s) ** 2 + 2 * ((-2) + k) * ( k * (2 + k) + s ** 2) * iv( (-1) + k, s) * iv(k, s) + s * (k * (4 + k) + s ** 2) * iv(k, s) ** 2) DmyDk0 = lambda s: 2 * iv(0, s) * ( s * iv(0, s) ** 2 + (-4) * iv(0, s) * iv(1, s) + (-1) * s ** (-1) * ( (-4) + s ** 2) * iv(1, s) ** 2) self._DmyD_xn = DmyD(self._k, self._xn) self._DmyD_xn_k0 = DmyDk0(self._xn_k0) self._finish_xn = True return True def _solve_prepare_yn(self): err_msg = 'run _set_xyk first. 
' assert self._finish_xyk, err_msg DmyD = lambda k, s: 2 * s ** (-2) * iv(k, s) * ( (-1) * s * ((-4) + k ** 2 + s ** 2) * iv((-1) + k, s) ** 2 + 2 * ((-2) + k) * ( k * (2 + k) + s ** 2) * iv( (-1) + k, s) * iv(k, s) + s * (k * (4 + k) + s ** 2) * iv(k, s) ** 2) DmyDk0 = lambda s: 2 * iv(0, s) * ( s * iv(0, s) ** 2 + (-4) * iv(0, s) * iv(1, s) + (-1) * s ** (-1) * ( (-4) + s ** 2) * iv(1, s) ** 2) self._DmyD_yn = DmyD(self._k, self._yn) self._DmyD_yn_k0 = DmyDk0(self._yn_k0) self._finish_yn = True return True def _solve_prepare1(self): err_msg = 'run _solve_prepare_xn first. ' assert self._finish_xn, err_msg psi1 = lambda k, s, b: (1 / 16) * pi ** (-2) * ( s ** 2 * ((iv((-2) + k, s) + iv(k, s)) * iv(1 + k, s) + iv((-1) + k, s) * ( iv(k, s) + iv(2 + k, s))) * ( iv((-1) + k, b * s) * kv((-1) + k, s) + (-2) * b * iv(k, b * s) * kv(k, s) + iv( 1 + k, b * s) * kv( 1 + k, s)) + ( -1) * (s * iv((-1) + k, s) + (-1) * ((-1) + k) * iv(k, s)) * ( iv(1 + k, s) * ( b * s * (iv((-2) + k, b * s) + 3 * iv(k, b * s)) * kv((-1) + k, s) + iv( (-1) + k, b * s) * ( (-2) * s * kv((-2) + k, s) + (-2) * (1 + k) * kv((-1) + k, s)) + ( -2) * s * iv(1 + k, b * s) * kv(k, s)) + 2 * iv( (-1) + k, s) * ( (-1) * s * (iv((-1) + k, b * s) + iv(1 + k, b * s)) * kv(k, s) + 2 * ( b * s * iv(k, b * s) + (-1) * (2 + k) * iv(1 + k, b * s)) * kv( 1 + k, s)))) pi1 = lambda k, s, b: (1 / 16) * pi ** (-2) * (iv(k, s) * iv(1 + k, s) * ( b * s * (iv((-2) + k, b * s) + 3 * iv(k, b * s)) * kv((-1) + k, s) + iv((-1) + k, b * s) * ( (-2) * s * kv((-2) + k, s) + (-2) * (1 + k) * kv((-1) + k, s)) + ( -2) * s * iv(1 + k, b * s) * kv( k, s)) + ( -2) * iv((-1) + k, s) * ( s * iv((-1) + k, b * s) * ( 2 * iv(1 + k, s) * kv((-1) + k, s) + iv(k, s) * kv( k, s)) + ( -2) * b * s * iv(k, b * s) * ( 2 * iv(1 + k, s) * kv(k, s) + iv( k, s) * kv(1 + k, s)) + iv( 1 + k, b * s) * ( 2 * s * iv(1 + k, s) * kv( 1 + k, s) + iv(k, s) * ( s * kv(k, s) + 2 * ( 2 + k) * kv( 1 + k, s))))) omega1 = lambda k, s, b: (1 / 16) * pi ** (-2) * s ** (-1) * ( s ** 2 * iv((-1) + k, s) ** 2 * ( (-1) * b * s * iv((-2) + k, b * s) * kv((-1) + k, s) + (-3) * b * s * iv(k, b * s) * kv( (-1) + k, s) + ( -8) * b * k * iv(k, b * s) * kv(k, s) + 2 * iv((-1) + k, b * s) * ( s * kv((-2) + k, s) + (1 + 3 * k) * kv((-1) + k, s) + (-1) * s * kv(k, s)) + 4 * b * s * iv( k, b * s) * kv( 1 + k, s) + (-8) * iv(1 + k, b * s) * kv(1 + k, s)) + (-2) * s * iv( (-1) + k, s) * iv( k, s) * ( (-1) * b * ((-1) + k) * s * iv((-2) + k, b * s) * kv( (-1) + k, s) + 3 * b * s * iv(k, b * s) * kv( (-1) + k, s) + ( -3) * b * k * s * iv(k, b * s) * kv( (-1) + k, s) + (-8) * b * k ** 2 * iv( k, b * s) * kv( k, s) + 2 * iv((-1) + k, b * s) * ( ((-1) + k) * s * kv((-2) + k, s) + ( (-1) + 3 * k ** 2) * kv((-1) + k, s) + ( -1) * ((-1) + k) * s * kv(k, s)) + ( -4) * b * s * iv( k, b * s) * kv(1 + k, s) + 4 * b * k * s * iv( k, b * s) * kv(1 + k, s) + 8 * iv( 1 + k, b * s) * kv( 1 + k, s) + ( -4) * k * iv( 1 + k, b * s) * kv(1 + k, s)) + iv(k, s) ** 2 * ( (-2) * iv((-1) + k, b * s) * ( (4 * k * s + s ** 3) * kv((-2) + k, s) + ( 4 * k + 4 * k ** 2 + s ** 2 + 3 * k * s ** 2) * kv( (-1) + k, s) + (-1) * s ** 3 * kv( k, s)) + s * ( b * (4 * k + s ** 2) * iv((-2) + k, b * s) * kv( (-1) + k, s) + 8 * iv( 1 + k, b * s) * ( (-1) * k * kv(k, s) + s * kv(1 + k, s)) + b * iv( k, b * s) * ( 3 * (4 * k + s ** 2) * kv((-1) + k, s) + ( -4) * s * ( (-2) * k * kv(k, s) + s * kv( 1 + k, s)))))) psi1_k0 = lambda s, b: (1 / 16) * pi ** (-2) * iv(1, s) * ( (-4) * s ** 2 * (iv(0, s) + iv(2, s)) * ( b * iv(0, b * s) * kv(0, s) + (-1) * 
iv(1, b * s) * kv(1, s)) + ( -8) * s * (iv(0, s) + s * iv(1, s)) * ( b * iv(0, b * s) * kv(1, s) + (-1) * iv(1, b * s) * kv(2, s))) pi1_k0 = lambda s, b: (1 / 2) * pi ** (-2) * iv(1, s) * ( b * iv(0, b * s) + (-1) * s * iv(1, b * s) * ( iv(1, s) * kv(1, s) + iv(0, s) * kv(2, s))) self._psi_xn1 = psi1(self._k, self._xn, self._b) self._psi_yn1 = psi1(self._k, self._yn, self._b) self._pi_xn1 = pi1(self._k, self._xn, self._b) self._pi_yn1 = pi1(self._k, self._yn, self._b) self._omega_xn1 = omega1(self._k, self._xn, self._b) self._omega_yn1 = omega1(self._k, self._yn, self._b) self._psi_xn1_k0 = psi1_k0(self._xn_k0, self._b) self._omega_xn1_k0 = 0 self._pi_xn1_k0 = pi1_k0(self._xn_k0, self._b) self._finish1 = True return True def _solve_prepare2(self): err_msg = 'run _solve_prepare_yn first. ' assert self._finish_yn, err_msg psi2 = lambda k, s, b: (1 / 16) * pi ** (-2) * ( s ** 2 * ((iv((-2) + k, s) + iv(k, s)) * iv(1 + k, s) + iv((-1) + k, s) * ( iv(k, s) + iv(2 + k, s))) * ( iv((-1) + k, b * s) * kv((-1) + k, s) + (-1) * iv(1 + k, b * s) * kv(1 + k, s)) + ( -4) * b ** (-1) * (s * iv((-1) + k, s) + (-1) * ((-1) + k) * iv(k, s)) * ( b * ((-2) + k) * iv((-1) + k, b * s) * iv(1 + k, s) * kv((-1) + k, s) + ( -1) * k * iv(k, b * s) * iv( 1 + k, s) * kv(k, s) + iv((-1) + k, s) * ( (-1) * k * iv(k, b * s) * kv(k, s) + b * (2 + k) * iv(1 + k, b * s) * kv( 1 + k, s)))) pi2 = lambda k, s, b: (1 / 4) * b ** (-1) * pi ** (-2) * ( iv(k, s) * iv(1 + k, s) * ( b * ((-2) + k) * iv((-1) + k, b * s) * kv((-1) + k, s) + (-1) * k * iv(k, b * s) * kv( k, s)) + iv( (-1) + k, s) * ( (-1) * b * s * iv((-1) + k, b * s) * iv(1 + k, s) * kv((-1) + k, s) + b * s * iv( 1 + k, s) * iv(1 + k, b * s) * kv( 1 + k, s) + iv(k, s) * ( (-1) * k * iv(k, b * s) * kv(k, s) + b * (2 + k) * iv(1 + k, b * s) * kv( 1 + k, s)))) omega2 = lambda k, s, b: (1 / 2) * b ** (-1) * pi ** (-2) * s ** (-1) * ( (-1) * b * s ** 2 * iv((-1) + k, s) ** 2 * ( iv((-1) + k, b * s) * kv((-1) + k, s) + iv(1 + k, b * s) * kv(1 + k, s)) + b * s * iv( (-1) + k, s) * iv( k, s) * ( ((-2) + 3 * k) * iv((-1) + k, b * s) * kv((-1) + k, s) + ((-2) + k) * iv( 1 + k, b * s) * kv(1 + k, s)) + iv(k, s) ** 2 * ( b * (4 * k + (-2) * k ** 2 + s ** 2) * iv((-1) + k, b * s) * kv((-1) + k, s) + 2 * k ** 2 * iv( k, b * s) * kv( k, s) + b * s ** 2 * iv(1 + k, b * s) * kv(1 + k, s))) omega2_k0 = lambda s, b: pi ** (-2) * ( s * iv(0, s) ** 2 + (-2) * iv(0, s) * iv(1, s) + (-1) * s * iv(1, s) ** 2) * iv(1, b * s) * kv( 1, s) self._psi_xn2 = psi2(self._k, self._xn, self._b) self._psi_yn2 = psi2(self._k, self._yn, self._b) self._pi_xn2 = pi2(self._k, self._xn, self._b) self._pi_yn2 = pi2(self._k, self._yn, self._b) self._omega_xn2 = omega2(self._k, self._xn, self._b) self._omega_yn2 = omega2(self._k, self._yn, self._b) self._psi_yn2_k0 = 0 self._omega_yn2_k0 = omega2_k0(self._yn_k0, self._b) self._pi_yn2_k0 = 0 self._finish2 = True return True def _solve_prepare3(self): err_msg = 'run _solve_prepare_xn first. 
' assert self._finish_xn, err_msg psi3 = lambda k, s, b: (1 / 8) * pi ** (-2) * s * ( ((iv((-2) + k, s) + iv(k, s)) * iv(1 + k, s) + iv((-1) + k, s) * ( iv(k, s) + iv(2 + k, s))) * ( (-1) * b * s * iv((-1) + k, b * s) * kv(k, s) + iv(k, b * s) * ( s * kv((-1) + k, s) + 2 * ((-1) + k) * kv(k, s))) + (-2) * ( s * iv((-1) + k, s) + (-1) * ((-1) + k) * iv(k, s)) * ( b * iv((-1) + k, b * s) * iv(1 + k, s) * kv((-1) + k, s) + (-1) * iv(k, b * s) * iv( 1 + k, s) * kv(k, s) + iv( (-1) + k, s) * ( (-1) * iv(k, b * s) * kv(k, s) + b * iv(1 + k, b * s) * kv(1 + k, s)))) pi3 = lambda k, s, b: (1 / 4) * pi ** (-2) * ( (-1) * s * iv(k, s) * iv(k, b * s) * iv(1 + k, s) * kv(k, s) + b * s * iv((-1) + k, b * s) * iv( 1 + k, s) * ( iv(k, s) * kv((-1) + k, s) + 2 * iv((-1) + k, s) * kv(k, s)) + iv((-1) + k, s) * ( (-1) * iv(k, b * s) * (s * iv(k, s) * kv(k, s) + 2 * iv(1 + k, s) * ( s * kv((-1) + k, s) + 2 * ((-1) + k) * kv(k, s))) + b * s * iv(k, s) * iv( 1 + k, b * s) * kv(1 + k, s))) omega3 = lambda k, s, b: (1 / 4) * pi ** (-2) * s ** (-1) * (s * iv(k, s) ** 2 * ( (-2) * k * iv(k, b * s) * (s * kv((-1) + k, s) + 2 * k * kv(k, s)) + b * iv( (-1) + k, b * s) * ( (4 * k + s ** 2) * kv((-1) + k, s) + 2 * k * s * kv(k, s)) + ( -1) * b * s ** 2 * iv(1 + k, b * s) * kv( 1 + k, s)) + s * iv((-1) + k, s) ** 2 * (2 * k * iv(k, b * s) * ( s * kv((-1) + k, s) + 2 * ((-1) + k) * kv(k, s)) + (-1) * b * s * iv((-1) + k, b * s) * ( s * kv((-1) + k, s) + 2 * k * kv(k, s)) + b * s ** 2 * iv( 1 + k, b * s) * kv(1 + k, s)) + 2 * iv((-1) + k, s) * iv(k, s) * ( (-2) * k ** 2 * iv(k, b * s) * ( s * kv( (-1) + k, s) + 2 * (( -1) + k) * kv( k, s)) + b * s * iv( (-1) + k, b * s) * ( (( -1) + k) * s * kv( (-1) + k, s) + 2 * k ** 2 * kv( k, s)) + ( -1) * b * (( -1) + k) * s ** 2 * iv( 1 + k, b * s) * kv( 1 + k, s))) psi3_k0 = lambda s, b: (1 / 4) * pi ** (-2) * s * iv(1, s) * (b * iv(1, b * s) * ( (-1) * s * (iv(0, s) + iv(2, s)) * kv(0, s) + (-2) * (iv(0, s) + s * iv(1, s)) * kv( 1, s)) + iv(0, b * s) * ( 2 * (s * iv(1, s) + ( -1) * iv(2, s)) * kv(0, s) + s * ( iv(0, s) + iv( 2, s)) * kv(1, s))) pi3_k0 = lambda s, b: (1 / 2) * pi ** (-2) * iv(1, s) * ( b * iv(1, b * s) + (-1) * s * iv(0, b * s) * ( iv(2, s) * kv(0, s) + iv(1, s) * kv(1, s))) self._psi_xn3 = psi3(self._k, self._xn, self._b) self._psi_yn3 = psi3(self._k, self._yn, self._b) self._pi_xn3 = pi3(self._k, self._xn, self._b) self._pi_yn3 = pi3(self._k, self._yn, self._b) self._omega_xn3 = omega3(self._k, self._xn, self._b) self._omega_yn3 = omega3(self._k, self._yn, self._b) self._psi_xn3_k0 = psi3_k0(self._xn_k0, self._b) self._omega_xn3_k0 = 0 self._pi_xn3_k0 = pi3_k0(self._xn_k0, self._b) self._finish3 = True return True def solve_u1(self, R, Phi, z): err_msg = 'run _solve_prepare1 first. 
' assert self._finish1, err_msg AFPhi1nL = lambda xn, k, psi1, omega1, pi1, R, z, DmyD: (-2) * exp(1) ** ( (-1) * z * imag(xn)) * pi * imag( DmyD ** (-1) * exp(1) ** (sqrt(-1 + 0j) * z * real(xn)) * ( (-1) * (omega1 + k * pi1) * iv((-1) + k, R * xn) + k * ( omega1 + pi1 + k * pi1 + (-1) * psi1) * R ** ( -1) * xn ** (-1) * iv(k, R * xn))) AFPhi1nR = lambda yn, k, psi1, omega1, pi1, R, z, DmyD: (-1) * np.exp( (-1) * z * imag(yn)) * pi * imag( DmyD ** (-1) * ( (-1) * (omega1 + k * pi1) * iv((-1) + k, R * yn) + k * ( omega1 + pi1 + k * pi1 + (-1) * psi1) * R ** ( -1) * yn ** (-1) * iv(k, R * yn))) AFR1nL = lambda xn, k, psi1, omega1, pi1, R, z, DmyD: (-2) * np.exp( (-1) * z * imag(xn)) * pi * imag( DmyD ** (-1) * np.exp(sqrt(-1 + 0j) * z * real(xn)) * R ** (-1) * xn ** (-1) * ( ((-1) * pi1 + psi1) * R * xn * iv((-1) + k, R * xn) + ( k * (omega1 + pi1 + k * pi1 + (-1) * psi1) + pi1 * R ** 2 * xn ** 2) * iv(k, R * xn))) AFR1nR = lambda yn, k, psi1, omega1, pi1, R, z, DmyD: (-1) * np.exp( (-1) * z * imag(yn)) * pi * imag( DmyD ** (-1) * R ** (-1) * yn ** (-1) * ( ((-1) * pi1 + psi1) * R * yn * iv((-1) + k, R * yn) + ( k * (omega1 + pi1 + k * pi1 + (-1) * psi1) + pi1 * R ** 2 * yn ** 2) * iv(k, R * yn))) BFz1nL = lambda xn, k, psi1, omega1, pi1, R, z, DmyD: (-2) * np.exp( (-1) * z * imag(xn)) * pi * real( DmyD ** (-1) * np.exp(sqrt(-1 + 0j) * z * real(xn)) * ( pi1 * R * xn * iv((-1) + k, R * xn) + (pi1 + (-1) * k * pi1 + psi1) * iv(k, R * xn))) BFz1nR = lambda yn, k, psi1, omega1, pi1, R, z, DmyD: (-1) * np.exp( (-1) * z * imag(yn)) * pi * real( DmyD ** (-1) * (pi1 * R * yn * iv((-1) + k, R * yn) + ( pi1 + (-1) * k * pi1 + psi1) * iv(k, R * yn))) uR1_k0 = lambda xn, psi1, omega1, pi1, R, z, DmyD: (-2) * np.exp( (-1) * z * imag(xn)) * pi * imag( DmyD ** (-1) * np.exp(sqrt(-1 + 0j) * z * real(xn)) * ( pi1 * R * xn * iv(0, R * xn) + ((-1) * pi1 + psi1) * iv(1, R * xn))) uz1_k0 = lambda xn, psi1, omega1, pi1, R, z, DmyD: (-2) * np.exp( (-1) * z * imag(xn)) * pi * real( DmyD ** (-1) * np.exp(sqrt(-1 + 0j) * z * real(xn)) * ( (pi1 + psi1) * iv(0, R * xn) + pi1 * R * xn * iv(1, R * xn))) R = np.array(R, dtype=float).flatten() z = np.array(z, dtype=float).flatten() Phi = np.array(Phi, dtype=float) Phi_shape = Phi.shape Phi_flags = Phi.flags Phi = Phi.flatten() err_msg = 'both R and z should be scales. 
' assert R.size == 1 and z.size == 1, err_msg uR1 = Phi.copy() uPhi1 = Phi.copy() uz1 = Phi.copy() uR1k0 = sum(uR1_k0(self._xn_k0, self._psi_xn1_k0, self._omega_xn1_k0, self._pi_xn1_k0, R, z, self._DmyD_xn_k0)) uPhi1k0 = 0 uz1k0 = sum(uz1_k0(self._xn_k0, self._psi_xn1_k0, self._omega_xn1_k0, self._pi_xn1_k0, R, z, self._DmyD_xn_k0)) t_AFR1nL = AFR1nL(self._xn, self._k, self._psi_xn1, self._omega_xn1, self._pi_xn1, R, z, self._DmyD_xn) t_AFR1nR = AFR1nR(self._yn, self._k, self._psi_yn1, self._omega_yn1, self._pi_yn1, R, z, self._DmyD_yn) t_AFPhi1nL = AFPhi1nL(self._xn, self._k, self._psi_xn1, self._omega_xn1, self._pi_xn1, R, z, self._DmyD_xn) t_AFPhi1nR = AFPhi1nR(self._yn, self._k, self._psi_yn1, self._omega_yn1, self._pi_yn1, R, z, self._DmyD_yn) t_BFz1nL = BFz1nL(self._xn, self._k, self._psi_xn1, self._omega_xn1, self._pi_xn1, R, z, self._DmyD_xn) t_BFz1nR = BFz1nR(self._yn, self._k, self._psi_yn1, self._omega_yn1, self._pi_yn1, R, z, self._DmyD_yn) for i0, phi in enumerate(Phi): uR1[i0] = uR1k0 + sum((t_AFR1nL + t_AFR1nR) * cos(self._k * phi)) uPhi1[i0] = uPhi1k0 + sum((t_AFPhi1nL + t_AFPhi1nR) * sin(self._k * phi)) uz1[i0] = uz1k0 + sum((t_BFz1nL + t_BFz1nR) * cos(self._k * phi)) if Phi_flags['C_CONTIGUOUS']: uR1 = uR1.reshape(Phi_shape, order='C') uPhi1 = uPhi1.reshape(Phi_shape, order='C') uz1 = uz1.reshape(Phi_shape, order='C') elif Phi_flags['F_CONTIGUOUS']: uR1 = uR1.reshape(Phi_shape, order='F') uPhi1 = uPhi1.reshape(Phi_shape, order='F') uz1 = uz1.reshape(Phi_shape, order='F') else: raise ValueError('C_CONTIGUOUS and F_CONTIGUOUS are both False. ') return uR1, uPhi1, uz1 def solve_u2(self, R, Phi, z): err_msg = 'run _solve_prepare2 first. ' assert self._finish2, err_msg AFPhi2nL = lambda xn, k, psi2, omega2, pi2, R, z, DmyD: (-2) * np.exp( (-1) * z * imag(xn)) * pi * imag( DmyD ** (-1) * np.exp(sqrt(-1 + 0j) * z * real(xn)) * ( ((-1) * omega2 + k * pi2) * iv((-1) + k, R * xn) + k * ( omega2 + (-1) * (1 + k) * pi2 + psi2) * R ** ( -1) * xn ** (-1) * iv(k, R * xn))) AFPhi2nR = lambda yn, k, psi2, omega2, pi2, R, z, DmyD: (-1) * np.exp( (-1) * z * imag(yn)) * pi * imag( DmyD ** (-1) * ( ((-1) * omega2 + k * pi2) * iv((-1) + k, R * yn) + k * ( omega2 + (-1) * (1 + k) * pi2 + psi2) * R ** ( -1) * yn ** (-1) * iv(k, R * yn))) AFR2nL = lambda xn, k, psi2, omega2, pi2, R, z, DmyD: (-2) * np.exp( (-1) * z * imag(xn)) * pi * imag( DmyD ** (-1) * np.exp(sqrt(-1 + 0j) * z * real(xn)) * R ** (-1) * xn ** (-1) * ( ((-1) * pi2 + psi2) * R * xn * iv((-1) + k, R * xn) + ( k * ((-1) * omega2 + pi2 + k * pi2 + ( -1) * psi2) + pi2 * R ** 2 * xn ** 2) * iv(k, R * xn))) AFR2nR = lambda yn, k, psi2, omega2, pi2, R, z, DmyD: (-1) * np.exp( (-1) * z * imag(yn)) * pi * imag( DmyD ** (-1) * R ** (-1) * yn ** (-1) * ( ((-1) * pi2 + psi2) * R * yn * iv((-1) + k, R * yn) + ( k * ((-1) * omega2 + pi2 + k * pi2 + ( -1) * psi2) + pi2 * R ** 2 * yn ** 2) * iv(k, R * yn))) BFz2nL = lambda xn, k, psi2, omega2, pi2, R, z, DmyD: (-2) * np.exp( (-1) * z * imag(xn)) * pi * real( DmyD ** (-1) * np.exp(sqrt(-1 + 0j) * z * real(xn)) * ( pi2 * R * xn * iv((-1) + k, R * xn) + (pi2 + (-1) * k * pi2 + psi2) * iv(k, R * xn))) BFz2nR = lambda yn, k, psi2, omega2, pi2, R, z, DmyD: (-1) * np.exp( (-1) * z * imag(yn)) * pi * real( DmyD ** (-1) * (pi2 * R * yn * iv((-1) + k, R * yn) + ( pi2 + (-1) * k * pi2 + psi2) * iv(k, R * yn))) uPhi2_k0 = lambda yn, psi2, omega2, pi2, R, z, DmyD: np.exp( (-1) * z * imag(yn)) * pi * imag( DmyD ** (-1) * omega2 * iv(1, R * yn)) R = np.array(R, dtype=float).flatten() z = np.array(z, 
dtype=float).flatten() Phi = np.array(Phi, dtype=float) Phi_shape = Phi.shape Phi_flags = Phi.flags Phi = Phi.flatten() err_msg = 'both R and z should be scales. ' assert R.size == 1 and z.size == 1, err_msg uR2 = Phi.copy() uPhi2 = Phi.copy() uz2 = Phi.copy() uR2k0 = 0 uPhi2k0 = sum( uPhi2_k0(self._yn_k0, self._psi_yn2_k0, self._omega_yn2_k0, self._pi_yn2_k0, R, z, self._DmyD_yn_k0)) uz2k0 = 0 t_AFR2nL = AFR2nL(self._xn, self._k, self._psi_xn2, self._omega_xn2, self._pi_xn2, R, z, self._DmyD_xn) t_AFR2nR = AFR2nR(self._yn, self._k, self._psi_yn2, self._omega_yn2, self._pi_yn2, R, z, self._DmyD_yn) t_AFPhi2nL = AFPhi2nL(self._xn, self._k, self._psi_xn2, self._omega_xn2, self._pi_xn2, R, z, self._DmyD_xn) t_AFPhi2nR = AFPhi2nR(self._yn, self._k, self._psi_yn2, self._omega_yn2, self._pi_yn2, R, z, self._DmyD_yn) t_BFz2nL = BFz2nL(self._xn, self._k, self._psi_xn2, self._omega_xn2, self._pi_xn2, R, z, self._DmyD_xn) t_BFz2nR = BFz2nR(self._yn, self._k, self._psi_yn2, self._omega_yn2, self._pi_yn2, R, z, self._DmyD_yn) for i0, phi in enumerate(Phi): uR2[i0] = uR2k0 + sum((t_AFR2nL + t_AFR2nR) * sin(self._k * phi)) uPhi2[i0] = uPhi2k0 + sum((t_AFPhi2nL + t_AFPhi2nR) * cos(self._k * phi)) uz2[i0] = uz2k0 + sum((t_BFz2nL + t_BFz2nR) * sin(self._k * phi)) if Phi_flags['C_CONTIGUOUS']: uR2 = uR2.reshape(Phi_shape, order='C') uPhi2 = uPhi2.reshape(Phi_shape, order='C') uz2 = uz2.reshape(Phi_shape, order='C') elif Phi_flags['F_CONTIGUOUS']: uR2 = uR2.reshape(Phi_shape, order='F') uPhi2 = uPhi2.reshape(Phi_shape, order='F') uz2 = uz2.reshape(Phi_shape, order='F') else: raise ValueError('C_CONTIGUOUS and F_CONTIGUOUS are both False. ') return uR2, uPhi2, uz2 def solve_u3(self, R, Phi, z): err_msg = 'run _solve_prepare3 first. ' assert self._finish3, err_msg BFPhi3nL = lambda xn, k, psi3, omega3, pi3, R, z, DmyD: 2 * np.exp( (-1) * z * imag(xn)) * pi * real( DmyD ** (-1) * np.exp(sqrt(-1 + 0j) * z * real(xn)) * ( (-1) * (omega3 + k * pi3) * iv((-1) + k, R * xn) + k * ( omega3 + pi3 + k * pi3 + (-1) * psi3) * R ** ( -1) * xn ** (-1) * iv(k, R * xn))) BFPhi3nR = lambda yn, k, psi3, omega3, pi3, R, z, DmyD: np.exp( (-1) * z * imag(yn)) * pi * real( DmyD ** (-1) * ( (-1) * (omega3 + k * pi3) * iv((-1) + k, R * yn) + k * ( omega3 + pi3 + k * pi3 + (-1) * psi3) * R ** ( -1) * yn ** (-1) * iv(k, R * yn))) BFR3nL = lambda xn, k, psi3, omega3, pi3, R, z, DmyD: 2 * np.exp( (-1) * z * imag(xn)) * pi * real( DmyD ** (-1) * np.exp(sqrt(-1 + 0j) * z * real(xn)) * R ** (-1) * xn ** (-1) * ( ((-1) * pi3 + psi3) * R * xn * iv((-1) + k, R * xn) + ( k * (omega3 + pi3 + k * pi3 + (-1) * psi3) + pi3 * R ** 2 * xn ** 2) * iv(k, R * xn))) BFR3nR = lambda yn, k, psi3, omega3, pi3, R, z, DmyD: np.exp( (-1) * z * imag(yn)) * pi * real( DmyD ** (-1) * R ** (-1) * yn ** (-1) * ( ((-1) * pi3 + psi3) * R * yn * iv((-1) + k, R * yn) + ( k * (omega3 + pi3 + k * pi3 + (-1) * psi3) + pi3 * R ** 2 * yn ** 2) * iv(k, R * yn))) AFz3nL = lambda xn, k, psi3, omega3, pi3, R, z, DmyD: (-2) * np.exp( (-1) * z * imag(xn)) * pi * imag( DmyD ** (-1) * np.exp(sqrt(-1 + 0j) * z * real(xn)) * ( pi3 * R * xn * iv((-1) + k, R * xn) + (pi3 + (-1) * k * pi3 + psi3) * iv(k, R * xn))) AFz3nR = lambda yn, k, psi3, omega3, pi3, R, z, DmyD: (-1) * np.exp( (-1) * z * imag(yn)) * pi * imag( DmyD ** (-1) * (pi3 * R * yn * iv((-1) + k, R * yn) + ( pi3 + (-1) * k * pi3 + psi3) * iv(k, R * yn))) uR3_k0 = lambda xn, psi3, omega3, pi3, R, z, DmyD: 2 * np.exp( (-1) * z * imag(xn)) * pi * real( DmyD ** (-1) * np.exp(sqrt(-1 + 0j) * z * real(xn)) * ( pi3 * R * xn * iv(0, 
R * xn) + ((-1) * pi3 + psi3) * iv(1, R * xn))) uz3_k0 = lambda xn, psi3, omega3, pi3, R, z, DmyD: (-2) * np.exp( (-1) * z * imag(xn)) * pi * imag( DmyD ** (-1) * np.exp(sqrt(-1 + 0j) * z * real(xn)) * ( (pi3 + psi3) * iv(0, R * xn) + pi3 * R * xn * iv(1, R * xn))) R = np.array(R, dtype=float).flatten() z = np.array(z, dtype=float).flatten() Phi = np.array(Phi, dtype=float) Phi_shape = Phi.shape Phi_flags = Phi.flags Phi = Phi.flatten() err_msg = 'both R and z should be scales. ' assert R.size == 1 and z.size == 1, err_msg uR3 = Phi.copy() uPhi3 = Phi.copy() uz3 = Phi.copy() uR3k0 = sum(uR3_k0(self._xn_k0, self._psi_xn3_k0, self._omega_xn3_k0, self._pi_xn3_k0, R, z, self._DmyD_xn_k0)) uPhi3k0 = 0 uz3k0 = sum(uz3_k0(self._xn_k0, self._psi_xn3_k0, self._omega_xn3_k0, self._pi_xn3_k0, R, z, self._DmyD_xn_k0)) t_BFR3nL = BFR3nL(self._xn, self._k, self._psi_xn3, self._omega_xn3, self._pi_xn3, R, z, self._DmyD_xn) t_BFR3nR = BFR3nR(self._yn, self._k, self._psi_yn3, self._omega_yn3, self._pi_yn3, R, z, self._DmyD_yn) t_BFPhi3nL = BFPhi3nL(self._xn, self._k, self._psi_xn3, self._omega_xn3, self._pi_xn3, R, z, self._DmyD_xn) t_BFPhi3nR = BFPhi3nR(self._yn, self._k, self._psi_yn3, self._omega_yn3, self._pi_yn3, R, z, self._DmyD_yn) t_AFz3nL = AFz3nL(self._xn, self._k, self._psi_xn3, self._omega_xn3, self._pi_xn3, R, z, self._DmyD_xn) t_AFz3nR = AFz3nR(self._yn, self._k, self._psi_yn3, self._omega_yn3, self._pi_yn3, R, z, self._DmyD_yn) for i0, phi in enumerate(Phi): uR3[i0] = uR3k0 + sum((t_BFR3nL + t_BFR3nR) * cos(self._k * phi)) uPhi3[i0] = uPhi3k0 + sum((t_BFPhi3nL + t_BFPhi3nR) * sin(self._k * phi)) uz3[i0] = uz3k0 + sum((t_AFz3nL + t_AFz3nR) * cos(self._k * phi)) if Phi_flags['C_CONTIGUOUS']: uR3 = uR3.reshape(Phi_shape, order='C') uPhi3 = uPhi3.reshape(Phi_shape, order='C') uz3 = uz3.reshape(Phi_shape, order='C') elif Phi_flags['F_CONTIGUOUS']: uR3 = uR3.reshape(Phi_shape, order='F') uPhi3 = uPhi3.reshape(Phi_shape, order='F') uz3 = uz3.reshape(Phi_shape, order='F') else: raise ValueError('C_CONTIGUOUS and F_CONTIGUOUS are both False. 
') return uR3, uPhi3, uz3 def solve_prepare(self): self._set_xyk() self._solve_prepare_xn() self._solve_prepare_yn() self._solve_prepare1() self._solve_prepare2() self._solve_prepare3() return True def solve_u(self, R, Phi, z): uR1, uPhi1, uz1 = self.solve_u1(R, Phi, z) uR2, uPhi2, uz2 = self.solve_u2(R, Phi, z) uR3, uPhi3, uz3 = self.solve_u3(R, Phi, z) return uR1, uPhi1, uz1, uR2, uPhi2, uz2, uR3, uPhi3, uz3 def solve_uxyz(self, nodes): from petsc4py import PETSc phi = np.arctan2(nodes[:, 1], nodes[:, 0]) rho = np.sqrt(nodes[:, 0] ** 2 + nodes[:, 1] ** 2) z = nodes[:, 2] u1 = [] u2 = [] u3 = [] dmda = PETSc.DMDA().create(sizes=(nodes.shape[0],), dof=1, stencil_width=0, comm=PETSc.COMM_WORLD) dmda.setFromOptions() dmda.setUp() for i0 in range(dmda.getRanges()[0][0], dmda.getRanges()[0][1]): t_rho = rho[i0] t_phi = phi[i0] t_z = z[i0] abs_z = np.abs(t_z) sign_z = np.sign(t_z) if np.isclose(t_rho, 1): ux1 = 0 uy1 = 0 uz1 = 0 ux2 = 0 uy2 = 0 uz2 = 0 ux3 = 0 uy3 = 0 uz3 = 0 else: uR1, uPhi1, uz1, uR2, uPhi2, uz2, uR3, uPhi3, uz3 = self.solve_u(t_rho, t_phi, abs_z) ux1 = np.cos(t_phi) * uR1 - np.sin(t_phi) * uPhi1 ux2 = np.cos(t_phi) * uR2 - np.sin(t_phi) * uPhi2 ux3 = np.cos(t_phi) * uR3 - np.sin(t_phi) * uPhi3 uy1 = np.sin(t_phi) * uR1 + np.cos(t_phi) * uPhi1 uy2 = np.sin(t_phi) * uR2 + np.cos(t_phi) * uPhi2 uy3 = np.sin(t_phi) * uR3 + np.cos(t_phi) * uPhi3 u1.append((ux1, uy1, sign_z * uz1)) u2.append((ux2, uy2, sign_z * uz2)) u3.append((sign_z * ux3, sign_z * uy3, uz3)) comm = PETSc.COMM_WORLD.tompi4py() u1_all = np.vstack(comm.allgather(u1)) u2_all = np.vstack(comm.allgather(u2)) u3_all = np.vstack(comm.allgather(u3)) return u1_all, u2_all, u3_all class detail_light(detail): def __init__(self, threshold): super().__init__(threshold=threshold, b=0) def set_b(self, b): self._b = b return True def solve_prepare_light(self): self._set_xyk() self._solve_prepare_xn() self._solve_prepare_yn() return True def solve_prepare_b(self): self._solve_prepare1() self._solve_prepare2() self._solve_prepare3() return True def solve_u1_light(self, R, Phi, z): err_msg = 'run _solve_prepare1 first. 
' assert self._finish1, err_msg AFPhi1nL = lambda xn, k, psi1, omega1, pi1, R, z, DmyD: (-2) * np.exp( (-1) * z * imag(xn)) * pi * imag( DmyD ** (-1) * np.exp(sqrt(-1 + 0j) * z * real(xn)) * ( (-1) * (omega1 + k * pi1) * iv((-1) + k, R * xn) + k * ( omega1 + pi1 + k * pi1 + (-1) * psi1) * R ** ( -1) * xn ** (-1) * iv(k, R * xn))) AFPhi1nR = lambda yn, k, psi1, omega1, pi1, R, z, DmyD: (-1) * np.exp( (-1) * z * imag(yn)) * pi * imag( DmyD ** (-1) * ( (-1) * (omega1 + k * pi1) * iv((-1) + k, R * yn) + k * ( omega1 + pi1 + k * pi1 + (-1) * psi1) * R ** ( -1) * yn ** (-1) * iv(k, R * yn))) AFR1nL = lambda xn, k, psi1, omega1, pi1, R, z, DmyD: (-2) * np.exp( (-1) * z * imag(xn)) * pi * imag( DmyD ** (-1) * np.exp(sqrt(-1 + 0j) * z * real(xn)) * R ** (-1) * xn ** (-1) * ( ((-1) * pi1 + psi1) * R * xn * iv((-1) + k, R * xn) + ( k * (omega1 + pi1 + k * pi1 + (-1) * psi1) + pi1 * R ** 2 * xn ** 2) * iv(k, R * xn))) AFR1nR = lambda yn, k, psi1, omega1, pi1, R, z, DmyD: (-1) * np.exp( (-1) * z * imag(yn)) * pi * imag( DmyD ** (-1) * R ** (-1) * yn ** (-1) * ( ((-1) * pi1 + psi1) * R * yn * iv((-1) + k, R * yn) + ( k * (omega1 + pi1 + k * pi1 + (-1) * psi1) + pi1 * R ** 2 * yn ** 2) * iv(k, R * yn))) BFz1nL = lambda xn, k, psi1, omega1, pi1, R, z, DmyD: (-2) * np.exp( (-1) * z * imag(xn)) * pi * real( DmyD ** (-1) * np.exp(sqrt(-1 + 0j) * z * real(xn)) * ( pi1 * R * xn * iv((-1) + k, R * xn) + (pi1 + (-1) * k * pi1 + psi1) * iv(k, R * xn))) BFz1nR = lambda yn, k, psi1, omega1, pi1, R, z, DmyD: (-1) * np.exp( (-1) * z * imag(yn)) * pi * real( DmyD ** (-1) * (pi1 * R * yn * iv((-1) + k, R * yn) + ( pi1 + (-1) * k * pi1 + psi1) * iv(k, R * yn))) uR1_k0 = lambda xn, psi1, omega1, pi1, R, z, DmyD: (-2) * np.exp( (-1) * z * imag(xn)) * pi * imag( DmyD ** (-1) * np.exp(sqrt(-1 + 0j) * z * real(xn)) * ( pi1 * R * xn * iv(0, R * xn) + ((-1) * pi1 + psi1) * iv(1, R * xn))) uz1_k0 = lambda xn, psi1, omega1, pi1, R, z, DmyD: (-2) * np.exp( (-1) * z *
imag(xn)
numpy.imag
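# A minimal sketch: solve_uxyz above rotates the cylindrical velocity
# components (uR, uPhi) returned by solve_u into Cartesian (ux, uy) at
# azimuth phi. A self-contained version of that rotation (the function
# name is illustrative, not part of the original class):
import numpy as np

def cyl_to_cart_velocity(u_R, u_Phi, phi):
    """Rotate cylindrical velocity components into Cartesian components."""
    u_x = np.cos(phi) * u_R - np.sin(phi) * u_Phi
    u_y = np.sin(phi) * u_R + np.cos(phi) * u_Phi
    return u_x, u_y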
"""Various VR utilities.""" import queue import threading import time from asyncio.streams import StreamReader from typing import Sequence, Dict import struct import numpy as np import serial.threaded from pykalman import KalmanFilter from copy import copy try: import cv2 from displayarray import read_updates from displayarray import display HAVE_VOD = True except Exception as e: print(f"failed to import displayarray and/or opencv, reason: {e}") print("camera based tracking methods will not be available") HAVE_VOD = False from itertools import islice, takewhile import re from typing import Optional def format_str_for_write(input_str: str) -> bytes: """Format a string for writing to SteamVR's stream.""" if len(input_str) < 1: return "".encode("utf-8") if input_str[-1] != "\n": return (input_str + "\n").encode("utf-8") return input_str.encode("utf-8") async def read(reader: StreamReader, read_len: int = 20) -> str: """Read one line from reader asynchronously.""" data = [] temp = " " while "\n" not in temp and temp != "": temp = await reader.read(read_len) temp = temp.decode("utf-8") data.append(temp) time.sleep(0) # allows thread switching return "".join(data) def read2(reader, read_len=20): """Read one line from reader asynchronously.""" data = [] temp = " " while "\n" not in temp and temp != "": temp = reader.recv(read_len) temp = temp.decode("utf-8") data.append(temp) time.sleep(0) # allows thread switching return "".join(data) async def read3(reader: StreamReader, read_len: int = 20) -> str: """Read one line from reader asynchronously.""" data = bytearray() temp = b" " while b"\n" not in temp and temp != b"": temp = await reader.read(read_len) data.extend(temp) # time.sleep(0) # allows thread switching return data def make_rotmat(angls, dtype=np.float64): """ Rotate a set of points around the x, y, then z axes. :param points: a point dictionary, such as: [[0, 0, 0], [1, 0, 0]] :param angles: the degrees to rotate on the x, y, and z axis """ rotx = np.array( [ [1, 0, 0], [0, np.cos(angls[0]), -np.sin(angls[0])], [0, np.sin(angls[0]), np.cos(angls[0])], ], dtype=dtype, ) roty = np.array( [ [np.cos(angls[1]), 0, np.sin(angls[1])], [0, 1, 0], [-np.sin(angls[1]), 0, np.cos(angls[1])], ], dtype=dtype, ) rotz = np.array( [ [np.cos(angls[2]), -np.sin(angls[2]), 0], [np.sin(angls[2]), np.cos(angls[2]), 0], [0, 0, 1], ], dtype=dtype, ) return np.matmul(
np.matmul(rotx, roty)
numpy.matmul
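# make_rotmat above builds per-axis rotation matrices and chains them with
# np.matmul. A short usage sketch, assuming it returns the composed 3x3
# matrix (its return statement is truncated above); the angles and points
# are illustrative:
import numpy as np

angles = np.radians([30.0, 45.0, 60.0])                 # rotations about x, y, z
points = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
rot = make_rotmat(angles)                               # assumed composed rotation
rotated = points @ rot.T                                # rotate each row vector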
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.

"""Tests for LinearFunction class."""

import unittest
import numpy as np
from ddt import ddt, data

from qiskit.test import QiskitTestCase
from qiskit.circuit import QuantumCircuit
from qiskit.circuit.library.standard_gates import CXGate, SwapGate
from qiskit.circuit.library.generalized_gates import LinearFunction
from qiskit.circuit.exceptions import CircuitError
from qiskit.quantum_info.operators import Operator


def random_linear_circuit(num_qubits, num_gates, seed=None):
    """Generate a pseudo random linear circuit."""
    instructions = {
        "cx": (CXGate(), 2),
        "swap": (SwapGate(), 2),
    }

    if isinstance(seed, np.random.Generator):
        rng = seed
    else:
        rng = np.random.default_rng(seed)

    name_samples = rng.choice(tuple(instructions), num_gates)

    circ = QuantumCircuit(num_qubits)

    for name in name_samples:
        gate, nqargs = instructions[name]
        qargs = rng.choice(range(num_qubits), nqargs, replace=False).tolist()
        circ.append(gate, qargs)

    return circ


def random_invertible_binary_matrix(num_qubits, seed=None):
    """Generates a random invertible n x n binary matrix."""
    # This code is adapted from random_cnotdihedral
    if isinstance(seed, np.random.Generator):
        rng = seed
    else:
        rng = np.random.default_rng(seed)

    det = 0
    while np.allclose(det, 0) or np.allclose(det, 2):
        binary_matrix = rng.integers(2, size=(num_qubits, num_qubits))
        det =
np.linalg.det(binary_matrix)
numpy.linalg.det
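# random_invertible_binary_matrix above resamples while the determinant is
# 0 or 2, but invertibility over GF(2) requires an odd integer determinant.
# A hedged stand-alone check (not Qiskit API; the float determinant is only
# reliable for small matrices):
import numpy as np

def is_invertible_gf2(binary_matrix):
    det = np.linalg.det(binary_matrix)
    return round(det) % 2 == 1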
#!/usr/bin/env python2 """Create 2D to 3D datasets from selected SMPL fits.""" # pylint: disable=invalid-name, wrong-import-order import os import os.path as path import sys import itertools import logging import csv import cPickle as pickle import numpy as np import scipy import scipy.io as sio import cv2 import click import opendr.camera as _odr_c import tqdm import h5py from clustertools.log import LOGFORMAT from up_tools.model import (robust_person_size, rlswap_landmarks_91,landmark_mesh_91, connections_landmarks_91, dots_landmarks_91, get_crop) # pylint: disable=no-name-in-module from up_tools.mesh import Mesh from up_tools.camera import (rotateY as rotateY, # pylint: disable=unused-import rotateX as rotateX) # pylint: disable=unused-import from up_tools.visualization import visualize_pose sys.path.insert(0, path.join(path.abspath(path.dirname(__file__)), '..', '..')) from config import SMPL_FP, DIRECT3D_DATA_FP, UP3D_FP try: # Robustify against setup. from smpl.serialization import load_model except ImportError: # pylint: disable=import-error try: from psbody.smpl.serialization import load_model except ImportError: sys.path.insert(0, SMPL_FP) from smpl_webuser.serialization import load_model LOGGER = logging.getLogger(__name__) DSET_ROOT_FP = DIRECT3D_DATA_FP MODEL_NEUTRAL_PATH = path.join( path.dirname(__file__), '..', '..', 'models', '3D', 'basicModel_neutral_lbs_10_207_0_v1.0.0.pkl') MODEL_NEUTRAL = load_model(MODEL_NEUTRAL_PATH) _TEMPLATE_MESH = Mesh(filename=path.join( path.dirname(__file__), '..', '..', 'models', '3D', 'template.ply')) if not path.exists(DSET_ROOT_FP): os.makedirs(DSET_ROOT_FP) def get_joints(indir): """Load the poses from an annotation tool dataset folder.""" if path.exists(path.join(indir, 'joints.mat')): joints = sio.loadmat(path.join(indir, 'joints.mat'))['joints'] else: joints = np.load(path.join(indir, 'joints.npz'))['poses'] if 'mpii' in indir: LOGGER.info("Using mpii joint set.") joints = joints[:, [0, 1, 2, 3, 4, 5, 10, 11, 12, 13, 14, 15, 8, 9], :] if joints.shape[0] > 3: joints = joints.transpose((1, 0, 2)) LOGGER.info("Joints for %d poses available.", joints.shape[2]) return joints def get_landmark_positions(stored_parameter_fp, # pylint: disable=too-many-locals, too-many-arguments resolution, resolution_orig, # pylint: disable=unused-argument landmarks, trans=(0, 0), # pylint: disable=unused-argument scale=1., steps_x=3, steps_y=12): """Get landmark positions for a given image.""" with open(stored_parameter_fp, 'rb') as inf: stored_parameters = pickle.load(inf) orig_pose = np.array(stored_parameters['pose']).copy() orig_rt = np.array(stored_parameters['rt']).copy() orig_trans = np.array(stored_parameters['trans']).copy() orig_t = np.array(stored_parameters['t']).copy() model = MODEL_NEUTRAL model.betas[:len(stored_parameters['betas'])] = stored_parameters['betas'] mesh = _TEMPLATE_MESH # Use always the image center for rendering. orig_t[0] = 0. orig_t[1] = 0. orig_t[2] /= scale # Prepare for rendering. angles_y = np.linspace(0., 2. * (1. - 1. / steps_y) * np.pi, steps_y) elevation_maxextent = (steps_x - 1) // 2 * 0.2 * np.pi angles_x = np.linspace(-elevation_maxextent, elevation_maxextent, steps_x) if steps_x == 1: # Assume plain layout. 
angles_x = (0.,) angles = itertools.product(angles_y, angles_x) landmark_positions = [] full_parameters = [] for angle_y, angle_x in angles: stored_parameters['rt'] = orig_rt.copy() stored_parameters['rt'][0] += angle_x stored_parameters['rt'][1] += angle_y ####################################################################### # Zero out camera translation and rotation and move this information # to the body root joint rotations and 'trans' parameter. #print orig_pose[:3] cam_rdg, _ = cv2.Rodrigues(np.array(stored_parameters['rt'])) per_rdg, _ = cv2.Rodrigues(np.array(orig_pose)[:3]) resrot, _ = cv2.Rodrigues(np.dot(per_rdg, cam_rdg.T)) restrans = np.dot(-np.array(orig_trans), cam_rdg.T) + np.array(orig_t) stored_parameters['pose'][:3] = (-resrot).flat stored_parameters['trans'][:] = restrans stored_parameters['rt'][:] = [0, 0, 0] stored_parameters['t'][:] = [0, 0, 0] ####################################################################### # Get the full rendered mesh. model.pose[:] = stored_parameters['pose'] model.trans[:] = stored_parameters['trans'] mesh.v = model.r mesh_points = mesh.v[tuple(landmarks.values()),] # Get the skeleton joints. J_onbetas = model.J_regressor.dot(mesh.v) skeleton_points = J_onbetas[(8, 5, 2, 1, 4, 7, 21, 19, 17, 16, 18, 20),] # Do the projection. camera = _odr_c.ProjectPoints( rt=stored_parameters['rt'], t=stored_parameters['t'], f=(stored_parameters['f'], stored_parameters['f']), c=np.array(resolution) / 2., k=np.zeros(5)) camera.v =
np.vstack((skeleton_points, mesh_points))
numpy.vstack
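# The block above zeroes the camera extrinsics and folds their rotation into
# the body's root joint via Rodrigues vectors: convert both to matrices,
# multiply, and convert back. A minimal sketch of that composition pattern
# (the helper name is illustrative):
import cv2
import numpy as np

def compose_rodrigues(rvec_a, rvec_b):
    """Axis-angle vector of the composed rotation R_a @ R_b."""
    R_a, _ = cv2.Rodrigues(np.asarray(rvec_a, dtype=np.float64))
    R_b, _ = cv2.Rodrigues(np.asarray(rvec_b, dtype=np.float64))
    rvec_ab, _ = cv2.Rodrigues(R_a @ R_b)
    return rvec_ab.ravel()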
import matplotlib
import matplotlib.pyplot as plt
import numpy as np

from util_plot import AddPlot

is_3d = True
ax, point_dim = AddPlot(is_3d).returns

# Number of points of cluster
n_cluster_points = 20
cluster_shape = (n_cluster_points, point_dim)
points_color_idx = []

# Set number of neighbors and (x, y) of test point.
neighbors_K = 3

# Spawn points
rand_points1 = 0 + 2 * np.random.randn(*cluster_shape)
rand_points2 = 7 + 3 * np.random.randn(*cluster_shape)
if is_3d:
    test_point = np.array([2, 5, 2])
    rand_points3 = [3, 0, 5] + 2 * np.random.randn(*cluster_shape)
else:
    test_point = np.array([2, 5])
    rand_points3 = [3, 0] + 2 *
np.random.randn(*cluster_shape)
numpy.random.randn
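# The snippet above is cut off before the classification step it sets up
# (neighbors_K, test_point). A hedged brute-force k-NN sketch under those
# names; points and labels are assumed to be stacked NumPy arrays:
import numpy as np

def knn_predict(test_point, points, labels, k=3):
    dists = np.linalg.norm(points - test_point, axis=1)
    nearest = np.argsort(dists)[:k]
    values, counts = np.unique(labels[nearest], return_counts=True)
    return values[np.argmax(counts)]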
# -*- coding: utf-8 -*- """ Created on Mon Jun 8 10:35:23 2020 @author: X202722 """ import itertools import functools import pandas as pd import numpy as np from runVAPS_rev5 import parameters from fitVapToExpValuesTest import clapeyron, clapeyronFit # from samplingCoefficients_fitall import samplingCoefficient def make_parameter_VAP_fit (Tb_sim, Tb_exp,T_sim, p_sim, p_exp,T_exp, method): import numpy as np # this function returns offsets for moller and nannoolal # TODO Schwerpunkt finden # fit if possible BPT guess to OffsetP = 0 # creates parameters based on fit withexperimental data para = parameters() # use fit from antoine equation to average experimental errors # p3 = antoine(antoine_result.x,T) # using clapeyron to avoid falling into antoine optimization fuck up problems para2 = clapeyronFit(T_exp, p_exp) p3 = clapeyron (para2,T_sim) if method == 0: #Moller #get parameters from GCT via explicit solution, important to use simulation values # so gct values can be reverse engineered para.Moller_Bs, para.Moller_Ds = ParaMollerRarey(p_sim[1], p_sim[3], Tb_sim, T_sim[1], T_sim[3]) # iteration throws out bullshit when p_sample > p_atmosphere T2=T_exp[p_exp<1000] p4=p_exp[p_exp<1000] # print(p3, para2) #calculate parameters for models with fitted experimental values and BPT from DDBST # para.Bs3,para.Ds3 = ParaMollerRarey(p4[0], p4[-1],Tb_exp, T2[0], T2[-1]) para.Bs3,para.Ds3 = samplingCoefficient(T2,p4, Tb_exp, para,T_exp,p_exp, 'Moller') OffsetP = [para.Bs3-para.Moller_Bs,para.Ds3-para.Moller_Ds] p_sim_scal = PresMollerRarey(para.Moller_Bs,para.Moller_Ds,Tb_sim,T_exp) elif method == 1: #Nannoolal # get GCT values from DDBST Ds_sim= ParaNannoolalRarey(p_sim[1], Tb_sim, T_sim[1]) # create true values from experimental data # Ds_exp= ParaNannoolalRarey(p_exp[1], Tb_exp, T_sim[1]) _, Ds_exp = samplingCoefficient(T_exp, p_exp, Tb_exp, para,T_exp,p_exp, 'Nannolal') #calculate offset OffsetP= Ds_exp-Ds_sim p_sim_scal = PresNannoolalRarey(Ds_sim, Tb_sim, T_exp) return OffsetP, p_sim_scal def samplingCoefficient(T2,p4, Tb, para,T,p, model): #loop through all data points from experiment and check whether the fit works or not # take closest fit to boiling point res_fits = [] # p2 = p[p<1000] # T2 = T[p<1000] if model == 'Moller': combi = pd.DataFrame(itertools.combinations(range(len(p4)), 2)) for a in combi.index: # print(a) a,b = combi.iloc[a,:] # print(ParaMollerRarey_Tb(p4[a], p4[b],p4[c], Tb, T2[a], T2[b], T2[c],para.Moller_Bs, para.Moller_Ds)) res_fits.append(ParaMollerRarey(p4[a], p4[b], Tb, T2[a], T2[b])) # res_fits.append(ParaMollerRarey_Tb(p2[a], p2[b],p2[c], Tb, T2[a], T2[b], T2[c],para.Moller_Bs, para.Moller_Ds)) elif model == 'Nannolal': combi = pd.DataFrame(itertools.combinations(range(len(T2)), 2)) for a in combi.index: # print(a) a,b = combi.iloc[a,:] # print(ParaMollerRarey_Tb(p4[a], p4[b],p4[c], Tb, T2[a], T2[b], T2[c],para.Moller_Bs, para.Moller_Ds)) Tb = ParaNannoolalRarey_Tb(p4[a], T2[a], p4[b], T2[b]) Bs = ParaNannoolalRarey(p4[a], Tb, T2[a]) res_fits.append([Tb, Bs]) res_fits = pd.DataFrame(res_fits) if model == 'Moller': # res_fits['diffTb'] = np.abs(res_fits.iloc[:,2]-Tb) # res_fits['diffpT'] = res_fits.apply(lambda x:np.sum(np.abs(PresMollerRarey(x.iloc[0],x.iloc[1],x.iloc[2],T2)-p4)/p4),axis = 1) # res_fits_fit = res_fits[[0,1]][np.logical_and(res_fits[2]>Tb-200,res_fits[2]<2000)] res_fits_fit = res_fits elif model == 'Nannolal': res_fits['diffTb'] = np.abs(res_fits.iloc[:,0]-Tb) res_fits['diffpT'] = res_fits.apply(lambda 
x:np.sum(np.abs(PresNannoolalRarey(x.iloc[1],x.iloc[0],T2)-p4)/p4),axis = 1) res_fits_fit = res_fits[[0,1]][np.logical_and(res_fits[0]>Tb-200,res_fits[0]<2000)] # res_fits = res_fits.sort_values(by = 'diffTb', axis = 0).reset_index() return np.mean(res_fits_fit ) def useOffset_VAP(OffsetP, p_sim, Tb_sim, T_sim,Tb_opt, T_calc, method): # this function calculates optimized vapor pressure # First get GCT values then calculate needed P if method == 0: #MollerMethod Bs, Ds = ParaMollerRarey(p_sim[1], p_sim[3], Tb_sim, T_sim[1], T_sim[3]) VAPopt = PresMollerRarey(Bs+OffsetP[0],Ds+OffsetP[1],Tb_opt,T_calc) if method == 1: #Nannoolal Method Ds_sim= ParaNannoolalRarey(p_sim[1], Tb_sim, T_sim[1]) VAPopt = PresNannoolalRarey(Ds_sim+OffsetP,Tb_opt,T_calc) return VAPopt def ParaMollerRarey(p1,p2,Tb,T1,T2): import numpy as np # calculate GI parameters from RareyMoller pressure is taken in mbar C = -2.65+np.power(Tb,1.485)/135 # print(C) S1 = (T1-Tb)/(T1-C) S2 = (T2-Tb)/(T2-C) F1 = np.log(T1/Tb) F2 = np.log(T2/Tb) lp1 = np.log(p1/1013.25) lp2 = np.log(p2/1013.25) Ds = (lp1/S1-lp2/S2)/(F1/S1-F2/S2) Bs = (lp1-Ds*F1)/S1 # print('check',PresMollerRarey(Bs, Ds, Tb, T2)/p2) # print('check',PresMollerRarey(Bs, Ds, Tb, T1)/p1) return Bs,Ds def equationsMoller(k,p,T): import numpy as np T_b = k[0] B = k[1] D = k[2] # print(np.log(T[0]/T_b)-p[0]) return [(B*(T[0]-T_b))/(T[0]-(-2.65+
np.power(T_b,1.435)
numpy.power
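# PresMollerRarey is called above but never defined in this excerpt. The
# following is a hedged reconstruction obtained by inverting the algebra of
# ParaMollerRarey (pressure in mbar; note that ParaMollerRarey uses the
# exponent 1.485 while equationsMoller uses 1.435, and the reconstruction
# follows the former):
import numpy as np

def PresMollerRarey(Bs, Ds, Tb, T):
    C = -2.65 + np.power(Tb, 1.485) / 135
    S = (T - Tb) / (T - C)
    F = np.log(T / Tb)
    return 1013.25 * np.exp(Bs * S + Ds * F)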
import numpy as np
import cv2 as cv

# function to read image at a given path
def readImage(path):
    # the second argument of imread should be an IMREAD_* flag;
    # IMREAD_GRAYSCALE gives the intended 8-bit single-channel read
    return cv.imread(path, cv.IMREAD_GRAYSCALE)

# function to save image using given image name
def saveImage(img, image_name):
    cv.imwrite(image_name, img)

# function to show image until user presses any key to close it
def showImage(img):
    cv.imshow('img', img)
    cv.waitKey(0)

# ones-kernel of dimensions size*size
def blockKernel(size):
    return np.ones((size, size), np.uint8)

# disk kernel of radius "size"
def diskKernel(size):
    return cv.getStructuringElement(cv.MORPH_ELLIPSE, (2*size - 1, 2*size - 1))

path = 'line.bmp'

# read image from given path
img = readImage(path)

# apply opening operation to remove lines/rectangles
openImg = cv.morphologyEx(img, cv.MORPH_OPEN, diskKernel(6))

# save output of opening
saveImage(openImg, '2_open.jpg')

# find number of connected components in the image
output = cv.connectedComponentsWithStats(
np.uint8(openImg)
numpy.uint8
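# cv.connectedComponentsWithStats returns the label count, the label map,
# per-component stats, and centroids; label 0 is the background. A sketch of
# unpacking the call begun above:
num_labels, labels, stats, centroids = cv.connectedComponentsWithStats(np.uint8(openImg))
n_objects = num_labels - 1  # subtract the background component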
import random import cv2 import numpy as np from aug import Operation, perform_randomly from aug import SaltNoise from aug.ops import utils @perform_randomly class Erosion(Operation): def __init__(self, kernel_size=5, reversed=False): self._kernel_size = kernel_size self._reversed = reversed def apply_on_image(self, image): self._kernel_size = self._kernel_size if self._kernel_size % 2 != 0 else self._kernel_size + 1 kernel = np.ones((self._kernel_size, self._kernel_size), np.uint8) er, dil = cv2.erode, cv2.dilate if self._reversed: er, dil = dil, er image[:, :, 0] = er(image[:, :, 0], kernel, iterations=1) image[:, :, 1] = er(image[:, :, 1], kernel, iterations=1) image[:, :, 2] = er(image[:, :, 2], kernel, iterations=1) if image.shape[2] > 3: image[:, :, 3] = dil(image[:, :, 3], kernel, iterations=1) return image @perform_randomly class Dilation(Operation): def __init__(self, kernel_size=3): self._kernel_size = kernel_size def apply_on_image(self, image): return Erosion(kernel_size=self._kernel_size, reversed=True).apply_on_image(image) class BoundingBoxesFinder(object): """ Find bounding boxes of letters. """ def apply_on_image(self, in_image): last_empty = None last_letter = None top_border = None bottom_border = None borders = [] image = in_image.copy() if len(image.shape) == 3: image = cv2.cvtColor(image, cv2.COLOR_RGBA2GRAY) im_height, im_width = image.shape[:2] # Find top/bottom border for i in range(im_height): column_sum = sum(image[i, :]) if column_sum != 255 * im_width and top_border is None: top_border = i column_sum = sum(image[-i, :]) if column_sum != 255 * im_width and bottom_border is None: bottom_border = im_height - i if top_border is not None and bottom_border is not None: break # Find vertical borders for i in range(im_width): column_sum = sum(image[:, i]) if column_sum != 255 * im_height: if last_letter != i - 1: borders.append(i) last_letter = i else: if last_empty is not None and last_empty != i - 1: borders.append(i) last_empty = i vertical_borders = sorted(borders) crop_borders = [] for i in range(len(vertical_borders), 2): crop_borders.append( [top_border, bottom_border, vertical_borders[i], vertical_borders[i + 1]]) return crop_borders class SeparatedLettersErosionOrDilation: EROSION_MODE = 0 DILATION_MODE = 1 MIX_MODE = 2 # padding - distance between countour and box borders def __init__(self, mode=MIX_MODE, padding=6, iterations=(1, 6), kernel_size=(5, 5), salt_noise=True): assert mode in [self.EROSION_MODE, self.DILATION_MODE, self.MIX_MODE] self._padding = padding self._mode = mode self._iterations = iterations self._kernel_size = kernel_size self._salt_noise = salt_noise def apply_on_image(self, image): im_height, im_width = image.shape[:2] if self._salt_noise: image = SaltNoise(p=1., percent=random.uniform(0.0001, 0.001)).apply_on_image(image) imgray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) ret, thresh = cv2.threshold(imgray, 75, 150, 3) thresh = 255 - thresh img, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) dst = image for i in range(len(contours)): mask = np.zeros_like(img) cv2.drawContours(mask, contours, i, 255, -1) x, y = np.where(mask == 255) topx, topy = np.min(x), np.min(y) bottomx, bottomy = np.max(x), np.max(y) out = image[topx - self._padding:bottomx + self._padding, topy - self._padding:bottomy + self._padding] # out = 255 - out kernel = cv2.getStructuringElement( random.choice([cv2.MORPH_ELLIPSE, cv2.MORPH_CROSS, cv2.MORPH_RECT]), self._kernel_size) if not self._mode == self.MIX_MODE: if self._mode 
== self.EROSION_MODE: transformed = cv2.erode(out, kernel, iterations=random.randint(*self._iterations)) elif self._mode == self.DILATION_MODE: transformed = cv2.dilate(out, kernel, iterations=random.randint(*self._iterations)) else: raise Exception('Unknown mode') else: if random.randint(0, 1): transformed = cv2.erode(out, kernel, iterations=random.randint(*self._iterations)) else: transformed = cv2.dilate(out, kernel, iterations=random.randint(*self._iterations)) transformed = 255 - transformed dst[topx - self._padding:bottomx + self._padding, topy - self._padding:bottomy + self._padding] = transformed dst = cv2.resize(dst, (im_width, im_height), interpolation=cv2.INTER_CUBIC) return dst @perform_randomly class ScatterLetters(Operation): def __init__(self, max_dev_ox=0.02, max_dev_oy=0.15): self._max_devx = max_dev_ox self._max_devy = max_dev_oy def apply_on_image(self, image): im_height, im_width = image.shape[:2] fill_color = (255, 255, 255, 0) h = int(self._max_devy * im_height + 1) w = int(self._max_devx * im_width + 1) image = cv2.copyMakeBorder(image, h, h, w, w, cv2.BORDER_CONSTANT, value=fill_color) borders = BoundingBoxesFinder().apply_on_image(image) for b in borders: y1, y2, x1, x2 = b ox_dev = int(random.uniform(-self._max_devx, self._max_devx) * im_width) / 2 oy_dev = int(random.uniform(-self._max_devy, self._max_devy) * im_height) / 2 tmp_x1, tmp_x2 = x1 + ox_dev, x2 + ox_dev tmp_y1, tmp_y2 = y1 + oy_dev, y2 + oy_dev tmp_tensor = image[y1:y2, x1:x2].copy() image[max(0, y1 - 1):min(image.shape[0], y2 + 1), max(0, x1 - 1):min(image.shape[1], x2 + 1)] = fill_color image[tmp_y1:tmp_y2, tmp_x1:tmp_x2] = tmp_tensor return cv2.resize(image, (im_width, im_height), interpolation=cv2.INTER_CUBIC) @perform_randomly class Noise(Operation): def __init__(self, mode='normal'): self._mode = mode assert self._mode in ['dotted', 'normal'] def noise(self, mask, image, color_diff=10, percent=0.05, radius=10): im_height, im_width = image.shape[:2] tmp = image.copy() tmp = 255 - tmp tmp = cv2.cvtColor(tmp, cv2.COLOR_RGBA2GRAY) _, tmp = cv2.threshold(tmp, 1, 255, cv2.THRESH_BINARY) number = int(percent * im_height * im_width) for _ in range(number): c = random.randint(0, color_diff) color = [c, c, c, 255] oy = random.randint(0, im_height - 1) ox = random.randint(0, im_width - 1) if mask[oy, ox]: cv2.circle(image, (ox, oy), 0, color, radius) return image def apply_noises(self, img, configs): mask = img.copy() mask = cv2.cvtColor(mask, cv2.COLOR_RGBA2GRAY) mask = 255 - mask img2 = np.zeros(img.shape, dtype=np.uint8) img2[:, :, :3] = 255 config = random.choice(configs) for params in config: img2 = self.noise(mask, img2, *params) return img2 def apply_noises_dotted_font(self, img): """ Apply different kinds of noises defined in configs (dotted fonts) Single config row: [x, y, z] x - max deviation from base color y - density of noise in percent z - radius of single dot """ configs = [[ [20, 2.2, 1], ]] return self.apply_noises(img, configs) def apply_noises_normal_font(self, img): """ Apply different kinds of noises defined in configs (normal fonts) Single config row: [x, y, z] x - max deviation from base color y - density of noise in percent z - radius of single dot """ configs = [[ [20, 0.7, 1], [100, 0.01, 7], [70, 0.05, 4], ], [ [20, 0.25, 3], [40, 0.2, 2], [130, 0.01, 2], ], [ [20, 2.2, 1], ]] return self.apply_noises(img, configs) def apply_on_image(self, image): if self._mode == 'normal': return self.apply_noises_normal_font(image) if self._mode == 'dotted': return 
self.apply_noises_dotted_font(image) @perform_randomly class RandomSizeBorder(Operation): def __init__(self, max_border=.1, horizontal_sides_probability=.5, vertical_sides_probability=.5): self._max_border = max_border self._horizontal_sides_probability = horizontal_sides_probability self._vertical_sides_probability = vertical_sides_probability def apply_on_image(self, image): im_height, im_width = image.shape[:2] borders = [ 0 if random.random() < self._horizontal_sides_probability else int( random.uniform(0., self._max_border * im_height)) for _ in range(2) ] borders.extend([ 0 if random.random() < self._vertical_sides_probability else int( random.uniform(0., self._max_border * im_width)) for _ in range(2) ]) image = cv2.copyMakeBorder(image, *borders, borderType=cv2.BORDER_CONSTANT, value=(255, 255, 255, 0)) return cv2.resize(image, (im_width, im_height), interpolation=cv2.INTER_CUBIC) @perform_randomly class HorizontalCut(Operation): def __init__(self, left=.1, right=.1, rescale=True, horizontal=True): self._left = left self._right = right self._rescale = rescale self._horizontal = horizontal def apply_on_image(self, image): im_height, im_width = image.shape[:2] if self._horizontal: left = int(im_width * self._left) right = int(im_width * self._right) image = image[:, left:im_width - right] else: top = int(im_height * self._left) bottom = int(im_height * self._right) image = image[top:im_height - bottom, :] if self._rescale: image = cv2.resize(image, (im_width, im_height), interpolation=cv2.INTER_CUBIC) return image @perform_randomly class VerticalCut(Operation): def __init__(self, top=.1, bottom=.1, rescale=True): self._top = top self._bottom = bottom self._rescale = rescale def apply_on_image(self, image): return HorizontalCut(self._top, self._bottom, self._rescale, horizontal=False).apply_on_image(image) @perform_randomly class Scratches(Operation): """ Scratches will be drawn only in box witch coords: left_top_corner=(min_x, min_y) and right_bottom_corner=(max_x, max_y) if corods will be not set, the scratches will be drawn on whole image """ def __init__(self, num_scratches=20, alpha=None): self._min_x = None self._max_x = None self._min_y = None self._max_y = None self._num_scratches = num_scratches self._alpha = alpha if alpha is not None else .5 def test_probability(self, prob): n = random.randint(0, 100) return n <= prob def apply_on_image(self, image): h, w = image.shape[:2] min_x, min_y = 0, 0 max_x, max_y = 2 * w, 2 * h scratches = np.zeros((max_y, max_x, 3), np.uint8) scratches[:] = 0 # main scratch for i in range(0, self._num_scratches): x1 = random.randint(min_x, max_x) x2 = random.randint(min_x, max_x) y1 = random.randint(min_y, max_y) y2 = random.randint(min_y, max_y) color = tuple([random.randint(0, 255)] * 3) cv2.line(scratches, (x1, y1), (x2, y2), color, thickness=1, lineType=cv2.LINE_AA) # additional scratches for main scratch num_additional_scratches = random.randint(1, 4) for j in range(0, num_additional_scratches): if self.test_probability(35): new_color = random.randint(15, 70) param_x1 = random.randint(1, 5) param_x2 = random.randint(1, 5) param_y1 = random.randint(1, 5) param_y2 = random.randint(1, 5) cv2.line(scratches, (x1 - param_x1, y1 - param_x2), (x2 - param_y1, y2 - param_y2), (new_color, new_color, new_color), thickness=1, lineType=cv2.LINE_AA) top, bottom = h // 2, scratches.shape[0] - (h - h // 2) left, right = w // 2, scratches.shape[1] - (w - w // 2) scratches = scratches[top:bottom, left:right] dst = cv2.addWeighted(image[:, :, :3], 1.0, 
scratches, self._alpha, 0.0) return cv2.resize(dst, (w, h), interpolation=cv2.INTER_CUBIC) @perform_randomly class TextureModification(Operation): """ Creates effect of dirt/dust. """ def __init__(self, blur_kernel=(3, 3), emboss_kernel_size=None, alpha=None): self._blur_kernel = blur_kernel self._emboss_kernel_size = random.choice([9, 11]) if \ emboss_kernel_size is None else emboss_kernel_size self._alpha = random.uniform(0.4, 0.7) if alpha is None else alpha def apply_on_image(self, image): def create_emboss_kernel_top_down(size): assert size % 2 == 1, "Kernel must be of an uneven size!" k = np.ones((size, size), dtype=np.int) for i in range(size): for j in range(size): k[i][j] = -1 if i > (size - 1) / 2: k[i][j] = 1 if i == (size - 1) / 2: k[i][j] = 0 return k h, w = image.shape[:2] k_size = max(int((h + w) // 300), 3) self._blur_kernel = k_size, k_size # creating 'dirt' random_noise = np.random.randint(0, 256, (h, w, 3), dtype=np.uint8) dirt_kernel = create_emboss_kernel_top_down(self._emboss_kernel_size) dirt_colour = cv2.filter2D(random_noise, -1, dirt_kernel) gray_dirt = cv2.cvtColor(dirt_colour, cv2.COLOR_BGR2GRAY) # back to 3 channels (can't use addWeighted() to add images that have different number of channels) gray_dirt_3_channels = cv2.cvtColor(gray_dirt, cv2.COLOR_GRAY2BGR) blurred_dirt = cv2.blur(gray_dirt_3_channels, self._blur_kernel) blurred_dirt = utils.unify_num_of_channels(image, blurred_dirt) final = cv2.addWeighted(image, 1.0, blurred_dirt, self._alpha, 0.0) return final @perform_randomly class Jitter(Operation): def __init__(self, magnitude=.25): super().__init__() self._magnitude = magnitude def apply_on_image(self, image): if image.ndim == 3: w, h, c = image.shape[:3] else: w, h = image.shape[:2] c = 1 magnitude = int(min(w, h) / 10 * self._magnitude) noise_x = np.random.randint(magnitude, size=w * h) - magnitude // 2 noise_y = np.random.randint(magnitude, size=w * h) - magnitude // 2 indices_x = np.clip(noise_x +
np.arange(w * h)
numpy.arange
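# The Jitter op above is cut off while building the clipped flat indices.
# A hedged standalone sketch of the remaining remap step (not the class's
# exact code): displace each pixel by a small random offset.
import numpy as np

def jitter_remap(image, magnitude):
    h, w = image.shape[:2]
    magnitude = max(int(magnitude), 1)  # np.random.randint needs a positive bound
    noise_x = np.random.randint(magnitude, size=h * w) - magnitude // 2
    noise_y = np.random.randint(magnitude, size=h * w) - magnitude // 2
    xs = np.clip(noise_x + np.tile(np.arange(w), h), 0, w - 1)
    ys = np.clip(noise_y + np.repeat(np.arange(h), w), 0, h - 1)
    return image[ys, xs].reshape(image.shape)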
""" @brief Evaluation metrics for segmentation applications. @author <NAME> (<EMAIL>) @date 30 Oct 2019. """ import numpy as np from scipy import ndimage from evaluation_metrics import lookup_tables def _binarize(seg, fg_class=1): """ Binarize a segmentation with label 1 for pixels/voxels the foreground class and label 0 for pixels/voxels the other classes. :param seg: int numpy array. :param fg_class: int; class in seg corresponding to the foreground. :return: binary segmentation corresponding to seg for the foreground class fg_class. """ bin_seg = np.zeros_like(seg, dtype=bool) bin_seg[seg == fg_class] = True return bin_seg # Basic metrics def true_positives(seg_pred, seg_gt): """ Number of True Positives for the predicted segmentation seg_pred and the ground-truth segmentation seg_gt. :param seg_pred: numpy bool array. :param seg_gt: numpy bool array. :return: int; number of true positives. """ assert seg_pred.dtype == np.bool, "seg_1 should be of type bool, " \ "found %s instead." % seg_pred.dtype assert seg_gt.dtype == np.bool, "seg_2 should be of type bool, " \ "found %s instead." % seg_gt.dtype num_tp = np.sum(seg_pred * seg_gt) return num_tp def false_positives(seg_pred, seg_gt): """ Number of False Positives for the predicted segmentation seg_pred and the ground-truth segmentation seg_gt. :param seg_pred: numpy bool array. :param seg_gt: numpy bool array. :return: int; number of false positives. """ assert seg_pred.dtype == np.bool, "seg_1 should be of type bool, " \ "found %s instead." % seg_pred.dtype assert seg_gt.dtype == np.bool, "seg_2 should be of type bool, " \ "found %s instead." % seg_gt.dtype num_fp = np.sum(seg_pred * (1 - seg_gt)) return num_fp def false_negatives(seg_pred, seg_gt): """ Number of False Negatives for the predicted segmentation seg_pred and the ground-truth segmentation seg_gt. :param seg_pred: numpy bool array. :param seg_gt: numpy bool array. :return: int; number of false negatives. """ assert seg_pred.dtype == np.bool, "seg_1 should be of type bool, " \ "found %s instead." % seg_pred.dtype assert seg_gt.dtype == np.bool, "seg_2 should be of type bool, " \ "found %s instead." % seg_gt.dtype num_fn = np.sum((1 - seg_pred) * seg_gt) return num_fn def true_negatives(seg_pred, seg_gt): """ Number of True Negatives for the predicted segmentation seg_pred and the ground-truth segmentation seg_gt. :param seg_pred: numpy bool array. :param seg_gt: numpy bool array. :return: int; number of true negatives. """ assert seg_pred.dtype == np.bool, "seg_1 should be of type bool, " \ "found %s instead." % seg_pred.dtype assert seg_gt.dtype == np.bool, "seg_2 should be of type bool, " \ "found %s instead." % seg_gt.dtype num_tn = np.sum((1 - seg_pred) * (1 - seg_gt)) return num_tn # Dice scores and variants def dice_score(seg_1, seg_2, fg_class=1): """ Compute the Dice score for class fg_class between the segmentations seg_1 and seg_2. For explanation about the formula used to compute the Dice score coefficient, see for example: "Generalised Wasserstein Dice Score for Imbalanced Multi-class Segmentation using Holistic Convolutional Networks", <NAME>, BrainLes 2017. :param seg_1: numpy int array. :param seg_2: numpy int array. :param fg_class: int. :return: float; Dice score value. """ assert seg_1.shape == seg_2.shape, "seg_1 and seg_2 must have the same shape " \ "to compute their dice score." 
# binarize the segmentations bin_seg_1 = _binarize(seg_1, fg_class=fg_class) bin_seg_2 = _binarize(seg_2, fg_class=fg_class) # compute the Dice score value tp = true_positives(bin_seg_1, bin_seg_2) fp = false_positives(bin_seg_1, bin_seg_2) fn = false_negatives(bin_seg_1, bin_seg_2) if tp + fp + fn == 0: # empty foreground for seg_1 and seg_2 dice_val = 1. else: dice_val = 2. * tp / (2. * tp + fp + fn) return dice_val def mean_dice_score(seg_1, seg_2, labels_list=[0, 1]): """ Compute the mean of the Dice scores for the labels in labels_list between the segmentations seg_1 and seg_2. :param seg_1: numpy int array. :param seg_2: numpy int array. :param labels_list: int list. :return: """ assert len(labels_list) > 0, "the list of labels to consider for the mean dice score" \ "must contain at least one label" dice_values = [] for l in labels_list: dice = dice_score(seg_1, seg_2, fg_class=l) dice_values.append(dice) mean_dice = np.mean(dice_values) return mean_dice # Jaccard index and variants def jaccard(seg_1, seg_2, fg_class=1): """ Compute the Jaccard for class fg_class between the segmentations seg_1 and seg_2. :param seg_1: numpy int array. :param seg_2: numpy int array. :param fg_class: int. :return: float; Jaccard value. """ assert seg_1.shape == seg_2.shape, "seg_1 and seg_2 must have the same shape " \ "to compute their dice score" # binarize the segmentations bin_seg_1 = _binarize(seg_1, fg_class=fg_class) bin_seg_2 = _binarize(seg_2, fg_class=fg_class) # compute the Jaccard index value tp = true_positives(bin_seg_1, bin_seg_2) fp = false_positives(bin_seg_1, bin_seg_2) fn = false_negatives(bin_seg_1, bin_seg_2) if tp + fp + fn == 0: # empty foreground for seg_1 and seg_2 jaccard = 1. else: jaccard = tp / (tp + fp + fn) return jaccard # Surface distances def haussdorff_distance(mask_gt, mask_pred, fg_class, percentile=100, spacing_mm=[0.8, 0.8, 0.8]): bin_mask_gt = np.squeeze(_binarize(mask_gt, fg_class=fg_class)) bin_mask_pred = np.squeeze(_binarize(mask_pred, fg_class=fg_class)) surface_distances = compute_surface_distances( bin_mask_gt, bin_mask_pred, spacing_mm) haussdorff_dist_value = compute_robust_hausdorff(surface_distances, percentile) return haussdorff_dist_value def compute_surface_distances(mask_gt, mask_pred, spacing_mm): """ Compute closest distances from all surface points to the other surface. Finds all surface elements "surfels" in the ground truth mask `mask_gt` and the predicted mask `mask_pred`, computes their area in mm^2 and the distance to the closest point on the other surface. It returns two sorted lists of distances together with the corresponding surfel areas. If one of the masks is empty, the corresponding lists are empty and all distances in the other list are `inf`. :param mask_gt: 3-dim Numpy array of type bool. The ground truth mask. :param mask_pred: 3-dim Numpy array of type bool. The predicted mask. :param spacing_mm: 3-element list-like structure. Voxel spacing in x0, x1 and x2 direction. :return: A dict with: "distances_gt_to_pred": 1-dim numpy array of type float. The distances in mm from all ground truth surface elements to the predicted surface, sorted from smallest to largest. "distances_pred_to_gt": 1-dim numpy array of type float. The distances in mm from all predicted surface elements to the ground truth surface, sorted from smallest to largest. "surfel_areas_gt": 1-dim numpy array of type float. 
The area in mm^2 of the ground truth surface elements in the same order as distances_gt_to_pred "surfel_areas_pred": 1-dim numpy array of type float. The area in mm^2 of the predicted surface elements in the same order as distances_pred_to_gt """ # compute the area for all 256 possible surface elements # (given a 2x2x2 neighbourhood) according to the spacing_mm neighbour_code_to_surface_area = np.zeros([256]) for code in range(256): normals = np.array(lookup_tables.neighbour_code_to_normals[code]) sum_area = 0 for normal_idx in range(normals.shape[0]): # normal vector n = np.zeros([3]) n[0] = normals[normal_idx, 0] * spacing_mm[1] * spacing_mm[2] n[1] = normals[normal_idx, 1] * spacing_mm[0] * spacing_mm[2] n[2] = normals[normal_idx, 2] * spacing_mm[0] * spacing_mm[1] area = np.linalg.norm(n) sum_area += area neighbour_code_to_surface_area[code] = sum_area # compute the bounding box of the masks to trim # the volume to the smallest possible processing subvolume mask_all = mask_gt | mask_pred bbox_min = np.zeros(3, np.int64) bbox_max = np.zeros(3, np.int64) # max projection to the x0-axis proj_0 = np.max(np.max(mask_all, axis=2), axis=1) idx_nonzero_0 = np.nonzero(proj_0)[0] if len(idx_nonzero_0) == 0: # pylint: disable=g-explicit-length-test return {"distances_gt_to_pred": np.array([]), "distances_pred_to_gt": np.array([]), "surfel_areas_gt": np.array([]), "surfel_areas_pred": np.array([])} bbox_min[0] = np.min(idx_nonzero_0) bbox_max[0] = np.max(idx_nonzero_0) # max projection to the x1-axis proj_1 = np.max(
np.max(mask_all, axis=2)
numpy.max
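# A tiny worked example of dice_score as defined above: with one true
# positive, one false positive and one false negative,
# Dice = 2*TP / (2*TP + FP + FN) = 2 / 4 = 0.5.
# (Note: the asserts above compare against np.bool, which recent NumPy
# versions removed; plain bool is the modern spelling.)
import numpy as np

seg_a = np.array([[1, 1], [0, 0]])
seg_b = np.array([[1, 0], [1, 0]])
# dice_score(seg_a, seg_b, fg_class=1) evaluates to 0.5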
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Runs eval metrics for the shilling attack experiment in Section 4."""
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
# pylint: disable=dangerous-default-value
# pylint: disable=invalid-name
# pylint: disable=C6204

import collections
import copy
import json
import os

import matplotlib
matplotlib.use('Agg')
from matplotlib.lines import Line2D
import matplotlib.pyplot as plt
import numpy
import pandas as pd
import seaborn as sns

sns.set_style('whitegrid')

# User-defined hyperparameters for the experiment. These should match the first
# three user parameters in polblogs_experiment.py
SAVE_DIR = 'experiment_data/shilling'
NUMBER_OF_EXPERIMENTS = 10

# Copy line 739 and 742 from shilling_experiment.py
methods = ['deepwalk', 'glove', 'monet0', 'monet', 'random', 'nlp']
DB_LEVELS = [v / 100.0 for v in list(range(75, 100, 5)) + [50, 25]]

################################################################################

# Register results saving directory
EVAL_SAVE_DIR = os.path.join(SAVE_DIR, 'exp_results')
if not os.path.isdir(EVAL_SAVE_DIR):
  os.mkdir(EVAL_SAVE_DIR)

# Helper function to get method name from debias (DB) level
monet_alpha_encoder = lambda x: 'monet%0.2f' % x

# Register names of methods and display names
methods.extend([monet_alpha_encoder(db_level) for db_level in DB_LEVELS])
replace_dict = {
    'deepwalk': 'DeepWalk',
    'monet0': 'GloVe_meta',
    'monet': 'MONET_G',
    'random': 'Random',
    'glove': 'GloVe',
    'nlp': 'NLP'
}


def movielens_result_2d(  # pylint: disable=dangerous-default-value, missing-docstring
    df,
    cpalette,
    ppalette,
    figsize=(13, 10),
    title='Attacked Vids in Top-20 vs MRR-Lift, k=20',
    xtitle=None,
    ytitle=None,
    ignore_methods=['Random', 'Adversary', 'MONET_G-0.75', 'MONET_G-0.25'],
    x_col='MRR@k / random-MRR@k',
    x_subtitle='(higher better)',
    y_col='Attacked Vids in Top-20',
    y_subtitle='(lower better)',
    method_col='Method',
    annotate_size=26.0,
    title_size=40.0,
    ax_label_size=28.0,
    ax_tick_size=26.0,
    legend_text_size=26.0,
    xlim=(3.0, 8.0),
    ylim=(-0.5, 11.0),
    markersize=300,
    legend_markersize=18,
    text_loff1=0.7,
    text_uoff1=0.1,
    text_loff2=0.35,
    text_uoff2=0.25,
    legpos='lower right',
    filename=None):
  if xtitle is None:
    xtitle = x_col
  if ytitle is None:
    ytitle = y_col
  method_names = cpalette.keys()

  # General figure specs
  _ = plt.figure(figsize=figsize)
  plt.rc('axes', titlesize=title_size)  # fontsize of the axes title
  plt.rc('axes', labelsize=ax_label_size)  # fontsize of the x and y labels
  plt.rc('xtick', labelsize=ax_tick_size)  # fontsize of the tick labels
  plt.rc('ytick', labelsize=ax_tick_size)  # fontsize of the tick labels
  plt.rc('legend', fontsize=legend_text_size)  # legend fontsize
  plt.suptitle(title, fontsize=title_size)
  plt.title('')
  plt.xlim(xlim)
  plt.ylim(ylim)
  plt.xlabel(xtitle)
  custom_points = []

  # Plotting individual results
  for m in method_names:
    if m not in ignore_methods:
      x_mean = numpy.mean(df[df[method_col] == m][x_col])
      y_mean = numpy.mean(df[df[method_col] == m][y_col])
      plt.scatter(
          x=x_mean, y=y_mean, marker=ppalette[m], color=cpalette[m],
          s=markersize)
      plt.xlabel('%s\n%s' % (xtitle, x_subtitle))
      plt.ylabel('%s\n%s' % (ytitle, y_subtitle))
      if 'MONET' in m:
        if m == 'MONET_G':
          text = r'$\lambda$=1.00'
          custom_points.append(
              Line2D([0], [0],
                     color='w',
                     marker=ppalette[m],
                     markerfacecolor=cpalette[m],
                     label=m,
                     markersize=legend_markersize))
        else:
          text = r'$\lambda$=%s' % m[-4:]
        if m[-2:] == '50':
          plt.annotate(
              text, (x_mean - text_loff2, y_mean + text_uoff2),
              size=annotate_size)
        else:
          plt.annotate(
              text, (x_mean - text_loff1, y_mean + text_uoff1),
              size=annotate_size)
      else:
        custom_points.append(
            Line2D([0], [0],
                   color='w',
                   marker=ppalette[m],
                   markerfacecolor=cpalette[m],
                   label=m,
                   markersize=legend_markersize))

  # Plot GloVe_meta again
  m = 'GloVe_meta'
  x_mean = numpy.mean(df[df[method_col] == m][x_col])
  y_mean = numpy.mean(df[df[method_col] == m][y_col])
  plt.scatter(
      x=x_mean, y=y_mean, marker=ppalette[m], color=cpalette[m], s=markersize)
  plt.legend(
      handles=custom_points,
      loc=legpos,
      numpoints=1,
      shadow=True,
      fancybox=False)
  if filename is not None:
    plt.savefig(filename, bbox_inches='tight')


# Load results and create master list
exp_result_list = []
for experiment_number in range(NUMBER_OF_EXPERIMENTS):
  exp_save_dir = os.path.join(SAVE_DIR, 'experiment%d' % experiment_number)
  with open(os.path.join(exp_save_dir, '%d.txt' % experiment_number)) as f:
    exp_result = json.loads(f.read())
  exp_result_list.append(exp_result)
result_df = pd.DataFrame(exp_result_list)

# Create timing and embedding distance CIs
distcorr_dict = collections.defaultdict(list)
time_dict = collections.defaultdict(list)
for exp_result in exp_result_list:
  for method in methods:
    if '.' not in method:
      distcorr_dict[method].append(exp_result['%s_vs_glove_distcorr' % method])
      if method not in ['nlp', 'random']:
        time_dict[method].append(exp_result['%s_time' % method])

# Change dict names to display names
for method in methods:
  if method in time_dict:
    time_dict[replace_dict[method]] = time_dict[method]
    del time_dict[method]
  if method in distcorr_dict:
    distcorr_dict[replace_dict[method]] = distcorr_dict[method]
    del distcorr_dict[method]


def m_pm_s3(m, ss):
  return '%0.3f $\pm$ %0.3f' % (m, ss)  # pylint: disable=anomalous-backslash-in-string


def m_pm_sint(m, ss):
  return '%d $\pm$ %d' % (m, ss)  # pylint: disable=anomalous-backslash-in-string


def two_col_float_with_std(name, mm1, ss1, mm2, ss2):
  if numpy.isnan(mm2):
    string2 = 'N/A'
  else:
    string2 = m_pm_sint(round(mm2), round(ss2))
  return '%s & %s & %s \\\\' % (name, m_pm_s3(mm1, ss1), string2)


flines = []
for method in methods:
  if '.' not in method:
    m1 = s1 = m2 = s2 = numpy.nan
    if replace_dict[method] in distcorr_dict:
      m1 = numpy.mean(distcorr_dict[replace_dict[method]])
      s1 = numpy.std(distcorr_dict[replace_dict[method]])
    if replace_dict[method] in time_dict:
      m2 = numpy.mean(time_dict[replace_dict[method]])
      s2 = numpy.std(time_dict[replace_dict[method]])
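# A minimal sketch of invoking the plotting helper above (hypothetical marker
# and color palettes; the real ones are built elsewhere in this experiment,
# and result_df is assumed to carry the 'Method', MRR, and top-20 columns
# named in the function defaults):
if __name__ == '__main__':
  example_cpalette = {'DeepWalk': 'tab:blue', 'GloVe': 'tab:green',
                      'GloVe_meta': 'tab:red'}
  example_ppalette = {'DeepWalk': 'o', 'GloVe': 's', 'GloVe_meta': '^'}
  movielens_result_2d(
      result_df,
      cpalette=example_cpalette,
      ppalette=example_ppalette,
      filename=os.path.join(EVAL_SAVE_DIR, 'example_scatter.png'))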
# Required imports
import cv2
import numpy as np
import math

# Reading the image
ig1 = cv2.imread('hough.jpg')
ig2 = cv2.imread('hough.jpg')
sample = cv2.imread('hough.jpg', 0)


# Methods

# Flips the kernel
def flip_operator(kernel):
    kernel_copy = [[0 for x in range(kernel.shape[1])] for y in range(kernel.shape[0])]
    # kernel_copy = kernel.copy()
    for i in range(kernel.shape[0]):
        for j in range(kernel.shape[1]):
            kernel_copy[i][j] = kernel[kernel.shape[0]-i-1][kernel.shape[1]-j-1]
    kernel_copy = np.asarray(kernel_copy)
    return kernel_copy


# Convolution logic
def convolution(image, kernel):
    # Flipping the kernel
    kernel = flip_operator(kernel)
    img_height = image.shape[0]
    img_width = image.shape[1]
    kernel_height = kernel.shape[0]
    kernel_width = kernel.shape[1]
    h = kernel_height // 2
    w = kernel_width // 2
    conv_result = [[0 for x in range(img_width)] for y in range(img_height)]
    for i in range(h, img_height - h):
        for j in range(w, img_width - w):
            total = 0  # renamed from `sum`, which shadowed the builtin
            for m in range(kernel_height):
                for n in range(kernel_width):
                    total = total + kernel[m][n] * image[i-h+m][j-w+n]
            conv_result[i][j] = total
    conv_result = np.asarray(conv_result)
    return conv_result


# Defines the output image, combination of gradient_x and gradient_y
def output(img1, img2):
    h, w = img1.shape
    result = [[0 for x in range(w)] for y in range(h)]
    for i in range(img1.shape[0]):
        for j in range(img1.shape[1]):
            result[i][j] = (img1[i][j]**2 + img2[i][j]**2)**(1/2)
            if result[i][j] > 255:
                result[i][j] = 255
            elif result[i][j] < 0:
                result[i][j] = 0
    result = np.asarray(result)
    return result


# Returns the maximum value from gradient_y/gradient_x
def maximum(gradient):
    max_val = gradient[0][0]  # renamed from `max`, which shadowed the builtin
    for i in range(len(gradient)):
        for j in range(len(gradient[0])):
            if max_val < gradient[i][j]:
                max_val = gradient[i][j]
    return max_val


# Returns the gradient_y/gradient_x with absolute values
def absolute_value(gradient):
    for i in range(len(gradient)):
        for j in range(len(gradient[0])):
            if gradient[i][j] < 0:
                gradient[i][j] *= -1
            else:
                continue
    return gradient


# Computing gradient_y (Sobel kernel)
w, h = 3, 3
kernel_y = [[0 for x in range(w)] for y in range(h)]
kernel_y = np.asarray(kernel_y)
kernel_y[0, 0] = 1
kernel_y[0, 1] = 2
kernel_y[0, 2] = 1
kernel_y[1, 0] = 0
kernel_y[1, 1] = 0
kernel_y[1, 2] = 0
kernel_y[2, 0] = -1
kernel_y[2, 1] = -2
kernel_y[2, 2] = -1
gradient_y = convolution(sample, kernel_y)
gradient_y = absolute_value(gradient_y) / maximum(absolute_value(gradient_y))

# Computing gradient_x (Sobel kernel)
w, h = 3, 3
kernel_x = [[0 for x in range(w)] for y in range(h)]
kernel_x = np.asarray(kernel_x)
kernel_x[0, 0] = 1
kernel_x[0, 1] = 0
kernel_x[0, 2] = -1
kernel_x[1, 0] = 2
kernel_x[1, 1] = 0
kernel_x[1, 2] = -2
kernel_x[2, 0] = 1
kernel_x[2, 1] = 0
kernel_x[2, 2] = -1
gradient_x = convolution(sample, kernel_x)
gradient_x = absolute_value(gradient_x) / maximum(absolute_value(gradient_x))

# Final output image
sobel = output(gradient_x, gradient_y)

# Thresholding on gradient_y
gradient_yy = gradient_y * 255


def check_threshold1(image):
    img_height = image.shape[0]
    img_width = image.shape[1]
    T = 19
    res = [[0 for x in range(img_width)] for y in range(img_height)]
    res = np.array(res)
    for i in range(image.shape[0]):
        for j in range(image.shape[1]):
            if image[i][j] > T:
                res[i][j] = 255
            else:
                res[i][j] = 0
    return res


gradient_yy = check_threshold1(gradient_yy)
cv2.imwrite("thresholded_gradient_yy.jpg", gradient_yy)

# Thresholding on gradient_x
gradient_xx = gradient_x * 255


def check_threshold2(image):
    img_height = image.shape[0]
    img_width = image.shape[1]
    T = 100
    res = [[0 for x in range(img_width)] for y in range(img_height)]
    res = np.array(res)
    for i in range(image.shape[0]):
        for j in range(image.shape[1]):
            if image[i][j] > T:
                res[i][j] = 255
            else:
                res[i][j] = 0
    return res


gradient_xx = check_threshold2(gradient_xx)
cv2.imwrite("thresholded_gradient_xx.jpg", gradient_xx)


def generate_accumulator(image):
    '''Reference: https://alyssaq.github.io/2014/understanding-hough-transform/'''
    thetas = np.deg2rad(np.arange(-90.0, 90.0))
    w, h = image.shape
    diag_len = int(round(math.sqrt(w*w + h*h)))
    rhos = np.linspace(-diag_len, diag_len, diag_len * 2)  # num must be an int
    cos_t = np.cos(thetas)
    sin_t = np.sin(thetas)
    num_thetas = len(thetas)
    # uint64 so vote counts cannot overflow (uint8 would wrap past 255 votes)
    accumulator = np.zeros((2 * diag_len, num_thetas), dtype=np.uint64)
    y_idxs, x_idxs = np.nonzero(image)
    # Voting
    for i in range(len(x_idxs)):
        x = x_idxs[i]
        y = y_idxs[i]
        for t_idx in range(num_thetas):
            rho = int(round(x * cos_t[t_idx] + y * sin_t[t_idx]) + diag_len)
            accumulator[rho, t_idx] += 1
    return accumulator, thetas, rhos


# Vertical lines
acc1, thetas1, rhos1 = generate_accumulator(gradient_xx)
# Slanting lines
acc2, thetas2, rhos2 = generate_accumulator(gradient_yy)
cv2.imwrite("Accumulator1.jpg", acc1)
cv2.imwrite("Accumulator2.jpg", acc2)

ls_theta1 = []


def detect_lines_slant(acc, rhos, thetas, num_iterations):
    for i in range(num_iterations):
        # find the strongest remaining peak, then suppress its neighborhood
        arr = np.unravel_index(acc.argmax(), acc.shape)
        acc[arr[0]-18:arr[0]+15, arr[1]-7:arr[1]+7] = 0
        rho = rhos[arr[0]]
        theta = thetas[arr[1]]
        a = np.cos(theta)
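# detect_lines_slant breaks off above in this copy. As a rough sketch (an
# assumption, not the author's original continuation), an accumulator peak
# (rho, theta) is usually converted into two drawable endpoints like this:
def rho_theta_to_endpoints(rho, theta, length=1000):
    # unit normal (a, b) of the line x*cos(theta) + y*sin(theta) = rho
    a, b = np.cos(theta), np.sin(theta)
    x0, y0 = a * rho, b * rho  # point of the line closest to the origin
    pt1 = (int(x0 + length * (-b)), int(y0 + length * a))
    pt2 = (int(x0 - length * (-b)), int(y0 - length * a))
    return pt1, pt2
# e.g. cv2.line(ig1, *rho_theta_to_endpoints(rho, theta), (0, 0, 255), 2)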
# -*- coding: utf-8 -*-
"""
2016-08-06, 2016M-1.0 lzj
A general matching algorithm
"""

import numpy as np


def findneighbor(u1, v1, u2=None, v2=None, dis_limit=0.0005):
    """Distance match: find matches between two lists.

    Originally part of the match algorithm, also used to find isolated stars.

    returns: a list of ndarrays; the outer list has n1 sub-lists, and each
        sub-list contains indices of star2
    """
    # if u2/v2 are omitted, find neighbors of u1/v1 within itself
    if u2 is None:
        u2 = u1
    if v2 is None:
        v2 = v1
    n1, n2 = len(u1), len(u2)
    udis_limit = vdis_limit = dis_limit

    # sort items by their v values
    sort1 = np.argsort(v1)
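# findneighbor is truncated above, so only the intended call is sketched here
# as comments (hypothetical coordinates, e.g. degrees on the sky):
# u = np.array([10.0010, 10.0012, 35.5])
# v = np.array([-5.0000, -5.0003, 20.1])
# neighbors = findneighbor(u, v, dis_limit=0.0005)      # self-match of one catalog
# matches = findneighbor(u1, v1, u2, v2)                # match two catalogs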
import numpy as np
from keras import backend as K
from keras.layers import Layer, Input, Activation
from keras.models import Model
from keras.optimizers import SGD
from keras.initializers import RandomNormal, Identity, Orthogonal, Zeros, Constant, Ones
from keras.callbacks import Callback
from sklearn.svm import LinearSVC
import argparse


class StopAtThreshold(Callback):
    """Stops training once the monitored quantity drops below a threshold."""

    def __init__(self, monitor='loss', threshold=0.001):
        super(StopAtThreshold, self).__init__()
        self.monitor = monitor
        self.threshold = threshold

    def on_epoch_end(self, epoch, logs=None):
        current = logs.get(self.monitor)
        if current is None:
            return
        if current < self.threshold:
            self.model.stop_training = True
            return


class RecordValues(Callback):
    """Logs the five largest canonical-model weights at the end of each epoch."""

    def __init__(self, depth):
        self.depth = depth
        super(RecordValues, self).__init__()

    def on_epoch_end(self, epoch, logs=None):
        # `get_canonical_model` is defined elsewhere in the original script
        w = get_canonical_model(self.model, self.depth)
        w = np.abs(w)
        # argsort is ascending, so top_5_values[0] is the 5th largest entry
        top_5_idx = np.argsort(w)[-5:]
        top_5_values = [w[i] for i in top_5_idx]
        logs['unnormalized_ev1'] = top_5_values[0]
        logs['unnormalized_ev2'] = top_5_values[1]
        logs['unnormalized_ev3'] = top_5_values[2]
        logs['unnormalized_ev4'] = top_5_values[3]
        logs['unnormalized_ev5'] = top_5_values[4]
        w /= np.linalg.norm(w)
        top_5_idx = np.argsort(w)
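# A minimal sketch of wiring StopAtThreshold into training (hypothetical toy
# model and data, not part of the original experiment):
if __name__ == '__main__':
    from keras.layers import Dense  # imported only for this demo
    inp = Input(shape=(4,))
    out = Dense(1)(inp)
    model = Model(inp, out)
    model.compile(optimizer=SGD(lr=0.01), loss='mse')
    X = np.random.randn(64, 4)
    y = X.sum(axis=1, keepdims=True)
    # training halts as soon as the loss falls below the threshold
    model.fit(X, y, epochs=500, verbose=0,
              callbacks=[StopAtThreshold(monitor='loss', threshold=0.001)])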
import os

import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV, cross_validate
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn.linear_model import Lasso, LassoCV, LogisticRegressionCV, LogisticRegression
from sklearn.linear_model import ElasticNet, ElasticNetCV, enet_path
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import auc, roc_curve

from utils import kernel
from mics import classifier_mics

'''
Function names mirror the scikit-learn names where possible, so their purpose is easy to see.
There is no separate leave-one-out implementation: to use leave-one-out, set the number of
K-fold cross-validation folds to the number of samples (the approach recommended by the
scikit-learn documentation). Leave-one-out inside grid search is not recommended, since it
makes the model extremely expensive when many candidate parameters are searched.
'''


class lasso():
    '''A collection of LASSO feature-selection methods; cv_val selects whether
    grid-search cross-validation is used.

    Inputs:
        X_train, X_test, y_train, y_test: train/test feature matrices and labels
        feature_name: feature names; the order must match the columns of X
        path: directory where result files are stored
        cv_val: bool, whether to run grid-search cross-validation
    '''

    def __init__(self, X_train, X_test, y_train, y_test, feature_name, path, cv_val=True):
        self.X_train = X_train
        self.X_test = X_test
        self.y_train = y_train
        self.y_test = y_test
        self.name = feature_name
        self.cv_val = cv_val
        self.path = path

    def lasso(self, alpha, cv):
        '''Run LASSO feature selection once, keeping the features with nonzero coefficients.

        Inputs:
            alpha: the penalty parameter (a list of candidates when cv_val is True)
            cv: int, number of folds if cross-validation is used

        Outputs:
            best_alpha (only when cross-validation is used): optimal LASSO penalty
            new_train_feature: training feature matrix after selection
            new_test_feature: test feature matrix after selection
            new_feature_name: names of the selected features
            feature_weight: coefficient of each selected feature (same order as the names)
        '''
        if self.cv_val is True:
            model_lasso = LassoCV(alphas=alpha, cv=cv)
            model_lasso.fit(self.X_train, self.y_train)
            coef = pd.Series(model_lasso.coef_)
            print("Lasso picked " + str(sum(coef != 0)) +
                  " variables and eliminated the other " + str(sum(coef == 0)) + " variables")
            img_path = os.path.join(self.path, 'lassoCV')
            os.makedirs(img_path, exist_ok=True)

            # best LASSO penalty found by cross-validation
            best_alpha = model_lasso.alpha_
            print('-----------------------------')
            print('Best LASSO alpha:')
            print(best_alpha)

            # keep the features whose LASSO weights are nonzero
            model = SelectFromModel(model_lasso, prefit=True)
            # filter the train and test features with the fitted selector
            X_new_train = model.transform(self.X_train)
            X_new_test = model.transform(self.X_test)
            # mask over all features: kept features are True, dropped ones False
            mask = model.get_support()
            new_feature_name = []
            feature_weight = []
            # collect the names and weights of the kept features
            for keep, feature, weight in zip(mask, self.name, coef):
                if keep:
                    new_feature_name.append(feature)
                    feature_weight.append(weight)
            # attach the feature names to the kept train/test features
            new_train_feature = pd.DataFrame(data=X_new_train, columns=new_feature_name)
            new_test_feature = pd.DataFrame(data=X_new_test, columns=new_feature_name)
            feature_weight = pd.Series(feature_weight)
            return best_alpha, new_train_feature, new_test_feature, new_feature_name, feature_weight
        else:
            model_lasso = Lasso(alpha=alpha)
            model_lasso.fit(self.X_train, self.y_train)
            coef = pd.Series(model_lasso.coef_)
            print("Lasso picked " + str(sum(coef != 0)) +
                  " variables and eliminated the other " + str(sum(coef == 0)) + " variables")
            img_path = os.path.join(self.path, 'lasso_only')
            os.makedirs(img_path, exist_ok=True)

            # keep the features whose LASSO weights are nonzero
            model = SelectFromModel(model_lasso, prefit=True)
            X_new_train = model.transform(self.X_train)
            X_new_test = model.transform(self.X_test)
            mask = model.get_support()
            new_feature_name = []
            feature_weight = []
            for keep, feature, weight in zip(mask, self.name, coef):
                if keep:
                    new_feature_name.append(feature)
                    feature_weight.append(weight)
            new_train_feature = pd.DataFrame(data=X_new_train, columns=new_feature_name)
            new_test_feature = pd.DataFrame(data=X_new_test, columns=new_feature_name)
            feature_weight = pd.Series(feature_weight)
            return new_train_feature, new_test_feature, new_feature_name, feature_weight

    def lasso_shuffle(self, shuffle_time, alpha_range, cv=10):
        '''Shuffle the data repeatedly, run LASSO each time, and count how often
        every feature is selected.

        Inputs:
            shuffle_time: number of shuffle iterations
            alpha_range: alpha value(s); an int without grid search, a list with it
            cv: number of folds if cross-validation is used

        Outputs:
            new_train_feature, new_test_feature: selected train/test features (in practice
                the returned index is re-applied to the original matrix, so these matter less)
            select_feature_name: names of the selected features
            select_feature_name_freq: how often each selected feature appeared over the shuffles
            feature_weight: mean coefficient of each selected feature
            select_feature_index: index of each selected feature in the original matrix,
                ready for direct slicing once selection is done
        '''
        # write the run parameters to a txt file
        lasso_txt = open(os.path.join(self.path, 'lasso_shuffle.txt'), 'w')
        lasso_txt.write('LASSO parameters set:\n')
        lasso_txt.write('\n---------------------------------------------\n')
        lasso_txt.write('Grid search: % s' % self.cv_val)
        lasso_txt.write('\nAlpha range: % s' % alpha_range)
        lasso_txt.write('\nShuffle time: % s' % shuffle_time)
        lasso_txt.write('\nGrid search cv-fold: % s' % cv)
        lasso_txt.write('\n---------------------------------------------\n')

        if self.cv_val is True:
            # initialize the summed weights to 0 and the selected-feature list to empty
            coef_sum = 0
            select_list = []
            # best alpha of every shuffle
            alpha_list = []
            # shuffle loop; store the selected feature names every iteration
            for i in range(shuffle_time):
                # shuffle the data
                X, y = shuffle(self.X_train, self.y_train)
                kfold = StratifiedKFold(n_splits=cv, shuffle=False)
                model_lasso = LassoCV(alphas=alpha_range, cv=cv)
                model_lasso.fit(X, y)
                coef = pd.Series(model_lasso.coef_)
                print("% s th shuffle, Lasso picked " % i + str(sum(coef != 0)) +
                      " variables and eliminated the other " + str(sum(coef == 0)) + " variables")
                # best LASSO penalty found by cross-validation
                alpha = model_lasso.alpha_
                alpha_list.append(alpha)
                print('best alpha value is % s' % alpha)
                # accumulate the coefficients over the loop
                coef_sum += model_lasso.coef_
                # mask of the nonzero features
                model = SelectFromModel(model_lasso, prefit=True)
                mask = model.get_support()
                # store the kept feature names in select_list
                for keep, name in zip(mask, self.name):
                    if keep:
                        select_list.append(name)
            # mean coefficient over all shuffles
            coef_mean = coef_sum / shuffle_time
            # count how often every feature was selected (dict)
            feature_freq = dict(zip(*np.unique(select_list, return_counts=True)))
            # count how often every alpha was chosen (dict)
            alpha_freq = dict(zip(*np.unique(alpha_list, return_counts=True)))
            # sort the features by selection frequency, descending
            select_feature_name = []
            select_feature_name_freq = []
            for k in sorted(feature_freq, key=feature_freq.__getitem__, reverse=True):
                select_feature_name_freq.append(feature_freq[k])
                select_feature_name.append(k)
            # indices of the selected features in the original matrix
            select_feature_index = []
            name_list = list(select_feature_name)
            all_name_list = list(self.name)
            for i in range(len(select_feature_name)):
                index = all_name_list.index(name_list[i])
                select_feature_index.append(index)
            # sort the alpha values by frequency, descending
            alpha_value = []
            alpha_value_freq = []
            for k in sorted(alpha_freq, key=alpha_freq.__getitem__, reverse=True):
                alpha_value_freq.append(alpha_freq[k])
                alpha_value.append(k)
                print('alpha value % s appeared % s times in the loop' % (k, alpha_freq[k]))
            # slice the selected features by index
            new_train_feature = self.X_train[:, select_feature_index]
            new_test_feature = self.X_test[:, select_feature_index]
            feature_weight = coef_mean[select_feature_index]
            # write the outputs to the txt file
            lasso_txt.write('\nSelected feature index:\n')
            lasso_txt.write(str(select_feature_index))
            lasso_txt.write('\n---------------------------------------------\n')
            lasso_txt.write('\nSelected feature weight: \n')
            lasso_txt.write(str(feature_weight))
            lasso_txt.write('\n---------------------------------------------\n')
            lasso_txt.write('\nSelected feature name:\n')
            lasso_txt.write(str(select_feature_name))
            lasso_txt.write('\n---------------------------------------------\n')
            lasso_txt.write('\nSelected feature appearance frequency:\n')
            lasso_txt.write(str(select_feature_name_freq))
            lasso_txt.write('\n---------------------------------------------\n')
            return new_train_feature, new_test_feature, select_feature_name, \
                   select_feature_name_freq, feature_weight, select_feature_index
        else:
            # initialize the summed weights to 0 and the selected-feature list to empty
            coef_sum = 0
            select_list = []
            # shuffle loop; store the selected feature names every iteration
            for i in range(shuffle_time):
                X, y = shuffle(self.X_train, self.y_train)
                model_lasso = Lasso(alpha=alpha_range)
                model_lasso.fit(X, y)
                coef = pd.Series(model_lasso.coef_)
                print("% s th shuffle, Lasso picked " % i + str(sum(coef != 0)) +
                      " variables and eliminated the other " + str(sum(coef == 0)) + " variables")
                coef_sum += model_lasso.coef_
                model = SelectFromModel(model_lasso, prefit=True)
                mask = model.get_support()
                for keep, name in zip(mask, self.name):
                    if keep:
                        select_list.append(name)
            coef_mean = coef_sum / shuffle_time
            feature_freq = dict(zip(*np.unique(select_list, return_counts=True)))
            select_feature_name = []
            select_feature_name_freq = []
            for k in sorted(feature_freq, key=feature_freq.__getitem__, reverse=True):
                select_feature_name_freq.append(feature_freq[k])
                select_feature_name.append(k)
            select_feature_index = []
            name_list = list(select_feature_name)
            all_name_list = list(self.name)
            for i in range(len(select_feature_name)):
                index = all_name_list.index(name_list[i])
                select_feature_index.append(index)
            new_train_feature = self.X_train[:, select_feature_index]
            new_test_feature = self.X_test[:, select_feature_index]
            feature_weight = coef_mean[select_feature_index]
            lasso_txt.write('\nSelected feature index:\n')
            lasso_txt.write(str(select_feature_index))
            lasso_txt.write('\n---------------------------------------------\n')
            lasso_txt.write('\nSelected feature weight: \n')
            lasso_txt.write(str(feature_weight))
            lasso_txt.write('\n---------------------------------------------\n')
            lasso_txt.write('\nSelected feature name:\n')
            lasso_txt.write(str(select_feature_name))
            lasso_txt.write('\n---------------------------------------------\n')
            lasso_txt.write('\nSelected feature appearance frequency:\n')
            lasso_txt.write(str(select_feature_name_freq))
            lasso_txt.write('\n---------------------------------------------\n')
            return new_train_feature, new_test_feature, select_feature_name, \
                   select_feature_name_freq, feature_weight, select_feature_index

    def logis_lasso(self, alpha, cv):
        '''Feature selection with logistic LASSO; optionally choose the penalty by
        cross-validation. Returns the selected train/test features plus the selected
        feature names and one weight per feature (orders correspond).

        Inputs:
            alpha: penalty parameter (for LASSO this plays the role of alpha)
            cv: number of folds if cross-validation is used

        Outputs:
            best_alpha (only when cross-validation is used): optimal penalty
            new_train_feature: training feature matrix after selection
            new_test_feature: test feature matrix after selection
            new_feature_name: names of the selected features
            feature_weight: weight of each selected feature
        '''
        if self.cv_val is True:
            logis_lasso = LogisticRegressionCV(Cs=alpha, cv=cv, penalty='l1')
            logis_lasso.fit(self.X_train, self.y_train)
            coef = pd.Series(np.ravel(logis_lasso.coef_))
            print("Lasso picked " + str(sum(coef != 0)) +
                  " variables and eliminated the other " + str(sum(coef == 0)) + " variables")
            img_path = os.path.join(self.path, 'lassoCV')
            os.makedirs(img_path, exist_ok=True)

            # best penalty found by cross-validation (C_ holds the chosen value;
            # Cs_ is the whole candidate grid)
            best_alpha = logis_lasso.C_
            print('-----------------------------')
            print('Best LASSO alpha:')
            print(best_alpha)

            # keep the features whose weights are nonzero
            model = SelectFromModel(logis_lasso, prefit=True)
            X_new_train = model.transform(self.X_train)
            X_new_test = model.transform(self.X_test)
            mask = model.get_support()
            new_feature_name = []
            feature_weight = []
            for keep, feature, weight in zip(mask, self.name, coef):
                if keep:
                    new_feature_name.append(feature)
                    feature_weight.append(weight)
            new_train_feature = pd.DataFrame(data=X_new_train, columns=new_feature_name)
            new_test_feature = pd.DataFrame(data=X_new_test, columns=new_feature_name)
            feature_weight = pd.Series(feature_weight)
            return best_alpha, new_train_feature, new_test_feature, new_feature_name, feature_weight
        else:
            logis_lasso = LogisticRegression(C=alpha, penalty='l1')
            logis_lasso.fit(self.X_train, self.y_train)
            # ravel because LogisticRegression stores coef_ with shape (1, n_features)
            coef = pd.Series(np.ravel(logis_lasso.coef_))
            print("Lasso picked " + str(sum(coef != 0)) +
                  " variables and eliminated the other " + str(sum(coef == 0)) + " variables")
            img_path = os.path.join(self.path, 'lasso_only')
            os.makedirs(img_path, exist_ok=True)

            model = SelectFromModel(logis_lasso, prefit=True)
            X_new_train = model.transform(self.X_train)
            X_new_test = model.transform(self.X_test)
            mask = model.get_support()
            new_feature_name = []
            feature_weight = []
            for keep, feature, weight in zip(mask, self.name, coef):
                if keep:
                    new_feature_name.append(feature)
                    feature_weight.append(weight)
            new_train_feature = pd.DataFrame(data=X_new_train, columns=new_feature_name)
            new_test_feature = pd.DataFrame(data=X_new_test, columns=new_feature_name)
            feature_weight = pd.Series(feature_weight)
            return new_train_feature, new_test_feature, new_feature_name, feature_weight
    def logis_lasso_shuffle(self, alpha_range, shuffle_time=100, cv=10):
        '''Feature selection with logistic LASSO over repeated shuffles; count how
        often every feature is selected.

        Inputs:
            alpha_range: alpha value(s); an int without grid search, a list with it
            shuffle_time: number of shuffle iterations
            cv: number of folds if cross-validation is used

        Outputs: the same as lasso_shuffle (selected train/test features, selected
            names, selection counts, mean weights, and indices into the original matrix)
        '''
        # write the run parameters to a txt file
        lasso_txt = open(os.path.join(self.path, 'logistic lasso_shuffle.txt'), 'w')
        lasso_txt.write('LASSO parameters set:\n')
        lasso_txt.write('\n---------------------------------------------\n')
        lasso_txt.write('Grid search: % s' % self.cv_val)
        lasso_txt.write('\nAlpha range: % s' % alpha_range)
        lasso_txt.write('\nShuffle time: % s' % shuffle_time)
        lasso_txt.write('\nGrid search cv-fold: % s' % cv)
        lasso_txt.write('\n---------------------------------------------\n')

        if self.cv_val is True:
            # initialize the summed weights to 0 and the selected-feature list to empty
            coef_sum = 0
            select_list = []
            # best alpha of every shuffle
            alpha_list = []
            # shuffle loop; store the selected feature names every iteration
            for i in range(shuffle_time):
                X, y = shuffle(self.X_train, self.y_train)
                kfold = StratifiedKFold(n_splits=cv, shuffle=False)
                model_lasso = LogisticRegressionCV(Cs=alpha_range, cv=cv, penalty='l1')
                model_lasso.fit(X, y)
                coef = pd.Series(np.ravel(model_lasso.coef_))
                print("% s th shuffle, Lasso picked " % i + str(sum(coef != 0)) +
                      " variables and eliminated the other " + str(sum(coef == 0)) + " variables")
                # best penalty found by cross-validation (C_, not the grid Cs_)
                alpha = model_lasso.C_
                alpha_list.append(alpha)
                print('best alpha value is % s' % alpha)
                # accumulate the (raveled) coefficients over the loop; coef_ is
                # (1, n_features) here, so ravel keeps the running sum 1-D
                coef_sum += np.ravel(model_lasso.coef_)
                model = SelectFromModel(model_lasso, prefit=True)
                mask = model.get_support()
                for keep, name in zip(mask, self.name):
                    if keep:
                        select_list.append(name)
            coef_mean = coef_sum / shuffle_time
            feature_freq = dict(zip(*np.unique(select_list, return_counts=True)))
            alpha_freq = dict(zip(*np.unique(alpha_list, return_counts=True)))
            # sort the features by selection frequency, descending
            select_feature_name = []
            select_feature_name_freq = []
            for k in sorted(feature_freq, key=feature_freq.__getitem__, reverse=True):
                select_feature_name_freq.append(feature_freq[k])
                select_feature_name.append(k)
            # indices of the selected features in the original matrix
            select_feature_index = []
            name_list = list(select_feature_name)
            all_name_list = list(self.name)
            for i in range(len(select_feature_name)):
                index = all_name_list.index(name_list[i])
                select_feature_index.append(index)
            # sort the alpha values by frequency, descending
            alpha_value = []
            alpha_value_freq = []
            for k in sorted(alpha_freq, key=alpha_freq.__getitem__, reverse=True):
                alpha_value_freq.append(alpha_freq[k])
                alpha_value.append(k)
                print('alpha value % s appeared % s times in the loop' % (k, alpha_freq[k]))
            new_train_feature = self.X_train[:, select_feature_index]
            new_test_feature = self.X_test[:, select_feature_index]
            feature_weight = coef_mean[select_feature_index]
            lasso_txt.write('\nSelected feature index:\n')
            lasso_txt.write(str(select_feature_index))
            lasso_txt.write('\n---------------------------------------------\n')
            lasso_txt.write('\nSelected feature weight: \n')
            lasso_txt.write(str(feature_weight))
            lasso_txt.write('\n---------------------------------------------\n')
            lasso_txt.write('\nSelected feature name:\n')
            lasso_txt.write(str(select_feature_name))
            lasso_txt.write('\n---------------------------------------------\n')
            lasso_txt.write('\nSelected feature appearance frequency:\n')
            lasso_txt.write(str(select_feature_name_freq))
            lasso_txt.write('\n---------------------------------------------\n')
            return new_train_feature, new_test_feature, select_feature_name, \
                   select_feature_name_freq, feature_weight, select_feature_index
        else:
            # initialize the summed weights to 0 and the selected-feature list to empty
            coef_sum = 0
            select_list = []
            # shuffle loop; store the selected feature names every iteration
            for i in range(shuffle_time):
                X, y = shuffle(self.X_train, self.y_train)
                model_lasso = LogisticRegression(C=alpha_range, penalty='l1')
                model_lasso.fit(X, y)
                coef = pd.Series(np.ravel(model_lasso.coef_))
                print("% s th shuffle, Lasso picked " % i + str(sum(coef != 0)) +
                      " variables and eliminated the other " + str(sum(coef == 0)) + " variables")
                coef_sum += np.ravel(model_lasso.coef_)
                model = SelectFromModel(model_lasso, prefit=True)
                mask = model.get_support()
                for keep, name in zip(mask, self.name):
                    if keep:
                        select_list.append(name)
            coef_mean = coef_sum / shuffle_time
            feature_freq = dict(zip(*np.unique(select_list, return_counts=True)))
            select_feature_name = []
            select_feature_name_freq = []
            for k in sorted(feature_freq, key=feature_freq.__getitem__, reverse=True):
                select_feature_name_freq.append(feature_freq[k])
                select_feature_name.append(k)
            select_feature_index = []
            name_list = list(select_feature_name)
            all_name_list = list(self.name)
            for i in range(len(select_feature_name)):
                index = all_name_list.index(name_list[i])
                select_feature_index.append(index)
            new_train_feature = self.X_train[:, select_feature_index]
            new_test_feature = self.X_test[:, select_feature_index]
            feature_weight = coef_mean[select_feature_index]
            lasso_txt.write('\nSelected feature index:\n')
            lasso_txt.write(str(select_feature_index))
            lasso_txt.write('\n---------------------------------------------\n')
            lasso_txt.write('\nSelected feature weight: \n')
            lasso_txt.write(str(feature_weight))
            lasso_txt.write('\n---------------------------------------------\n')
            lasso_txt.write('\nSelected feature name:\n')
            lasso_txt.write(str(select_feature_name))
            lasso_txt.write('\n---------------------------------------------\n')
            lasso_txt.write('\nSelected feature appearance frequency:\n')
            lasso_txt.write(str(select_feature_name_freq))
            lasso_txt.write('\n---------------------------------------------\n')
            return new_train_feature, new_test_feature, select_feature_name, \
                   select_feature_name_freq, feature_weight, select_feature_index
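# A minimal usage sketch for the `lasso` class above (hypothetical data and
# paths; `alpha` must be a list of candidates when cv_val=True):
if __name__ == '__main__':
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=100, n_features=20, noise=0.1, random_state=0)
    names = ['feat%d' % i for i in range(20)]
    Xtr, Xte, ytr, yte = train_test_split(X, y, test_size=0.3, random_state=0)
    selector = lasso(Xtr, Xte, ytr, yte, names, path='./results', cv_val=True)
    best_alpha, tr, te, kept, w = selector.lasso(alpha=[0.01, 0.1, 1.0], cv=5)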
class elastic_net():
    '''Elastic net for feature selection; it can select groups of features.

    Inputs:
        X_train: training feature matrix
        X_test: test feature matrix
        y_train: training labels
        y_test: test labels
        feature_name: feature names matching the columns of X
        cv_val: bool, whether to run grid-search cross-validation
        path: directory where results are stored
    '''

    def __init__(self, X_train, X_test, y_train, y_test, feature_name, cv_val, path):
        self.X_train = X_train
        self.X_test = X_test
        self.y_train = y_train
        self.y_test = y_test
        self.name = feature_name
        self.cv_val = cv_val
        self.path = path

    def elastic_net(self, l1, alphas, cv):
        if self.cv_val is True:
            elas = ElasticNetCV(l1_ratio=l1, alphas=alphas, cv=cv)
            elas.fit(self.X_train, self.y_train)
            coef = pd.Series(elas.coef_)
            print("Elastic Net picked " + str(sum(coef != 0)) +
                  " variables and eliminated the other " + str(sum(coef == 0)) + " variables")
            img_path = os.path.join(self.path, 'ElasticNetCV')
            os.makedirs(img_path, exist_ok=True)

            # best penalty parameters found by cross-validation
            best_alpha = elas.alpha_
            best_l1_ratio = elas.l1_ratio_
            best_coef = elas.coef_
            best_alphas = elas.alphas_
            best_mse_path = elas.mse_path_
            print('-----------------------------')
            print('Best Elastic Net alpha:')
            print(best_alpha)

            # keep the features whose weights are nonzero
            model = SelectFromModel(elas, prefit=True)
            # filter the train and test features with the fitted selector
            X_new_train = model.transform(self.X_train)
            X_new_test = model.transform(self.X_test)
            # print(X_new_test.shape)
            # print(model.get_support())
            # mask over all features: kept features are True, dropped ones False
            mask = model.get_support()
            new_feature_name = []
            feature_weight = []
            # collect the names and weights of the kept features
            for keep, feature, weight in zip(mask, self.name, coef):
                if keep:
                    new_feature_name.append(feature)
                    feature_weight.append(weight)
            # attach the feature names to the kept train/test features
            new_train_feature = pd.DataFrame(data=X_new_train, columns=new_feature_name)
            new_test_feature = pd.DataFrame(data=X_new_test, columns=new_feature_name)
            feature_weight = pd.Series(feature_weight)
            return best_alpha, new_train_feature, new_test_feature, new_feature_name, feature_weight
        else:
            elas = ElasticNet(l1_ratio=l1, alpha=alphas)
            elas.fit(self.X_train, self.y_train)
            coef = pd.Series(elas.coef_)
            print("Elastic Net picked " + str(sum(coef != 0)) +
                  " variables and eliminated the other " + str(sum(coef == 0)) + " variables")
            img_path = os.path.join(self.path, 'ElasticNetCV')
            os.makedirs(img_path, exist_ok=True)
            coef1 = elas.coef_
            sparse = elas.sparse_coef_

            # keep the features whose weights are nonzero
            model = SelectFromModel(elas, prefit=True)
            X_new_train = model.transform(self.X_train)
            X_new_test = model.transform(self.X_test)
            mask = model.get_support()
            new_feature_name = []
            feature_weight = []
            for keep, feature, weight in zip(mask, self.name, coef):
                if keep:
                    new_feature_name.append(feature)
                    feature_weight.append(weight)
            new_train_feature = pd.DataFrame(data=X_new_train, columns=new_feature_name)
            new_test_feature = pd.DataFrame(data=X_new_test, columns=new_feature_name)
            feature_weight = pd.Series(feature_weight)
            return new_train_feature, new_test_feature, new_feature_name, feature_weight

    def elasticnet_shuffle(self, l1_range, alphas_range, shuffle_time=100, cv=10, freq_seq=False):
        '''Compute feature weights over repeated shuffles and select features by how
        often they are chosen.

        Inputs:
            freq_seq: whether to sort the selected features by selection frequency;
                False keeps the original feature order and only subsets
        '''
        # write the run parameters to a txt file
        elas_txt = open(os.path.join(self.path, 'elastic net_shuffle.txt'), 'w')
        elas_txt.write('Elastic Net parameters set:\n')
        elas_txt.write('\n---------------------------------------------\n')
        elas_txt.write('Grid search: % s' % self.cv_val)
        elas_txt.write('\nL1_ratio range: % s' % l1_range)
        elas_txt.write('\nAlpha range: % s' % alphas_range)
        elas_txt.write('\nShuffle time: % s' % shuffle_time)
        elas_txt.write('\nGrid search cv-fold: % s' % cv)
        elas_txt.write('\n---------------------------------------------\n')

        if self.cv_val is True:
            # initialize the summed weights to 0 and the selected-feature list to empty
            coef_sum = 0
            select_list = []
            # best alpha of every shuffle
            alpha_list = []
            # shuffle loop; store the selected feature names every iteration
            for i in range(shuffle_time):
                X, y = shuffle(self.X_train, self.y_train)
                kfold = StratifiedKFold(n_splits=cv, shuffle=False)
                model_elas = ElasticNetCV(l1_ratio=l1_range, alphas=alphas_range, cv=cv)
                model_elas.fit(X, y)
                coef = pd.Series(model_elas.coef_)
                print("% s th shuffle, Elastic net picked " % i + str(sum(coef != 0)) +
                      " variables and eliminated the other " + str(sum(coef == 0)) + " variables")
                # best penalty parameters found by cross-validation
                alpha = model_elas.alpha_
                l1_ratio = model_elas.l1_ratio_
                alphas = model_elas.alphas_
                mse_path = model_elas.mse_path_
                alpha_list.append(alpha)
                print('best alpha value is % s' % alpha)
                # accumulate the coefficients over the loop
                coef_sum += model_elas.coef_
                # mask of the nonzero features
                model = SelectFromModel(model_elas, prefit=True)
                mask = model.get_support()
                for keep, name in zip(mask, self.name):
                    if keep:
                        select_list.append(name)
            # mean coefficient over all shuffles; reported only, not used for selection
            coef_mean = coef_sum / shuffle_time
            feature_freq = dict(zip(*np.unique(select_list, return_counts=True)))
            alpha_freq = dict(zip(*np.unique(alpha_list, return_counts=True)))
            select_feature_name = []
            select_feature_name_freq = []
            # if freq_seq is True, sort the features by selection frequency;
            # otherwise keep the original order
            if freq_seq is True:
                for k in sorted(feature_freq, key=feature_freq.__getitem__, reverse=True):
                    select_feature_name_freq.append(feature_freq[k])
                    select_feature_name.append(k)
            elif freq_seq is False:
                for k in feature_freq:
                    select_feature_name_freq.append(feature_freq[k])
                    select_feature_name.append(k)
            # indices of the selected features in the original matrix
            select_feature_index = []
            name_list = list(select_feature_name)
            all_name_list = list(self.name)
            for i in range(len(select_feature_name)):
                index = all_name_list.index(name_list[i])
                select_feature_index.append(index)
            # sort the alpha values by frequency, descending
            alpha_value = []
            alpha_value_freq = []
            for k in sorted(alpha_freq, key=alpha_freq.__getitem__, reverse=True):
                alpha_value_freq.append(alpha_freq[k])
                alpha_value.append(k)
                print('alpha value % s appeared % s times in the loop' % (k, alpha_freq[k]))
            new_train_feature = self.X_train[:, select_feature_index]
            new_test_feature = self.X_test[:, select_feature_index]
            feature_weight = coef_mean[select_feature_index]
            elas_txt.write('\nSelected feature index:\n')
            elas_txt.write(str(select_feature_index))
            elas_txt.write('\n---------------------------------------------\n')
            elas_txt.write('\nSelected feature weight: \n')
            elas_txt.write(str(feature_weight))
            elas_txt.write('\n---------------------------------------------\n')
            elas_txt.write('\nSelected feature name:\n')
            elas_txt.write(str(select_feature_name))
            elas_txt.write('\n---------------------------------------------\n')
            elas_txt.write('\nSelected feature appearance frequency:\n')
            elas_txt.write(str(select_feature_name_freq))
            elas_txt.write('\n---------------------------------------------\n')
            return new_train_feature, new_test_feature, select_feature_name, \
                   select_feature_name_freq, feature_weight, select_feature_index
        else:
            # initialize the summed weights to 0 and the selected-feature list to empty
            coef_sum = 0
            select_list = []
            # shuffle loop; store the selected feature names every iteration
            for i in range(shuffle_time):
                X, y = shuffle(self.X_train, self.y_train)
                model_elas = ElasticNet(l1_ratio=l1_range, alpha=alphas_range)
                model_elas.fit(X, y)
                coef = pd.Series(model_elas.coef_)
                print("% s th shuffle, Elastic net picked " % i + str(sum(coef != 0)) +
                      " variables and eliminated the other " + str(sum(coef == 0)) + " variables")
                # plot the elastic net path
                # from itertools import cycle
                # alphas_enet, coefs_enet, _ = enet_path(X, y, eps=5e-3, l1_ratio=l1_range,
                #                                        fit_intercept=False)
                # plt.figure(1)
                # colors = cycle(['b', 'r', 'g', 'c', 'k'])
                # neg_log_alphas_enet = -np.log10(alphas_enet)
                # for coef_e in coefs_enet:
                #     l2 = plt.plot(neg_log_alphas_enet, coef_e)
                # plt.xlabel('-Log(alpha)')
                # plt.ylabel('coefficients')
                # plt.xlim((0, 2.2))
                # plt.ylim((-0.1, 0.1))
                # plt.axis('tight')
                # plt.show()
                # accumulate the coefficients over the loop
                coef_sum += model_elas.coef_
                model = SelectFromModel(model_elas, prefit=True)
                mask = model.get_support()
                for keep, name in zip(mask, self.name):
                    if keep:
                        select_list.append(name)
            coef_mean = coef_sum / shuffle_time
            feature_freq = dict(zip(*np.unique(select_list, return_counts=True)))
            select_feature_name = []
            select_feature_name_freq = []
            # if freq_seq is True, sort the features by selection frequency;
            # otherwise keep the original order
            if freq_seq is True:
                for k in sorted(feature_freq, key=feature_freq.__getitem__, reverse=True):
                    select_feature_name_freq.append(feature_freq[k])
                    select_feature_name.append(k)
            elif freq_seq is False:
                for k in feature_freq:
                    select_feature_name_freq.append(feature_freq[k])
                    select_feature_name.append(k)
            select_feature_index = []
            name_list = list(select_feature_name)
            all_name_list = list(self.name)
            for i in range(len(select_feature_name)):
                index = all_name_list.index(name_list[i])
                select_feature_index.append(index)
            new_train_feature = self.X_train[:, select_feature_index]
            new_test_feature = self.X_test[:, select_feature_index]
            feature_weight = coef_mean[select_feature_index]
            elas_txt.write('\nSelected feature index:\n')
            elas_txt.write(str(select_feature_index))
            elas_txt.write('\n---------------------------------------------\n')
            elas_txt.write('\nSelected feature weight: \n')
            elas_txt.write(str(feature_weight))
            elas_txt.write('\n---------------------------------------------\n')
            elas_txt.write('\nSelected feature name:\n')
            elas_txt.write(str(select_feature_name))
            elas_txt.write('\n---------------------------------------------\n')
            elas_txt.write('\nSelected feature appearance frequency:\n')
            elas_txt.write(str(select_feature_name_freq))
            elas_txt.write('\n---------------------------------------------\n')
            return new_train_feature, new_test_feature, select_feature_name, \
                   select_feature_name_freq, feature_weight, select_feature_index
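# A minimal usage sketch for `elastic_net` (hypothetical data; note that the
# constructor argument order differs from `lasso`: ..., cv_val, path):
if __name__ == '__main__':
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=100, n_features=20, noise=0.1, random_state=1)
    names = ['feat%d' % i for i in range(20)]
    Xtr, Xte, ytr, yte = train_test_split(X, y, test_size=0.3, random_state=1)
    enet = elastic_net(Xtr, Xte, ytr, yte, names, cv_val=False, path='./results')
    tr, te, kept, w = enet.elastic_net(l1=0.5, alphas=0.1, cv=None)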
class SVM():
    '''A collection of SVM classification methods: plain SVM, shuffle SVM, nested SVM.

    Inputs:
        X: feature matrix
        y: labels matching X
        path: directory where results are stored

    Attributes:
        weight: SVM weight of every feature (a list as long as the number of
            features); only meaningful when the kernel is linear
    '''
    # class attribute holding the SVM feature weights
    weight = 0

    def __init__(self, X, y, path):
        self.X = X
        self.y = y
        self.path = path

    def svm_only(self, kernel='linear', ratio=0.5, gamma=0.1, C=10, cv=3, gridsearch=True):
        '''Run the SVM once, optionally using grid search to find the best hyperparameters.

        Inputs:
            kernel: kernel type
            ratio: test-set fraction of the train/test split, default 0.5
            gamma: rbf hyperparameter; a list with grid search, an int without
            C: penalty hyperparameter; a list with grid search, an int without
            cv: number of folds of the grid-search cross-validation
            gridsearch: bool, whether to grid-search the best SVM hyperparameters

        Outputs:
            best_para: dict, the best parameters when grid searching
            pred_train / pred_test: train/test predictions
            y_score_train / y_score_test: train/test prediction probabilities
        '''
        X_train, X_test, y_train, y_test = train_test_split(self.X, self.y,
                                                            test_size=ratio, stratify=self.y)
        if gridsearch is True:
            svm = SVC(kernel=kernel, gamma=gamma, C=C, probability=True)
            para = {
                'gamma': gamma,
                'C': C,
            }
            grid = GridSearchCV(svm, para, n_jobs=1, verbose=1, scoring='accuracy', cv=cv)
            grid.fit(X_train, y_train)
            pred_train = grid.predict(X_train)
            pred_test = grid.predict(X_test)
            y_score_train = grid.predict_proba(X_train)
            y_score_test = grid.predict_proba(X_test)
            best_para = grid.best_params_
            # print the best SVM parameters
            print('SVM CV Best score: %0.3f' % grid.best_score_)
            print('SVM CV Best parameters set:')
            print('-------------------------------------------')
            for param_name in sorted(best_para.keys()):
                print('\t%s: %r' % (param_name, best_para[param_name]))
            # store the SVM feature weights in the class attribute weight (the
            # fitted model lives in grid.best_estimator_, not in the template svm)
            if kernel == 'linear':
                weight = grid.best_estimator_.coef_
                SVM.weight = weight
            else:
                print('SVM coefficient is only available when using linear kernel function.')
        else:
            svm = SVC(kernel=kernel, gamma=gamma, C=C, probability=True)
            svm.fit(X_train, y_train)
            pred_train = svm.predict(X_train)
            pred_test = svm.predict(X_test)
            y_score_train = svm.predict_proba(X_train)
            y_score_test = svm.predict_proba(X_test)
            best_para = {'gamma': gamma, 'C': C}
            # store the SVM feature weights in the class attribute weight
            if kernel == 'linear':
                weight = svm.coef_
                SVM.weight = weight
            else:
                print('SVM coefficient is only available when using linear kernel function.')
        return pred_train, y_score_train, pred_test, y_score_test, best_para

    def svm_shuffle(self, outer, para, svm_metrics, shuffle_time=100):
        '''Run the SVM with fixed hyperparameters (no grid search), shuffling the
        data shuffle_time times and averaging the results.

        For the ROC curves here, fpr is not the true fpr but a fixed, equally spaced
        grid; tpr is interpolated onto it from the real (fpr, tpr) trend. The ROC
        drawn this way is correct, but the final AUC is computed from the
        interpolated curve and so carries a tiny error relative to the exact AUC,
        which does no harm.

        Inputs:
            outer: number of cross-validation folds per shuffle
            para: dict of SVM parameters:
                kernel: currently only linear and rbf are supported
                C: penalty parameter, used by both kernels
                gamma: present only when the rbf kernel is used
            svm_metrics: list of metrics to compute; accuracy, precision, recall,
                f1, sensitivity and specificity are supported (full names only)
            shuffle_time: number of shuffles

        Outputs:
            train_means: dict keyed by the names in svm_metrics; each value is a
                list with one entry per shuffle, the cross-validation mean of that
                metric on the training folds (take mean() again for a global average)
            train_std: same layout, with the standard deviation instead of the mean
            test_means / test_std: the same on the test folds
            roc_dict: dict with everything related to drawing the ROC:
                tpr_train / tpr_test: CV-mean tpr of every shuffle
                tpr_list_train / tpr_list_test: 2-D lists, the tpr of every fold of
                    every shuffle (one list per shuffle)
                fpr_train / fpr_test: the fixed fpr grids (equally spaced)
                auc_list_train / auc_list_test: AUC of every shuffle
                auc_train / auc_test: mean AUC over all shuffles
            The first four dicts are meant for metric-vs-shuffle plots (means for the
            curve, stds for the band). From roc_dict the practically useful entries
            are tpr_train, tpr_test, fpr_train and fpr_test: averaging them gives all
            parameters needed to draw the ROC. auc_list_* can be plotted against the
            shuffle index; the remaining entries are kept just in case.
        '''
        from mics import classifier_mics

        # initialize the SVM weights to 0
        svm_weight = 0
        svm_weight_cv = 0

        # write the SVM parameters to a txt file
        svm_shuffle_path = os.path.join(self.path, 'svm_shuffle')
        os.makedirs(svm_shuffle_path, exist_ok=True)
        svm_txt = open(os.path.join(self.path, 'svm_shuffle_result.txt'), 'w')
        svm_txt.write('Support Vector Machine Shuffle parameters set:\n')
        svm_txt.write('\n---------------------------------------------\n')
        svm_txt.write('Kernel type: % s' % para['kernel'])
        svm_txt.write('\nC value: % s' % para['C'])
        if para['kernel'] == 'rbf':
            svm_txt.write('\nGamma value: % s' % para['gamma'])
        svm_txt.write('\nShuffle time: % s' % shuffle_time)
        svm_txt.write('\nCross validation-fold: % s' % outer)
        svm_txt.write('\nsvm metrics: % s\n' % svm_metrics)
        svm_txt.write('\n---------------------------------------------\n')

        # initialize empty train/test mean and std lists for every requested
        # metric; this exec pattern only works because every later access to
        # these names also goes through exec
        metrics_num = len(svm_metrics)
        for name in svm_metrics:
            exec('train_{}_means = []'.format(name))
            exec('train_{}_std = []'.format(name))
            exec('test_{}_means = []'.format(name))
            exec('test_{}_std = []'.format(name))

        shuffle_path = os.path.join(self.path, 'svm', 'shuffle')
        os.makedirs(shuffle_path, exist_ok=True)

        # define fpr directly as an equally spaced grid
        meanfpr_outer_train = np.linspace(0, 1, 100)
        meanfpr_outer_test = np.linspace(0, 1, 100)
        # tpr and auc start empty; tpr_outer_test ends up as long as
        # meanfpr_outer_test, and the auc lists as long as the number of shuffles
        tpr_outer_train = []
        auc_list_train = []
        tpr_outer_test = []
        auc_list_test = []

        for i in range(shuffle_time):
            # fold assignment of the outer loop
            outer_cv = StratifiedKFold(n_splits=outer, shuffle=True, random_state=i)
            # choose the concrete model according to the kernel
            if para['kernel'] == 'rbf':
                svm = SVC(kernel=para['kernel'], C=para['C'], gamma=para['gamma'],
                          probability=True)
            elif para['kernel'] == 'linear':
                svm = SVC(kernel=para['kernel'], C=para['C'], probability=True)

            # inner loop: collect the tpr of every fold
            tpr_inner_train = []
            tpr_inner_test = []
            # per-fold metric lists, only for the metrics requested in svm_metrics
            for name in svm_metrics:
                exec('{}_inner_train = []'.format(name))
                exec('{}_inner_test = []'.format(name))

            for train, test in outer_cv.split(self.X, self.y):
                svm.fit(self.X[train], self.y[train])
                # SVM outputs
                pred_train = svm.predict(self.X[train])
                pred_test = svm.predict(self.X[test])
                prob_train = svm.predict_proba(self.X[train])
                prob_test = svm.predict_proba(self.X[test])
                # accumulate the weights when the kernel is linear
                if para['kernel'] == 'linear':
                    svm_weight_cv += np.ravel(svm.coef_)
                else:
                    print('SVM coefficient is only available when using linear kernel function.')
                # compute the four basic metrics
                mics = classifier_mics(self.y[train], pred_train, prob_train,
                                       self.y[test], pred_test, prob_test,
                                       'svm_shuffle_result')
                accuracy_train, precision_train, recall_train, f1_train = mics.mics_sum_train()
                accuracy_test, precision_test, recall_test, f1_test = mics.mics_sum_test()
                sensitivity_train, sensitivity_test = mics.sensitivity()
                specificity_train, specificity_test = mics.specificity()
                # all metrics are computed, but only the requested ones are stored
                for name in svm_metrics:
                    exec('{}_inner_train.append({}_train)'.format(name, name))
                    exec('{}_inner_test.append({}_test)'.format(name, name))
                # compute fpr and tpr
                fpr_train, tpr_train, thres_train = roc_curve(self.y[train], prob_train[:, 1])
                fpr_test, tpr_test, thres_test = roc_curve(self.y[test], prob_test[:, 1])
                # interpolate tpr onto the fixed fpr grid
                tpr_inner_train.append(np.interp(meanfpr_outer_train, fpr_train, tpr_train))
                tpr_inner_test.append(np.interp(meanfpr_outer_test, fpr_test, tpr_test))
                tpr_inner_train[-1][0] = 0.0
                tpr_inner_test[-1][0] = 0.0

            # average SVM weight over this shuffle's cross-validation
            svm_weight_cv /= outer
            # add this shuffle's weights to the running sum
            svm_weight += svm_weight_cv
            # the mean tpr over the folds is this shuffle's tpr
            meantpr_inner_train = np.mean(tpr_inner_train, axis=0)
            meantpr_inner_test = np.mean(tpr_inner_test, axis=0)
            meantpr_inner_train[-1] = 1.0
            meantpr_inner_test[-1] = 1.0
            # compute this shuffle's AUC and store it in the auc lists
            mean_auc_train = auc(meanfpr_outer_train, meantpr_inner_train)
            mean_auc_test = auc(meanfpr_outer_test, meantpr_inner_test)
            auc_list_train.append(mean_auc_train)
            auc_list_test.append(mean_auc_test)
            # after the AUC, store this shuffle's tpr in the outer lists
            tpr_outer_train.append(meantpr_inner_train)
            tpr_outer_test.append(meantpr_inner_test)
            # store every metric of the outer loop
            for name in svm_metrics:
                # mean of each metric over the training folds
                exec('{}_inner_train = np.array({}_inner_train)'.format(name, name))
                exec("train_{}_means.append({}_inner_train.mean())".format(name, name))
                # standard deviation of each metric over the training folds
                exec("train_{}_std.append({}_inner_train.std())".format(name, name))
                # mean of each metric over the test folds
                exec('{}_inner_test = np.array({}_inner_test)'.format(name, name))
                exec("test_{}_means.append({}_inner_test.mean())".format(name, name))
                # standard deviation of each metric over the test folds
                exec("test_{}_std.append({}_inner_test.std())".format(name, name))

        meantpr_outer_train = np.mean(tpr_outer_train, axis=0)
        meantpr_outer_test = np.mean(tpr_outer_test, axis=0)
        final_auc_train = auc(meanfpr_outer_train, meantpr_outer_train)
        final_auc_test = auc(meanfpr_outer_test, meantpr_outer_test)

        # average the SVM weights over all shuffles and store them in the class attribute
        svm_weight /= shuffle_time
        SVM.weight = svm_weight

        # for brevity, gather everything related to the ROC curve in one dict
        roc_dict = {}
        roc_dict['tpr_train'] = meantpr_outer_train
        roc_dict['tpr_test'] = meantpr_outer_test
        roc_dict['tpr_list_train'] = tpr_outer_train
        roc_dict['tpr_list_test'] = tpr_outer_test
        roc_dict['fpr_train'] = meanfpr_outer_train
        roc_dict['fpr_test'] = meanfpr_outer_test
        roc_dict['auc_list_train'] = auc_list_train
        roc_dict['auc_list_test'] = auc_list_test
        roc_dict['auc_train'] = final_auc_train
        roc_dict['auc_test'] = final_auc_test

        # likewise, return the metric means and stds of training and testing as dicts
        train_means = {}
        train_std = {}
        test_means = {}
        test_std = {}
        for name in svm_metrics:
            exec("train_means['{}'] = train_{}_means".format(name, name))
            exec("train_std['{}'] = train_{}_std".format(name, name))
            exec("test_means['{}'] = test_{}_means".format(name, name))
            exec("test_std['{}'] = test_{}_std".format(name, name))

        # write the outputs to the txt file
        for name in svm_metrics:
            svm_txt.write('\n---------------------------------------------\n')
            exec("svm_txt.write('Train set {} mean value: % s' % np.mean(train_means['{}']))".format(name, name))
            svm_txt.write('\n')
            exec("svm_txt.write('Train set {} max value: % s' % np.max(train_means['{}']))".format(name, name))
            svm_txt.write('\n')
            exec("svm_txt.write('Train set {} min value: % s' % np.min(train_means['{}']))".format(name, name))
            svm_txt.write('\n---------------------------------------------\n')
            exec("svm_txt.write('Test set {} mean value: % s' % np.mean(test_means['{}']))".format(name, name))
            svm_txt.write('\n')
            exec("svm_txt.write('Test set {} max value: % s' % np.max(test_means['{}']))".format(name, name))
            svm_txt.write('\n')
            exec("svm_txt.write('Test set {} min value: % s' % np.min(test_means['{}']))".format(name, name))
        svm_txt.write('\n---------------------------------------------\n')
        svm_txt.write('\nTrain set AUC mean value: % s' % np.mean(roc_dict['auc_list_train']))
        svm_txt.write('\nTrain set AUC max value: % s' % np.max(roc_dict['auc_list_train']))
        svm_txt.write('\nTrain set AUC min value: % s' % np.min(roc_dict['auc_list_train']))
        svm_txt.write('\n---------------------------------------------\n')
        svm_txt.write('\nTest set AUC mean value: % s' % np.mean(roc_dict['auc_list_test']))
        svm_txt.write('\nTest set AUC max value: % s' % np.max(roc_dict['auc_list_test']))
        svm_txt.write('\nTest set AUC min value: % s' % np.min(roc_dict['auc_list_test']))
        # store the SVM weights
        svm_txt.write('\n---------------------------------------------\n')
        svm_txt.write('\nSVM weight: % s' % svm_weight)

        return train_means, train_std, test_means, test_std, roc_dict

    def svm_nested(self, para, svm_metrics, shuffle_time=100, inner=5, outer=10, log=True):
        '''Nested (inner/outer) cross-validated SVM, shuffled shuffle_time times and
        averaged.

        As in svm_shuffle, fpr is a fixed, equally spaced grid onto which tpr is
        interpolated, so the AUC carries a tiny interpolation error, which does no harm.

        Inputs:
            outer: number of outer cross-validation folds per shuffle
            inner: number of folds of the inner grid-search cross-validation
            para: dict of SVM parameters (kernel: linear or rbf; C; gamma for rbf)
            svm_metrics: list of metrics to compute; accuracy, precision, recall, f1,
                sensitivity and specificity are supported (full names only)
            shuffle_time: number of shuffles
            log: bool, whether to write the details of every grid-search fold to file

        Outputs: train_means, train_std, test_means, test_std and roc_dict, with the
            same layout and meaning as in svm_shuffle.
        '''
        from mics import classifier_mics

        # write the SVM parameters to a txt file
        svm_shuffle_path = os.path.join(self.path, 'svm_nested')
        os.makedirs(svm_shuffle_path, exist_ok=True)
        svm_txt = open(os.path.join(self.path, 'svm_nested_result.txt'), 'w')
        svm_txt.write('Nested Support Vector Machine parameters set:\n')
        svm_txt.write('\n---------------------------------------------\n')
        svm_txt.write('Kernel type: % s' % para['kernel'])
        svm_txt.write('\nC value: % s' % para['C'])
        if para['kernel'] == 'rbf':
            svm_txt.write('\nGamma value: % s' % para['gamma'])
        svm_txt.write('\nShuffle time: % s' % shuffle_time)
        svm_txt.write('\nGrid Search Cross validation-fold: % s' % inner)
        svm_txt.write('\nCross validation-fold: % s' % outer)
        svm_txt.write('\nsvm metrics: % s\n' % svm_metrics)
        svm_txt.write('\n---------------------------------------------\n')

        # initialize empty train/test mean and std lists for every requested metric
        metrics_num = len(svm_metrics)
        for name in svm_metrics:
            exec('train_{}_means = []'.format(name))
            exec('train_{}_std = []'.format(name))
            exec('test_{}_means = []'.format(name))
            exec('test_{}_std = []'.format(name))

        shuffle_path = os.path.join(self.path, 'svm', 'nest_cv')
        os.makedirs(shuffle_path, exist_ok=True)

        # define fpr directly as an equally spaced grid
        meanfpr_outer_train = np.linspace(0, 1, 100)
        meanfpr_outer_test = np.linspace(0, 1, 100)
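# A minimal usage sketch for `SVM.svm_shuffle` (hypothetical data; the local
# helper module `mics.classifier_mics` must be importable for this to run, and
# svm_nested above is truncated in this copy):
if __name__ == '__main__':
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=120, n_features=10, random_state=0)
    clf = SVM(X, y, path='./results')
    train_means, train_std, test_means, test_std, roc_dict = clf.svm_shuffle(
        outer=5, para={'kernel': 'linear', 'C': 1.0},
        svm_metrics=['accuracy', 'f1'], shuffle_time=3)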
import numpy as np
from shapely import geometry
import sys
import os
import json
import glob

from convert import convert_videos

MIN_SCORE = -9999
MAX_TRACK_ID = 10000


class Joint:
    def __init__(self):
        self.count = 21
        self.wrist = 0
        self.thumb_k, self.thumb_b, self.thumb_m, self.thumb_t = 1, 2, 3, 4
        self.index_k, self.index_b, self.index_m, self.index_t = 5, 6, 7, 8
        self.middle_k, self.middle_b, self.middle_m, self.middle_t = 9, 10, 11, 12
        self.ring_k, self.ring_b, self.ring_m, self.ring_t = 13, 14, 15, 16
        self.pinky_k, self.pinky_b, self.pinky_m, self.pinky_t = 17, 18, 19, 20

        # joint id -> joint name; order matches the ids above
        # (k = knuckle, b = base, m = middle, t = tip)
        joint_names = ["wrist"] + ["%s_%s" % (finger, part)
                                   for finger in ["thumb", "index", "middle", "ring", "pinky"]
                                   for part in "kbmt"]
        self.name = {}
        for jid, jname in enumerate(joint_names):
            self.name[jid] = jname

        # hands have no left/right-symmetric joints, so every entry is -1
        self.symmetric_joint = {}
        for jname in joint_names:
            self.symmetric_joint[jname] = -1


def getPointGTbyID(points, pidx):
    point = []
    for i in range(len(points)):
        if points[i]["id"] is not None and points[i]["id"][0] == pidx:  # if joint id matches
            point = points[i]
            break
    return point


def getHeadSize(x1, y1, x2, y2):
    # (Note by Nate - 9/10/2020) Too large of a "headSize" caused many false matches when
    # calculating shortest distances. Tough to get a good reference measurement; I go for
    # the tip of the thumb, which is about ~0.2 the size of the bounding box for a full hand.
    headSize = 0.2 * np.linalg.norm(np.subtract([x2, y2], [x1, y1]))
    return headSize
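# A worked example of the reference size: a hand whose box spans (0, 0)-(30, 40)
# has a 50 px diagonal, giving 0.2 * 50 = 10 px for the distance checks below.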
def formatCell(val, delim):
    return "{:>5}".format("%1.1f" % val) + delim


def getHeader():
    strHeader = "&"
    strHeader += " Wrist &"
    strHeader += " Thumb &"
    strHeader += " Index &"
    strHeader += " Middle &"
    strHeader += " Ring &"
    strHeader += " Pinky &"
    strHeader += " Total%s" % ("\\" + "\\")
    return strHeader


def getMotHeader():
    strHeader = "&"
    strHeader += " MOTA &"
    strHeader += " MOTA &"
    strHeader += " MOTA &"
    strHeader += " MOTA &"
    strHeader += " MOTA &"
    strHeader += " MOTA &"
    strHeader += " MOTA &"
    strHeader += " MOTP &"
    strHeader += " Prec &"
    strHeader += " Rec %s\n" % ("\\" + "\\")
    strHeader += "&"
    strHeader += " Wrist &"
    strHeader += " Thumb &"
    strHeader += " Index &"
    strHeader += " Middle &"
    strHeader += " Ring &"
    strHeader += " Pinky &"
    strHeader += " Total&"
    strHeader += " Total&"
    strHeader += " Total&"
    strHeader += " Total%s" % ("\\" + "\\")
    return strHeader


def getCum(vals):
    cum = []
    cum += [(vals[[Joint().wrist], 0].mean())]
    cum += [(vals[[Joint().thumb_k, Joint().thumb_b, Joint().thumb_m, Joint().thumb_t], 0].mean())]
    cum += [(vals[[Joint().index_k, Joint().index_b, Joint().index_m, Joint().index_t], 0].mean())]
    cum += [(vals[[Joint().middle_k, Joint().middle_b, Joint().middle_m, Joint().middle_t], 0].mean())]
    cum += [(vals[[Joint().ring_k, Joint().ring_b, Joint().ring_m, Joint().ring_t], 0].mean())]
    cum += [(vals[[Joint().pinky_k, Joint().pinky_b, Joint().pinky_m, Joint().pinky_t], 0].mean())]
    for i in range(Joint().count, len(vals)):
        cum += [vals[i, 0]]
    return cum


def getFormatRow(cum):
    row = "&"
    for i in range(len(cum) - 1):
        row += formatCell(cum[i], " &")
    row += formatCell(cum[len(cum) - 1], (" %s" % "\\" + "\\"))
    return row


def printTable(vals, motHeader=False):
    cum = getCum(vals)
    row = getFormatRow(cum)
    if motHeader:
        header = getMotHeader()
    else:
        header = getHeader()
    print(header)
    print(row)
    return header + "\n", row + "\n"


def printTableTracking(vals):
    cum = getCum(vals)
    row = getFormatRow(cum)
    print(getHeader())
    print(row)
    return getHeader() + "\n", row + "\n"


# compute recall/precision curve (RPC) values
def computeRPC(scores, labels, total_positive):
    precision = np.zeros(len(scores))
    recall = np.zeros(len(scores))
    num_positive = 0
    idxsSort = np.array(scores).argsort()[::-1]
    labelsSort = labels[idxsSort]
    for sidx in range(len(idxsSort)):
        if labelsSort[sidx] == 1:
            num_positive += 1
        # recall: how many true positives were found out of the total number of positives?
        recall[sidx] = 1.0 * num_positive / total_positive
        # precision: how many true positives were found out of the total number of samples?
        precision[sidx] = 1.0 * num_positive / (sidx + 1)
    return precision, recall, idxsSort
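# A small worked example of computeRPC (toy scores and labels, not project data):
if __name__ == '__main__':
    demo_prec, demo_rec, demo_order = computeRPC(
        scores=[0.9, 0.8, 0.6, 0.4],
        labels=np.array([1, 0, 1, 1]),
        total_positive=3)
    # demo_prec -> [1.0, 0.5, 0.667, 0.75]; demo_rec -> [0.333, 0.333, 0.667, 1.0]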
precision[sidx] = 1.0 * num_positive / (sidx + 1) return precision, recall, idxsSort # compute Average Precision using recall/precision values def VOCap(rec, prec): mpre = np.zeros([1, 2 + len(prec)]) mpre[0, 1:len(prec) + 1] = prec mrec = np.zeros([1, 2 + len(rec)]) mrec[0, 1:len(rec) + 1] = rec mrec[0, len(rec) + 1] = 1.0 for i in range(mpre.size - 2, -1, -1): mpre[0, i] = max(mpre[0, i], mpre[0, i + 1]) i = np.argwhere(~np.equal(mrec[0, 1:], mrec[0, :mrec.shape[1] - 1])) + 1 i = i.flatten() # compute area under the curve ap = np.sum(np.multiply(np.subtract(mrec[0, i], mrec[0, i - 1]), mpre[0, i])) return ap def get_data_dir(): dataDir = "./" return dataDir def help(msg=''): sys.stderr.write(msg + '\n') exit() def process_arguments(argv): mode = 'multi' if len(argv) > 3: mode = str.lower(argv[3]) elif len(argv) < 3 or len(argv) > 4: help() gt_file = argv[1] pred_file = argv[2] if not os.path.exists(gt_file): help('Given ground truth directory does not exist!\n') if not os.path.exists(pred_file): help('Given prediction directory does not exist!\n') return gt_file, pred_file, mode def process_arguments_server(argv): 'multi' print(len(argv)) assert (len(argv) == 10, "Wrong number of arguments") gt_dir = argv[1] pred_dir = argv[2] mode = str.lower(argv[3]) evaltrack = argv[4] shortname = argv[5] chl = argv[6] shortname_uid = argv[7] shakey = argv[8] timestamp = argv[9] if not os.path.exists(gt_dir): help('Given ground truth does not exist!\n') if not os.path.exists(pred_dir): help('Given prediction does not exist!\n') return gt_dir, pred_dir, mode, evaltrack, shortname, chl, shortname_uid, shakey, timestamp def load_data(argv): dataDir = get_data_dir() gt_file, pred_file, mode = process_arguments(argv) gtFilename = dataDir + gt_file predFilename = dataDir + pred_file # load ground truth (GT) with open(gtFilename) as data_file: data = json.load(data_file) gtFramesAll = data # load predictions with open(predFilename) as data_file: data = json.load(data_file) prFramesAll = data return gtFramesAll, prFramesAll def cleanupData(gtFramesAll, prFramesAll): # remove all GT frames with empty annorects and remove corresponding entries from predictions imgidxs = [] for imgidx in range(len(gtFramesAll)): if len(gtFramesAll[imgidx]["annorect"]) > 0: imgidxs += [imgidx] gtFramesAll = [gtFramesAll[imgidx] for imgidx in imgidxs] prFramesAll = [prFramesAll[imgidx] for imgidx in imgidxs] # remove all gt rectangles that do not have annotations for imgidx in range(len(gtFramesAll)): gtFramesAll[imgidx]["annorect"] = removeRectsWithoutPoints(gtFramesAll[imgidx]["annorect"]) prFramesAll[imgidx]["annorect"] = removeRectsWithoutPoints(prFramesAll[imgidx]["annorect"]) return gtFramesAll, prFramesAll def removeIgnoredPointsRects(rects, polyList): ridxs = list(range(len(rects))) for ridx in range(len(rects)): points = rects[ridx]["annopoints"][0]["point"] pidxs = list(range(len(points))) for pidx in range(len(points)): pt = geometry.Point(points[pidx]["x"][0], points[pidx]["y"][0]) bIgnore = False for poidx in range(len(polyList)): poly = polyList[poidx] if poly.contains(pt): bIgnore = True break if bIgnore: pidxs.remove(pidx) points = [points[pidx] for pidx in pidxs] if len(points) > 0: rects[ridx]["annopoints"][0]["point"] = points else: ridxs.remove(ridx) rects = [rects[ridx] for ridx in ridxs] return rects def removeIgnoredPoints(gtFramesAll, prFramesAll): [] for imgidx in range(len(gtFramesAll)): if ("ignore_regions" in gtFramesAll[imgidx].keys() and len(gtFramesAll[imgidx]["ignore_regions"]) > 0): regions = 
gtFramesAll[imgidx]["ignore_regions"] polyList = [] for ridx in range(len(regions)): points = regions[ridx]["point"] pointList = [] for pidx in range(len(points)): pt = geometry.Point(points[pidx]["x"][0], points[pidx]["y"][0]) pointList += [pt] poly = geometry.Polygon([[p.x, p.y] for p in pointList]) polyList += [poly] rects = prFramesAll[imgidx]["annorect"] prFramesAll[imgidx]["annorect"] = removeIgnoredPointsRects(rects, polyList) rects = gtFramesAll[imgidx]["annorect"] gtFramesAll[imgidx]["annorect"] = removeIgnoredPointsRects(rects, polyList) return gtFramesAll, prFramesAll def rectHasPoints(rect): return (("annopoints" in rect.keys()) and (len(rect["annopoints"]) > 0 and len(rect["annopoints"][0]) > 0) and ("point" in rect["annopoints"][0].keys())) def removeRectsWithoutPoints(rects): idxsPr = [] for ridxPr in range(len(rects)): if rectHasPoints(rects[ridxPr]): idxsPr += [ridxPr]; rects = [rects[ridx] for ridx in idxsPr] return rects def load_data_dir(argv): gt_dir, pred_dir, mode = process_arguments(argv) if not os.path.exists(gt_dir): help('Given GT directory ' + gt_dir + ' does not exist!\n') if not os.path.exists(pred_dir): help('Given prediction directory ' + pred_dir + ' does not exist!\n') filenames = glob.glob(gt_dir + "/*.json") gtFramesAll = [] prFramesAll = [] for i in range(len(filenames)): # load each annotation json file with open(filenames[i]) as data_file: data = json.load(data_file) if not "annolist" in data: data = convert_videos(data)[0] gt = data["annolist"] for imgidx in range(len(gt)): gt[imgidx]["seq_id"] = i gt[imgidx]["seq_name"] = os.path.basename(filenames[i]).split('.')[0] for idx_gt in range(len(gt[imgidx]["annorect"])): if "track_id" in gt[imgidx]["annorect"][idx_gt].keys(): # adjust track_ids to make them unique across all sequences assert (gt[imgidx]["annorect"][idx_gt]["track_id"][0] < MAX_TRACK_ID) gt[imgidx]["annorect"][idx_gt]["track_id"][0] += i * MAX_TRACK_ID gtFramesAll += gt gtBasename = os.path.basename(filenames[i]) predFilename = pred_dir + gtBasename if not os.path.exists(predFilename): raise IOError('Prediction file ' + predFilename + ' does not exist') # load predictions with open(predFilename) as data_file: data = json.load(data_file) if not "annolist" in data: data = convert_videos(data)[0] pr = data["annolist"] if len(pr) != len(gt): raise Exception('# prediction frames %d != # GT frames %d for %s' % (len(pr), len(gt), predFilename)) for imgidx in range(len(pr)): track_id_frame = [] for ridxPr in range(len(pr[imgidx]["annorect"])): if "track_id" in pr[imgidx]["annorect"][ridxPr].keys(): track_id = pr[imgidx]["annorect"][ridxPr]["track_id"][0] track_id_frame += [track_id] # adjust track_ids to make them unique across all sequences assert (track_id < MAX_TRACK_ID) pr[imgidx]["annorect"][ridxPr]["track_id"][0] += i * MAX_TRACK_ID track_id_frame_unique = np.unique(np.array(track_id_frame)).tolist() if len(track_id_frame) != len(track_id_frame_unique): raise Exception('Non-unique tracklet IDs found in frame %s of prediction %s' % (pr[imgidx]["image"][0]["name"], predFilename)) prFramesAll += pr gtFramesAll, prFramesAll = cleanupData(gtFramesAll, prFramesAll) gtFramesAll, prFramesAll = removeIgnoredPoints(gtFramesAll, prFramesAll) return gtFramesAll, prFramesAll def writeJson(val, fname): with open(fname, 'w') as data_file: json.dump(val, data_file) def assignGTmulti(gt_frames_all, pred_frames_all, dish_thresh): assert (len(gt_frames_all) == len(pred_frames_all)) num_joint = Joint().count # part detection scores scores_all = {} # positive / 
negative labels labels_all = {} # number of annotated GT joints per image num_GT_all = np.zeros([num_joint, len(gt_frames_all)]) for pidx in range(num_joint): scores_all[pidx] = {} labels_all[pidx] = {} for img_idx in range(len(gt_frames_all)): scores_all[pidx][img_idx] = np.zeros([0, 0], dtype=np.float32) labels_all[pidx][img_idx] = np.zeros([0, 0], dtype=np.int8) # number of GT poses num_GT_hands = np.zeros((len(gt_frames_all), 1)) # number of predicted poses num_pred_hands = np.zeros((len(gt_frames_all), 1)) # container to save info for computing MOT metrics MOT_all = {} for img_idx in range(len(gt_frames_all)): # distance between predicted and GT joints dist = np.full((len(pred_frames_all[img_idx]["annorect"]), len(gt_frames_all[img_idx]["annorect"]), num_joint), np.inf) # score of the predicted joint scores = np.full((len(pred_frames_all[img_idx]["annorect"]), num_joint), np.nan) # body joint prediction exist has_pred = np.zeros((len(pred_frames_all[img_idx]["annorect"]), num_joint), dtype=bool) # body joint is annotated has_gt = np.zeros((len(gt_frames_all[img_idx]["annorect"]), num_joint), dtype=bool) track_idx_gt = [] track_idx_pred = [] idxs_pred = [] for idx_gt in range(len(pred_frames_all[img_idx]["annorect"])): if (("annopoints" in pred_frames_all[img_idx]["annorect"][idx_gt].keys()) and ("point" in pred_frames_all[img_idx]["annorect"][idx_gt]["annopoints"][0].keys())): idxs_pred += [idx_gt]; pred_frames_all[img_idx]["annorect"] = [pred_frames_all[img_idx]["annorect"][idx] for idx in idxs_pred] num_pred_hands[img_idx, 0] = len(pred_frames_all[img_idx]["annorect"]) num_GT_hands[img_idx, 0] = len(gt_frames_all[img_idx]["annorect"]) # iterate over GT poses for idx_gt in range(len(gt_frames_all[img_idx]["annorect"])): # GT pose rect_gt = gt_frames_all[img_idx]["annorect"][idx_gt] if "track_id" in rect_gt.keys(): track_idx_gt += [rect_gt["track_id"][0]] points_gt = [] if len(rect_gt["annopoints"]) > 0: points_gt = rect_gt["annopoints"][0]["point"] # iterate over all possible body joints for idx_gt in range(num_joint): # GT joint in LSP format point_gt = getPointGTbyID(points_gt, idx_gt) if len(point_gt) > 0: has_gt[idx_gt, idx_gt] = True # iterate over predicted poses for idx_gt in range(len(pred_frames_all[img_idx]["annorect"])): # predicted pose rect_pred = pred_frames_all[img_idx]["annorect"][idx_gt] if "track_id" in rect_pred.keys(): track_idx_pr += [rect_pred["track_id"][0]] points_pred = rect_pred["annopoints"][0]["point"] for idx_gt in range(num_joint): # predicted joint in LSP format point_pred = getPointGTbyID(points_pred, idx_gt) if len(point_pred) > 0: if not ("score" in point_pred.keys()): # use minimum score if predicted score is missing if img_idx == 0: print('WARNING: prediction score is missing. 
Setting fallback score={}'.format(MIN_SCORE)) scores[idx_gt, idx_gt] = MIN_SCORE else: scores[idx_gt, idx_gt] = point_pred["score"][0] has_pred[idx_gt, idx_gt] = True if len(pred_frames_all[img_idx]["annorect"]) and len(gt_frames_all[img_idx]["annorect"]): # predictions and GT are present # iterate over GT poses for idx_gt in range(len(gt_frames_all[img_idx]["annorect"])): # GT pose rect_gt = gt_frames_all[img_idx]["annorect"][idx_gt] # compute reference distance as head size head_size = getHeadSize(rect_gt["x1"][0], rect_gt["y1"][0], rect_gt["x2"][0], rect_gt["y2"][0]) points_gt = [] if len(rect_gt["annopoints"]) > 0: points_gt = rect_gt["annopoints"][0]["point"] # iterate over predicted poses for idx_pred in range(len(pred_frames_all[img_idx]["annorect"])): # predicted pose rect_pred = pred_frames_all[img_idx]["annorect"][idx_pred] points_pred = rect_pred["annopoints"][0]["point"] # iterate over all possible body joints for id in range(num_joint): # GT joint point_gt = getPointGTbyID(points_gt, id) # predicted joint point_pred = getPointGTbyID(points_pred, id) # compute distance between predicted and GT joint locations if has_pred[idx_gt, id] and has_gt[idx_gt, id]: point_gt = [point_gt["x"][0], point_gt["y"][0]] point_pr = [point_pred["x"][0], point_pred["y"][0]] dist[idx_pred, idx_gt, id] = np.linalg.norm(
np.subtract(point_gt, point_pr)
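# Hedged usage sketch (not part of the original script; toy values invented
# for illustration): computeRPC turns per-joint scores and match labels into
# a recall/precision curve, and VOCap integrates it into average precision.
if __name__ == '__main__':
    toy_scores = [0.9, 0.8, 0.6, 0.4]      # predicted joint confidences
    toy_labels = np.array([1, 0, 1, 1])    # 1 = prediction matched a GT joint
    prec, rec, _ = computeRPC(toy_scores, toy_labels, total_positive=3)
    print("toy AP = %1.1f%%" % (100.0 * VOCap(rec, prec)))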
#!/usr/bin/env python u""" read_cryosat_L2.py Written by <NAME> (05/2021) Reads CryoSat Level-2 data products from baselines A, B and C Reads CryoSat Level-2 netCDF4 data products from baseline D Supported CryoSat Modes: LRM, SAR, SARin, FDM, SID, GDR INPUTS: full_filename: full path of CryoSat .DBL or .nc file OUTPUTS: Data_1Hz: Time and Orbit Parameters Corrections: Elevation Corrections and Flags Data_20Hz: Geolocation and Elevation Measurements with Quality Parameters METADATA: MPH, SPH and DSD Header data PYTHON DEPENDENCIES: numpy: Scientific Computing Tools For Python https://numpy.org https://numpy.org/doc/stable/user/numpy-for-matlab-users.html netCDF4: Python interface to the netCDF C library https://unidata.github.io/netcdf4-python/netCDF4/index.html UPDATE HISTORY: Updated 05/2021: use raw binary string prefixes (rb) for regular expressions Updated 02/2021: replaced numpy bool to prevent deprecation warning Updated 06/2020: patch error in CryoSat-2 GDR pointer variables using the 1Hz mapping variable ind_meas_1hz_20_ku to remap the index Updated 02/2020: tilde-expansion of cryosat-2 files before opening add scale factors function for converting packed units in binary files convert from hard to soft tabulation Updated 11/2019: empty placeholder dictionary for baseline D DSD headers Updated 09/2019: added netCDF4 read function for baseline D will output with same variable names as the binary read functions output 20Hz data as masked arrays for all baselines Updated 08/2019: generalize regular expression patterns in read_DSD function Updated 10/2018: updated header read functions for python3 Updated 11/2016: added Abs_Orbit and Ascending_flag to Data_1Hz outputs Abs_Orbit should be same as in read_cryosat_ground_tracks.py Ascending_flag can use in surface regression fits (McMillan, 2014) Updated 05/2016: using __future__ print and division functions Written 03/2016 """ from __future__ import print_function from __future__ import division import os import re import netCDF4 import numpy as np #-- PURPOSE: Initiate L2 MDS variables for CryoSat Baselines A and B def cryosat_baseline_AB(fid,record_size,n_records): #-- CryoSat-2 1 Hz data fields (Location Group) #-- Time and Orbit Parameters plus Measurement Mode Data_1Hz = {} #-- Time: day part Data_1Hz['Day'] = np.zeros((n_records),dtype=np.int32) #-- Time: second part Data_1Hz['Second'] = np.zeros((n_records),dtype=np.int32) #-- Time: microsecond part Data_1Hz['Micsec'] = np.zeros((n_records),dtype=np.int32) #-- SIRAL mode Data_1Hz['Siral_mode'] = np.zeros((n_records),dtype=np.uint64) #-- Lat_1Hz: packed units (0.1 micro-degree, 1e-7 degrees) Data_1Hz['Lat_1Hz'] = np.zeros((n_records),dtype=np.int32) #-- Lon_1Hz: packed units (0.1 micro-degree, 1e-7 degrees) Data_1Hz['Lon_1Hz'] = np.zeros((n_records),dtype=np.int32) #-- Alt_1Hz: packed units (mm, 1e-3 m) #-- Altitude of COG above reference ellipsoid (interpolated value) Data_1Hz['Alt_1Hz'] = np.zeros((n_records),dtype=np.int32) #-- Mispointing: packed units (millidegrees, 1e-3 degrees) Data_1Hz['Mispointing'] = np.zeros((n_records),dtype=np.int16) #-- Number of valid records in the block of twenty that contain data #-- Last few records of the last block of a dataset may be blank blocks #-- inserted to bring the file up to a multiple of twenty. 
Data_1Hz['N_valid'] = np.zeros((n_records),dtype=np.int16) #-- CryoSat-2 geophysical corrections (External Corrections Group) Corrections = {} #-- Dry Tropospheric Correction packed units (mm, 1e-3 m) Corrections['dryTrop'] = np.zeros((n_records),dtype=np.int16) #-- Wet Tropospheric Correction packed units (mm, 1e-3 m) Corrections['wetTrop'] = np.zeros((n_records),dtype=np.int16) #-- Inverse Barometric Correction packed units (mm, 1e-3 m) Corrections['InvBar'] = np.zeros((n_records),dtype=np.int16) #-- Dynamic Atmosphere Correction packed units (mm, 1e-3 m) Corrections['DAC'] = np.zeros((n_records),dtype=np.int16) #-- Ionospheric Correction packed units (mm, 1e-3 m) Corrections['Iono'] = np.zeros((n_records),dtype=np.int16) #-- Sea State Bias Correction packed units (mm, 1e-3 m) Corrections['SSB'] = np.zeros((n_records),dtype=np.int16) #-- Ocean tide Correction packed units (mm, 1e-3 m) Corrections['ocTideElv'] = np.zeros((n_records),dtype=np.int16) #-- Long period equilibrium ocean tide Correction packed units (mm, 1e-3 m) Corrections['lpeTideElv'] = np.zeros((n_records),dtype=np.int16) #-- Ocean loading tide Correction packed units (mm, 1e-3 m) Corrections['olTideElv'] = np.zeros((n_records),dtype=np.int16) #-- Solid Earth tide Correction packed units (mm, 1e-3 m) Corrections['seTideElv'] = np.zeros((n_records),dtype=np.int16) #-- Geocentric Polar tide Correction packed units (mm, 1e-3 m) Corrections['gpTideElv'] = np.zeros((n_records),dtype=np.int16) Corrections['Spare1'] = np.zeros((n_records),dtype=np.int16) #-- Surface Type: Packed in groups of three bits for each of the 20 records Corrections['Surf_type'] = np.zeros((n_records),dtype=np.uint64) #-- Mean Sea Surface or Geoid packed units (mm, 1e-3 m) Corrections['MSS_Geoid'] = np.zeros((n_records),dtype=np.int32) #-- Ocean Depth/Land Elevation Model (ODLE) packed units (mm, 1e-3 m) Corrections['ODLE'] = np.zeros((n_records),dtype=np.int32) #-- Ice Concentration packed units (%/100) Corrections['Ice_conc'] = np.zeros((n_records),dtype=np.int16) #-- Snow Depth packed units (mm, 1e-3 m) Corrections['Snow_depth'] = np.zeros((n_records),dtype=np.int16) #-- Snow Density packed units (kg/m^3) Corrections['Snow_density'] = np.zeros((n_records),dtype=np.int16) Corrections['Spare2'] = np.zeros((n_records),dtype=np.int16) #-- Corrections Status Flag Corrections['C_status'] = np.zeros((n_records),dtype=np.uint32) #-- Significant Wave Height (SWH) packed units (mm, 1e-3) Corrections['SWH'] = np.zeros((n_records),dtype=np.int16) #-- Wind Speed packed units (mm/s, 1e-3 m/s) Corrections['Wind_speed'] = np.zeros((n_records),dtype=np.uint16) Corrections['Spare3'] = np.zeros((n_records),dtype=np.int16) Corrections['Spare4'] = np.zeros((n_records),dtype=np.int16) Corrections['Spare5'] = np.zeros((n_records),dtype=np.int16) Corrections['Spare6'] = np.zeros((n_records),dtype=np.int16) #-- CryoSat-2 20 Hz data fields (Measurement Group) #-- Derived from instrument measurement parameters n_blocks = 20 Data_20Hz = {} #-- Delta between the timestamps for 20Hz record and the 1Hz record #-- D_time_mics packed units (microseconds) Data_20Hz['D_time_mics'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32) Data_20Hz['D_time_mics'].mask = np.ones((n_records,n_blocks),dtype=bool) #-- Lat: packed units (0.1 micro-degree, 1e-7 degrees) Data_20Hz['Lat'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32) Data_20Hz['Lat'].mask = np.ones((n_records,n_blocks),dtype=bool) #-- Lon: packed units (0.1 micro-degree, 1e-7 degrees) Data_20Hz['Lon'] = 
np.ma.zeros((n_records,n_blocks),dtype=np.int32) Data_20Hz['Lon'].mask = np.ones((n_records,n_blocks),dtype=bool) #-- Measured elevation above ellipsoid from retracker: packed units (mm, 1e-3 m) Data_20Hz['Elev'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32) Data_20Hz['Elev'].mask = np.ones((n_records,n_blocks),dtype=bool) #-- Interpolated Sea Surface Height Anomaly: packed units (mm, 1e-3 m) Data_20Hz['SSHA_interp'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16) Data_20Hz['SSHA_interp'].mask = np.ones((n_records,n_blocks),dtype=bool) #-- Interpolated Sea Surface Height measurement count Data_20Hz['SSHA_interp_count'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16) Data_20Hz['SSHA_interp_count'].mask = np.ones((n_records,n_blocks),dtype=bool) #-- Interpolation quality estimate RSS: packed units (mm, 1e-3 m) Data_20Hz['SSHA_interp_RMS'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16) Data_20Hz['SSHA_interp_RMS'].mask = np.ones((n_records,n_blocks),dtype=bool) #-- Sigma Zero Backscatter for retracker: packed units (1e-2 dB) Data_20Hz['Sig0'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16) Data_20Hz['Sig0'].mask = np.ones((n_records,n_blocks),dtype=bool) #-- Peakiness: packed units (1e-2) Data_20Hz['Peakiness'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint16) Data_20Hz['Peakiness'].mask = np.ones((n_records,n_blocks),dtype=bool) #-- Freeboard: packed units (mm, 1e-3 m) #-- -9999 default value indicates computation has not been performed Data_20Hz['Freeboard'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16) Data_20Hz['Freeboard'].mask = np.ones((n_records,n_blocks),dtype=bool) #-- Number of averaged echoes or beams Data_20Hz['N_avg'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16) Data_20Hz['N_avg'].mask = np.ones((n_records,n_blocks),dtype=bool) Data_20Hz['Spare1'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16) Data_20Hz['Spare1'].mask = np.ones((n_records,n_blocks),dtype=bool) #-- Quality flags Data_20Hz['Quality_flag'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32) Data_20Hz['Quality_flag'].mask = np.ones((n_records,n_blocks),dtype=bool) Data_20Hz['Spare2'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16) Data_20Hz['Spare2'].mask = np.ones((n_records,n_blocks),dtype=bool) Data_20Hz['Spare3'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16) Data_20Hz['Spare3'].mask = np.ones((n_records,n_blocks),dtype=bool) Data_20Hz['Spare4'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16) Data_20Hz['Spare4'].mask = np.ones((n_records,n_blocks),dtype=bool) Data_20Hz['Spare5'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16) Data_20Hz['Spare5'].mask = np.ones((n_records,n_blocks),dtype=bool) #-- for each record in the CryoSat file for r in range(n_records): #-- CryoSat-2 Location Group for record r Data_1Hz['Day'][r] = np.fromfile(fid,dtype='>i4',count=1) Data_1Hz['Second'][r] = np.fromfile(fid,dtype='>i4',count=1) Data_1Hz['Micsec'][r] = np.fromfile(fid,dtype='>i4',count=1) Data_1Hz['Siral_mode'][r] = np.fromfile(fid,dtype='>u8',count=1) Data_1Hz['Lat_1Hz'][r] = np.fromfile(fid,dtype='>i4',count=1) Data_1Hz['Lon_1Hz'][r] = np.fromfile(fid,dtype='>i4',count=1) Data_1Hz['Alt_1Hz'][r] = np.fromfile(fid,dtype='>i4',count=1) Data_1Hz['Mispointing'][r] = np.fromfile(fid,dtype='>i2',count=1) Data_1Hz['N_valid'][r] = np.fromfile(fid,dtype='>i2',count=1) #-- CryoSat-2 External Corrections Group for record r Corrections['dryTrop'][r] = np.fromfile(fid,dtype='>i2',count=1) Corrections['wetTrop'][r] = np.fromfile(fid,dtype='>i2',count=1) Corrections['InvBar'][r] = 
np.fromfile(fid,dtype='>i2',count=1) Corrections['DAC'][r] = np.fromfile(fid,dtype='>i2',count=1) Corrections['Iono'][r] = np.fromfile(fid,dtype='>i2',count=1) Corrections['SSB'][r] = np.fromfile(fid,dtype='>i2',count=1) Corrections['ocTideElv'][r] = np.fromfile(fid,dtype='>i2',count=1) Corrections['lpeTideElv'][r] = np.fromfile(fid,dtype='>i2',count=1) Corrections['olTideElv'][r] = np.fromfile(fid,dtype='>i2',count=1) Corrections['seTideElv'][r] = np.fromfile(fid,dtype='>i2',count=1) Corrections['gpTideElv'][r] = np.fromfile(fid,dtype='>i2',count=1) Corrections['Spare1'][r] = np.fromfile(fid,dtype='>i2',count=1) Corrections['Surf_type'][r] = np.fromfile(fid,dtype='>u8',count=1) Corrections['MSS_Geoid'][r] = np.fromfile(fid,dtype='>i4',count=1) Corrections['ODLE'][r] = np.fromfile(fid,dtype='>i4',count=1) Corrections['Ice_conc'][r] = np.fromfile(fid,dtype='>i2',count=1) Corrections['Snow_depth'][r] = np.fromfile(fid,dtype='>i2',count=1) Corrections['Snow_density'][r] = np.fromfile(fid,dtype='>i2',count=1) Corrections['Spare2'][r] = np.fromfile(fid,dtype='>i2',count=1) Corrections['C_status'][r] = np.fromfile(fid,dtype='>u4',count=1) Corrections['SWH'][r] = np.fromfile(fid,dtype='>i2',count=1) Corrections['Wind_speed'][r] = np.fromfile(fid,dtype='>u2',count=1) Corrections['Spare3'][r] = np.fromfile(fid,dtype='>i2',count=1) Corrections['Spare4'][r] = np.fromfile(fid,dtype='>i2',count=1) Corrections['Spare5'][r] = np.fromfile(fid,dtype='>i2',count=1) Corrections['Spare6'][r] = np.fromfile(fid,dtype='>i2',count=1) #-- CryoSat-2 Measurements Group for record r and block b for b in range(n_blocks): Data_20Hz['D_time_mics'].data[r,b] = np.fromfile(fid,dtype='>i4',count=1) Data_20Hz['Lat'].data[r,b] = np.fromfile(fid,dtype='>i4',count=1) Data_20Hz['Lon'].data[r,b] = np.fromfile(fid,dtype='>i4',count=1) Data_20Hz['Elev'].data[r,b] = np.fromfile(fid,dtype='>i4',count=1) Data_20Hz['SSHA_interp'].data[r,b] = np.fromfile(fid,dtype='>i2',count=1) Data_20Hz['SSHA_interp_count'].data[r,b] = np.fromfile(fid,dtype='>i2',count=1) Data_20Hz['SSHA_interp_RMS'].data[r,b] = np.fromfile(fid,dtype='>i2',count=1) Data_20Hz['Sig0'].data[r,b] = np.fromfile(fid,dtype='>i2',count=1) Data_20Hz['Peakiness'].data[r,b] = np.fromfile(fid,dtype='>u2',count=1) Data_20Hz['Freeboard'].data[r,b] = np.fromfile(fid,dtype='>i2',count=1) Data_20Hz['N_avg'].data[r,b] = np.fromfile(fid,dtype='>i2',count=1) Data_20Hz['Spare1'].data[r,b] = np.fromfile(fid,dtype='>i2',count=1) Data_20Hz['Quality_flag'].data[r,b] = np.fromfile(fid,dtype='>u4',count=1) Data_20Hz['Spare2'].data[r,b] = np.fromfile(fid,dtype='>i2',count=1) Data_20Hz['Spare3'].data[r,b] = np.fromfile(fid,dtype='>i2',count=1) Data_20Hz['Spare4'].data[r,b] = np.fromfile(fid,dtype='>i2',count=1) Data_20Hz['Spare5'].data[r,b] = np.fromfile(fid,dtype='>i2',count=1) #-- Set CryoSat-2 Measurements Group Masks for record r Data_20Hz['D_time_mics'].mask[r,:Data_1Hz['N_valid'][r]] = False Data_20Hz['Lat'].mask[r,:Data_1Hz['N_valid'][r]] = False Data_20Hz['Lon'].mask[r,:Data_1Hz['N_valid'][r]] = False Data_20Hz['Elev'].mask[r,:Data_1Hz['N_valid'][r]] = False Data_20Hz['SSHA_interp'].mask[r,:Data_1Hz['N_valid'][r]] = False Data_20Hz['SSHA_interp_count'].mask[r,:Data_1Hz['N_valid'][r]] = False Data_20Hz['SSHA_interp_RMS'].mask[r,:Data_1Hz['N_valid'][r]] = False Data_20Hz['Sig0'].mask[r,:Data_1Hz['N_valid'][r]] = False Data_20Hz['Peakiness'].mask[r,:Data_1Hz['N_valid'][r]] = False Data_20Hz['Freeboard'].mask[r,:Data_1Hz['N_valid'][r]] = False 
Data_20Hz['N_avg'].mask[r,:Data_1Hz['N_valid'][r]] = False Data_20Hz['Spare1'].mask[r,:Data_1Hz['N_valid'][r]] = False Data_20Hz['Quality_flag'].mask[r,:Data_1Hz['N_valid'][r]] = False Data_20Hz['Spare2'].mask[r,:Data_1Hz['N_valid'][r]] = False Data_20Hz['Spare3'].mask[r,:Data_1Hz['N_valid'][r]] = False Data_20Hz['Spare4'].mask[r,:Data_1Hz['N_valid'][r]] = False Data_20Hz['Spare5'].mask[r,:Data_1Hz['N_valid'][r]] = False #-- Bind all the bits of the l2_mds together into a single dictionary CS_l2_mds = {} CS_l2_mds['Data_1Hz'] = Data_1Hz CS_l2_mds['Corrections'] = Corrections CS_l2_mds['Data_20Hz'] = Data_20Hz #-- return the output dictionary return CS_l2_mds #-- PURPOSE: Initiate L2 MDS variables for CryoSat Baseline C def cryosat_baseline_C(fid,record_size,n_records): #-- CryoSat-2 1 Hz data fields (Location Group) #-- Time and Orbit Parameters plus Measurement Mode Data_1Hz = {} #-- Time: day part Data_1Hz['Day'] = np.zeros((n_records),dtype=np.int32) #-- Time: second part Data_1Hz['Second'] = np.zeros((n_records),dtype=np.int32) #-- Time: microsecond part Data_1Hz['Micsec'] = np.zeros((n_records),dtype=np.int32) #-- SIRAL mode Data_1Hz['Siral_mode'] = np.zeros((n_records),dtype=np.uint64) #-- Lat_1Hz: packed units (0.1 micro-degree, 1e-7 degrees) Data_1Hz['Lat_1Hz'] = np.zeros((n_records),dtype=np.int32) #-- Lon_1Hz: packed units (0.1 micro-degree, 1e-7 degrees) Data_1Hz['Lon_1Hz'] = np.zeros((n_records),dtype=np.int32) #-- Alt_1Hz: packed units (mm, 1e-3 m) #-- Altitude of COG above reference ellipsoid (interpolated value) Data_1Hz['Alt_1Hz'] = np.zeros((n_records),dtype=np.int32) #-- Roll: packed units (0.1 micro-degree, 1e-7 degrees) Data_1Hz['Roll'] = np.zeros((n_records),dtype=np.int32) #-- Pitch: packed units (0.1 micro-degree, 1e-7 degrees) Data_1Hz['Pitch'] = np.zeros((n_records),dtype=np.int32) #-- Yaw: packed units (0.1 micro-degree, 1e-7 degrees) Data_1Hz['Yaw'] = np.zeros((n_records),dtype=np.int32) Data_1Hz['Spare'] = np.zeros((n_records),dtype=np.int16) #-- Number of valid records in the block of twenty that contain data #-- Last few records of the last block of a dataset may be blank blocks #-- inserted to bring the file up to a multiple of twenty. 
Data_1Hz['N_valid'] = np.zeros((n_records),dtype=np.int16) #-- CryoSat-2 geophysical corrections (External Corrections Group) Corrections = {} #-- Dry Tropospheric Correction packed units (mm, 1e-3 m) Corrections['dryTrop'] = np.zeros((n_records),dtype=np.int16) #-- Wet Tropospheric Correction packed units (mm, 1e-3 m) Corrections['wetTrop'] = np.zeros((n_records),dtype=np.int16) #-- Inverse Barometric Correction packed units (mm, 1e-3 m) Corrections['InvBar'] = np.zeros((n_records),dtype=np.int16) #-- Dynamic Atmosphere Correction packed units (mm, 1e-3 m) Corrections['DAC'] = np.zeros((n_records),dtype=np.int16) #-- Ionospheric Correction packed units (mm, 1e-3 m) Corrections['Iono'] = np.zeros((n_records),dtype=np.int16) #-- Sea State Bias Correction packed units (mm, 1e-3 m) Corrections['SSB'] = np.zeros((n_records),dtype=np.int16) #-- Ocean tide Correction packed units (mm, 1e-3 m) Corrections['ocTideElv'] = np.zeros((n_records),dtype=np.int16) #-- Long period equilibrium ocean tide Correction packed units (mm, 1e-3 m) Corrections['lpeTideElv'] = np.zeros((n_records),dtype=np.int16) #-- Ocean loading tide Correction packed units (mm, 1e-3 m) Corrections['olTideElv'] = np.zeros((n_records),dtype=np.int16) #-- Solid Earth tide Correction packed units (mm, 1e-3 m) Corrections['seTideElv'] = np.zeros((n_records),dtype=np.int16) #-- Geocentric Polar tide Correction packed units (mm, 1e-3 m) Corrections['gpTideElv'] = np.zeros((n_records),dtype=np.int16) Corrections['Spare1'] = np.zeros((n_records),dtype=np.int16) #-- Surface Type: Packed in groups of three bits for each of the 20 records Corrections['Surf_type'] = np.zeros((n_records),dtype=np.uint64) #-- Mean Sea Surface or Geoid packed units (mm, 1e-3 m) Corrections['MSS_Geoid'] = np.zeros((n_records),dtype=np.int32) #-- Ocean Depth/Land Elevation Model (ODLE) packed units (mm, 1e-3 m) Corrections['ODLE'] = np.zeros((n_records),dtype=np.int32) #-- Ice Concentration packed units (%/100) Corrections['Ice_conc'] = np.zeros((n_records),dtype=np.int16) #-- Snow Depth packed units (mm, 1e-3 m) Corrections['Snow_depth'] = np.zeros((n_records),dtype=np.int16) #-- Snow Density packed units (kg/m^3) Corrections['Snow_density'] = np.zeros((n_records),dtype=np.int16) Corrections['Spare2'] = np.zeros((n_records),dtype=np.int16) #-- Corrections Status Flag Corrections['C_status'] = np.zeros((n_records),dtype=np.uint32) #-- Significant Wave Height (SWH) packed units (mm, 1e-3) Corrections['SWH'] = np.zeros((n_records),dtype=np.int16) #-- Wind Speed packed units (mm/s, 1e-3 m/s) Corrections['Wind_speed'] = np.zeros((n_records),dtype=np.uint16) Corrections['Spare3'] = np.zeros((n_records),dtype=np.int16) Corrections['Spare4'] = np.zeros((n_records),dtype=np.int16) Corrections['Spare5'] = np.zeros((n_records),dtype=np.int16) Corrections['Spare6'] = np.zeros((n_records),dtype=np.int16) #-- CryoSat-2 20 Hz data fields (Measurement Group) #-- Derived from instrument measurement parameters n_blocks = 20 Data_20Hz = {} #-- Delta between the timestamps for 20Hz record and the 1Hz record #-- D_time_mics packed units (microseconds) Data_20Hz['D_time_mics'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32) Data_20Hz['D_time_mics'].mask = np.ones((n_records,n_blocks),dtype=bool) #-- Lat: packed units (0.1 micro-degree, 1e-7 degrees) Data_20Hz['Lat'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32) Data_20Hz['Lat'].mask = np.ones((n_records,n_blocks),dtype=bool) #-- Lon: packed units (0.1 micro-degree, 1e-7 degrees) Data_20Hz['Lon'] = 
np.ma.zeros((n_records,n_blocks),dtype=np.int32) Data_20Hz['Lon'].mask = np.ones((n_records,n_blocks),dtype=bool) #-- Measured elevation above ellipsoid from retracker 1: packed units (mm, 1e-3 m) Data_20Hz['Elev_1'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32) Data_20Hz['Elev_1'].mask = np.ones((n_records,n_blocks),dtype=bool) #-- Measured elevation above ellipsoid from retracker 2: packed units (mm, 1e-3 m) Data_20Hz['Elev_2'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32) Data_20Hz['Elev_2'].mask = np.ones((n_records,n_blocks),dtype=bool) #-- Measured elevation above ellipsoid from retracker 3: packed units (mm, 1e-3 m) Data_20Hz['Elev_3'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32) Data_20Hz['Elev_3'].mask = np.ones((n_records,n_blocks),dtype=bool) #-- Sigma Zero Backscatter for retracker 1: packed units (1e-2 dB) Data_20Hz['Sig0_1'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16) Data_20Hz['Sig0_1'].mask = np.ones((n_records,n_blocks),dtype=bool) #-- Sigma Zero Backscatter for retracker 2: packed units (1e-2 dB) Data_20Hz['Sig0_2'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16) Data_20Hz['Sig0_2'].mask = np.ones((n_records,n_blocks),dtype=bool) #-- Sigma Zero Backscatter for retracker 3: packed units (1e-2 dB) Data_20Hz['Sig0_3'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16) Data_20Hz['Sig0_3'].mask = np.ones((n_records,n_blocks),dtype=bool) #-- Freeboard: packed units (mm, 1e-3 m) #-- -9999 default value indicates computation has not been performed Data_20Hz['Freeboard'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16) Data_20Hz['Freeboard'].mask = np.ones((n_records,n_blocks),dtype=bool) #-- Interpolated Sea Surface Height Anomaly: packed units (mm, 1e-3 m) Data_20Hz['SSHA_interp'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16) Data_20Hz['SSHA_interp'].mask = np.ones((n_records,n_blocks),dtype=bool) #-- Interpolated Sea Surface Height measurement count Data_20Hz['SSHA_interp_count'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16) Data_20Hz['SSHA_interp_count'].mask = np.ones((n_records,n_blocks),dtype=bool) #-- Interpolation quality estimate RSS: packed units (mm, 1e-3 m) Data_20Hz['SSHA_interp_RMS'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16) Data_20Hz['SSHA_interp_RMS'].mask = np.ones((n_records,n_blocks),dtype=bool) #-- Peakiness: packed units (1e-2) Data_20Hz['Peakiness'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint16) Data_20Hz['Peakiness'].mask = np.ones((n_records,n_blocks),dtype=bool) #-- Number of averaged echoes or beams Data_20Hz['N_avg'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16) Data_20Hz['N_avg'].mask = np.ones((n_records,n_blocks),dtype=bool) Data_20Hz['Spare1'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16) Data_20Hz['Spare1'].mask = np.ones((n_records,n_blocks),dtype=bool) #-- Quality flags Data_20Hz['Quality_flag'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32) Data_20Hz['Quality_flag'].mask = np.ones((n_records,n_blocks),dtype=bool) #-- Corrections Application Flag Data_20Hz['Corrections_flag'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32) Data_20Hz['Corrections_flag'].mask = np.ones((n_records,n_blocks),dtype=bool) #-- Quality metric for retracker 1 Data_20Hz['Quality_1'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32) Data_20Hz['Quality_1'].mask = np.ones((n_records,n_blocks),dtype=bool) #-- Quality metric for retracker 2 Data_20Hz['Quality_2'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32) Data_20Hz['Quality_2'].mask = np.ones((n_records,n_blocks),dtype=bool) #-- Quality metric for 
retracker 3 Data_20Hz['Quality_3'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32) Data_20Hz['Quality_3'].mask = np.ones((n_records,n_blocks),dtype=bool) #-- for each record in the CryoSat file for r in range(n_records): #-- CryoSat-2 Location Group for record r Data_1Hz['Day'][r] = np.fromfile(fid,dtype='>i4',count=1) Data_1Hz['Second'][r] = np.fromfile(fid,dtype='>i4',count=1) Data_1Hz['Micsec'][r] = np.fromfile(fid,dtype='>i4',count=1) Data_1Hz['Siral_mode'][r] =
np.fromfile(fid,dtype='>u8',count=1)
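#-- Hedged sketch (not part of the original reader): the packed units noted in
#-- the comments above convert to physical values by simple scale factors,
#-- e.g. for the dictionary returned by cryosat_baseline_AB:
#-- lat_deg = CS_l2_mds['Data_1Hz']['Lat_1Hz']*1e-7   (0.1 micro-degree -> degrees)
#-- alt_m = CS_l2_mds['Data_1Hz']['Alt_1Hz']*1e-3     (mm -> m above the ellipsoid)
#-- elev_m = CS_l2_mds['Data_20Hz']['Elev']*1e-3      (masked array, mm -> m)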
import os import re import subprocess import time # import system module import sys # import some PyQt5 modules from PyQt5.QtWidgets import QApplication, QFileDialog, QWidget from PyQt5.QtGui import QImage, QPixmap from PyQt5.QtCore import QTimer, QThread, QObject, QEventLoop, pyqtSignal, pyqtSlot import qimage2ndarray #import configparser to read from conf file import configparser #import numpy and math for easy data handling import numpy as np np.set_printoptions(threshold=sys.maxsize) import math import imutils # import Opencv module import cv2 class Spectrometer(QThread): raw_spectrum_stream = pyqtSignal(np.ndarray) dark_spectrum_stream = pyqtSignal(np.ndarray) emission_spectrum_stream = pyqtSignal(np.ndarray) reference_spectrum_stream = pyqtSignal(np.ndarray) transmission_spectrum_stream = pyqtSignal(np.ndarray) image_overview_stream = pyqtSignal(np.ndarray) image_cropped_stream = pyqtSignal(np.ndarray) downsampling = 0.5 # % of original stream_open = False stream = False spectral_sensitivity_calibrated = False def __init__(self, *args, **kwargs): QThread.__init__(self, *args, **kwargs) def run(self): self.setup() self.cap = cv2.VideoCapture(1) self.set_video_capture_settings() while(True): self.viewCam() time.sleep(0.04) def stop(self): if(self.stream_open): # release video capture self.cap.release() def viewCam(self): if(self.stream): self.stream_open = True # read image in BGR format ret, image = self.cap.read() empty_image = True attempts = 0 while(empty_image): try: # convert image to RGB format image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) empty_image = False except: attempts = attempts + 1 print(str(attempts) + " attempts") if(attempts > 20): print("Video connection failed") self.cap.release() self.run() break if(self.rotation_global!= 0): # Global rotation (center image is center of rotation) image = imutils.rotate(image, self.rotation_global) if(self.rotation_spectrum!= 0): # Spectrum rotation (center of rotation is center of spectrum) M = cv2.getRotationMatrix2D((round((self.start_x+self.stop_x)/2),self.central_line) , self.rotation_spectrum, 1.0) (h, w) = image.shape[:2] image = cv2.warpAffine(image, M, (w, h)) # extract spectral data gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) line_range = round(self.no_of_lines/2) for n in range(-line_range,line_range): self.intensities = self.intensities + np.flip(gray[self.central_line+int(n)][self.width-self.stop_x:self.width-self.start_x]) # if the number of averages has been met, proceed to calculate averaged spectrum and plot if (self.cntr == self.averages - 1): self.intensities = self.intensities/self.averages # check if any special acqusition has been selected (i.e. 
dark, emission, reference or transmission) if (self.acquireDark): #self.intensitiesDark = self.intensities self.intensitiesDark = self.intensities self.acquireDark = False self.dark_spectrum_stream.emit(self.intensitiesDark) elif (self.acquireEmission): #self.intensitiesEmission = self.intensities-self.intensitiesDark intensities_temp = self.intensities-self.intensitiesDark self.intensitiesEmission = np.divide(intensities_temp, self.spectral_sensitivity, out=np.zeros_like(intensities_temp), where=self.spectral_sensitivity!=0) self.acquireEmission = False self.emission_spectrum_stream.emit(self.intensitiesEmission) elif (self.acquireReference): #self.intensitiesReference = self.intensities temp_intensities = self.intensities-self.intensitiesDark self.intensitiesReference = np.divide(temp_intensities, self.spectral_sensitivity, out=np.zeros_like(self.intensities), where=self.spectral_sensitivity!=0) self.acquireReference = False self.reference_spectrum_stream.emit(self.intensitiesReference) elif (self.acquireTransmission): temp_intensities = self.intensities-self.intensitiesDark # Handle division by 0 by replacing output elements with 0 temp_intensities_corrected = np.divide(temp_intensities, self.spectral_sensitivity, out=np.zeros_like(temp_intensities), where=self.spectral_sensitivity!=0) self.intensitiesTransmission= np.divide(temp_intensities_corrected, self.intensitiesReference, out=np.zeros_like(temp_intensities_corrected), where=self.intensitiesReference!=0) self.acquireTransmission = False self.transmission_spectrum_stream.emit(self.intensitiesTransmission) #if (sum(self.intensitiesDark)>0 and sum(self.intensitiesReference)>0 and sum(self.intensitiesTransmission)>0): # self.updateCalcPlot() self.cntr = 0 self.raw_spectrum_stream.emit(self.intensities) #self.raw_spectrum_stream.emit((self.intensities-self.intensitiesDark)/self.spectral_sensitivity) self.cntr = self.cntr + 1 # highlight the data-line for i in range(0,2): image[self.central_line-round(self.no_of_lines/2)-2+i][self.width-self.stop_x:self.width-self.start_x][:] = 255 image[self.central_line+round(self.no_of_lines/2)-2+i][self.width-self.stop_x:self.width-self.start_x][:] = 255 # overview image image_overview = image # Downsample to reduce data flow (does not affect spectral resolution) image_overview_downsampled = self.downsample_image(image_overview) # Crop image image_cropped = image[self.central_line-round(self.no_of_lines/2)-30:self.central_line+round(self.no_of_lines/2)+30, self.width-self.stop_x+5:self.width-self.start_x-5] # Send numpy array as signal self.image_overview_stream.emit(image_overview_downsampled) # Send numpy array as signal self.image_cropped_stream.emit(image_cropped) def downsample_image(self, image): width = int(image.shape[1] * self.downsampling) height = int(image.shape[0] * self.downsampling) dim = (width, height) resized_image = cv2.resize(image, dim, interpolation = cv2.INTER_AREA) return resized_image def setup(self): # Additional acquisition settings self.bins = self.stop_x-self.start_x # Needs to be defined in a more dynamic way self.waves = np.arange(self.start_x,self.stop_x,1) self.cntr = 0 self.intensities = np.zeros(self.bins) # Transmission spectrum settings self.intensitiesDark = np.zeros(self.bins) self.intensitiesEmission = np.zeros(self.bins) self.intensitiesReference = np.zeros(self.bins) self.intensitiesTransmission = np.zeros(self.bins) if(self.spectral_sensitivity_calibrated == False): self.spectral_sensitivity = np.ones(self.bins) self.acquireDark = False self.acquireEmission 
= False self.acquireReference = False self.acquireTransmission = False def set_video_capture_settings(self): # The following commands do not seem to work on all platforms (although I haven't found alternatives) # (worked for me on Windows systems, not on Mac OS) # Set to manual exposure (0.75 -> auto exposure, 0.25 -> manual exposure) self.cap.set(cv2.CAP_PROP_AUTO_EXPOSURE, 0.25) self.cap.set(cv2.CAP_PROP_CONVERT_RGB, False) # Set gain and exposure time self.cap.set(cv2.CAP_PROP_EXPOSURE, np.log(self.integration_time_ms*1e-3)/np.log(2))
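# Hedged note (not part of the original class): backends that accept manual
# exposure as a log2 value in seconds (commonly DirectShow on Windows) expect
# exactly what ln(t)/ln(2) computes above, e.g.:
# for t_ms in (10, 40, 160):
#     print(t_ms, "ms ->", np.log(t_ms*1e-3)/np.log(2))   # -6.64, -4.64, -2.64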
import numpy as np
from skimage import transform


def shift_holo(shift_gui, em_data):
    # em_data.holo_2_aligned = shift(em_data.holo_2, shift_gui)
    em_data.holo_2_aligned = np.roll(em_data.holo_2, shift_gui, axis=(0, 1))


def crop_phase(shift_gui, image_not_shifted, image_shifted):
    print('shift gui horizontal ', shift_gui[1])
    print('shift gui vertical ', shift_gui[0])
    if shift_gui[0] > 0:
        if shift_gui[1] > 0:
            image_not_shifted_crop = np.array(image_not_shifted[shift_gui[0]:, shift_gui[1]:])
            image_shifted_crop = np.array(image_shifted[:-shift_gui[0], :-shift_gui[1]])
            return [image_not_shifted_crop, image_shifted_crop]
        if shift_gui[1] < 0:
            image_not_shifted_crop = np.array(image_not_shifted[shift_gui[0]:, :shift_gui[1]])
            image_shifted_crop = np.array(image_shifted[:-shift_gui[0], -shift_gui[1]:])
            return [image_not_shifted_crop, image_shifted_crop]
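# Hedged usage sketch (invented toy data, not part of the original module):
# after a positive (rows, cols) shift, both crops cover only the overlapping
# region and end up with identical shapes.
if __name__ == '__main__':
    img = np.arange(64.0).reshape(8, 8)
    shift_gui = [2, 3]  # rows down, cols right
    shifted = np.roll(img, shift_gui, axis=(0, 1))
    fixed_crop, shifted_crop = crop_phase(shift_gui, img, shifted)
    assert fixed_crop.shape == shifted_crop.shape == (6, 5)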
import cv2 import numpy as np from typing import List from scipy import ndimage as ndi from skimage import morphology as morph from scipy.ndimage.morphology import distance_transform_edt # From https://github.com/scikit-image/scikit-image/blob/main/skimage/morphology/misc.py # warning removed def remove_small_objects( ar: np.ndarray, min_size: int=64, connectivity: int=1, in_place: bool=False, *, out: np.ndarray=None): """Remove objects smaller than the specified size. Expects ar to be an array with labeled objects, and removes objects smaller than min_size. If `ar` is bool, the image is first labeled. This leads to potentially different behavior for bool and 0-and-1 arrays. Parameters ---------- ar : ndarray (arbitrary shape, int or bool type) The array containing the objects of interest. If the array type is int, the ints must be non-negative. min_size : int, optional (default: 64) The smallest allowable object size. connectivity : int, {1, 2, ..., ar.ndim}, optional (default: 1) The connectivity defining the neighborhood of a pixel. Used during labelling if `ar` is bool. in_place : bool, optional (default: False) If ``True``, remove the objects in the input array itself. Otherwise, make a copy. Deprecated since version 0.19. Please use `out` instead. out : ndarray Array of the same shape as `ar`, into which the output is placed. By default, a new array is created. Raises ------ TypeError If the input array is of an invalid type, such as float or string. ValueError If the input array contains negative values. Returns ------- out : ndarray, same shape and type as input `ar` The input array with small connected components removed. Examples -------- >>> from skimage import morphology >>> a = np.array([[0, 0, 0, 1, 0], ... [1, 1, 1, 0, 0], ... [1, 1, 1, 0, 1]], bool) >>> b = morphology.remove_small_objects(a, 6) >>> b array([[False, False, False, False, False], [ True, True, True, False, False], [ True, True, True, False, False]]) >>> c = morphology.remove_small_objects(a, 7, connectivity=2) >>> c array([[False, False, False, True, False], [ True, True, True, False, False], [ True, True, True, False, False]]) >>> d = morphology.remove_small_objects(a, 6, out=a) >>> d is a True """ if out is not None: in_place = False if in_place: out = ar elif out is None: out = ar.copy() if min_size == 0: # shortcut for efficiency return out if out.dtype == bool: selem = ndi.generate_binary_structure(ar.ndim, connectivity) ccs = np.zeros_like(ar, dtype=np.int32) ndi.label(ar, selem, output=ccs) else: ccs = out try: component_sizes = np.bincount(ccs.ravel()) except ValueError: raise ValueError("Negative value labels are not supported. Try " "relabeling the input with `scipy.ndimage.label` or " "`skimage.morphology.label`.") too_small = component_sizes < min_size too_small_mask = too_small[ccs] out[too_small_mask] = 0 return out def binarize(inst_map: np.ndarray) -> np.ndarray: """ Binarize a labelled instance map Args: ---------- inst_map (np.ndarray): Instance map to be binarized Returns: ----------- np.ndarray: Binary mask. Shape (H, W). """ binary = np.copy(inst_map > 0) return binary.astype("uint8") # ported from https://github.com/vqdang/hover_net/blob/master/src/loader/augs.py def fix_duplicates(inst_map: np.ndarray) -> np.ndarray: """ Deal with duplicated instances in an inst map. For example, duplicated instances due to mirror padding. Args: ----------- inst_map (np.ndarray): Inst map Returns: ----------- np.ndarray: The instance segmentation map without duplicated indices. Shape (H, W). 
""" current_max_id = np.amax(inst_map) inst_list = list(np.unique(inst_map)) inst_list.remove(0) # 0 is background for inst_id in inst_list: inst = np.array(inst_map == inst_id, np.uint8) remapped_ids = ndi.label(inst)[0] remapped_ids[remapped_ids > 1] += current_max_id inst_map[remapped_ids > 1] = remapped_ids[remapped_ids > 1] current_max_id = np.amax(inst_map) return inst_map # ported from https://github.com/vqdang/hover_net/blob/master/src/loader/augs.py def remove_1px_boundary(inst_map: np.ndarray) -> np.ndarray: """ Removes 1px around each instance, removing overlaps of cells in an inst map Args: ---------- inst_map (np.ndarray): instance map Returns: ----------- np.ndarray: The instance segmentation map with 1px of instance boundaries removed. Shape (H, W). """ new_inst_map = np.zeros(inst_map.shape[:2], np.int32) inst_list = list(np.unique(inst_map)) inst_list.remove(0) # 0 is background k = morph.disk(1) for inst_id in inst_list: inst = np.array(inst_map == inst_id, np.uint8) inst = cv2.erode(inst, k, iterations=1) new_inst_map[inst > 0] = inst_id return new_inst_map # ported from https://github.com/vqdang/hover_net/blob/master/src/loader/augs.py def get_weight_map( inst_map: np.ndarray, sigma: float=5.0, w0: float=10.0 ) -> np.ndarray: """ Generate a weight map like in U-Net paper Args: ----------- inst_map (np.ndarray): Instance map sigma (float): Factor multiplied to the for the distance maps w0 (float): Weight multiplied to the penalty map Returns: ----------- np.ndarray: Nuclei boundary weight map. Shape (H, W). """ inst_list = list(np.unique(inst_map)) inst_list.remove(0) # 0 is background if len(inst_list) <= 1: # 1 instance only return np.zeros(inst_map.shape[:2]) stacked_inst_bgd_dst = np.zeros(inst_map.shape[:2] + (len(inst_list),)) for idx, inst_id in enumerate(inst_list): inst_bgd_map = np.array(inst_map != inst_id , np.uint8) inst_bgd_dst = distance_transform_edt(inst_bgd_map) stacked_inst_bgd_dst[..., idx] = inst_bgd_dst near1_dst = np.amin(stacked_inst_bgd_dst, axis=2) near2_dst = np.expand_dims(near1_dst, axis=2) near2_dst = stacked_inst_bgd_dst - near2_dst near2_dst[near2_dst == 0] = np.PINF # very large near2_dst = np.amin(near2_dst, axis=2) near2_dst[inst_map > 0] = 0 # the instances near2_dst = near2_dst + near1_dst # to fix pixel where near1 == near2 near2_eve = np.expand_dims(near1_dst, axis=2) # to avoide the warning of a / 0 near2_eve = (1.0 + stacked_inst_bgd_dst) / (1.0 + near2_eve) near2_eve[near2_eve != 1] = 0 near2_eve = np.sum(near2_eve, axis=2) near2_dst[near2_eve > 1] = near1_dst[near2_eve > 1] # pix_dst = near1_dst + near2_dst pen_map = pix_dst / sigma pen_map = w0 * np.exp(- pen_map**2 / 2) pen_map[inst_map > 0] = 0 # inner instances zero return pen_map def center_crop(img: np.ndarray, ch: int, cw: int) -> np.ndarray: """ Center crop an input image Args: ---------- img (np.ndarray): Input img. Shape (H, W). ch (int): Crop height cw (int): crop width Returns: ---------- np.ndarray: Center cropped image. Shape (ch, cw). """ if len(img.shape) == 3: H, W, _ = img.shape else: H, W = img.shape x = W // 2 - (cw // 2) y = H // 2 - (ch // 2) if len(img.shape) == 3: img = img[y:y + ch, x:x + cw, :] else: img = img[y:y + ch, x:x + cw] return img # Ported from https://github.com/vqdang/hover_net/blob/master/src/misc/utils.py def bounding_box(inst_map: np.ndarray) -> List[int]: """ Bounding box coordinates for nuclei instance that is given as input. This assumes that the inst_map has only one instance in it. 
    Args:
    ----------
        inst_map (np.ndarray): Instance labels

    Returns:
    ----------
        List: List of the origin- and end-point coordinates of the bbox
    """
    rows = np.any(inst_map, axis=1)
    cols = np.any(inst_map, axis=0)
    rmin, rmax = np.where(rows)[0][[0, -1]]
    cmin, cmax = np.where(cols)[0][[0, -1]]
    # due to python indexing, need to add 1 to max
    # else accessing will be 1px in the box, not out
    rmax += 1
    cmax += 1
    return [rmin, rmax, cmin, cmax]


# ported from https://github.com/vqdang/hover_net/tree/master/src/metrics/sample
def remap_label(pred: np.ndarray) -> np.ndarray:
    """
    Rename all instance ids so that the ids are contiguous, i.e. [0, 1, 2, 3]
    rather than [0, 2, 4, 6]. The ordering of instances (which one comes
    first) is preserved.

    Args:
    -----------
        pred (np.ndarray): The 2d array containing instances, where each
            instance is marked by a non-zero integer

    Returns:
    -----------
        np.ndarray: inst map with remapped contiguous labels
    """
    pred_id = list(np.unique(pred))
    pred_id.remove(0)
    if len(pred_id) == 0:
        return pred  # no label
    new_pred = np.zeros(pred.shape, np.int32)
    for idx, inst_id in enumerate(pred_id):
        new_pred[pred == inst_id] = idx + 1
    return new_pred


# Ported from https://github.com/vqdang/hover_net/blob/master/src/misc/utils.py
def get_inst_centroid(inst_map: np.ndarray) -> np.ndarray:
    """
    Get centroid x, y coordinates from each unique nuclei instance

    Args:
    ----------
        inst_map (np.ndarray): Nuclei instance map

    Returns:
    ----------
        an np.ndarray of shape (num_instances, 2)

        Example:
            array([[780.05089286, 609.11741071],
                   [890.64603817, 237.89589358],
                   [944.37971014, 541.3942029 ],
                   ...,
                   [ 77.5       , 536.        ],
                   [ 78.21428571, 541.64285714],
                   [485.        , 893.        ]])
    """
    inst_centroid_list = []
    inst_id_list = list(np.unique(inst_map))
    for inst_id in inst_id_list[1:]:  # avoid 0 i.e. background
        mask = np.array(inst_map == inst_id, np.uint8)
        inst_moment = cv2.moments(mask)
        inst_centroid = [(inst_moment["m10"] / inst_moment["m00"]),
                         (inst_moment["m01"] / inst_moment["m00"])]
        inst_centroid_list.append(inst_centroid)
    return np.array(inst_centroid_list)


def get_inst_types(
        inst_map: np.ndarray,
        type_map: np.ndarray
) -> np.ndarray:
    """
    Get the types of every single instance in an instance map and write them
    to a 1D-array

    Args:
    ----------
        inst_map (np.ndarray): Instance map of shape (H, W)
        type_map (np.ndarray): Type map of shape (H, W). Labels are indices.

    Returns:
    ----------
        an np.ndarray of shape (num_instances, 1)

        Example:
            array([[3], [3], [3], ..., [1], [1], [1]], dtype=int32)
    """
    inst_ids = list(np.unique(inst_map))
    inst_ids.remove(0)
    inst_types = np.full((len(inst_ids), 1), 0, dtype=np.int32)
    for j, id_ in enumerate(inst_ids):
        inst_type = np.unique(type_map[inst_map == id_])[0]
        inst_types[j] = inst_type
    return inst_types


def get_type_instances(
        inst_map: np.ndarray,
        type_map: np.ndarray,
        class_num: int
) -> np.ndarray:
    """
    Get the instances from an instance map that belong to class 'class_num'.
    Drop everything else. The type map and inst map need to have the exact
    same non-zero pixels.

    Args:
    ----------
        inst_map (np.ndarray): Instance map of shape (H, W)
        type_map (np.ndarray): Type map of shape (H, W). Labels are indices.
        class_num (int): Class label

    Returns:
    ----------
        np.ndarray: Numpy ndarray of shape (H, W) where only the instances
        belonging to class 'class_num' are kept
    """
    t = type_map.astype("uint8") == class_num
    imap = np.copy(inst_map)
    imap[~t] = 0
    return imap


def one_hot(type_map: np.ndarray, num_classes: int) -> np.ndarray:
    """
    Convert a type map of shape (H, W) to one hot encoded types of shape
    (H, W, C)

    Args:
    -----------
        type_map (np.ndarray): Type map of shape (H, W). Labels are indices.
        num_classes (int): Number of classes in the dataset

    Returns:
    -----------
        np.ndarray: The input array (H, W) in one hot format.
        Shape: (H, W, num_classes).
    """
    return np.eye(num_classes + 1)[type_map]


def type_map_flatten(type_map: np.ndarray) -> np.ndarray:
    """
    Convert a one hot type map of shape (H, W, C) to a single channel
    index map of shape (H, W)

    Args:
    -----------
        type_map (np.ndarray): Type map to be flattened

    Returns:
    -----------
        np.ndarray: Flattened one hot np.ndarray, i.e. (H, W, C) --> (H, W)
    """
    type_out = np.zeros([type_map.shape[0], type_map.shape[1]])
    for i, t in enumerate(np.unique(type_map)):
        type_tmp = type_map[..., i] == t
        type_out += (type_tmp * t)
    return type_out


def to_inst_map(binary_mask: np.ndarray) -> np.ndarray:
    """
    Takes in a binary mask -> fill holes -> removes small objects -> label
    connected components. If a class channel is included this assumes that
    binary_mask[..., 0] is the bg channel and binary_mask[..., 1] the
    foreground.

    Args:
    -----------
        binary_mask (np.ndarray): A binary mask to be labelled.
            Shape (H, W) or (H, W, C)

    Returns:
    -----------
        np.ndarray: labelled instances np.ndarray of shape (H, W)
    """
    if len(binary_mask.shape) == 3:
        binary_mask = binary_mask[..., 1]

    mask = ndi.binary_fill_holes(binary_mask)
    mask = remove_small_objects(mask.astype(bool), min_size=10)
    inst_map = ndi.label(mask)[0]

    return inst_map


def cv2_opening(
        inst_map: np.ndarray,
        iterations: int=2
) -> np.ndarray:
    """
    Takes in an inst_map -> binarize -> apply morphological opening
    (2 iterations) -> label

    Args:
    -----------
        inst_map (np.ndarray): Instance map to be opened. Shape (H, W)
        iterations (int, default=2): Number of iterations for the operation

    Returns:
    -----------
        np.ndarray: Morphologically opened np.ndarray of shape (H, W)
    """
    inst_map = binarize(inst_map)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    new_inst_map = (inst_map * 255).astype(np.uint8)
    new_inst_map = cv2.morphologyEx(
        new_inst_map, cv2.MORPH_OPEN, kernel, iterations=iterations
    )
    inst_map = ndi.label(new_inst_map)[0]
    return inst_map


def cv2_closing(
        inst_map: np.ndarray,
        iterations: int=2
) -> np.ndarray:
    """
    Takes in an inst_map -> binarize -> apply morphological closing
    (2 iterations) -> label

    Args:
    -----------
        inst_map (np.ndarray): Instance map to be closed. Shape (H, W)
        iterations (int, default=2): Number of iterations for the operation

    Returns:
    -----------
        np.ndarray: Morphologically closed np.ndarray of shape (H, W)
    """
    inst_map = binarize(inst_map)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    new_inst_map = (inst_map * 255).astype(np.uint8)
    new_inst_map = cv2.morphologyEx(
        new_inst_map, cv2.MORPH_CLOSE, kernel, iterations=iterations
    )
    inst_map = ndi.label(new_inst_map)[0]
    return inst_map


def remove_debris(inst_map: np.ndarray, min_size: int = 10):
    """
    (Actually) Remove small objects from an inst map

    Args:
    ------------
        inst_map (np.ndarray): Instance map. Shape (H, W)
        min_size (int, default=10): Min size for the objects that are
            left untouched

    Returns:
    -----------
        np.ndarray: Cleaned np.ndarray of shape (H, W)
    """
    res = np.zeros(inst_map.shape, np.int32)
    for ix in np.unique(inst_map)[1:]:
        nuc_map = np.copy(inst_map == ix)
        y1, y2, x1, x2 = bounding_box(nuc_map)
        y1 = y1 - 2 if y1 - 2 >= 0 else y1
        x1 = x1 - 2 if x1 - 2 >= 0 else x1
        x2 = x2 + 2 if x2 + 2 <= inst_map.shape[1] - 1 else x2
        y2 = y2 + 2 if y2 + 2 <= inst_map.shape[0] - 1 else y2
        nuc_map_crop = nuc_map[y1:y2, x1:x2].astype("int32")
        nuc_map_crop = remove_small_objects(
            nuc_map_crop.astype(bool), min_size, connectivity=1
        ).astype("int32")
        nuc_map_crop[nuc_map_crop > 0] = ix
        res[y1:y2, x1:x2] += nuc_map_crop
    return res


def remove_area_debris(sem_map: np.ndarray, min_size: int=10000):
    """
    Remove small objects from a semantic area map

    Args:
    ------------
        sem_map (np.ndarray): Semantic seg map. Shape (H, W)
        min_size (int, default=10000): Min size for the objects that are
            left untouched

    Returns:
    -----------
        np.ndarray: Cleaned np.ndarray of shape (H, W)
    """
    res = np.copy(sem_map)
    classes = np.unique(sem_map)
    # skip bg
    if 0 in classes:
        classes = classes[1:]

    for i in classes:
        area = np.array(res == i, np.uint32)
        inst_map = ndi.label(area)[0]
        labels, counts = np.unique(inst_map, return_counts=True)
        for label, npixls in zip(labels, counts):
            if npixls < min_size:
                res[inst_map == label] = 0
                # get the fill label
                y1, y2, x1, x2 = bounding_box(inst_map == label)
                y1 = y1 - 2 if y1 - 2 >= 0 else y1
                x1 = x1 - 2 if x1 - 2 >= 0 else x1
                x2 = x2 + 2 if x2 + 2 <= res.shape[1] - 1 else x2
                y2 = y2 + 2 if y2 + 2 <= res.shape[0] - 1 else y2
                l, c = np.unique(res[y1:y2, x1:x2], return_counts=True)
                if 0 in l and len(l) > 1:
                    l = l[1:]
                    c = c[1:]
                fill_label = l[np.argmax(c)]
                res[inst_map == label] = fill_label
    return res


def fill_holes(sem_map: np.ndarray, min_size: int=5000):
    """
    Fill holes from a semantic area map

    Args:
    ------------
        sem_map (np.ndarray): Semantic seg map. Shape (H, W)
        min_size (int, default=5000): Min size for the objects that are
            left untouched

    Returns:
    -----------
        np.ndarray: Cleaned np.ndarray of shape (H, W)
    """
    res = np.copy(sem_map)
    bg = res == 0
    bg_objs = ndi.label(bg)[0]

    for i in np.unique(bg_objs)[1:]:
        y1, y2, x1, x2 = bounding_box(bg_objs == i)
        y1 = y1 - 2 if y1 - 2 >= 0 else y1
        x1 = x1 - 2 if x1 - 2 >= 0 else x1
        x2 = x2 + 2 if x2 + 2 <= res.shape[1] - 1 else x2
        y2 = y2 + 2 if y2 + 2 <= res.shape[0] - 1 else y2
        crop = res[y1:y2, x1:x2]
        labels, counts = np.unique(crop, return_counts=True)
        if counts[0] > min_size:
            continue
        if len(counts) == 1:
            continue
        # skip 0 index
        labels = labels[1:]
        counts = counts[1:]
        # fill bg objs
        fill_label = labels[np.argmax(counts)]
        res[bg_objs == i] = fill_label
    return res
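# The excerpt breaks off mid-expression (the np.argmax(counts) completion
# picks the most frequent surrounding label as the fill value). Taken
# together, these helpers form a small post-processing pipeline for
# segmentation masks. A minimal sketch of the one-hot round trip on a toy
# map; `binarize` and `bounding_box` are assumed to be sibling helpers from
# the same module and are not exercised here:

import numpy as np

# toy (4, 4) type map with classes {0, 1, 2}
type_map = np.array([
    [0, 1, 1, 0],
    [0, 1, 1, 0],
    [0, 0, 2, 2],
    [0, 0, 2, 2],
])

oh = one_hot(type_map, num_classes=2)   # shape (4, 4, 3), one channel per label
flat = type_map_flatten(oh)             # back to shape (4, 4)
assert (flat == type_map).all()

# to_inst_map would then fill holes, drop objects below min_size=10 px,
# and run connected-component labelling on a chosen foreground channel.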
import pandas as pd
import numpy as np
import os
import shutil


class MockData:
    test_dir = os.path.dirname(os.path.realpath(__file__))
    script_loc = os.path.split(test_dir)[0]
    config_dir = os.path.join(script_loc, "mspypeline", "config")
    go_dir = os.path.join(config_dir, "go_terms")
    pathway_dir = os.path.join(config_dir, "pathways")
    mock_data_dir = os.path.join(test_dir, "mock_data")

    @staticmethod
    def create_mock_data(
            test_cases=(0, 10, 20, 50, 100),
            number_of_non_pathway_genes=1000,
            average=28,
            gene_sigma=2,
            group_sigma=1,
            experiment_sigma=1,
            tech_rep_sigma=0.3,
            noise_sigma=0.5,
            design_combinations=((True, True), (False, True), (True, False), (False, False)),  # (has group, has technical replicates)
            number_of_technical_replicates=(3, 8, 1, 1),  # bad fix to make it easy
            number_of_experiments=(3, 6, 15, 30),
            number_of_groups=(4, 1, 8, 1),  # bad fix to make it easy
            seed=100,
            save_to_disk=True
    ):
        # seed numpy's global RNG; the original `np.seed = seed` only
        # attached an attribute to the module and never seeded anything
        np.random.seed(seed)
        N = sum(test_cases) + number_of_non_pathway_genes
        os.makedirs(MockData.mock_data_dir, exist_ok=True)

        # accumulate all genes
        pathway_genes = {}
        for file_name in os.listdir(MockData.pathway_dir):
            file = os.path.join(MockData.pathway_dir, file_name)
            with open(file) as f:
                pathway = f.readline().strip()
                pathway_genes[pathway] = []
                f.readline()
                for line in f:
                    pathway_genes[pathway].append(line.strip())

        # sort the pathways into the different possible cases
        # TODO this samples a lot of duplicate gene names
        free_pathway_genes = set(pathway_genes)
        test_case_dict = {t: [] for t in test_cases}
        for test_case in reversed(sorted(test_cases)):
            to_rm = set()
            for pathway in free_pathway_genes:
                if len(pathway_genes[pathway]) >= test_case:
                    test_case_dict[test_case].append(pathway)
                    to_rm.add(pathway)
            free_pathway_genes = free_pathway_genes - to_rm

        # randomly sample the genes from the pathways
        genes = []
        for test_case in test_cases:
            if test_case == 0:
                continue
            pathway = np.random.choice(test_case_dict[test_case], 1)[0]
            genes += list(np.random.choice(pathway_genes[pathway], test_case, replace=False))

        # add the non pathway genes
        for i in range(number_of_non_pathway_genes):
            genes.append(f"GENENP{i}")
        assert len(genes) == N
        genes = pd.Series(genes, name="Gene names")

        for i, (has_group, has_tech_rep) in enumerate(design_combinations):
            n_experiments = number_of_experiments[i]
            n_tech_reps = number_of_technical_replicates[i]
            n_groups = number_of_groups[i]
            print(f"n group: {n_groups}, n exp: {n_experiments}, n tech: {n_tech_reps}")
            if has_group:
                assert n_groups > 1
            else:
                assert n_groups == 1
            if has_tech_rep:
                assert n_tech_reps > 1
            else:
                assert n_tech_reps == 1
            assert n_experiments > 1
            group_name = np.array([f"Group{x}_" for x in range(n_groups)]).reshape((n_groups, 1, 1))
            experiment_name = np.array([f"Experiment{x}_" for x in range(n_experiments)]).reshape((1, n_experiments, 1))
            technical_replicate_name = np.array([f"Rep{x}" for x in range(n_tech_reps)]).reshape((1, 1, n_tech_reps))
            names = np.core.defchararray.add(
                np.core.defchararray.add(group_name, experiment_name),
                technical_replicate_name
            ).reshape((n_groups * n_experiments * n_tech_reps))
            average_effect = np.ones((N, n_groups, n_experiments, n_tech_reps)) * average
            gene_effect = np.random.normal(0, gene_sigma, (N, 1, 1, 1))
            group_effect = np.random.uniform(0, group_sigma, (N, n_groups, 1, 1))
            experiment_effect = np.random.normal(0, experiment_sigma, (1, 1, n_experiments, 1))
            technical_replicate_effect = np.random.normal(0, tech_rep_sigma, (1, 1, 1, n_tech_reps))
            noise =
np.random.normal(0, noise_sigma, (N, n_groups, n_experiments, n_tech_reps))
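# The np.random.normal(...) completion draws the final residual-noise term.
# The additive design above is easier to see in isolation: each effect is
# drawn with singleton axes and broadcast against the others. A minimal
# standalone sketch of the same idea with hypothetical sizes (not the
# mspypeline defaults):

import numpy as np

rng = np.random.default_rng(100)
N, G, E, R = 5, 2, 2, 2  # genes, groups, experiments, technical replicates

intensity = (
    28.0                                  # global average
    + rng.normal(0, 2.0, (N, 1, 1, 1))    # per-gene offset
    + rng.uniform(0, 1.0, (N, G, 1, 1))   # gene-by-group effect
    + rng.normal(0, 1.0, (1, 1, E, 1))    # experiment effect
    + rng.normal(0, 0.3, (1, 1, 1, R))    # technical-replicate effect
    + rng.normal(0, 0.5, (N, G, E, R))    # residual noise
)
assert intensity.shape == (N, G, E, R)
flat = intensity.reshape(N, G * E * R)    # one row per gene, one column per sample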
import random
import pandas as pd
import numpy as np
from multiprocessing import Pool
from scipy.spatial import distance
from scipy.spatial.distance import cdist

from src.configs import GENERAL, PREPROCESSING, MODELING

N_RANDOM_OBS = None
N_POTENTIAL_EL = 3
DISTANCE_TYPE = MODELING['distance_type']


class TripletGenerator:
    def __init__(self, n_jobs=5):
        self.n_jobs = n_jobs
        self.paired_nodes = []

    @staticmethod
    def map_parallel(func, iterable_args, n_jobs=1):
        if n_jobs == 1:
            # unpack the argument tuples like Pool.starmap does and
            # materialize the results so callers can index into them;
            # the original returned a lazy map object over un-unpacked tuples
            return [func(*args) for args in iterable_args]
        with Pool(n_jobs) as pool:
            result = pool.starmap(func, iterable_args)
        return result

    @staticmethod
    def corrected_cosine(x, y, corr):
        x, y, corr = np.array(x), np.array(y), np.array(corr)
        corrected_x = x - corr
        corrected_y = y - corr
        return distance.cosine(corrected_x, corrected_y)

    def choose_pos_x_hard(self, X, y, anchor_x, anchor_y,
                          n_random_objects=N_RANDOM_OBS,
                          distance_type=DISTANCE_TYPE):
        """Choose the pos label with attention on the most remote examples."""
        X = X[y == anchor_y]
        y = y[y == anchor_y]
        if n_random_objects is not None:
            n_random_objects = n_random_objects if n_random_objects < X.shape[0] else X.shape[0]
        else:
            n_random_objects = X.shape[0]
        indices = np.random.choice(X.shape[0], n_random_objects, replace=False)
        X, y = X[indices], y[indices]
        y = np.array(y)

        # note: the lambdas only work on the serial (n_jobs == 1) path;
        # Pool.starmap cannot pickle them
        if distance_type == 'euclidean':
            d = self.map_parallel(
                lambda x, y: distance.euclidean(x, y) / distance.cosine(x, y),
                [(anchor_x, ex) for ex in X])
        elif distance_type == 'cosine':
            d = self.map_parallel(distance.cosine, [(anchor_x, ex) for ex in X])
        elif distance_type == 'minkowski':
            d = self.map_parallel(
                lambda x, y: distance.minkowski(x, y) / distance.cosine(x, y),
                [(anchor_x, ex) for ex in X])
        elif distance_type == 'chebyshev':
            d = self.map_parallel(
                lambda x, y: distance.chebyshev(x, y) / distance.cosine(x, y),
                [(anchor_x, ex) for ex in X])
        elif distance_type == 'cityblock':
            d = self.map_parallel(
                lambda x, y: distance.cityblock(x, y) / distance.cosine(x, y),
                [(anchor_x, ex) for ex in X])
        else:
            raise KeyError('Unknown distance metric!')

        # the hardest positive is the most distant same-class example
        pos_x = X[np.argmax(np.asarray(d))]
        return pos_x

    def choose_neg_x_hard(self, X, y, anchor_x, pos_x, anchor_y,
                          n_random_objects=N_RANDOM_OBS,
                          distance_type=DISTANCE_TYPE):
        """Choose the neg label with attention on the closest examples."""
        X = X[y != anchor_y]
        y = y[y != anchor_y]
        if n_random_objects is not None:
            n_random_objects = n_random_objects if n_random_objects < X.shape[0] else X.shape[0]
        else:
            n_random_objects = X.shape[0]
        indices =
np.random.choice(X.shape[0], n_random_objects, replace=False)
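# The completion draws the random negative-candidate subsample. For the
# distance step itself, the per-pair loop can typically be replaced by a
# single vectorized cdist call; a minimal sketch of the same hard-positive /
# hard-negative selection on toy embeddings (plain cosine distance only,
# not the ratio variants used above):

import numpy as np
from scipy.spatial.distance import cdist

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 16))        # embeddings
y = rng.integers(0, 5, size=100)      # class labels
anchor_x, anchor_y = X[0], y[0]

pos_pool = X[y == anchor_y]
neg_pool = X[y != anchor_y]

d_pos = cdist(anchor_x[None, :], pos_pool, metric='cosine')[0]
d_neg = cdist(anchor_x[None, :], neg_pool, metric='cosine')[0]

hard_pos = pos_pool[np.argmax(d_pos)]  # farthest same-class example
hard_neg = neg_pool[np.argmin(d_neg)]  # closest other-class example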
# Copyright 2016 <NAME> and The Novo Nordisk Foundation Center for Biosustainability, DTU.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import numpy as np
import pytest

from marsi.chemistry import openbabel, rdkit
from marsi.chemistry.common import SOLUBILITY, tanimoto_coefficient, tanimoto_distance
from marsi.chemistry.molecule import Molecule

TEST_DIR = os.path.dirname(__file__)

MOL_VOLUMES = {
    "Diphenylketene": 173.769,
    "Acetate": None,
    "Cobamamide": None
}

MOL_RINGS = {
    "Diphenylketene": 2,
    "Acetate": 0,
    "Cobamamide": 15
}

MOL_BONDS = {
    "Diphenylketene": 26,
    "Acetate": 6,
    "Cobamamide": 223
}

MOL_ATOMS = {
    "Diphenylketene": 25,
    "Acetate": 7,
    "Cobamamide": 209
}

MOL_CARBONS = {
    "Diphenylketene": 14,
    "Acetate": 2,
    "Cobamamide": 72
}

molecules = list(MOL_CARBONS.keys())

CARBON_ATOMIC_NUMBER = 6
HYDROGEN_ATOMIC_NUMBER = 1

INCHI = "InChI=1S/C11H12N2O2/c12-9(11(14)15)5-7-6-13-10-4-2-1-3-8(7)10/h1-4,6,9,13H,5,12H2,(H,14,15)/t9-/m0/s1"
INCHI_KEY = "<KEY>"


class openbabel_handler(object):
    @staticmethod
    def num_atoms(mol):
        return len(mol.atoms)

    @staticmethod
    def num_bonds(mol):
        return mol.OBMol.NumBonds()

    @staticmethod
    def num_carbon(mol):
        return len([a for a in mol.atoms if a.atomicnum == CARBON_ATOMIC_NUMBER])

    @staticmethod
    def num_protons(mol):
        return len([a for a in mol.atoms if a.atomicnum == HYDROGEN_ATOMIC_NUMBER])


class rdkit_handler(object):
    @staticmethod
    def num_atoms(mol):
        return mol.GetNumAtoms()

    @staticmethod
    def num_bonds(mol):
        return mol.GetNumBonds()

    @staticmethod
    def num_carbon(mol):
        return len([a for a in mol.GetAtoms() if a.GetAtomicNum() == CARBON_ATOMIC_NUMBER])

    @staticmethod
    def num_protons(mol):
        return len([a for a in mol.GetAtoms() if a.GetAtomicNum() == HYDROGEN_ATOMIC_NUMBER])


@pytest.fixture(params=['rdkit', 'openbabel'])
def chemlib(request):
    if request.param == 'rdkit':
        return rdkit, rdkit_handler
    elif request.param == 'openbabel':
        return openbabel, openbabel_handler
    else:
        raise ValueError("Invalid param %s" % request.param)


def test_solubility_thresholds():
    high_solubility_values = [0.00007, 0.00016, 0.1, 1000]
    medium_solubility_values = [0.00001, 0.00002, 0.00004, 0.00006]
    low_solubility_values = [0.000001, 0.000002, 0.0000025, 0.000005, 0.0000099]

    assert all(SOLUBILITY['all'](v) for v in high_solubility_values)
    assert all(SOLUBILITY['all'](v) for v in medium_solubility_values)
    assert all(SOLUBILITY['all'](v) for v in low_solubility_values)

    assert all(SOLUBILITY['high'](v) for v in high_solubility_values)
    assert not any(SOLUBILITY['high'](v) for v in medium_solubility_values)
    assert not any(SOLUBILITY['high'](v) for v in low_solubility_values)

    assert not any(SOLUBILITY['medium'](v) for v in high_solubility_values)
    assert all(SOLUBILITY['medium'](v) for v in medium_solubility_values)
    assert not any(SOLUBILITY['medium'](v) for v in low_solubility_values)

    assert not any(SOLUBILITY['low'](v) for v in high_solubility_values)
    assert not any(SOLUBILITY['low'](v) for v in medium_solubility_values)
    assert all(SOLUBILITY['low'](v) for v in low_solubility_values)


def test_tanimoto_coefficient(benchmark):
    fp1 = np.array([1, 2, 3], dtype=np.int32)
    fp2 = np.array([1, 2, 4], dtype=np.int32)
    fp3 =
np.array([1, 2, 3, 4], dtype=np.int32)
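# tanimoto_coefficient itself is not shown in this excerpt. For fingerprints
# treated as sets of integers, the usual definition is |A ∩ B| / |A ∪ B|;
# a reference sketch under that assumption (my own helper, not the marsi
# implementation):

import numpy as np

def tanimoto(fp_a: np.ndarray, fp_b: np.ndarray) -> float:
    """Tanimoto similarity of two fingerprints viewed as sets of integers."""
    a, b = set(fp_a.tolist()), set(fp_b.tolist())
    return len(a & b) / len(a | b)

fp1 = np.array([1, 2, 3], dtype=np.int32)
fp2 = np.array([1, 2, 4], dtype=np.int32)
print(tanimoto(fp1, fp1))  # 1.0 (identical sets)
print(tanimoto(fp1, fp2))  # 0.5 (2 shared of 4 total)

# tanimoto_distance would then presumably be 1 - tanimoto_coefficient,
# matching how the two names are imported together above.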
#-*- coding:utf-8 -*-

from __future__ import division
from __future__ import absolute_import
from __future__ import print_function

import os
import torch
import argparse
import torch.nn as nn
import torch.utils.data as data
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms

import cv2
import time
import numpy as np
from PIL import Image

from data.config import cfg
from s3fd import build_s3fd
from torch.autograd import Variable
from utils.augmentations import to_chw_bgr

parser = argparse.ArgumentParser(description='s3fd demo')
parser.add_argument('--save_dir', type=str, default='tmp/',
                    help='Directory for detect result')
parser.add_argument('--model', type=str, default='weights/s3fd.pth',
                    help='trained model')
parser.add_argument('--thresh', default=0.6, type=float,
                    help='Final confidence threshold')
args = parser.parse_args()

if not os.path.exists(args.save_dir):
    os.makedirs(args.save_dir)

use_cuda = torch.cuda.is_available()

if use_cuda:
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
else:
    torch.set_default_tensor_type('torch.FloatTensor')


def detect(net, img_path, thresh):
    # img = cv2.imread(img_path, cv2.IMREAD_COLOR)
    img = Image.open(img_path)
    if img.mode == 'L':
        img = img.convert('RGB')

    img =
np.array(img)
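# The completion converts the PIL image to a NumPy array before
# preprocessing. to_chw_bgr is imported from utils.augmentations but not
# shown here; conventionally it reorders an HWC RGB array into the CHW BGR
# layout that Caffe-style face detectors such as S3FD expect. A plausible
# sketch under that assumption (hypothetical name to_chw_bgr_sketch):

import numpy as np

def to_chw_bgr_sketch(image: np.ndarray) -> np.ndarray:
    """HWC (RGB) image -> CHW (BGR) array, as S3FD-style nets expect."""
    chw = image.transpose(2, 0, 1)   # HWC -> CHW
    bgr = chw[::-1, :, :]            # RGB channel order -> BGR
    return np.ascontiguousarray(bgr)

img = np.zeros((480, 640, 3), dtype=np.uint8)
x = to_chw_bgr_sketch(img)
assert x.shape == (3, 480, 640)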
import numpy as np

'''
Constants
'''
# 1) Parameters of Short Time Fourier Analysis:
Fs_ref = 16e3        # 1.1) Reference Sampling frequency
M_ref = 512          # 1.2) Size of analysis window
# Mo_ref = 0.75*M_ref  # 1.3) Number of overlapping samples in consecutive frames
Mo_ref = 352

# 2) Parameters of Noise Spectrum Estimate
w = 1                # 2.1) Size of frequency smoothing window function = 2*w+1
alpha_s_ref = 0.9    # 2.2) Recursive averaging parameter for the smoothing operation
Nwin = 8             # 2.3) Resolution of local minima search
Vwin = 15
delta_s = 1.67       # 2.4) Local minimum factor
Bmin = 1.66
delta_y = 4.6        # 2.4) Local minimum factor
delta_yt = 3
alpha_d_ref = 0.85   # 2.7) Recursive averaging parameter for the noise

# 3) Parameters of a Priori Probability for Signal-Absence Estimate
alpha_xi_ref = 0.7   # 3.1) Recursive averaging parameter

# 4) Parameters of "Decision-Directed" a Priori SNR Estimate
alpha_eta_ref = 0.95  # 4.1) Recursive averaging parameter
eta_min_dB = -18      # 4.2) Lower limit constraint

# 5) Flags
nonstat = 'medium'   # Non stationarity

# new version
Fs = Fs_ref
M = int(M_ref)
Mo = int(Mo_ref)
Mno = int(M - Mo)
alpha_s = alpha_s_ref
alpha_d = alpha_d_ref
alpha_eta = alpha_eta_ref
alpha_xi = alpha_xi_ref
alpha_d_long = 0.99
eta_min = 10 ** (eta_min_dB / 10)
# b = hanning(2*w+1)
# b = b/sum(b)  # normalize the window function
b = np.array([0, 1, 0])
M21 = int(M / 2 + 1)


class NoiseEstimator(object):
    def update(self, features):
        pass


class ImcraNoiseEstimator(NoiseEstimator):
    def __init__(self):
        self.l = 0  # count of frames
        self.l_mod_lswitch = 0
        self.S = np.zeros(M21)
        self.St = np.zeros(M21)
        self.Sy = np.zeros(M21)
        self.Smin = np.zeros(M21)
        self.Smint = np.zeros(M21)
        self.SMact = np.zeros(M21)
        self.SMactt = np.zeros(M21)
        self.SW = np.zeros((M21, Nwin))
        self.SWt = np.zeros((M21, Nwin))
        self.lambda_d = np.zeros(M21)
        self.lambda_dav = np.zeros(M21)

    def update(self, features):
        Ya2 = features['signal_power']
        self.eta_2term = features['eta_2term']
        self.l = self.l + 1

        gamma = Ya2 / np.maximum(self.lambda_d, 1e-10)  # a posteriori SNR
        # decision-directed a priori SNR estimate
        eta = alpha_eta * self.eta_2term + (1 - alpha_eta) * np.maximum(gamma - 1, 0)
        eta = np.maximum(eta, eta_min)
        v = gamma * eta / (1 + eta)

        # 2.1. smooth over frequency
        Sf = np.convolve(b, Ya2)  # smooth over frequency
        Sf = Sf[w:M21 + w]
        if self.l == 1:
            self.Sy = Ya2
            self.S = Sf
            self.St = Sf
            self.lambda_dav = Ya2
        else:
            self.S = alpha_s * self.S + (1 - alpha_s) * Sf  # smooth over time

        if self.l < 15:
            self.Smin = self.S
            self.SMact = self.S
        else:
            self.Smin = np.minimum(self.Smin, self.S)
            self.SMact = np.minimum(self.SMact, self.S)

        # Local Minima Search
        I_f = np.zeros(M21)
        for i in range(M21):
            I_f[i] = Ya2[i] < delta_y * Bmin * self.Smin[i] and self.S[i] < delta_s * Bmin * self.Smin[i] and 1

        conv_I = np.convolve(b, I_f)
        conv_I = conv_I[w:M21 + w]
        Sft = self.St  # note: Sft aliases self.St, so the writes below mutate it
        idx = [i for i, v in enumerate(conv_I) if v > 0]
        if len(idx) != 0:
            if w:
                conv_Y = np.convolve(b, I_f * Ya2)
                conv_Y = conv_Y[w:M21 + w]
                Sft[idx] = conv_Y[idx] / conv_I[idx]
            else:
                Sft[idx] = Ya2[idx]

        if self.l < 15:
            self.St = self.S
            self.Smint = self.St
            self.SMactt = self.St
        else:
            self.St[:] = alpha_s * self.St + (1 - alpha_s) * Sft
            self.Smint[:] = np.minimum(self.Smint, self.St)
            self.SMactt[:] = np.minimum(self.SMactt, self.St)

        qhat =
np.ones(M21)
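# The update method starts from the decision-directed a priori SNR
# estimate, eta = alpha_eta * eta_prev + (1 - alpha_eta) * max(gamma - 1, 0),
# floored at eta_min. A minimal standalone sketch of just that recursion
# with toy inputs:

import numpy as np

alpha_eta, eta_min = 0.95, 10 ** (-18 / 10)

def decision_directed_snr(eta_prev, gamma):
    """One frame of the decision-directed a priori SNR update."""
    eta = alpha_eta * eta_prev + (1 - alpha_eta) * np.maximum(gamma - 1, 0)
    return np.maximum(eta, eta_min)

eta = np.full(4, eta_min)  # start at the SNR floor
for gamma in ([2.0, 0.5, 4.0, 1.0], [3.0, 0.2, 5.0, 1.5]):
    eta = decision_directed_snr(eta, np.array(gamma))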
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups

This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.

.. moduleauthor:: <NAME> <<EMAIL>>
"""

#-----------------------------------------------------------------------------
#       Copyright (C) 2013 The Mosaic Development Team
#
#       Distributed under the terms of the BSD License. The full license is in
#       the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------

import numpy as N


class SpaceGroup(object):
    """
    Space group

    All possible space group objects are created in this module. Other
    modules should access these objects through the dictionary
    space_groups rather than create their own space group objects.
    """

    def __init__(self, number, symbol, transformations):
        """
        :param number: the number assigned to the space group by
                       international convention
        :type number: int
        :param symbol: the Hermann-Mauguin space-group symbol as used
                       in PDB and mmCIF files
        :type symbol: str
        :param transformations: a list of space group transformations,
                                each consisting of a tuple of three
                                integer arrays (rot, tn, td), where rot
                                is the rotation matrix and tn/td are the
                                numerator and denominator of the
                                translation vector. The transformations
                                are defined in fractional coordinates.
        :type transformations: list
        """
        self.number = number
        self.symbol = symbol
        self.transformations = transformations
        self.transposed_rotations = N.array([N.transpose(t[0])
                                             for t in transformations])
        self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
                                            for t in transformations]))

    def __repr__(self):
        return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))

    def __len__(self):
        """
        :return: the number of space group transformations
        :rtype: int
        """
        return len(self.transformations)

    def symmetryEquivalentMillerIndices(self, hkl):
        """
        :param hkl: a set of Miller indices
        :type hkl: Scientific.N.array_type
        :return: a tuple (miller_indices, phase_factor) of two arrays
                 of length equal to the number of space group
                 transformations. miller_indices contains the Miller
                 indices of each reflection equivalent by symmetry to the
                 reflection hkl (including hkl itself as the first
                 element). phase_factor contains the phase factors that
                 must be applied to the structure factor of reflection
                 hkl to obtain the structure factor of the symmetry
                 equivalent reflection.
        :rtype: tuple
        """
        hkls = N.dot(self.transposed_rotations, hkl)
        p = N.multiply.reduce(self.phase_factors**hkl, -1)
        return hkls, p


space_groups = {}

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den =
N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(8, 'C 1 m 1', transformations) space_groups[8] = sg space_groups['C 1 m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(9, 'C 1 c 1', transformations) space_groups[9] = sg space_groups['C 1 c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(10, 'P 1 2/m 1', transformations) space_groups[10] = sg space_groups['P 1 2/m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(11, 'P 1 21/m 1', transformations) space_groups[11] = sg space_groups['P 1 21/m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(12, 'C 1 2/m 1', transformations) space_groups[12] = sg space_groups['C 1 2/m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(13, 'P 1 2/c 1', transformations) space_groups[13] = sg space_groups['P 1 2/c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(14, 'P 1 21/c 1', transformations) space_groups[14] = sg space_groups['P 1 21/c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) 
rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(15, 'C 1 2/c 1', transformations) space_groups[15] = sg space_groups['C 1 2/c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(16, 'P 2 2 2', transformations) space_groups[16] = sg space_groups['P 2 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(17, 'P 2 2 21', transformations) space_groups[17] = sg space_groups['P 2 2 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(18, 'P 21 21 2', transformations) space_groups[18] = sg space_groups['P 21 21 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(19, 'P 21 21 21', transformations) space_groups[19] = sg space_groups['P 21 21 21'] = sg transformations 
= [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(20, 'C 2 2 21', transformations) space_groups[20] = sg space_groups['C 2 2 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(21, 'C 2 2 2', transformations) space_groups[21] = sg space_groups['C 2 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(22, 'F 2 2 2', transformations) space_groups[22] = sg space_groups['F 2 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(23, 'I 2 2 2', transformations) space_groups[23] = sg space_groups['I 2 2 2'] = sg 
transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(24, 'I 21 21 21', transformations) space_groups[24] = sg space_groups['I 21 21 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(25, 'P m m 2', transformations) space_groups[25] = sg space_groups['P m m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(26, 'P m c 21', transformations) space_groups[26] = sg space_groups['P m c 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(27, 'P c c 2', transformations) space_groups[27] = sg space_groups['P c c 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(28, 'P m a 2', transformations) space_groups[28] = sg space_groups['P m a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(29, 'P c a 21', transformations) space_groups[29] = sg space_groups['P c a 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(30, 'P n c 2', transformations) space_groups[30] = sg space_groups['P n c 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(31, 'P m n 21', transformations) space_groups[31] = sg space_groups['P m n 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(32, 'P b a 2', transformations) space_groups[32] = sg space_groups['P b a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(33, 'P n a 21', transformations) space_groups[33] = sg space_groups['P n a 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(34, 'P n n 2', transformations) space_groups[34] = sg space_groups['P n n 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(35, 'C m m 2', 
transformations) space_groups[35] = sg space_groups['C m m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(36, 'C m c 21', transformations) space_groups[36] = sg space_groups['C m c 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(37, 'C c c 2', transformations) space_groups[37] = sg space_groups['C c c 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) 
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
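# Illustrative sketch (not part of the space-group table): how one of the
# (rot, trans_num, trans_den) triples above could be applied to a fractional
# coordinate. The rotation matrix multiplies the position, the fractional
# translation trans_num/trans_den is added, and the result is wrapped back
# into the unit cell. The helper name apply_symmetry_op is an assumption made
# for this example; it is not an API used elsewhere in this module.
def apply_symmetry_op(rot, trans_num, trans_den, position):
    '''Return rot*position + trans_num/trans_den, wrapped into [0,1).'''
    #cast the integer arrays to float so the division is exact
    translation = trans_num.astype('d') / trans_den.astype('d')
    #rotate, translate, and wrap each fractional coordinate into [0,1)
    return (N.dot(rot, position) + translation) % 1.0
# Example: the mirror (x, -y, z) of 'P m m m' above maps the fractional
# position (0.1, 0.2, 0.3) to (0.1, 0.8, 0.3).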
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot =
N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(74, 'I m m a', transformations) space_groups[74] = sg space_groups['I m m a'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(75, 'P 4', transformations) space_groups[75] = sg space_groups['P 4'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,3]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(76, 'P 41', transformations) space_groups[76] = sg space_groups['P 41'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(77, 'P 42', transformations) space_groups[77] = sg space_groups['P 42'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,3]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(78, 'P 43', transformations) space_groups[78] = sg space_groups['P 43'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(79, 'I 4', transformations) space_groups[79] = sg space_groups['I 4'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(80, 'I 41', transformations) space_groups[80] = sg space_groups['I 41'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(81, 'P -4', transformations) space_groups[81] = sg space_groups['P -4'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(82, 'I -4', transformations) space_groups[82] = sg space_groups['I -4'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(83, 'P 4/m', transformations) space_groups[83] = sg space_groups['P 4/m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(84, 'P 42/m', transformations) space_groups[84] = sg space_groups['P 42/m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(85, 'P 4/n :2', transformations) space_groups[85] = sg space_groups['P 4/n :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(86, 'P 42/n :2', transformations) space_groups[86] = sg space_groups['P 42/n :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(87, 'I 4/m', transformations) space_groups[87] = sg space_groups['I 4/m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-3,-3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,5,5]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,-1,-1]) trans_den = 
N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(88, 'I 41/a :2', transformations) space_groups[88] = sg space_groups['I 41/a :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(89, 'P 4 2 2', transformations) space_groups[89] = sg space_groups['P 4 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(90, 'P 4 21 2', transformations) space_groups[90] = sg space_groups['P 4 21 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, 
trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,3]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,3]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(91, 'P 41 2 2', transformations) space_groups[91] = sg space_groups['P 41 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(92, 'P 41 21 2', transformations) space_groups[92] = sg space_groups['P 41 21 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(93, 'P 42 2 2', transformations) space_groups[93] = sg space_groups['P 42 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(94, 'P 42 21 2', transformations) space_groups[94] = sg space_groups['P 42 21 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,3]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,3]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(95, 'P 43 2 2', transformations) space_groups[95] = sg space_groups['P 43 2 2'] = sg 
transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(96, 'P 43 21 2', transformations) space_groups[96] = sg space_groups['P 43 21 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = 
N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(97, 'I 4 2 2', transformations) space_groups[97] = sg space_groups['I 4 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(98, 'I 41 2 2', transformations) space_groups[98] = sg space_groups['I 41 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = 
N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(99, 'P 4 m m', transformations) space_groups[99] = sg space_groups['P 4 m m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(100, 'P 4 b m', transformations) space_groups[100] = sg space_groups['P 4 b m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = 
N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(101, 'P 42 c m', transformations) space_groups[101] = sg space_groups['P 42 c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(102, 'P 42 n m', transformations) space_groups[102] = sg space_groups['P 42 n m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(103, 'P 4 c c', transformations) space_groups[103] = sg space_groups['P 4 c c'] = sg 
transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(104, 'P 4 n c', transformations) space_groups[104] = sg space_groups['P 4 n c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(105, 'P 42 m c', transformations) space_groups[105] = sg space_groups['P 42 m c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(106, 'P 42 b c', transformations) space_groups[106] = sg space_groups['P 42 b c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(107, 'I 4 m m', transformations) space_groups[107] = sg space_groups['I 4 m m'] = sg transformations = 
[] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(108, 'I 4 c m', transformations) space_groups[108] = sg space_groups['I 4 c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(109, 'I 41 m d', transformations) space_groups[109] = sg space_groups['I 41 m d'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = 
N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(110, 'I 41 c d', transformations) space_groups[110] = sg space_groups['I 41 c d'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(111, 'P -4 2 m', transformations) space_groups[111] = sg space_groups['P -4 2 m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num 
= N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(112, 'P -4 2 c', transformations) space_groups[112] = sg space_groups['P -4 2 c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(113, 'P -4 21 m', transformations) space_groups[113] = sg space_groups['P -4 21 m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(114, 'P -4 21 c', transformations) space_groups[114] = sg space_groups['P -4 21 c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = 
N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(115, 'P -4 m 2', transformations) space_groups[115] = sg space_groups['P -4 m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(116, 'P -4 c 2', transformations) space_groups[116] = sg space_groups['P -4 c 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) 
trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(117, 'P -4 b 2', transformations) space_groups[117] = sg space_groups['P -4 b 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(118, 'P -4 n 2', transformations) space_groups[118] = sg space_groups['P -4 n 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = 
N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(119, 'I -4 m 2', transformations) space_groups[119] = sg space_groups['I -4 m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(120, 'I -4 c 2', transformations) space_groups[120] = sg space_groups['I -4 c 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(121, 'I -4 2 m', transformations) space_groups[121] = sg space_groups['I -4 2 m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot 
= N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(122, 'I -4 2 d', transformations) space_groups[122] = sg space_groups['I -4 2 d'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(123, 'P 4/m m m', transformations) space_groups[123] = sg space_groups['P 4/m m m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) 
trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(124, 'P 4/m c c', transformations) space_groups[124] = sg space_groups['P 4/m c c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) 
rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(125, 'P 4/n b m :2', transformations) space_groups[125] = sg space_groups['P 4/n b m :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(126, 'P 4/n n c :2', transformations) space_groups[126] = sg space_groups['P 4/n n c :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(127, 'P 4/m b m', transformations) space_groups[127] = sg space_groups['P 4/m b m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) 
trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(128, 'P 4/m n c', transformations) space_groups[128] = sg space_groups['P 4/m n c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(129, 'P 4/n m m :2', transformations) space_groups[129] = sg space_groups['P 4/n m m :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(130, 'P 4/n c c :2', transformations) space_groups[130] = sg 
space_groups['P 4/n c c :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(131, 'P 42/m m c', transformations) space_groups[131] = sg space_groups['P 42/m m c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) 
trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(132, 'P 42/m c m', transformations) space_groups[132] = sg space_groups['P 42/m c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) 
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(153, 'P 32 1 2', transformations) space_groups[153] = sg space_groups['P 32 1 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(154, 'P 32 2 1', transformations) space_groups[154] = sg space_groups['P 32 2 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(155, 'R 3 2 :H', transformations) space_groups[155] = sg space_groups['R 3 2 :H'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(156, 'P 3 m 1', transformations) space_groups[156] = sg space_groups['P 3 m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(157, 'P 3 1 m', transformations) space_groups[157] = sg space_groups['P 3 1 m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = 
(3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(158, 'P 3 c 1', transformations) space_groups[158] = sg space_groups['P 3 c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(159, 'P 3 1 c', transformations) space_groups[159] = sg space_groups['P 3 1 c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = 
_make_group(160, 'R 3 m :H', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
    ([-1,1,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
    ([1,0,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
    ([0,-1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
    ([1,0,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
    ([-1,1,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
    ([1,0,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
    ([0,-1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
])
_make_group(161, 'R 3 c :H', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
    ([-1,1,0,0,1,0,0,0,1], [1,2,7], [3,3,6]),
    ([1,0,0,1,-1,0,0,0,1], [1,2,7], [3,3,6]),
    ([0,-1,0,-1,0,0,0,0,1], [1,2,7], [3,3,6]),
    ([1,0,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
    ([-1,1,0,0,1,0,0,0,1], [2,1,5], [3,3,6]),
    ([1,0,0,1,-1,0,0,0,1], [2,1,5], [3,3,6]),
    ([0,-1,0,-1,0,0,0,0,1], [2,1,5], [3,3,6]),
])
_make_group(162, 'P -3 1 m', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
])
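# The remaining centrosymmetric trigonal groups pair each rotation with its
# inversion image; the c-glide members add a translation of half the c axis
# ([0,0,1] or [0,0,-1] over [1,1,2] below).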
_make_group(163, 'P -3 1 c', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,-1], [1,1,2]),
])
_make_group(164, 'P -3 m 1', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
])
_make_group(165, 'P -3 c 1', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,-1], [1,1,2]),
])
_make_group(166, 'R -3 m :H', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [1,2,2], [3,3,3]),
    ([-1,0,0,-1,1,0,0,0,-1], [1,2,2], [3,3,3]),
    ([0,1,0,1,0,0,0,0,-1], [1,2,2], [3,3,3]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,2,2], [3,3,3]),
    ([0,1,0,-1,1,0,0,0,-1], [1,2,2], [3,3,3]),
    ([1,-1,0,1,0,0,0,0,-1], [1,2,2], [3,3,3]),
    ([-1,1,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
    ([1,0,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
    ([0,-1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
    ([1,0,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [2,1,1], [3,3,3]),
    ([-1,0,0,-1,1,0,0,0,-1], [2,1,1], [3,3,3]),
    ([0,1,0,1,0,0,0,0,-1], [2,1,1], [3,3,3]),
    ([-1,0,0,0,-1,0,0,0,-1], [2,1,1], [3,3,3]),
    ([0,1,0,-1,1,0,0,0,-1], [2,1,1], [3,3,3]),
    ([1,-1,0,1,0,0,0,0,-1], [2,1,1], [3,3,3]),
    ([-1,1,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
    ([1,0,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
    ([0,-1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
])
_make_group(167, 'R -3 c :H', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [1,2,7], [3,3,6]),
    ([-1,0,0,-1,1,0,0,0,-1], [1,2,7], [3,3,6]),
    ([0,1,0,1,0,0,0,0,-1], [1,2,7], [3,3,6]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,2,2], [3,3,3]),
    ([0,1,0,-1,1,0,0,0,-1], [1,2,2], [3,3,3]),
    ([1,-1,0,1,0,0,0,0,-1], [1,2,2], [3,3,3]),
    ([-1,1,0,0,1,0,0,0,1], [1,2,1], [3,3,6]),
    ([1,0,0,1,-1,0,0,0,1], [1,2,1], [3,3,6]),
    ([0,-1,0,-1,0,0,0,0,1], [1,2,1], [3,3,6]),
    ([1,0,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [2,1,5], [3,3,6]),
    ([-1,0,0,-1,1,0,0,0,-1], [2,1,5], [3,3,6]),
    ([0,1,0,1,0,0,0,0,-1], [2,1,5], [3,3,6]),
    ([-1,0,0,0,-1,0,0,0,-1], [2,1,1], [3,3,3]),
    ([0,1,0,-1,1,0,0,0,-1], [2,1,1], [3,3,3]),
    ([1,-1,0,1,0,0,0,0,-1], [2,1,1], [3,3,3]),
    ([-1,1,0,0,1,0,0,0,1], [2,1,-1], [3,3,6]),
    ([1,0,0,1,-1,0,0,0,1], [2,1,-1], [3,3,6]),
    ([0,-1,0,-1,0,0,0,0,1], [2,1,-1], [3,3,6]),
])
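# The sixfold groups follow; the screw axes advance successive rotations by
# fractions of the c axis (multiples of c/6 for P 61 and P 65, c/3 for P 62
# and P 64, c/2 for P 63), visible in the trans_num/trans_den pairs below.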
_make_group(168, 'P 6', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
])
_make_group(169, 'P 61', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,6]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,5], [1,1,6]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
])
_make_group(170, 'P 65', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,5], [1,1,6]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,6]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
])
_make_group(171, 'P 62', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,2], [1,1,3]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
])
_make_group(172, 'P 64', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,3]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
])
_make_group(173, 'P 63', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
])
_make_group(174, 'P -6', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
])
_make_group(175, 'P 6/m', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
])
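# P 63/m and the 622 rotation groups follow; in the 622 groups the operations
# with -1 in the z entry of the rotation are the twofold axes perpendicular
# to c.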
_make_group(176, 'P 63/m', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,-1], [1,1,2]),
])
_make_group(177, 'P 6 2 2', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
])
_make_group(178, 'P 61 2 2', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,6]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,5], [1,1,6]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,2], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,3]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,5], [1,1,6]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,1], [1,1,6]),
])
_make_group(179, 'P 65 2 2', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,5], [1,1,6]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,6]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,1], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,2], [1,1,3]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,6]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,5], [1,1,6]),
])
_make_group(180, 'P 62 2 2', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,2], [1,1,3]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,1], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,2], [1,1,3]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,2], [1,1,3]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,1], [1,1,3]),
])
_make_group(181, 'P 64 2 2', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,3]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,2], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,3]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,3]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,2], [1,1,3]),
])
_make_group(182, 'P 63 2 2', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,1], [1,1,2]),
])
_make_group(183, 'P 6 m m', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
])
_make_group(184, 'P 6 c c', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
])
_make_group(185, 'P 63 c m', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
])
_make_group(186, 'P 63 m c', [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
])
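# The -6 2 family closes this block: P -6 m 2, P -6 c 2, P -6 2 m, P -6 2 c.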
transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(187, 'P -6 m 2', transformations) space_groups[187] = sg space_groups['P -6 m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(188, 'P -6 c 2', transformations) space_groups[188] = sg space_groups['P -6 c 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = 
N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(189, 'P -6 2 m', transformations) space_groups[189] = sg space_groups['P -6 2 m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(190, 'P -6 2 c', transformations) space_groups[190] = sg space_groups['P -6 2 c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) 
transformations = []
_append_ops(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
])
sg = SpaceGroup(191, 'P 6/m m m', transformations)
space_groups[191] = sg
space_groups['P 6/m m m'] = sg

transformations = []
_append_ops(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,-1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,-1], [1,1,2]),
])
sg = SpaceGroup(192, 'P 6/m c c', transformations)
space_groups[192] = sg
space_groups['P 6/m c c'] = sg

transformations = []
_append_ops(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,-1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
])
sg = SpaceGroup(193, 'P 63/m c m', transformations)
space_groups[193] = sg
space_groups['P 63/m c m'] = sg
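
# Rotation sets shared by the cubic space groups below: the twelve proper
# rotations of point group 2 3, and the twenty-four rotations of m -3
# (the same twelve followed by their products with the inversion).
# These lists reproduce the rotation order of the generated tables.
_rotations_23 = [
    [1,0,0,0,1,0,0,0,1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
]
_rotations_m3 = _rotations_23 + [
    [-1,0,0,0,-1,0,0,0,-1],
    [0,0,-1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,-1,-1,0,0],
    [0,1,0,0,0,1,-1,0,0],
    [0,0,-1,1,0,0,0,1,0],
    [0,1,0,0,0,-1,1,0,0],
    [0,0,1,1,0,0,0,-1,0],
    [0,0,1,-1,0,0,0,1,0],
    [0,-1,0,0,0,1,1,0,0],
    [-1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
]
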
transformations = []
_append_ops(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,-1], [1,1,2]),
])
sg = SpaceGroup(194, 'P 63/m m c', transformations)
space_groups[194] = sg
space_groups['P 63/m m c'] = sg

transformations = []
_append_ops(transformations, [(rot, [0,0,0], [1,1,1]) for rot in _rotations_23])
sg = SpaceGroup(195, 'P 2 3', transformations)
space_groups[195] = sg
space_groups['P 2 3'] = sg

transformations = []
for num, den in [([0,0,0], [1,1,1]), ([0,1,1], [1,2,2]),
                 ([1,0,1], [2,1,2]), ([1,1,0], [2,2,1])]:
    _append_ops(transformations, [(rot, num, den) for rot in _rotations_23])
sg = SpaceGroup(196, 'F 2 3', transformations)
space_groups[196] = sg
space_groups['F 2 3'] = sg
transformations = []
for num, den in [([0,0,0], [1,1,1]), ([1,1,1], [2,2,2])]:
    _append_ops(transformations, [(rot, num, den) for rot in _rotations_23])
sg = SpaceGroup(197, 'I 2 3', transformations)
space_groups[197] = sg
space_groups['I 2 3'] = sg

transformations = []
_append_ops(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,0,1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0], [1,0,1], [2,1,2]),
    ([0,0,1,-1,0,0,0,-1,0], [1,1,0], [2,2,1]),
    ([0,-1,0,0,0,1,-1,0,0], [0,1,1], [1,2,2]),
    ([0,0,-1,-1,0,0,0,1,0], [1,0,1], [2,1,2]),
    ([0,0,-1,1,0,0,0,-1,0], [0,1,1], [1,2,2]),
    ([0,1,0,0,0,-1,-1,0,0], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
])
sg = SpaceGroup(198, 'P 21 3', transformations)
space_groups[198] = sg
space_groups['P 21 3'] = sg

transformations = []
_append_ops(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,0,1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0], [0,1,0], [1,2,1]),
    ([0,0,1,-1,0,0,0,-1,0], [0,0,1], [1,1,2]),
    ([0,-1,0,0,0,1,-1,0,0], [1,0,0], [2,1,1]),
    ([0,0,-1,-1,0,0,0,1,0], [0,1,0], [1,2,1]),
    ([0,0,-1,1,0,0,0,-1,0], [1,0,0], [2,1,1]),
    ([0,1,0,0,0,-1,-1,0,0], [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,0,1,1,0,0,0,1,0], [1,1,1], [2,2,2]),
    ([0,1,0,0,0,1,1,0,0], [1,1,1], [2,2,2]),
    ([0,-1,0,0,0,-1,1,0,0], [1,1,1], [2,1,2]),
    ([0,0,1,-1,0,0,0,-1,0], [1,1,1], [2,2,1]),
    ([0,-1,0,0,0,1,-1,0,0], [1,1,1], [1,2,2]),
    ([0,0,-1,-1,0,0,0,1,0], [1,1,1], [2,1,2]),
    ([0,0,-1,1,0,0,0,-1,0], [1,1,1], [1,2,2]),
    ([0,1,0,0,0,-1,-1,0,0], [1,1,1], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,1,2]),
])
sg = SpaceGroup(199, 'I 21 3', transformations)
space_groups[199] = sg
space_groups['I 21 3'] = sg
transformations = []
_append_ops(transformations, [(rot, [0,0,0], [1,1,1]) for rot in _rotations_m3])
sg = SpaceGroup(200, 'P m -3', transformations)
space_groups[200] = sg
space_groups['P m -3'] = sg

transformations = []
_append_ops(transformations, [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,0,1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0], [1,1,0], [2,2,1]),
    ([0,0,1,-1,0,0,0,-1,0], [0,1,1], [1,2,2]),
    ([0,-1,0,0,0,1,-1,0,0], [1,0,1], [2,1,2]),
    ([0,0,-1,-1,0,0,0,1,0], [1,1,0], [2,2,1]),
    ([0,0,-1,1,0,0,0,-1,0], [1,0,1], [2,1,2]),
    ([0,1,0,0,0,-1,-1,0,0], [0,1,1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,0,-1,-1,0,0,0,-1,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,-1,0,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,-1,0,0], [-1,-1,0], [2,2,1]),
    ([0,0,-1,1,0,0,0,1,0], [0,-1,-1], [1,2,2]),
    ([0,1,0,0,0,-1,1,0,0], [-1,0,-1], [2,1,2]),
    ([0,0,1,1,0,0,0,-1,0], [-1,-1,0], [2,2,1]),
    ([0,0,1,-1,0,0,0,1,0], [-1,0,-1], [2,1,2]),
    ([0,-1,0,0,0,1,1,0,0], [0,-1,-1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [0,-1,-1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
])
sg = SpaceGroup(201, 'P n -3 :2', transformations)
space_groups[201] = sg
space_groups['P n -3 :2'] = sg
transformations = []
for num, den in [([0,0,0], [1,1,1]), ([0,1,1], [1,2,2]), ([1,0,1], [2,1,2])]:
    _append_ops(transformations, [(rot, num, den) for rot in _rotations_m3])
_append_ops(transformations, [(rot, [1,1,0], [2,2,1]) for rot in _rotations_m3[:21]])
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot,
trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(202, 'F m -3', transformations) space_groups[202] = sg space_groups['F m -3'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) 
trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([4,4,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,3,3]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([4,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([4,4,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([4,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,3,3]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,3,3]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([4,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([4,4,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([-1,1,1]) trans_den = N.array([4,4,2]) transformations.append((rot, 
trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([-1,1,1]) trans_den = N.array([4,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([-1,1,1]) trans_den = N.array([4,4,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([-1,1,1]) trans_den = N.array([4,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,1,1]) trans_den = N.array([4,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,1,1]) trans_den = N.array([4,4,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([3,1,1]) trans_den = N.array([4,4,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([2,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([3,0,3]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([3,1,1]) trans_den = N.array([4,4,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([3,0,3]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([2,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([2,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([3,0,3]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,1,1]) trans_den = N.array([4,4,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,-1,0]) rot.shape = (3, 
3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,-1,1]) trans_den = N.array([4,4,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,-1,1]) trans_den = N.array([2,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,-1,1]) trans_den = N.array([4,4,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,-1,1]) trans_den = N.array([2,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,-1,1]) trans_den = N.array([2,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([4,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,-1,1]) trans_den = N.array([4,4,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([3,3,0]) trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([2,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([3,1,1]) trans_den = N.array([4,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([3,3,0]) trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([3,1,1]) trans_den = N.array([4,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([2,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,3,1]) trans_den = N.array([2,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([3,1,1]) trans_den = N.array([4,2,4]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,3,0]) trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([2,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([4,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([4,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([2,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([2,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([4,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([4,4,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(203, 'F d -3 :2', transformations) space_groups[203] = sg space_groups['F d -3 :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) 
trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot =
N.array([0,0,-1,-1,0,0,0,-1,0])
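# Illustration (added for this write-up; not part of the original module):
# how one of the (rot, trans_num, trans_den) tuples can be applied to a point
# in fractional coordinates. This assumes the conventional crystallographic
# action x' = R.x + t (mod 1); the exact convention used by SpaceGroup is
# defined elsewhere in the module, so treat this as a sketch.

def apply_symmetry_op(rot, trans_num, trans_den, point):
    translation = trans_num / trans_den  # element-wise fractional translation
    return (rot.dot(point) + translation) % 1.0

example_rot = N.array([0, 0, 1, 1, 0, 0, 0, 1, 0])
example_rot.shape = (3, 3)
print(apply_symmetry_op(example_rot, N.array([0, 1, 1]), N.array([1, 2, 2]),
                        N.array([0.1, 0.2, 0.3])))  # -> [0.3 0.6 0.7]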
""" ======================================= Clustering text documents using k-means ======================================= This is an example showing how the scikit-learn API can be used to cluster documents by topics using a `Bag of Words approach <https://en.wikipedia.org/wiki/Bag-of-words_model>`_. Two algorithms are demoed: :class:`~sklearn.cluster.KMeans` and its more scalable variant, :class:`~sklearn.cluster.MiniBatchKMeans`. Additionally, latent semantic analysis is used to reduce dimensionality and discover latent patterns in the data. This example uses two different text vectorizers: a :class:`~sklearn.feature_extraction.text.TfidfVectorizer` and a :class:`~sklearn.feature_extraction.text.HashingVectorizer`. See the example notebook :ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py` for more information on vectorizers and a comparison of their processing times. For document analysis via a supervised learning approach, see the example script :ref:`sphx_glr_auto_examples_text_plot_document_classification_20newsgroups.py`. """ # Author: <NAME> <<EMAIL>> # <NAME> # <NAME> <<EMAIL>> # <NAME> <<EMAIL>> # License: BSD 3 clause # %% # Loading text data # ================= # # We load data from :ref:`20newsgroups_dataset`, which comprises around 18,000 # newsgroups posts on 20 topics. For illustrative purposes and to reduce the # computational cost, we select a subset of 4 topics only accounting for around # 3,400 documents. See the example # :ref:`sphx_glr_auto_examples_text_plot_document_classification_20newsgroups.py` # to gain intuition on the overlap of such topics. # # Notice that, by default, the text samples contain some message metadata such # as `"headers"`, `"footers"` (signatures) and `"quotes"` to other posts. We use # the `remove` parameter from :func:`~sklearn.datasets.fetch_20newsgroups` to # strip those features and have a more sensible clustering problem. import numpy as np from sklearn.datasets import fetch_20newsgroups categories = [ "alt.atheism", "talk.religion.misc", "comp.graphics", "sci.space", ] dataset = fetch_20newsgroups( remove=("headers", "footers", "quotes"), subset="all", categories=categories, shuffle=True, random_state=42, ) labels = dataset.target unique_labels, category_sizes = np.unique(labels, return_counts=True) true_k = unique_labels.shape[0] print(f"{len(dataset.data)} documents - {true_k} categories") # %% # Quantifying the quality of clustering results # ============================================= # # In this section we define a function to score different clustering pipelines # using several metrics. # # Clustering algorithms are fundamentally unsupervised learning methods. # However, since we happen to have class labels for this specific dataset, it is # possible to use evaluation metrics that leverage this "supervised" ground # truth information to quantify the quality of the resulting clusters. Examples # of such metrics are the following: # # - homogeneity, which quantifies how much clusters contain only members of a # single class; # # - completeness, which quantifies how much members of a given class are # assigned to the same clusters; # # - V-measure, the harmonic mean of completeness and homogeneity; # # - Rand-Index, which measures how frequently pairs of data points are grouped # consistently according to the result of the clustering algorithm and the # ground truth class assignment; # # - Adjusted Rand-Index, a chance-adjusted Rand-Index such that random cluster # assignment have an ARI of 0.0 in expectation. 
# %%
# If the ground truth labels are not known, evaluation can only be performed
# using the model results themselves. In that case, the Silhouette Coefficient
# comes in handy.
#
# For further reference, see :ref:`clustering_evaluation`.

from collections import defaultdict
from sklearn import metrics
from time import time

evaluations = []
evaluations_std = []


def fit_and_evaluate(km, X, name=None, n_runs=5):
    name = km.__class__.__name__ if name is None else name

    train_times = []
    scores = defaultdict(list)
    for seed in range(n_runs):
        km.set_params(random_state=seed)
        t0 = time()
        km.fit(X)
        train_times.append(time() - t0)
        scores["Homogeneity"].append(metrics.homogeneity_score(labels, km.labels_))
        scores["Completeness"].append(metrics.completeness_score(labels, km.labels_))
        scores["V-measure"].append(metrics.v_measure_score(labels, km.labels_))
        scores["Adjusted Rand-Index"].append(
            metrics.adjusted_rand_score(labels, km.labels_)
        )
        scores["Silhouette Coefficient"].append(
            metrics.silhouette_score(X, km.labels_, sample_size=2000)
        )
    train_times = np.asarray(train_times)

    print(f"clustering done in {train_times.mean():.2f} ± {train_times.std():.2f} s")
    evaluation = {
        "estimator": name,
        "train_time": train_times.mean(),
    }
    evaluation_std = {
        "estimator": name,
        "train_time": train_times.std(),
    }
    for score_name, score_values in scores.items():
        mean_score, std_score = np.mean(score_values), np.std(score_values)
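# %%
# The source breaks off inside ``fit_and_evaluate``; the original example
# presumably continues by storing these aggregated scores in ``evaluation``
# and ``evaluation_std``. As an illustrative sketch only (the vectorizer and
# KMeans settings below are assumptions, not necessarily the original
# example's parameters), the helper would be driven along these lines:

from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfVectorizer

vectorizer = TfidfVectorizer(max_df=0.5, min_df=5, stop_words="english")
X_tfidf = vectorizer.fit_transform(dataset.data)

kmeans = KMeans(n_clusters=true_k, max_iter=100, n_init=1)
fit_and_evaluate(kmeans, X_tfidf, name="KMeans on tf-idf vectors")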